diff --git "a/2158.jsonl" "b/2158.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2158.jsonl"
@@ -0,0 +1,661 @@
+{"seq_id":"344061298","text":"import RegisterUIConnect\n\n__author__ = 'win.thitiwat'\n\nimport sys\n\nfrom PySide.QtGui import *\nfrom PySide.QtUiTools import *\nimport RegisterUIConnect\nimport MainWindow\n\n\nimport Resources\n\n\nclass Login(QWidget):\n def __init__(self, parent = None):\n QWidget.__init__(self)\n # layout = QVBoxLayout()\n loader = QUiLoader()\n self.form = loader.load(\"UI/Login.ui\", self)\n # layout.addWidget(self.form)\n # self.setLayout(layout)\n\n self.backgroundLogin = self.form.findChild(QLabel, \"backgroundLogin\")\n self.symbol = self.form.findChild(QLabel, \"symbol\")\n self.create_acc = self.form.findChild(QPushButton, \"createAcc\")\n self.login = self.form.findChild(QPushButton, \"login\")\n self.notify_incorrect = self.form.findChild(QLabel, \"notify_incorrect\")\n\n self.backgroundLogin.setPixmap(QPixmap(\"images/background.png\"))\n self.symbol.setPixmap(QPixmap(\"images/se_kmitl.png\"))\n\n self.create_acc.clicked.connect(self.goRegister)\n self.login.clicked.connect(self.check_Login)\n self.backgroundLogin.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n self.setFixedSize(self.form.width(), self.form.height())\n\n def goRegister(self):\n self.hide()\n self.registerPage = RegisterUIConnect.RegisterUIConnect()\n self.registerPage.show()\n pass\n\n def check_Login(self):\n return self.goMainPage()\n\n def goMainPage(self):\n self.hide()\n self.mainWin = MainWindow.MainWindow()\n self.mainWin.show()\n\ndef main():\n app = QApplication(sys.argv)\n sample = Login()\n sample.show()\n return app.exec_()\n\nif __name__ == \"__main__\":\n sys.exit(main())\n\n","sub_path":"Project 170515/Login.py","file_name":"Login.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"130390105","text":"N=int(raw_input())\na=[]\nfor i in range(2,N):\n c=0\n for j in range(2,i):\n\t if i%j==0:\n c=1\n if c==0:\n\t a.append(i)\nif a==[]:\n print(\"0\")\nelse:\n print(\" \".join(str(i) for i in a))\n","sub_path":"all primes lessthen n.py","file_name":"all primes lessthen n.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"642748294","text":"from graphics import *\n\n\ndef main():\n\twin = GraphWin('Periodic table of elements by Sebastian Gorski', 1000, 600)\n\twin.setBackground(\"black\")\n\th = Element(50, 50, 'H', 'Hydrogen', 'blue', win)\n\the = Element(900, 50, 'He', 'Helium', 'orange', win)\n\tli = Element(50, 90, 'Li', 'Lithium', 'blue', win)\n\twin.getMouse()\n\twin.close()\n\nclass Element:\n\tdef __init__(self, x, y, symbol, name, color, window):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.symbol = symbol\n\t\tself.name = name\n\t\tself.window = window\n\t\tself.color = color\n\t\trect = Rectangle(Point(x, y), Point(x+45, y+45))\n\t\trect.setFill(color)\n\t\trect.setOutline(\"white\")\n\t\trect.draw(window)\n\t\ttext = Text(Point(x+20, y+22), symbol)\n\t\ttext.setFill('red')\n\t\ttext.setStyle('bold')\n\t\ttext.setSize(15)\n\t\ttext.draw(window)\n\t\t\n\t\t\n\nmain()\n\n","sub_path":"table/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"196722371","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom bs4 import BeautifulSoup\r\nfrom download import request\r\nfrom pymongo import MongoClient\r\nimport datetime\r\nimport time\r\nimport os\r\nimport re\r\n\r\nclass duowan(object):\r\n ''''''\r\n\r\n def __init__(self):\r\n\r\n self.index = 'http://tu.duowan.com/tu'\r\n self.lxh_index = ''\r\n client = MongoClient() ## MongDB client\r\n db = client['zzx'] ## choose a db\r\n self.duowan_collection = db['duowan_lxh'] ##choose a collection in db\r\n self.path = 'F:\\\\duowan'\r\n\r\n\r\n def get_all_href(self):\r\n index = request.get(self.index, 3)\r\n Soup = BeautifulSoup(index.text, 'lxml')\r\n li_tmp = Soup.find('div', id='subnav_pk').find_all('li')\r\n for li in li_tmp:\r\n if li.get_text()=='å·ç¬è¯':# å·ç¬è¯代表'冷笑话几个字'\r\n self.lxh_index = li.a['href']\r\n break\r\n lxh = request.get(self.lxh_index, 3)\r\n #print(lxh.text)\r\n Soup = BeautifulSoup(lxh.text, 'lxml')\r\n page_href_list = Soup.find_all('li', class_='box')\r\n page_num = 1\r\n for page in page_href_list:\r\n if page['class'] != ['box']:\r\n print(page['class'])\r\n else:\r\n page_title = page.find('em').get_text()\r\n page_link = page.find('em').find('a')['href']\r\n #print(page_title, page_link)\r\n self.get_img(page_title, page_link, page_num)\r\n page_num += 1\r\n\r\n\r\n def get_img(self, page_title, page_link, page_num):\r\n page_link = page_link.replace('gallery', 'scroll')\r\n page_img = request.get(page_link, 3)\r\n Soup = BeautifulSoup(page_img.text, 'lxml')\r\n img_div_list = Soup.find_all('div', class_='pic-box')\r\n img_num = 1\r\n for img_div in img_div_list:\r\n img_title = img_div.find('p').get_text()\r\n #input(img_div)\r\n if img_title != '䏿é¢å':#'䏿é¢å'为'下集预告'\r\n img_src = img_div.find('span')['data-img']\r\n if not self.duowan_collection.find_one({'img_src': img_src}):\r\n self.save_img(img_title, img_src, page_num, img_num)\r\n post = {\r\n 'page_title': page_title,\r\n 'page_link': page_link,\r\n 'img_num': str(page_num)+'.'+str(img_num),\r\n 'img_title': img_title,\r\n 'img_src': img_src\r\n }\r\n print(img_title)\r\n self.duowan_collection.save(post)\r\n print('Success save img data')\r\n img_num += 1\r\n else:\r\n print('该页面已保存')\r\n else:\r\n break\r\n #return img_title, img_src\r\n\r\n def save_img(self, img_title, img_src, page_num, img_num):\r\n os.chdir(os.path.join(self.path))\r\n img = request.get(img_src, 3)\r\n name = str(page_num)+'.'+str(img_num)+' ' + img_title + img_src[-8:]\r\n if re.compile('
502 Bad Gateway
').match(img.text):\r\n print(re.compile('502 Bad Gateway
').match(img.text))\r\n time.sleep(10)\r\n return self.save_img(img_src, img_title)\r\n else:\r\n f = open(name, 'ab')\r\n f.write(img.content)\r\n f.close()\r\n print('Success save ', name, '\\n')\r\n\r\nduowan = duowan()\r\nif __name__ == '__main__':\r\n duowan.get_all_href()","sub_path":"duowan/duowan.py","file_name":"duowan.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"418261981","text":"import os\nfrom data import *\n\n\ndef getch():\n import sys, tty, termios\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\n\ndef print_characters(choose):\n print('\\nYou can choose your class:')\n print('==========================')\n for each in choose:\n print('(' + str(choose.index(each) + 1) + ') ' + each)\n\n\ndef choose_character():\n get_player_name()\n choose = list(CHARACTERS)\n user_input = None\n while user_input not in ['1', '2', '3']:\n print_characters(choose)\n user_input = getch()\n character.append(choose[int(user_input) - 1])\n repository[CHARACTERS[character[0]]] += 1\n set_player_sign()\n\n\ndef get_player_name():\n print('What is your name sheepie?')\n print('==========================')\n name.append(input())\n name.pop(0)\n\n \nif __name__ == '__main__':\n choose_character()\n\n ","sub_path":"create_character.py","file_name":"create_character.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"114362440","text":"import torch\nimport warnings\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom torchtext.data import Example, Dataset, Field, BucketIterator\n\n\nclass TextPreprocessor(BaseEstimator, TransformerMixin):\n def __init__(self, fields, min_freq=1):\n self.fields = fields\n self.min_freq = min_freq\n\n def fit(self, X, y=None):\n dataset = self.transform(X, y)\n for name, field in dataset.fields.items():\n if field.use_vocab:\n field.build_vocab(dataset, min_freq=self.min_freq)\n return self\n\n def transform(self, X, y=None):\n with warnings.catch_warnings(record=True):\n fields = [(name, field) for (name, field) in self.fields\n if name in X]\n proc = [X[col].apply(f.preprocess) for col, f in fields]\n examples = [Example.fromlist(f, fields) for f in zip(*proc)]\n return Dataset(examples, fields)\n\n\ndef build_preprocessor(min_freq=5):\n with warnings.catch_warnings(record=True):\n text_field = Field(\n tokenize=None,\n init_token=None,\n pad_token=\"\",\n unk_token=\"\",\n eos_token=None,\n batch_first=True,\n # pad_first=True,\n )\n fields = [\n ('observed', text_field),\n ('gold', text_field),\n ]\n return TextPreprocessor(fields, min_freq=min_freq)\n\n\nclass SequenceIterator(BucketIterator):\n def __init__(self, *args, **kwargs):\n with warnings.catch_warnings(record=True):\n super().__init__(*args, **kwargs)\n\n def __iter__(self):\n with warnings.catch_warnings(record=True):\n for batch in super().__iter__():\n target = torch.empty(0)\n if 'gold' in batch.fields:\n target = batch.gold.view(-1)\n yield batch.observed, target\n","sub_path":"graphsage/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"36872265","text":"\"\"\"Узнаем что, такое вложенность в генераторах, а так же как ее использовать\"\"\"\n\nres= [x + y for x in [1,2,3] for y in [4,5,6]]\nprint(res)\n\nprint('#'*100)\n\n\n\"\"\"Анологичный пример с помощью цикла for\"\"\"\n\nres = []\n\nfor x in [1,2,3]:\n for y in [4,5,6]:\n res.append(x+y)\n\nprint(res)\n","sub_path":"IteratorsAndGenerators/NestedGenerator.py","file_name":"NestedGenerator.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"336493473","text":"\"\"\"Mangapark downloader.\n\nExample:\n Download chapter 20 for the manga Ajin Miura Tsuina\n\n $ python3 main.py -m http://mangapark.me/manga/ajin-miura-tsuina/ -chapter 20\n\"\"\"\nimport re\nimport os\nimport sys\nimport argparse\nimport urllib.request\nimport img2pdf\nfrom bs4 import BeautifulSoup\nfrom PIL import Image\nfrom resizeimage import resizeimage\n\ndef parse_url_to_manga_info(url: str) -> str:\n \"\"\"Extracts the title of a manga from an URL.\n \"\"\"\n url = re.sub('http://', '', url)\n url = re.sub('mangapark.me/manga/', '', url)\n title = url.split(\"/\")[0]\n return title\n\n\ndef parse_url_to_chapter_info(url: str) -> (str, str, str, str):\n \"\"\"Extract manga info from the URL.\n\n Returns:\n 4-tuple containing the mangas title, version, chapter and url\n \"\"\"\n url = re.sub(\"http://\", '', url)\n url = re.sub(\"mangapark.me\", '', url)\n url = re.sub(\"/manga/\", '', url)\n\n # compensate for mangapark's different url formatting schemes\n title, version, chapter = None, None, None\n if len(url.split(\"/\")) == 3:\n title, version, chapter = url.split(\"/\")\n elif len(url.split(\"/\")):\n title, _, version, chapter = url.split(\"/\")\n else:\n raise ValueError(\"Couldn't parse URL\")\n\n return title, version, chapter, url\n\n\ndef ensure_directory_exist(directory: str) -> None:\n \"\"\"Creates a directory, if it doesn't exist yet.\"\"\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef input_images(path: str) -> bytes:\n \"\"\"Reads an image from the specified source.\n\n Args:\n path: The path of the image.\n\n Returns:\n The raw image data.\n \"\"\"\n if path == '-':\n rawdata = sys.stdin.buffer.read()\n else:\n try:\n with open(path, \"rb\") as im:\n rawdata = im.read()\n except IsADirectoryError:\n raise argparse.ArgumentTypeError(\n \"\\\"%s\\\" is a directory\" % path)\n except PermissionError:\n raise argparse.ArgumentTypeError(\n \"\\\"%s\\\" permission denied\" % path)\n except FileNotFoundError:\n raise argparse.ArgumentTypeError(\n \"\\\"%s\\\" does not exist\" % path)\n if len(rawdata) == 0:\n raise argparse.ArgumentTypeError(\"\\\"%s\\\" is empty\" % path)\n return rawdata\n\n\ndef convert_to_pdf(os_dir: str, chapter: str, filenames: list) -> None:\n \"\"\"Converts images to a PDF.\n\n Args:\n os_dir: Directory to save PDF in.\n chapter: Title of the PDF.\n filenames: Images to construct the PDF from.\n \"\"\"\n print(\"Converting chapter %s to pdf...\" % chapter)\n\n pdf_bytes = None\n try:\n pdf_bytes = img2pdf.convert(*[input_images(path) for path in filenames])\n except img2pdf.PdfTooLargeError:\n # Sometimes the images are registered as having a dpi of 1.\n # Because PDF has a limitation of 200 iches max per side, a\n # special layout_fun has to be used, as to prevent an exception.\n\n # default manga size 5\"x7\"\n layout_fun = img2pdf.get_layout_fun(pagesize=(None, img2pdf.in_to_pt(7)),\n imgsize=None, border=None,\n fit=img2pdf.FitMode.into,\n auto_orient=False)\n pdf_bytes = img2pdf.convert(*[input_images(path) for path in filenames],\n layout_fun=layout_fun)\n\n file = open(\"%s/%s.pdf\" % (os_dir, chapter), \"wb\")\n file.write(pdf_bytes)\n print(\"Conversion completed!\")\n\n\ndef download_chapter(url: str, height: int) -> None:\n \"\"\"Downloads the chapter specified by the url.\"\"\"\n title, _, chapter, os_dir = parse_url_to_chapter_info(url)\n ensure_directory_exist(os_dir)\n try:\n page = urllib.request.urlopen(url)\n except ValueError:\n page = 
urllib.request.urlopen(\"http://mangapark.me\" + url)\n\n soup = BeautifulSoup(page, \"html.parser\")\n imgs_wrappers = soup.find_all(\"a\", {\"class\": \"img-link\"})\n filenames = []\n for i in imgs_wrappers:\n img_url = parse_url(i.img['src'])\n filename = img_url.split('/')[-1]\n print(\"Downloading %s %s %s...\" % (title, chapter, filename))\n dir_filename = os_dir + \"/\" + os.path.basename(img_url)\n urllib.request.urlretrieve(img_url, dir_filename)\n new_dir_filename = resize(dir_filename, height)\n filenames.append(new_dir_filename)\n\n convert_to_pdf(os_dir, chapter, filenames)\n\ndef parse_url(url: str) -> str:\n return re.sub(r'\\?.*', '', url)\n\ndef resize(filename: str, height: int) -> str:\n if height == None:\n return filename\n print(\"Resizing %s to %spx height...\" % (filename, height))\n with open(filename, 'r+b') as f:\n with Image.open(f) as image:\n cover = resizeimage.resize_height(image, height)\n new_filename = filename + '.res';\n cover.save(new_filename, image.format)\n return new_filename\n\ndef download_manga(url: str, chapter: int=None, min_max: (int, int)=None, height: int=None) -> None:\n \"\"\"Downloads chapters of a manga.\n\n Args:\n url: The URL of the manga.\n chapter: The chapter to download. If no chapter is specified, the\n min_max parameter will be used.\n min_max: The range of chapters to download.\n height: The height to witch resize all images (keeping the aspect ratio)\n \"\"\"\n page = urllib.request.urlopen(url)\n soup = BeautifulSoup(page, \"html.parser\")\n\n streams = soup.find_all(\"div\", {\"class\": \"stream\"})\n stream_lens = []\n for stream in streams:\n chapters = stream.find_all(\"li\")\n stream_lens += [len(chapters)]\n\n max_stream_len = max(stream_lens)\n max_idx = stream_lens.index(max_stream_len)\n best_stream = streams[max_idx]\n\n chapters = best_stream.find_all(\"li\")\n for c in chapters[::-1]:\n chapter_url = c.em.find_all(\"a\")[-1]['href']\n chapter_no = float(parse_url_to_chapter_info(chapter_url)[2][1: ])\n if chapter and chapter_no == chapter:\n download_chapter(chapter_url, height)\n break\n if min_max and chapter_no >= min_max[0] and chapter_no <= min_max[1]:\n download_chapter(chapter_url, height)\n continue\n\n\ndef main():\n \"\"\"Downloads manga specified in command line arguments.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-m', '--manga-url')\n parser.add_argument('-s', '--size', '--height', type=int, help='Height to resize images to (it will keet the aspect ratio)')\n parser.add_argument('-c', '--chapter')\n parser.add_argument('-cs', '--chapters', nargs=2)\n\n args = parser.parse_args()\n print(args)\n if args.manga_url is None:\n print(\"Please specify the URL of the manga on mangapark.me\")\n return\n elif args.chapters != None:\n assert isinstance(args.chapters, list)\n download_manga(args.manga_url, min_max=[float(x) for x in args.chapters], height=args.size)\n elif args.chapter != None:\n download_manga(args.manga_url, chapter=int(args.chapter), height=args.size)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"197215627","text":"import random\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef compute_affine_xform(matches,features1,features2,image1,image2):\n \"\"\"\n Computer Vision 600.461/661 Assignment 2\n Args:\n matches (list of tuples): list of index pairs of possible matches. For example, if the 4-th feature in feature_coords1 and the 0-th feature\n in feature_coords2 are determined to be matches, the list should contain (4,0).\n features1 (list of tuples) : list of feature coordinates corresponding to image1\n features2 (list of tuples) : list of feature coordinates corresponding to image2\n image1 (numpy.ndarray): The input image corresponding to features_coords1\n image2 (numpy.ndarray): The input image corresponding to features_coords2\n Returns:\n affine_xform (numpy.ndarray): a 3x3 Affine transformation matrix between the two images, computed using the matches.\n \"\"\"\n \n affine_xform = np.zeros((3,3))\n rows1, columns1 = [item[0] for item in features1], [item[1] for item in features1]\n rows2, columns2 = [item[0] for item in features2], [item[1] for item in features2]\n if len(matches) < 3:\n return affine_xform\n\n num_iter = 200\n best_form = np.zeros((6,1))\n best_inlier = -1\n\n for i in range(0, num_iter):\n randmatch = random.sample(matches, 3)\n x1 = columns1[randmatch[0][0]]\n y1 = rows1[randmatch[0][0]]\n x2 = columns1[randmatch[1][0]]\n y2 = rows1[randmatch[1][0]]\n x3 = columns1[randmatch[2][0]]\n y3 = columns1[randmatch[2][0]]\n\n xp1 = columns2[randmatch[0][1]]\n yp1 = rows2[randmatch[0][1]]\n xp2 = columns2[randmatch[1][1]]\n yp2 = rows2[randmatch[1][1]]\n xp3 = columns2[randmatch[2][1]]\n yp3 = rows2[randmatch[2][1]]\n\n \n A = np.array([[x1, y1, 1, 0, 0, 0], [0, 0, 0, x1, y1, 1], [x2, y2, 1, 0, 0, 0], [0, 0, 0, x2, y2, 1], [x3, y3, 1, 0, 0, 0], [0, 0, 0, x3, y3, 1]])\n\n if np.linalg.cond(A) > 1e+15:\n num_iter += 1\n continue\n \n b = np.array([xp1, yp1, xp2, yp2, xp3,yp3])\n \n #solve least squares\n #temp_affine = np.linalg.inv(A.T*A)*A.T*b\n temp_affine = np.linalg.solve(A,b)\n \n num_inliers = 0\n\n for j in range(0, len(matches)):\n diff_x = (np.dot(temp_affine[0], columns1[matches[j][0]]) + np.dot(temp_affine[1], rows1[matches[j][0]]) + temp_affine[2]) - columns2[matches[j][1]]\n diff_y = (np.dot(temp_affine[3], columns1[matches[j][0]]) + np.dot(temp_affine[4], rows1[matches[j][0]]) + temp_affine[5]) - rows2[matches[j][1]]\n if np.absolute(diff_x) < 3 and np.absolute(diff_y) <3:\n num_inliers += 1\n if num_inliers > best_inlier:\n xform = [[temp_affine[0], temp_affine[1], temp_affine[2]], [temp_affine[3], temp_affine[4], temp_affine[5]], [0, 0, 1]]\n if np.linalg.cond(xform) < 1e+15:\n best_inlier = num_inliers\n best_form = temp_affine\n else: \n num_iter += 1\n affine_xform = [[best_form[0], best_form[1], best_form[2]], [best_form[3], best_form[4], best_form[5]], [0, 0, 1]]\n return affine_xform\n","sub_path":"HW2/compute_affine_xform.py","file_name":"compute_affine_xform.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"251865756","text":"from asyncio import subprocess\nimport os\n\nfrom aiofile import async_open\nfrom core import BasicHandler\nfrom core.utils import pretty_size\nfrom core.web import ObjectNotFound, ServerError\nfrom xid import Xid\nimport ffmpeg\n\nDEFAULT_PATH = \"data/media/probe\"\nLIMIT = 10 * 2 << 29 # limit 10G\n\n\nclass APIProbe(BasicHandler):\n \"\"\"convert uploaded media file to mp4 container\n\n url params:\n - async: service will convert media in background\n - url: if start with http(s) will do a request when async task is done\n \"\"\"\n\n async def post(self):\n return await self.process()\n\n async def put(self):\n return await self.process()\n\n async def process(self):\n req = self.request\n\n # save upload data\n try:\n if not os.path.isdir(DEFAULT_PATH):\n os.makedirs(DEFAULT_PATH)\n except OSError:\n self.w(f\"{DEFAULT_PATH} path is not exists\")\n return ServerError()\n\n id = Xid().string()\n path_in = os.path.join(DEFAULT_PATH, id)\n\n size = 0\n\n try:\n async with async_open(path_in, \"wb+\") as fobj:\n async for data in req.content.iter_chunked(2 << 19): # 1mb\n size += len(data)\n\n if size > LIMIT:\n self.e(f\"task {id} upload size over limit {pretty_size(LIMIT)}\")\n raise ValueError()\n\n await fobj.write(data)\n except:\n self.x(f\"task {id} upload with exception\")\n return ServerError()\n\n self.d(f\"task {id} save data size {pretty_size(size)}\")\n\n try:\n probe = ffmpeg.probe(path_in)\n except ffmpeg.Error as e:\n self.e(f\"task {id} failed:{str(e)}\")\n return ServerError(500, str(e))\n finally:\n os.unlink(path_in)\n self.d(f\"task {id} is done\")\n\n video_stream = next((stream for stream in probe[\"streams\"] if stream[\"codec_type\"] == \"video\"), None)\n if video_stream is None:\n self.w(\"task {id} do not have any video stream\")\n return ObjectNotFound()\n\n return {\"data\": video_stream}\n","sub_path":"media/handlers/api_probe.py","file_name":"api_probe.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"331104695","text":"#!user/bin/python\n\nimport apple\nimport env\nimport ciscosparkapi\nfrom logging import Formatter, getLogger, StreamHandler, DEBUG\n\n\nlogger = getLogger(\"cml2_stop_real.\")\nlogger.setLevel(DEBUG)\nhandler = StreamHandler()\nhandler.setLevel(DEBUG)\nfmt = Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n \"%Y-%m-%dT%H:%M:%S\"\n)\nhandler.setFormatter(fmt)\nlogger.addHandler(handler)\nlogger.propagate = False\n\nenv_object = env.MyEnv()\nhost = env_object.my_env[\"cml2_0\"]\nwebex_token = env_object.my_env[\"webex_token\"]\nwebex_room = env_object.my_env[\"webex_room\"]\n\nob = apple.Cml2(host)\nob.delete_labs()\n\nwebex = ciscosparkapi.CiscoSparkAPI(access_token=webex_token)\nwebex.messages.create(\n webex_room,\n text=\"Successfully Cml stoped !!\"\n)\n\nlogger.debug(\"Successfully Cml stopde!!\")\n","sub_path":"rest/cml_stop_real.py","file_name":"cml_stop_real.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"266186160","text":"from peewee import *\nfrom playhouse.shortcuts import model_to_dict\n\nfrom .utils import send_notify\nfrom .extensions import db\n\n\nclass User(db.Model):\n nickname = CharField(null=False, unique=True)\n email = CharField(null=False)\n profile = CharField(null=True)\n password = CharField(null=False)\n avatar = CharField(null=True)\n is_enabled = BooleanField(default=True)\n is_email_enabled = BooleanField(default=True)\n\n @classmethod\n def disable_email(cls, nickname):\n cls.update(email_enabled=False).where(cls.nickname == nickname).execute()\n\n @classmethod\n def get_by_nickname(cls, nickname):\n return cls.select().where(cls.nickname == nickname).first()\n\n @classmethod\n def enable_email(cls, nickname):\n cls.update(email_enabled=True).where(cls.nickname == nickname).execute()\n\n @classmethod\n def disable_account(cls, nickname):\n cls.update(is_enabled=False).where(cls.nickname == nickname).execute()\n\n @classmethod\n def enable_account(cls, nickname):\n cls.update(is_enabled=True).where(cls.nickname == nickname).execute()\n\n\nclass Note(db.Model):\n body = CharField()\n user = ForeignKeyField(User, User.nickname)\n byline = CharField()\n archived = BooleanField(default=False)\n\n @classmethod\n def get_archived_notes(cls, nickname):\n notes = cls.select().where(cls.user == nickname, cls.archived == True)\n return [model_to_dict(note, recurse=False) for note in notes]\n\n @classmethod\n def get_unarchive_by_user(cls, nickname):\n notes = cls.select().where(cls.user == nickname, cls.archived == False)\n return [model_to_dict(note, recurse=False) for note in notes]\n\n def notify(self):\n send_notify(self)\n","sub_path":"saythanks/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"39146625","text":"import sys\n\nfrom smuthi.version import __version__\n\ntry:\n from mpi4py import MPI\n mpi_comm = MPI.COMM_WORLD\n mpi_rank = mpi_comm.Get_rank()\nexcept:\n mpi_rank = 0\n\n\ndef print_smuthi_header():\n welcome_msg = (\"\\n\" + \"*\" * 32 + \"\\n SMUTHI version \" + __version__ + \"\\n\" + \"*\" * 32 + \"\\n\")\n sys.stdout.write(welcome_msg)\n sys.stdout.flush()\n\n\n#if mpi_rank == 0:\n# print_smuthi_header()\n","sub_path":"smuthi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"110141278","text":"import re\nimport sys\nfrom collections import defaultdict\n\nwith open(\"instr.txt\") as f:\n content = f.readlines()\nreg = re.compile(r'(\\w+) (inc|dec) (-?\\d+) '\n r'if (\\w+) (<|>|==|!=|<=|>=) (-?\\d+)')\n\ndic = defaultdict(int)\nlines = [reg.match(x).groups() for x in content]\n\nfor l in lines:\n if l[4] == '<':\n cond = dic[l[3]] < int(l[5])\n elif l[4] == '<=':\n cond = dic[l[3]] <= int(l[5])\n elif l[4] == '>':\n cond = dic[l[3]] > int(l[5])\n elif l[4] == '>=':\n cond = dic[l[3]] >= int(l[5])\n elif l[4] == '==':\n cond = dic[l[3]] == int(l[5])\n else: # l[4] == '!=':\n cond = dic[l[3]] != int(l[5])\n\n if cond:\n if l[1] == 'inc':\n dic[l[0]] += int(l[2])\n else: # if l[1] == 'dec':\n dic[l[0]] -= int(l[2])\n\nmx = -sys.maxsize - 1\nfor x in dic:\n if dic[x] > mx:\n mx = dic[x]\nprint(mx)\n","sub_path":"8/instr_1.py","file_name":"instr_1.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"47744612","text":"import _plotly_utils.basevalidators\n\n\nclass LineValidator(_plotly_utils.basevalidators.CompoundValidator):\n def __init__(self, plotly_name=\"line\", parent_name=\"violin.box\", **kwargs):\n super(LineValidator, self).__init__(\n plotly_name=plotly_name,\n parent_name=parent_name,\n data_class_str=kwargs.pop(\"data_class_str\", \"Line\"),\n data_docs=kwargs.pop(\n \"data_docs\",\n \"\"\"\n color\n Sets the inner box plot bounding line color.\n width\n Sets the inner box plot bounding line width.\n\"\"\",\n ),\n **kwargs,\n )\n","sub_path":"packages/python/plotly/plotly/validators/violin/box/_line.py","file_name":"_line.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"522735985","text":"__author__ = 'kiwee'\n\nimport pymongo\nimport random\n\nconn=pymongo.Connection('localhost', 27017)\nmydb=conn.mydb\nmydb.add_user('test','test')\nmydb.authenticate('test','test')\n\nmuser=mydb.user # new a table\nmuser.save({'id':1, 'name':'test'}) # add a record\nmuser.insert({'id':2, 'name':'hello'}) # add a record\nmuser.find_one() # find a record\nmuser.create_index('id')\ncontent=mydb.user.find()\nfor i in content:\n print(i)\nprint(content)","sub_path":"PycharmProjects/mongotest/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"525199333","text":"import tensorflow as tf\nimport numpy as np\nimport gym\n\nenv = gym.make('CartPole-v0')\nenv = env.unwrapped\n\nenv.seed(1)\n\n# Environment Hyperparameters\nstate_size = 4\naction_size = env.action_space.n\n\n# Training Hyperparameters\nmax_episodes = 1000\nlearning_rate = 0.01\ngamma = 0.95 # Discount rate\n\n# Take the rewards and perform discounting\ndef discount_and_normalize_rewards(episode_rewards):\n discounted_episode_rewards = np.zeros_like(episode_rewards)\n cumulative = 0.0\n for i in reversed(range(len(episode_rewards))):\n cumulative = cumulative * gamma + episode_rewards[i]\n discounted_episode_rewards[i] = cumulative\n\n mean = np.mean(discounted_episode_rewards)\n std = np.std(discounted_episode_rewards)\n discounted_episode_rewards = (discounted_episode_rewards - mean) / (std)\n\n return discounted_episode_rewards\n\n\n# The state is an array of 4 values which will be used as an input\n# The neural network is made up of 3 fully connected layers\n# The output activation function is softmax that squashes the outputs to a probability distribution\nwith tf.name_scope(\"inputs\"):\n input_ = tf.placeholder(tf.float32, [None, state_size], name=\"input_\")\n actions = tf.placeholder(tf.int32, [None, action_size], name=\"actions\")\n discounted_episode_rewards_ = tf.placeholder(tf.float32, [None,], name=\"discounted_episode_rewards\")\n \n mean_reward_ = tf.placeholder(tf.float32, name=\"mean_reward\")\n\n with tf.name_scope(\"fc1\"):\n fc1 = tf.contrib.layers.fully_connected(\n inputs = input_,\n num_outputs = 10,\n activation_fn=tf.nn.relu,\n weights_initializer=tf.contrib.layers.xavier_initializer())\n\n with tf.name_scope(\"fc2\"):\n fc2 = tf.contrib.layers.fully_connected(\n inputs = fc1,\n num_outputs = action_size,\n activation_fn=tf.nn.relu,\n weights_initializer=tf.contrib.layers.xavier_initializer())\n\n with tf.name_scope(\"fc3\"):\n fc3 = tf.contrib.layers.fully_connected(\n inputs = fc2,\n num_outputs = action_size,\n activation_fn=None,\n weights_initializer=tf.contrib.layers.xavier_initializer())\n\n with tf.name_scope(\"softmax\"):\n action_distribution = tf.nn.softmax(fc3)\n\n with tf.name_scope(\"loss\"):\n neg_log_prob = tf.nn.softmax_cross_entropy_with_logits(logits = fc3, labels = actions)\n loss = tf.reduce_mean(neg_log_prob * discounted_episode_rewards_)\n\n with tf.name_scope(\"train\"):\n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n\n# Set up Tensorboard\nwriter = tf.summary.FileWriter(\"/tmp/tensorboard/pg/1\")\ntf.summary.scalar(\"Loss\", loss)\ntf.summary.scalar(\"Reward_mean\", mean_reward_)\nwrite_op = tf.summary.merge_all()\n\n\n# Train the agent\n# For each step:\n# choose an action a\n# perform action a\n# store s, a, r\n# if done:\n# calculate sum reward\n# calculate gamma Gt\n# optimize\nallRewards = []\ntotal_rewards = 0\nmaximumRewardRecorded = 0\nepisode = 0\nepisode_states, episode_actions, episode_rewards = [], [], []\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for episode in range(max_episodes):\n \n episode_rewards_sum = 0\n\n # Launch the game!\n state = env.reset()\n\n env.render()\n\n while True:\n\n # Choose action a\n action_probability_distribution = sess.run(action_distribution, feed_dict={input_: state.reshape([1,4])})\n\n action = np.random.choice(range(action_probability_distribution.shape[1]), p=action_probability_distribution.ravel())\n\n # Perform a\n new_state, reward, done, info = env.step(action)\n\n # Store s, a, r\n 
episode_states.append(state)\n\n action_ = np.zeros(action_size)\n action_[action] = 1\n\n episode_actions.append(action_)\n\n episode_rewards.append(reward)\n\n if done:\n # Calculate the sum reward\n episode_rewards_sum = np.sum(episode_rewards)\n\n allRewards.append(episode_rewards_sum)\n\n total_rewards = np.sum(allRewards)\n\n # Calculate the mean reward, as well\n mean_reward = np.divide(total_rewards, episode+1)\n\n maximumRewardRecorded = np.amax(allRewards)\n\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Episode: \", episode)\n print(\"Reward: \", episode_rewards_sum)\n print(\"Mean Reward: \", mean_reward)\n print(\"Max reward so far: \", maximumRewardRecorded)\n\n # Calculate discounted reward\n discounted_episode_rewards = discount_and_normalize_rewards(episode_rewards)\n\n # Feedforward, gradient, and backpropagation\n loss_, _ = sess.run(\n [loss, train_opt],\n feed_dict={\n input_: np.vstack(np.array(episode_states)),\n actions: np.vstack(np.array(episode_actions)),\n discounted_episode_rewards_: discounted_episode_rewards\n }\n )\n\n # Write TF summaries\n summary = sess.run(\n write_op,\n feed_dict={\n input_: np.vstack(np.array(episode_states)),\n actions: np.vstack(np.array(episode_actions)),\n discounted_episode_rewards_: discounted_episode_rewards,\n mean_reward_: mean_reward\n }\n )\n\n writer.add_summary(summary, episode)\n writer.flush()\n\n # Reset the transition stores\n episode_states, episode_actions, episode_rewards = [], [], []\n\n break\n\n state = new_state\n","sub_path":"cartpole_main.py","file_name":"cartpole_main.py","file_ext":"py","file_size_in_byte":6048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"32903586","text":"import networkx as nx\nimport random\n\n\ndef display_graph(graph, width):\n i = 0\n for node in graph.nodes(data=True):\n if i % width == 0:\n print ('\\n')\n print(node, end='\\t')\n i += 1\n\n\ndef generate_graph(height=5, width=5):\n graph = nx.grid_2d_graph(height, width, periodic=False, create_using=None)\n # set random values for all the nodes\n for _, data in graph.nodes(data=True):\n data[\"value\"] = random.randint(1, 20)\n\n return graph\n","sub_path":"local_search/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"219850394","text":"from django.db import models\nfrom django.contrib.auth.models import PermissionsMixin\nfrom django.contrib.auth.base_user import AbstractBaseUser\nfrom django.utils.translation import gettext as _\nfrom user.managers import UserManager\n\n\nclass User(AbstractBaseUser, PermissionsMixin):\n email = models.EmailField(_(\"email\"), unique=True)\n username = models.CharField(_(\"username\"), max_length=50, primary_key=True)\n data_joined = models.DateTimeField(_(\"data_joined\"), auto_now_add=True)\n is_active = models.BooleanField(_(\"is_active\"), default=True)\n is_staff = models.BooleanField(_(\"is_staff\"), default=True)\n\n objects = UserManager()\n USERNAME_FIELD = \"username\"\n REQUIRED_FIELDS = [\"email\", ]\n\n class Meta:\n verbose_name = _(\"username\")\n verbose_name_plural = _(\"usernames\")\n\n def get_name(self):\n return self.username.strip()\n","sub_path":"translate-me/authentication-master/api/user/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"518664602","text":"class RingBuffer:\n def __init__(self, capacity):\n self.capacity = capacity\n self.current = 0\n self.storage = [None]*capacity\n\n def append(self, item):\n # check if there is space in the storage\n if None in self.storage:\n # get the index of None\n emptyIndex = self.storage.index(None)\n self.storage[emptyIndex] = item\n # if storage is full\n else:\n # get the first element as the oldest\n oldestItem = self.storage[0]\n for i in self.storage:\n # getting the oldest element in the array\n if i < oldestItem:\n oldestItem = i\n # get the oldest item index\n oldestItemIndex = self.storage.index(oldestItem)\n # override item with the oldest\n self.storage[oldestItemIndex] = item\n\n def get(self):\n return [item for item in self.storage if item != None]\n","sub_path":"ring_buffer/ring_buffer.py","file_name":"ring_buffer.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"276483793","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport cv2\nimport matplotlib.pyplot as plt\nimport pickle\nimport numpy as np\n\n\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\ndef get_data_and_label(file):\n dict = unpickle(file)\n data = dict[b'data']\n label = dict[b'labels']\n print(type(data))\n print(type(label))\n # data = data.reshape([-1,3,32,32])\n label = np.array(label,dtype = np.uint8)\n return data,label\n\ndef classification_model_fn(features,labels,mode):\n # Input data and label\n # Suppose the data format is NHWC\n input_data = tf.reshape(features[\"x\"],[-1,32,32,3])\n input_data = tf.cast(input_data,tf.float32)\n print(input_data.shape)\n labels = tf.cast(labels, tf.int32)\n # Convolutional Layer #1\n conv1 = tf.layers.conv2d(inputs=input_data,\n filters = 32,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv1.shape)\n BN = tf.layers.batch_normalization(conv1)\n pool1 = tf.layers.max_pooling2d(inputs = BN, pool_size = [2,2], strides = 2) \n # Convolutional Layer #2\n conv2 = tf.layers.conv2d(inputs=pool1,\n filters = 64,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv2.shape)\n BN = tf.layers.batch_normalization(conv2)\n pool2 = tf.layers.max_pooling2d(inputs = BN, pool_size = [2,2], strides = 2)\n # Convolutional Layer #3\n conv3 = tf.layers.conv2d(inputs=pool2,\n filters = 128,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv3.shape)\n BN = tf.layers.batch_normalization(conv3)\n # Convolutional Layer #4\n conv4 = tf.layers.conv2d(inputs=BN,\n filters = 64,\n kernel_size = [1,1],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv4.shape)\n BN = tf.layers.batch_normalization(conv4)\n # Convolutional Layer #5\n conv5 = tf.layers.conv2d(inputs=BN,\n filters = 128,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv5.shape)\n BN = tf.layers.batch_normalization(conv5)\n pool3 = tf.layers.max_pooling2d(inputs = BN, pool_size = [2,2], strides = 2)\n\n # Convolutional Layer #6\n conv6 = tf.layers.conv2d(inputs=pool3,\n filters = 256,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv6.shape)\n BN = tf.layers.batch_normalization(conv6)\n # Convolutional Layer #7\n conv7 = tf.layers.conv2d(inputs=BN,\n filters = 128,\n kernel_size = [1,1],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv7.shape)\n BN = tf.layers.batch_normalization(conv7)\n # Convolutional Layer #8\n conv8 = tf.layers.conv2d(inputs=BN,\n filters = 256,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv8.shape)\n BN = tf.layers.batch_normalization(conv8)\n pool4 = tf.layers.max_pooling2d(inputs = BN, pool_size = [2,2], strides = 2)\n\n # Convolutional Layer #9\n conv9 = tf.layers.conv2d(inputs=pool4,\n filters = 512,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv9.shape)\n BN = tf.layers.batch_normalization(conv9)\n # Convolutional Layer #10\n conv10 = tf.layers.conv2d(inputs=BN,\n filters = 256,\n kernel_size = [1,1],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv10.shape)\n BN = tf.layers.batch_normalization(conv10)\n # Convolutional Layer #11\n conv11 = tf.layers.conv2d(inputs=BN,\n filters = 512,\n kernel_size = [3,3],\n 
padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv11.shape)\n BN = tf.layers.batch_normalization(conv11)\n # Convolutional Layer #12\n conv12 = tf.layers.conv2d(inputs=BN,\n filters = 256,\n kernel_size = [1,1],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv12.shape)\n BN = tf.layers.batch_normalization(conv12)\n # Convolutional Layer #13\n conv13 = tf.layers.conv2d(inputs=pool4,\n filters = 512,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv13.shape)\n BN = tf.layers.batch_normalization(conv13)\n pool5 = tf.layers.max_pooling2d(inputs = BN, pool_size = [2,2], strides = 2)\n\n # Convolutional Layer #14\n conv14 = tf.layers.conv2d(inputs=pool5,\n filters = 1024,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv14.shape)\n BN = tf.layers.batch_normalization(conv14)\n # Convolutional Layer #15\n conv15 = tf.layers.conv2d(inputs=BN,\n filters = 512,\n kernel_size = [1,1],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv15.shape)\n BN = tf.layers.batch_normalization(conv15)\n # Convolutional Layer #16\n conv16 = tf.layers.conv2d(inputs=BN,\n filters = 1024,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv16.shape)\n BN = tf.layers.batch_normalization(conv16)\n # Convolutional Layer #17\n conv17 = tf.layers.conv2d(inputs=BN,\n filters = 512,\n kernel_size = [1,1],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv17.shape)\n BN = tf.layers.batch_normalization(conv17)\n # Convolutional Layer #18\n conv18 = tf.layers.conv2d(inputs=BN,\n filters = 1024,\n kernel_size = [3,3],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv18.shape)\n BN = tf.layers.batch_normalization(conv18)\n \n # Convolutional Layer #19\n conv19 = tf.layers.conv2d(inputs=BN,\n filters = 1000,\n kernel_size = [1,1],\n padding=\"same\",\n activation=tf.nn.leaky_relu)\n print(conv19.shape)\n\n avgpool = tf.layers.average_pooling2d(conv19,pool_size = [7,7],strides = 1)\n # Dense Layer\n avgpool_flat = tf.reshape(avgpool, [-1, 7 * 7 * 1000])\n dense = tf.layers.dense(inputs=avgpool_flat, units=1000, activation=tf.nn.softmax)\n dropout = tf.layers.dropout(\n inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n # Logits Layer\n logits = tf.layers.dense(inputs=dropout, units=10)\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `softmax_tensor` to the graph. 
It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n }\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])}\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)\n\ndef main(unused_argv):\n # Load training and eval data\n # mnist = tf.contrib.learn.datasets.load_dataset(\"mnist\")\n file1 = \"cifar-10-batches-py/data_batch_1\"\n train_data,train_labels = get_data_and_label(file1)\n file2 = \"cifar-10-batches-py/data_batch_2\"\n eval_data,eval_labels = get_data_and_label(file2)\n # Create the Estimator\n yolo_classifier = tf.estimator.Estimator(\n model_fn=classification_model_fn, model_dir=\"tmp/yolo_convnet_model\")\n # Set up logging for predictions\n tensors_to_log = {\"probabilities\": \"softmax_tensor\"}\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=50)\n # Train the model\n train_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\":train_data},\n y=train_labels,\n batch_size=64,\n num_epochs=160,\n shuffle=True)\n yolo_classifier.train(\n input_fn=train_input_fn,\n steps=20000,\n hooks=[logging_hook])\n # Evaluate the model and print results\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\":eval_data},\n y=eval_labels,\n num_epochs=1,\n shuffle=False)\n eval_results = yolo_classifier.evaluate(input_fn=eval_input_fn)\n print(eval_results)\n\n\nif __name__ == '__main__':\n # file = \"C:\\\\Users\\\\DengDazhen\\\\Desktop\\\\test\\\\cifar-10-batches-py\\\\data_batch_1\"\n # data,label = get_data_and_label(file)\n #transform the input array to a 32*32*3 matrix\n # data = data.reshape([-1,3,32,32])\n # dataset = tf.data.Dataset.from_tensor_slices({'image':data,'label':label})\n # iterator = dataset.make_one_shot_iterator()\n # one_element = iterator.get_next()\n # with tf.Session() as sess:\n # for i in range(5):\n # print(sess.run(one_element))\n tf.app.run()\n\n \n\n\n","sub_path":"YOLO_l.py","file_name":"YOLO_l.py","file_ext":"py","file_size_in_byte":9543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"401673668","text":"import re\n\nport_counters = [\n 'RxalignErr',\n 'RxcrcErr',\n 'RxlongErr',\n 'RxshortErr',\n 'RxtokenDrop',\n 'Txcollisions',\n 'TxexcessDefer',\n 'TxexcessLength',\n 'TxlateCollision',\n]\n\n# 78/88 debug display:\n# [8:54:12am 08/20/19] DeviceTLInfo ... ReasonForOutOfServiceText=...\n# 8821 has space after seconds\n# [5:48:09 AM 10/11/19] DeviceName=SEP007686CF681A ... ReasonForOutOfServiceText=...\ndebug_rgx_default = re.compile(\n r'\\d{1,2}:\\d{1,2}:\\d{1,2}\\s*\\w{2},?\\s*'\n r'(?P\\d{2}/\\d{2}/\\d{2}).+'\n r'ReasonForOutOfServiceText=(?P\\w+)',\n re.I,\n)\n\n# 79xx debug display (does not include date:\n# 12:02:55a 25: Name=SEPB8BEBF227D79 Load= 9.4(2SR3.1S) Last=Initialized\ndebug_rgx_79xx = re.compile(\n r'(?P\\d{1,2}:\\d{1,2}:\\d{1,2}\\w).+'\n r'Last=(?P.+)$',\n re.I,\n )\n\n# 78/88 status messages:\n# [8:52:30am 10/01/19] ITL installed\nstatus_rgx_default = re.compile(\n r'\\d{1,2}:\\d{1,2}:\\d{1,2}\\s*\\w{2},?\\s*'\n r'(?P\\d{2}/\\d{2}/\\d{2})\\]\\s*'\n r'(?P.+)',\n re.I,\n)\n\n# 79xx debug display (does not include date:\n# 1:33:26a TFTP Error : SEPB8BEBF9D2061.cnf.xml.sgn\nstatus_rgx_79xx = re.compile(\n r'(?P\\d{1,2}:\\d{1,2}:\\d{1,2}\\w)\\s*'\n r'(?P.+)$',\n re.I,\n )\n\ndebug_display_rgx = [debug_rgx_default, debug_rgx_79xx]\nstatus_messages_rgx = [status_rgx_default, status_rgx_79xx]\n\n\ndef multi_match(text_list, rgx_list, cnt):\n \"\"\"\n Collected lines that match regex's in the rgx_list until the # of collected\n lines equals cnt.\n\n Args:\n text_list (list): List of strings to match\n rgx_list (list): One or more regex's\n cnt (int): Number of matches to collect before returning\n\n Returns:\n (str): The matched lines joined by linefeed/CR\n \"\"\"\n matches = []\n for line in text_list:\n line = re.sub(r'\\n', ' ', line)\n for rgx in rgx_list:\n m = rgx.search(line)\n if m:\n matches.append(' '.join(m.groups()))\n break\n\n if len(matches) >= cnt:\n break\n return '\\n\\r'.join(matches)\n\n\ndef prep_xml(func):\n \n def wrapper(xml_dict, count=1):\n \"\"\"\n Take the OrderedDict returned from the Status message or Debug display web page\n and strip out the status messages for further processing.\n\n These pages both return the following data structures:\n If multiple messages exist:\n {'DeviceLog': { 'status' [ 'msg1', 'msg2']}}\n If a single (or no) message exists:\n {'DeviceLog': { 'status' 'msg'}}\n\n The value of the inner 'status' key is taken and, if necessary converted to a list. \n The list is then reversed so the most recent entries are first. 
\n\n Args:\n xml_dict (OrderedDict): Converted XML from IP phone web page\n count (int): Number of results to return \n \n Returns:\n status_messages (list): Reversed list of status message entries\n \"\"\"\n device_log = xml_dict.get('DeviceLog') or {}\n status_messages = device_log.get('status') or []\n\n # status_messages will be a list unless only one status message exists on the page\n # in which case it will be a string\n if isinstance(status_messages, str):\n status_messages = [status_messages]\n\n status_messages.reverse()\n return func(status_messages, count)\n return wrapper\n\n\n@prep_xml\ndef parse_status_error(status_messages, count):\n \"\"\"\n Parse Status message web page for the most recent error messages.\n\n Lines are matched based on the err_rgx.\n\n The most recent X matches are returned where X is the value of count.\n\n Args:\n status_messages (list): Lines from the Status message web page\n count (int): Number of matches to return\n\n Returns:\n (str): One or more matched lines joined by CR/LF\n \"\"\"\n err_rgx = re.compile(r'(no trust list|error|configmismatch|tftp timeout)', re.I)\n matched = []\n for msg in status_messages:\n if err_rgx.search(msg):\n matched.append(msg)\n\n return multi_match(matched, status_messages_rgx, count)\n\n\n@prep_xml\ndef parse_status_itl(status_messages, count):\n \"\"\"\n Parse Status message web page for the most recent ITL-related entries.\n\n Matched lines contain \"ITL\" or \"Trust\". These may be errors or informational.\n\n The most recent X matches are returned where X is the value of count.\n\n Args:\n status_messages (list): Lines from the Status message web page\n count (int): Number of matches to return\n\n Returns:\n (str): One or more matched lines joined by CR/LF\n \"\"\"\n itl_rgx = re.compile(r'(ITL|Trust)')\n matched = []\n for msg in status_messages:\n if itl_rgx.search(msg, re.I):\n matched.append(msg)\n\n return multi_match(matched, status_messages_rgx, count)\n\n\n@prep_xml\ndef parse_debug_reason(debug_messages, count=1):\n \"\"\"\n Parse Debug display page content for the most recent out of service reasons.\n\n Values are pulled from \"ReasonForOutOfServiceText=TEXT\" lines. 
The date and TEXT\n are extracted from the line and returned.\n\n The most recent X matches are returned where X is the value of count.\n\n Args:\n debug_messages (list): Lines from the Debug display web page\n count (int): Number of matches to return\n\n Returns:\n (str): One or more matched lines joined by CR/LF\n \"\"\"\n return multi_match(debug_messages, debug_display_rgx, count)\n\n\ndef parse_port_errors(port_dict):\n \"\"\"\n Parse PortInformation web pages and sum all the error counters into a single value.\n\n Args:\n port_dict (OrderedDict): Converted XML from IP phone web page\n\n Returns:\n val: (int): Sum of error counters\n \"\"\"\n port_info = port_dict.get('PortInformation') or {}\n val = 0\n for k in port_counters:\n try:\n val += int(port_info.get(k, 0))\n except (TypeError, KeyError):\n pass\n return val\n\n\ndef parse_pc_port_speed(port_dict):\n \"\"\"\n Parse Access network page content for PC port speed/duplex.\n\n Return 'N/A' if PortSpeed key is not present under the assumption that\n the source device does not have a PC port.\n\n Args:\n port_dict (OrderedDict): Converted XML from IP phone web page\n\n Returns:\n (str): PortSpeed value or 'N/A\n \"\"\"\n port_info = port_dict.get('PortInformation') or {}\n return port_info.get('PortSpeed', 'N/A')\n","sub_path":"field_funcs.py","file_name":"field_funcs.py","file_ext":"py","file_size_in_byte":6526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"320103664","text":"import sys\nimport os\n\nimport numpy as np\nimport scipy.stats\n\n\ndef mean_confidence_interval(data, confidence=0.95):\n a = 1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n return m, h\n\n\ndef read_results(dataset):\n fn = '%s_result.txt' % dataset\n\n methods = ['phrase model score',\n 'marker model score',\n 'IR-based',\n 'AB method 0',\n 'AB method 1',\n 'AB method 2',\n 'AB method 3',\n 'Opine - marker, score',\n 'Opine - marker, running time',\n 'Opine - histogram, score',\n 'Opine - histogram, running time']\n scores = { method : [] for method in methods}\n for line in open(fn):\n line = line.strip()\n score_id = None\n for method in methods:\n if line.startswith(method):\n score_id = method\n break\n\n if score_id != None:\n token = ''\n if '\\t' in line:\n token = line.split('\\t')[-1]\n elif ' ' in line:\n token = line.split(' ')[-1]\n scores[method].append(float(token))\n\n # table 1\n columns = []\n for difficulty in range(3):\n column = []\n N = len(scores['IR-based'])\n\n ir_based = [scores['IR-based'][i] for i in range(N) if i % 3 == difficulty]\n column.append(mean_confidence_interval(ir_based))\n for ab_mode in range(4):\n attr_name = 'AB method %d' % ab_mode\n score = [scores[attr_name][i] for i in range(N) if i % 3 == difficulty]\n column.append(mean_confidence_interval(score))\n opine = [scores['Opine - marker, score'][i] \\\n for i in range(N) if i % 3 == difficulty]\n column.append(mean_confidence_interval(opine))\n columns.append(column)\n\n print('Table 1 dataset %s' % dataset)\n max_conf_int = 0.0\n for i in range(6):\n row = '\\t'.join(['%.3f' % columns[j][i][0] for j in range(3)])\n max_conf_int = max(max_conf_int, max([columns[j][i][1] for j in range(3)]))\n print(row)\n print('max_conf_int =', max_conf_int)\n\n # table 2\n column = []\n LR_accuracy = mean_confidence_interval(scores['marker model score'])\n NDCG_10 = mean_confidence_interval(scores['Opine - marker, score'])\n runtime1 = mean_confidence_interval(scores['Opine - marker, running time'])\n column += [LR_accuracy, NDCG_10, runtime1]\n LR_accuracy = mean_confidence_interval(scores['phrase model score'])\n NDCG_10 = mean_confidence_interval(scores['Opine - histogram, score'])\n runtime2 = mean_confidence_interval(scores['Opine - histogram, running time'])\n column += [LR_accuracy, NDCG_10, runtime2]\n speedup = [b / a for (a, b) in zip(scores['Opine - marker, running time'],\n scores['Opine - histogram, running time'])]\n column.append(mean_confidence_interval(speedup))\n\n print('Table 2 dataset %s' % dataset)\n for i in range(7):\n print('%.2f %.3f' % (column[i][0], column[i][1]))\n print('avg. runtime - marker =', np.mean(scores['Opine - marker, running time']) / 100)\n print('avg. runtime - histogram =', np.mean(scores['Opine - histogram, running time']) / 100)\n\n\nif __name__ == '__main__':\n datasets = ['london', 'amsterdam', 'toronto_lp', 'toronto_jp']\n\n for dataset in datasets:\n read_results(dataset)\n","sub_path":"eval/read_results.py","file_name":"read_results.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"182734005","text":"#ID25.py\n\n#This program finds the first fibonacci number with 1000 digits\n\ndef ID25():\n numdig = int(input('What is the number of digits? '))\n x = 1\n a = 0\n b = 0\n length = 0\n fibnum = 1\n while length < numdig:\n b = x\n x += a\n a = b\n tlength = len(str(x))\n if tlength > length:\n length = tlength\n fibnum += 1\n print('The number is ', fibnum)\n\nID25()\n","sub_path":"python files/id25.py","file_name":"id25.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"34562143","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.e_introduction, name='e_introduction'),\n path('introduction/edit//', views.introduction_edit, name='e_introduction_edit'),\n path('teams//', views.e_teams, name='e_teams'),\n path('subcategory//', views.e_subcategory, name='e_subcategory'),\n path('services//', views.e_services, name='e_services'),\n]","sub_path":"kilwoo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"543442737","text":"\nlista_cosas = [\"das\", 123, 456, \"fsdskhg\", 796, \"36464230a\", \"sfsd\", 723746245]\nlista_numeros = []\nlista_str = []\n\n\n\nfor dato in lista_cosas:\n if type(dato) == str:\n lista_str.append(dato)\n elif type(dato) == int:\n lista_numeros.append(dato)\n\n\nprint(lista_str)\nprint(lista_numeros)\n","sub_path":"ejercicios/lista_con_str_int.py","file_name":"lista_con_str_int.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"37201272","text":"from keras.models import Model,Sequential\nfrom keras.layers import Embedding,LSTM,Dense,Average,Input\n#from gensim.models import KeyedVectors\n\n#word_vectors = KeyedVectors.load_word2vec_format('./embeddings/low_shuff_combine_tokenized.txt-iter17-min5.bin', binary=True)\n\nmodel_input = Input(shape=(None,))\nfixed = word_vectors.get_keras_embedding(train_embeddings=False)(model_input)\nfree = word_vectors.get_keras_embedding(train_embeddings=True)(model_input)\ncombined = Average()([free,fixed])\n\nlstm = LSTM(args.value,\n activation='tanh', # activation function used\n recurrent_activation='hard_sigmoid', # activation function for recurrent step\n use_bias=True, # whether the layer uses a bias vector\n kernel_initializer='glorot_uniform', # initialiser for the weights matrix\n recurrent_initializer='orthogonal', # initialiser for the recurrent kernal's weights\n bias_initializer='zeros', # initialiser for the bias vector\n unit_forget_bias=True, # add 1 to the bias of the forget gate at initialization\n kernel_regularizer=None, # regularizer function applied to kernal\n recurrent_regularizer=None, # regularizer function applied to recurrent kernal\n bias_regularizer=None, # regularizer function applied to bias vector\n activity_regularizer=None, # regularizer function applied to output of the layer\n kernel_constraint=None, # constraint function applied to the kernal\n recurrent_constraint=None, # constraint function applied to the recurrent kernal\n bias_constraint=None, # constraint function applied to the bias vector\n dropout=0.0, # fraction of units to drop for the linear transformation of the inputs\n recurrent_dropout=0.0, # fraction of units to drop for the linear transformation of the recurrent state\n implementation=1, # implementation mode, either 1 or 2.\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False, # If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch.\n unroll=False)(combined)\noutput = Dense(2,\n activation='softmax',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None)(lstm)\n\nmodel = Model(inputs=[model_input], outputs=[output])\n\nmodel.compile(optimizer='Adadelta',\n loss='binary_crossentropy',\n metrics=['acc'],\n sample_weight_mode=None,\n weighted_metrics=None,\n target_tensors=None)\n\n#from keras.utils import plot_model\n#plot_model(model,show_shapes=True, show_layer_names=False, to_file='model2.png')\n","sub_path":"models/lstm_single_embedDual.py","file_name":"lstm_single_embedDual.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"653298701","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport os\nimport peewee\nfrom playhouse.shortcuts import model_to_dict\n\n\ndatabase_path = '/home/rrocha/projects/feagri/ebbandflow.db'\ndatabase = peewee.SqliteDatabase(database_path)\n\n\nclass BaseModel(peewee.Model):\n class Meta:\n database = database\n\n def to_dict(self, datefield_format='%d/%m/%Y'):\n dict_model = model_to_dict(self)\n\n for key, value in dict_model.items():\n if isinstance(value, datetime.date):\n new_value = value.strftime(datefield_format)\n dict_model[key] = new_value\n\n return dict_model\n\n\nclass StatusPlanta(BaseModel): \n status_bomba = peewee.CharField()\n modo_operacao = peewee.CharField()\n umidade_set_point = peewee.CharField()\n intervalo_leitura = peewee.CharField()\n umidade_substrato = peewee.CharField()\n ph_solucao = peewee.CharField()\n ph_set_point = peewee.CharField()\n created_date = peewee.DateTimeField(default=datetime.datetime.now)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"533190466","text":"import datetime\r\nbefore1 = input(\"開始日(YYYY/MM/DD) :\")\r\nafter1 = datetime.datetime.strptime(before1, '%Y/%m/%d')\r\nbefore2 = input(\"終了日(YYYY/MM/DD) :\")\r\nafter2 = datetime.datetime.strptime(before2, '%Y/%m/%d')\r\n\r\n\r\nout = after2-after1\r\nprint(\"日数差: {0} 日\".format(out.days))\r\n\r\n","sub_path":"python実験/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"411050607","text":"def hth_anno():\n f=open(\"./data/domian_in_neighbour.tbl\").read().split(\"\\n\")\n f=f[3:-11]\n query2accession={}\n for i in f:\n i_info=i.split()\n query=i_info[2]\n accession=i_info[1]\n if query not in query2accession.keys():\n query2accession[query]=[accession]\n else:\n query2accession[query].append(accession)\n geneWithHth=list(query2accession.keys())\n \n #f=open(\"./data/del\").read().split(\"\\n\")\n f=open(\"./data/neighbour.log_1\").read().split(\"\\n\")\n f.remove(\"\")\n target2neighbour={}\n t2n={}\n for i in f:\n i_info=i.split(\":\")\n key=i_info[0]\n value=eval(i_info[1])\n tmp=[]\n for j in value:\n if j in geneWithHth:\n j_strand=\"dsadasdsa\"\n tmp.append(j_strand)\n if tmp:\n target2neighbour[key]=\"1\"\n t2n[key]=tmp\n else:\n target2neighbour[key]=\"0\"\n t2n[key]=[]\n return target2neighbour, query2accession, t2n\n\nhth_anno()\n","sub_path":"ProjectOfAcrDetectorScript/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"485794839","text":"from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7\n# Importing the Kratos Library\nimport KratosMultiphysics\nimport KratosMultiphysics.FluidDynamicsApplication as KratosFluid\n\n## Import base class file\nfrom KratosMultiphysics.FluidDynamicsApplication.fluid_solver import FluidSolver\n\nfrom KratosMultiphysics import python_linear_solver_factory as linear_solver_factory\nfrom KratosMultiphysics.FluidDynamicsApplication import check_and_prepare_model_process_fluid\n\ndef CreateSolver(model, custom_settings):\n return NavierStokesCompressibleSolver(model, custom_settings)\n\nclass NavierStokesCompressibleSolver(FluidSolver):\n\n @classmethod\n def GetDefaultSettings(cls):\n ##settings string in json format\n default_settings = KratosMultiphysics.Parameters(\"\"\"\n {\n \"solver_type\": \"compressible_solver_from_defaults\",\n \"model_part_name\": \"\",\n \"domain_size\": -1,\n \"model_import_settings\": {\n \"input_type\": \"mdpa\",\n \"input_filename\": \"two_element_test\",\n \"reorder\": false\n },\n \"maximum_iterations\": 10,\n \"echo_level\": 1,\n \"time_order\": 2,\n \"compute_reactions\": false,\n \"reform_dofs_at_each_step\" : true,\n \"relative_tolerance\" : 1e-3,\n \"absolute_tolerance\" : 1e-5,\n \"linear_solver_settings\" : {\n \"solver_type\" : \"amgcl\",\n \"max_iteration\" : 200,\n \"tolerance\" : 1e-7,\n \"provide_coordinates\" : false,\n \"smoother_type\" : \"ilu0\",\n \"krylov_type\" : \"gmres\",\n \"coarsening_type\" : \"aggregation\",\n \"scaling\" : true,\n \"verbosity\" : 0\n },\n \"volume_model_part_name\" : \"volume_model_part\",\n \"skin_parts\": [\"\"],\n \"no_skin_parts\":[\"\"],\n \"time_stepping\" : {\n \"automatic_time_step\" : true,\n \"CFL_number\" : 1,\n \"minimum_delta_time\" : 1e-4,\n \"maximum_delta_time\" : 0.01\n },\n \"periodic\": \"periodic\",\n \"move_mesh_flag\": false\n }\"\"\")\n\n default_settings.AddMissingParameters(super(NavierStokesCompressibleSolver, cls).GetDefaultSettings())\n return default_settings\n\n def __init__(self, model, custom_settings):\n self._validate_settings_in_baseclass=True # To be removed eventually\n super(NavierStokesCompressibleSolver,self).__init__(model,custom_settings)\n\n self.element_name = \"CompressibleNavierStokes\"\n self.condition_name = \"Condition\"\n self.min_buffer_size = 3\n\n ## Construct the linear solver\n self.linear_solver = linear_solver_factory.ConstructSolver(self.settings[\"linear_solver_settings\"])\n\n ## Set the element replace settings\n #self._SetCompressibleElementReplaceSettings()\n\n print(\"Construction of NavierStokesCompressibleSolver finished.\")\n\n\n def AddVariables(self):\n\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.MOMENTUM)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DENSITY)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.TOTAL_ENERGY)\n\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.CONDUCTIVITY)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.SPECIFIC_HEAT)\n self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.HEAT_CAPACITY_RATIO)\n\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.BODY_FORCE)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_H) ## ?\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_AREA) ## ?\n 
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION) #for momentum\n self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.REACTION_DENSITY) #for momentum\n self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.REACTION_ENERGY) #for momentum\n\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.FLAG_VARIABLE) ## ?\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NORMAL)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.Y_WALL) ## ?\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.EXTERNAL_PRESSURE)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.KINEMATIC_VISCOSITY)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DYNAMIC_VISCOSITY)\n\n # Post-process\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)\n self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.PRESSURE)\n self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.MACH) #for momentum\n\n print(\"Monolithic compressible fluid solver variables added correctly\")\n\n def AddDofs(self):\n KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.MOMENTUM_X, KratosMultiphysics.REACTION_X, self.main_model_part)\n KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.MOMENTUM_Y, KratosMultiphysics.REACTION_Y, self.main_model_part)\n KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.MOMENTUM_Z, KratosMultiphysics.REACTION_Z, self.main_model_part)\n KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.DENSITY, KratosFluid.REACTION_DENSITY, self.main_model_part)\n KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.TOTAL_ENERGY, KratosFluid.REACTION_ENERGY, self.main_model_part)\n\n def Initialize(self):\n self.computing_model_part = self.GetComputingModelPart()\n\n # If needed, create the estimate time step utility\n if (self.settings[\"time_stepping\"][\"automatic_time_step\"].GetBool()):\n print(\"ERROR: _GetAutomaticTimeSteppingUtility out of date\")\n #self.EstimateDeltaTimeUtility = self._GetAutomaticTimeSteppingUtility()\n\n # Set the time discretization utility to compute the BDF coefficients\n time_order = self.settings[\"time_order\"].GetInt()\n if time_order == 2:\n self.time_discretization = KratosMultiphysics.TimeDiscretization.BDF(time_order)\n else:\n raise Exception(\"Only \\\"time_order\\\" equal to 2 is supported. 
Provided \\\"time_order\\\": \" + str(time_order))\n\n # Creating the solution strategy\n self.conv_criteria = KratosMultiphysics.ResidualCriteria(self.settings[\"relative_tolerance\"].GetDouble(),\n self.settings[\"absolute_tolerance\"].GetDouble())\n\n\n #(self.conv_criteria).SetEchoLevel(self.settings[\"echo_level\"].GetInt()\n (self.conv_criteria).SetEchoLevel(3)\n\n domain_size = self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE]\n rotation_utility = KratosFluid.CompressibleElementRotationUtility(domain_size,KratosMultiphysics.SLIP)\n time_scheme = KratosMultiphysics.ResidualBasedIncrementalUpdateStaticSchemeSlip(rotation_utility)\n #time_scheme = KratosMultiphysics.ResidualBasedIncrementalUpdateStaticScheme() # DOFs (4,5)\n\n\n builder_and_solver = KratosMultiphysics.ResidualBasedBlockBuilderAndSolver(self.linear_solver)\n\n\n self.solver = KratosMultiphysics.ResidualBasedNewtonRaphsonStrategy(self.computing_model_part,\n time_scheme,\n self.linear_solver,\n self.conv_criteria,\n builder_and_solver,\n self.settings[\"maximum_iterations\"].GetInt(),\n self.settings[\"compute_reactions\"].GetBool(),\n self.settings[\"reform_dofs_at_each_step\"].GetBool(),\n self.settings[\"move_mesh_flag\"].GetBool())\n\n\n (self.solver).SetEchoLevel(self.settings[\"echo_level\"].GetInt())\n #(self.solver).SetEchoLevel(1)\n\n\n (self.solver).Initialize()\n\n\n # self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.DYNAMIC_TAU, self.settings[\"dynamic_tau\"].GetDouble()) # REMEMBER TO CHECK MY STAB CONSTANTS\n\n print (\"Monolithic compressible solver initialization finished.\")\n\n\n def InitializeSolutionStep(self):\n (self.time_discretization).ComputeAndSaveBDFCoefficients(self.GetComputingModelPart().ProcessInfo)\n (self.solver).InitializeSolutionStep()\n\n\n def Solve(self):\n (self.time_discretization).ComputeAndSaveBDFCoefficients(self.GetComputingModelPart().ProcessInfo)\n (self.solver).Solve()\n\n def PrepareModelPart(self):\n super(NavierStokesCompressibleSolver,self).PrepareModelPart()\n if not self.main_model_part.ProcessInfo[KratosMultiphysics.IS_RESTARTED]:\n self._ExecuteAfterReading()\n\n def _ExecuteAfterReading(self):\n ## Replace element and conditions\n KratosMultiphysics.ReplaceElementsAndConditionsProcess(self.main_model_part, self.settings[\"element_replace_settings\"]).Execute()\n\n ## Check that the input read has the shape we like\n prepare_model_part_settings = KratosMultiphysics.Parameters(\"{}\")\n prepare_model_part_settings.AddValue(\"volume_model_part_name\",self.settings[\"volume_model_part_name\"])\n prepare_model_part_settings.AddValue(\"skin_parts\",self.settings[\"skin_parts\"])\n\n check_and_prepare_model_process_fluid.CheckAndPrepareModelProcess(self.main_model_part, prepare_model_part_settings).Execute()\n\n\n #def _SetCompressibleElementReplaceSettings(self):\n #domain_size = self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE]\n #self.settings.AddEmptyValue(\"element_replace_settings\")\n\n #if(domain_size == 3):\n #self.settings[\"element_replace_settings\"] = KratosMultiphysics.Parameters(\"\"\"\n #{\n #\"element_name\":\"CompressibleNavierStokes3D4N\",\n #\"condition_name\": \"Condition3D3N\"\n #}\n #\"\"\")\n #elif(domain_size == 2):\n #self.settings[\"element_replace_settings\"] = KratosMultiphysics.Parameters(\"\"\"\n #{\n #\"element_name\":\"CompressibleNavierStokes2D3N\",\n #\"condition_name\": \"Condition2D2N\"\n #}\n #\"\"\")\n #else:\n #raise Exception(\"Domain size is not 2 or 
3!!\")\n","sub_path":"applications/FluidDynamicsApplication/python_scripts/navier_stokes_compressible_solver.py","file_name":"navier_stokes_compressible_solver.py","file_ext":"py","file_size_in_byte":11146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"409324686","text":"import sqlite3\n\nwith sqlite3.connect('db/sqlite3.db') as connection:\n # create db cursorcursor()\n cursor = connection.cursor()\n\n data = (\n (\"Jean-Baptiste Zorg\", \"Human\", 122),\n (\"Korben Dallas\", \"Meat Popsicle\", 100),\n (\"Ak'not\", \"Mangalore\", -5)\n )\n\n cursor.executemany(\"INSERT INTO Roster VALUES(?, ?, ?);\", data)","sub_path":"chapters/14/exercises/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"347809169","text":"from avocado.query.translators import Translator, registry\nfrom modeltree.tree import trees\n\n\nclass AllowMissingRecord(Translator):\n \"\"\"\n HGMD is the only source of data for the variant-phenotype assocations.\n This leads to a nuance in what it means to \"not be in HGMD\". By default,\n Avocado adds a second condition to ensure the ID is also not null if the\n field itself is nullable (to exclude missing records). However because\n records _only_ exist if there is an HGMD ID, this behavior is confusing.\n\n This translator overrides this behavior and adds an OR to allow for no\n records if querying for an explicit NULL.\n \"\"\"\n def translate(self, field, roperator, rvalue, tree, **kwargs):\n output = super(AllowMissingRecord, self).translate(\n field, roperator, rvalue, tree, **kwargs)\n cleaned_data = output['cleaned_data']\n\n if (cleaned_data['operator'].lookup == 'isnull'\n and cleaned_data['value']):\n # Create a null condition for this field\n null_condition = trees[tree].query_condition(\n field.model._meta.pk, 'isnull', True)\n # Allow the null condition\n output['query_modifiers']['condition'] = null_condition\n return output\n\n\nregistry.register(AllowMissingRecord, 'Allow Missing Record')\n","sub_path":"varify/phenotypes/translators.py","file_name":"translators.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"565470253","text":"from distutils.core import setup\nimport numpy\n\ndef find_version(path):\n import re\n # path shall be a plain ascii text file.\n s = open(path, 'rt').read()\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n s, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Version not found\")\n\nsetup(\n name=\"mpi4py_test\",\n version=find_version(\"mpi4py_test/version.py\"),\n author=\"Yu Feng\",\n author_email=\"rainwoodman@gmail.com\",\n url=\"http://github.com/rainwoodman/mpi4py_test\",\n description=\"Simple testing based on numpy for applications written with mpi4py.\",\n zip_safe = False,\n package_dir = {'mpi4py_test': 'mpi4py_test'},\n install_requires=['numpy', 'mpi4py'],\n license='BSD-2-Clause',\n packages= ['mpi4py_test', 'mpi4py_test.tests'],\n requires=['numpy', 'mpi4py'],\n)\n","sub_path":"pypi_install_script/mpi4py_test-0.0.10.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"2952139","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport cv2\nimport math\n\ndef sliding_window(image,padded_image,window = 3):\n\ta = np.zeros(image.shape)\n\tfor x in range(image.shape[0]):\n\t\tfor y in range(image.shape[1]):\n\t\t\twin_im = padded_image[x:x+window, y:y+window]\n\t\t\t# print(image[x,y])\n\n\t\t\ta[x][y][0] = bilateral_filter(win_im[:,:,0])\n\t\t\ta[x][y][1] = bilateral_filter(win_im[:,:,1])\n\t\t\ta[x][y][2] = bilateral_filter(win_im[:,:,2])\n\treturn a\n\ndef padding(im,kernel_row=3,kernel_col=3):\n\timage_row, image_col,ch = im.shape\n\n\tpad_height = int((kernel_row - 1) / 2)\n\tpad_width = int((kernel_col - 1) / 2)\n\t \n\tpadded_image = np.zeros((image_row + (2 * pad_height), image_col + (2 * pad_width),ch))\n\tprint(padded_image.shape)\n\t \n\tpadded_image[pad_height:padded_image.shape[0] - pad_height, pad_width:padded_image.shape[1] - pad_width] = im\n\treturn padded_image\n\ndef weighting(x,sigma=1):\n\treturn math.exp(- (x ** 2) / (2 * sigma ** 2))\n\ndef bilateral_filter(win_im,sd=1,sr=1):\n\tw = 0 \n\tgk = 0\n\ti = int(win_im.shape[0]/2)\n\t# print(i)\n\tfor k in range(win_im.shape[0]):\n\t\tfor l in range(win_im.shape[1]):\n\t\t\t\n\t\t\td = weighting((i-k),sd) * weighting((i-l),sd)\n\t\t\t# print(d)\n\t\t\tv = abs(win_im[i][i] - win_im[k][l])\n\t\t\tr = weighting(v,sr)\n\t\t\tgk = gk + (win_im[k][l]*(r*d))\n\t\t\tw = w + (r*d)\n\n\treturn (gk/w)\n\nim = cv2.imread('gt_sky.png')# path needs to be channged all input images are available in the input folder\n# im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n# im = cv2.resize(im,(480,640))\npadded_image = padding(im,3,3)\noutput = sliding_window(im,padded_image,3)\ncv2.imwrite('gt _sky_ooo.jpg',output)# output images in the output folder can be used for refrence\n","sub_path":"a2_2019702002/src/q6/bilateral.py","file_name":"bilateral.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"643895959","text":"from keras import layers, models, optimizers\nfrom keras import backend as K\nimport tensorflow as tf\nfrom numpy.random import seed\nfrom tensorflow import set_random_seed\nseed(14)\nset_random_seed(14)\n\nclass Critic:\n \"\"\"Critic (Value) Model.\"\"\"\n\n def __init__(self, state_size, action_size, lr):\n \"\"\"Initialize parameters and build model.\n\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n lr (float): Adam optimizer learning rate\n \"\"\"\n self.state_size = state_size\n self.action_size = action_size \n self.lr = lr\n\n self.build_model()\n\n def build_model(self):\n \"\"\"Build a critic (value) network that maps (state, action) pairs -> Q-values.\"\"\"\n states = layers.Input(shape=(self.state_size,))\n actions = layers.Input(shape=(self.action_size,))\n stat_act = layers.Concatenate()([states, actions])\n\n net = layers.Dense(units=400)(stat_act)\n net = layers.Activation('relu')(net)\n net = layers.Dense(units=300)(net)\n net = layers.Activation('relu')(net)\n \n Q_values = layers.Dense(1)(net)\n \n self.model = models.Model(inputs=[states, actions], outputs=Q_values) \n\n # Define optimizer and compile model for training with built-in loss function\n optimizer = optimizers.Adam(lr=self.lr)\n self.model.compile(optimizer=optimizer, loss='mse')\n\n # Compute action gradients (derivative of Q values w.r.t. to actions)\n gradients = K.gradients(Q_values, actions)\n self.get_gradients = K.function(\n inputs=[*self.model.input, K.learning_phase()], \n outputs=gradients) \n","sub_path":"agents/critic.py","file_name":"critic.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"67841192","text":"import random\ndef get_rand_list(x, y, n):\n k = random.sample(range(x,y), n)\n return k\ndef get_overlap(first, second):\n list = []\n for i in first:\n for k in second:\n if i == k:\n list.append(i)\n return list\ndef main():\n x = int(input(\"begin:\"))\n y = int(input(\"end:\"))\n n = int(input(\"N:\"))\n first = get_rand_list(x, y, n)\n second = get_rand_list(x, y, n)\n list = get_overlap(first, second)\n print(first)\n print(second)\n print(list)\n\nmain()","sub_path":"lab8/example3.py","file_name":"example3.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"594492041","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statistics\nimport collections\nfrom sklearn.linear_model import LinearRegression\n\n# Import and Clean Time Series\ndataset = pd.read_csv(\"CumulativeCases.csv\")\n\ndates = dataset['Date']\ncolombia_dataset = dataset['Colombia']\nbelgium_dataset = dataset['Belgium']\n\n# Create lists from the datasets.\nlist_colombia = list(colombia_dataset)\nlist_belgium = list(belgium_dataset)\n\n\n#### Measures of Central Tendency\n\ndef central_tendency_spread():\n #### MEAN ###\n mean_colombia = colombia_dataset.mean()\n mean_belgium = belgium_dataset.mean()\n\n print(\"Measures of Central Tendency\", end=\"\\n\\n\")\n print(\"Colombia Mean: \", mean_colombia)\n print('Belgium Mean: ', mean_belgium)\n\n #### MEDIAN\n median_colombia = colombia_dataset.median()\n median_belgium = belgium_dataset.median()\n\n print(\"Colombia Median: \", median_colombia)\n print('Belgium Median: ', median_belgium)\n\n #### MODE\n mode_colombia = colombia_dataset.mode()\n mode_belgium = belgium_dataset.mode()\n\n print(\"Colombia Mode: \", int(mode_colombia))\n print('Belgium Mode: ', int(mode_belgium), end=\"\\n\\n\\n\")\n\n variance_colombia = statistics.variance(list(colombia_dataset))\n variance_belgium = statistics.variance(list(belgium_dataset))\n \n print(\"Measures of Spread\", end=\"\\n\\n\")\n\n print(\"Colombia Variance: \", variance_colombia)\n print(\"Belgium Variance: \", variance_belgium)\n\n pvariance_colombia = statistics.pvariance(list(colombia_dataset))\n pvariance_belgium = statistics.pvariance(list(belgium_dataset))\n\n print(\"Colombia Population Variance: \", pvariance_colombia)\n print(\"Belgium Population Variance: \", pvariance_belgium)\n\n stdev_colombia = statistics.stdev(list(colombia_dataset))\n stdev_belgium = statistics.stdev(list(belgium_dataset))\n\n print(\"Colombia Standard Deviation: \", stdev_colombia)\n print(\"Belgium Standard Deviation: \", stdev_belgium)\n\n pstdev_colombia = statistics.pstdev(list(colombia_dataset))\n pstdev_belgium = statistics.pstdev(list(belgium_dataset))\n\n print(\"Colombia Population Standard Deviation: \", pstdev_colombia)\n print(\"Belgium Population Standard Deviation: \", pstdev_belgium)\n # Write Results into XSLX File\n\n data1 = {\n 'Measures of Central Tendency' : ['Mean', 'Median', 'Mode'],\n 'Belgium': [int(mean_belgium), int(median_belgium), int(mode_belgium)],\n 'Colombia' : [int(mean_colombia), int(median_colombia), int(mode_colombia)],\n }\n\n data2 = {\n 'Measures of Spread' : ['Variance', 'Population Variance', 'Standard Deviation', 'Population Standard Deviation'],\n 'Belgium' : [int(variance_belgium), int(pvariance_belgium), int(stdev_belgium), int(pstdev_belgium)],\n 'Colombia' : [int(variance_colombia), int(pvariance_colombia), int(stdev_colombia), int(pstdev_colombia)]\n }\n\n df1 = pd.DataFrame(data1, columns = ['Measures of Central Tendency', 'Belgium', 'Colombia'])\n df2 = pd.DataFrame(data2, columns = ['Measures of Spread', 'Belgium', 'Colombia'])\n\n # Save Results to Excel Files\n df1.to_excel('cent_tend.xlsx', index=False, header=True)\n df2.to_excel('spread.xlsx', index=False, header=True)\n\n\n### FREQUENCIES\ndef frequencies():\n freq_colombia = collections.Counter(list_colombia)\n freq_belgium = collections.Counter(list_belgium)\n\n print(\"Colombia Frequencies: \", freq_colombia, end=\"\\n\\n\")\n print(\"Belgium Frequencies: \", freq_belgium)\n\n # PLOT HISTOGRAM\n plt.style.use('ggplot')\n # Belgium\n 
plt.hist(list_belgium, bins=10, label=\"Belgium\")\n\n # Colombia\n plt.hist(list_colombia, bins=10, label=\"Colombia\")\n\n # Plot\n plt.legend()\n plt.show()\n\n\n# MOVING AVERAGES & VOLATILITY\nwindow_size = 10\n# convert list to series\nbelgium_series = pd.Series(list_belgium)\nbelgium_windows = belgium_series.rolling(window_size)\n\ncolombia_series = pd.Series(list_colombia)\ncolombia_windows = colombia_series.rolling(window_size)\n\n# remove NaN\nbelgium_moving_averages = belgium_windows.mean().tolist()[window_size - 1:]\ncolombia_moving_averages = colombia_windows.mean().tolist()[window_size - 1:]\n\ndef moving_averages():\n print(belgium_moving_averages)\n print(colombia_moving_averages)\n # Plot Moving Averages\n plt.plot(belgium_moving_averages, label=\"Belgium\")\n plt.plot(colombia_moving_averages, label=\"Colombia\")\n plt.legend()\n plt.show()\n\n# Volatility\nbelgium_volatility = belgium_windows.std(ddof=0).tolist()[window_size - 1:]\ncolombia_volatility = colombia_windows.std(ddof=0).tolist()[window_size - 1:]\n\ndef volatility():\n print(belgium_volatility)\n print(colombia_volatility)\n # Plot Volatility\n plt.plot(belgium_volatility, label=\"Belgium\")\n plt.plot(colombia_volatility, label=\"Colombia\")\n plt.legend()\n plt.show()\n\n\ndef write_avg_vol_to_csv():\n # Write Measures of Volatility and Average to .csv file\n data = {\n 'Belgium Volatility': belgium_volatility,\n 'Belgium Average' : belgium_moving_averages,\n 'Colombia Volatility' : colombia_volatility,\n 'Colombia Average' : colombia_moving_averages\n }\n\n df = pd.DataFrame(data, columns = ['Belgium Volatility', 'Belgium Average', 'Colombia Volatility', 'Colombia Average'])\n\n # Save Results to Excel Files\n df.to_csv('vol_avg.csv', index=False, header=True)\n\n# Linear Regression\ndef linear_regression():\n x = np.array(list_belgium).reshape(-1, 1)\n y = np.array(list_colombia)\n\n model = LinearRegression()\n model.fit(x, y)\n\n result = model.score(x, y)\n print(result)\n\n # Plot scatter plot to determine linear correlation\n x2 = np.array(list_belgium) # create 1D array for plot\n\n plt.plot(x2, y, 'o')\n m, b = np.polyfit(x2, y, 1)\n plt.plot(x2, m*x2, + b)\n plt.show()","sub_path":"Statistics-Assignment-master/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"373483559","text":"def pal(x):\n\tres=0\n\twhile x:\n\t\tres=res*10+x%10\n\t\tx//=10\n\treturn res\n\ndef ispal(x):\n\ts=str(x)\n\tlength=len(s)\n\tfor i in range(length//2):\n\t\tif(s[i]!=s[length-i-1]):\n\t\t\treturn False\n\treturn True\n\nn,k=map(int,input().split())\n\nstep=0\nif(ispal(n)):\n\tprint(n)\n\tprint(0)\n\texit()\nwhile(1):\n\tn+=pal(n)\n\tstep+=1\n\tif(ispal(n)):\n\t\tbreak\n\telif step==k:\n\t\tbreak\nprint(n)\nprint(step)","sub_path":"PAT/1024.py","file_name":"1024.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"136672768","text":"from urllib.parse import urlparse\nwith open('orgginal.txt') as f,open('out.txt', 'w') as f_out:\n for line in f:\n line = line.strip()\n parsed = urlparse(line)\n #print(line)\n newline=parsed._replace(query='').geturl()\n print(newline)\n #f_out.write('{}\\n'.format(line))\n\t","sub_path":"Python/tmp/url_pars/url_remove_string2.py","file_name":"url_remove_string2.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"158094738","text":"import tensorflow as tf\nimport numpy as np\nimport os, time, math, json, joblib, random, argparse\n\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import accuracy_score\n\nfrom util.opt import adam\nfrom util.utils import iter_data, find_trainable_variables, ResultLogger, assign_to_gpu, average_grads, make_path\nfrom data.data import gen_squad_data\nfrom metrics.metrics import target_based_np_rouge\n\nfrom models.transformer_lm import language_model as model\n\n# modified from https://github.com/openai/finetune-transformer-lm/blob/master/train.py\n\ndef mgpu_train(*xs):\n gpu_ops = []\n gpu_grads = []\n xs = (tf.split(x, n_gpu, 0) for x in xs)\n for i, xs in enumerate(zip(*xs)):\n do_reuse = True if i > 0 else None\n with tf.device(assign_to_gpu(i, \"/gpu:0\")), tf.variable_scope(tf.get_variable_scope(), reuse=do_reuse):\n lm_logits, lm_losses = model(*xs, \n n_vocab=n_vocab, n_special=n_special, n_ctx=n_ctx, n_embd=n_embd, \n embd_pdrop=embd_pdrop, n_layer=n_layer, n_head=n_head, attn_pdrop=attn_pdrop, \n resid_pdrop=resid_pdrop, train=True, reuse=do_reuse)\n train_loss = tf.reduce_mean(lm_losses)\n params = find_trainable_variables(\"model\")\n grads = tf.gradients(train_loss, params)\n grads = list(zip(grads, params))\n gpu_grads.append(grads)\n gpu_ops.append([lm_logits, lm_losses])\n ops = [tf.concat(op, 0) for op in zip(*gpu_ops)]\n grads = average_grads(gpu_grads)\n grads = [g for g, p in grads]\n train = adam(params, grads, lr, lr_schedule, n_updates_total, warmup=lr_warmup, l2=l2, max_grad_norm=max_grad_norm, vector_l2=vector_l2, b1=b1, b2=b2, e=e)\n return [train]+ops\n\ndef mgpu_predict(*xs):\n gpu_ops = []\n xs = (tf.split(x, n_gpu, 0) for x in xs)\n for i, xs in enumerate(zip(*xs)):\n with tf.device(assign_to_gpu(i, \"/gpu:0\")), tf.variable_scope(tf.get_variable_scope(), reuse=True):\n lm_logits, lm_losses = model(*xs, \n n_vocab=n_vocab, n_special=n_special, n_ctx=n_ctx, n_embd=n_embd, \n embd_pdrop=embd_pdrop, n_layer=n_layer, n_head=n_head, attn_pdrop=attn_pdrop, \n resid_pdrop=resid_pdrop, train=False, reuse=True)\n gpu_ops.append([lm_logits, lm_losses])\n ops = [tf.concat(op, 0) for op in zip(*gpu_ops)]\n return ops\n\ndef iter_apply(Xs, Ms):\n fns = [lambda x:np.concatenate(x, 0), lambda x:float(np.sum(x))]\n results = []\n for xmb, mmb in iter_data(Xs, Ms, n_batch=n_batch_train, truncate=False, verbose=True):\n n = len(xmb)\n idx_mask = np.equal(xmb[:,:,0], delimiter).astype(int)\n end_idxs = np.argmax(np.equal(xmb[:,:,0], end).astype(int), 1)\n delim_idxs = np.argmax(idx_mask,1)\n if n == n_batch_train:\n for i in range(np.max(end_idxs - delim_idxs)):\n ev_logits, ev_lm_loss = sess.run([eval_mgpu_logits, eval_mgpu_lm_loss], {X_train:xmb, M_train:mmb})\n pred = np.argmax(ev_logits, 1)\n idx_mask = roll_mask(idx_mask)\n xmb = next_xmb(xmb, pred, idx_mask)\n if all_finished(np.reshape(pred,xmb[:,1:,0].shape), end):\n break\n else:\n for i in range(np.max(end_idxs - delim_idxs)):\n ev_logits, ev_lm_losses = sess.run([eval_logits, eval_lm_loss], {X:xmb, M:mmb})\n pred = np.argmax(ev_logits, 1)\n idx_mask = roll_mask(idx_mask)\n xmb = next_xmb(xmb, pred, idx_mask)\n if all_finished(np.reshape(pred,xmb[:,1:,0].shape), end):\n break\n res = [pred * n, ev_lm_loss * n]\n results.append(res)\n results = zip(*results)\n return [fn(res) for res, fn in zip(results, fns)]\n\ndef iter_predict(Xs, Ms):\n preds = []\n for xmb, mmb in iter_data(Xs, Ms, n_batch=n_batch_train, truncate=False, verbose=True):\n n = len(xmb)\n idx_mask = 
np.equal(xmb[:,:,0], delimiter).astype(int)\n end_idxs = np.argmax(np.equal(xmb[:,:,0], end).astype(int), 1)\n delim_idxs = np.argmax(idx_mask,1)\n if n == n_batch_train:\n for i in range(np.max(end_idxs - delim_idxs)):\n pred = np.argmax(sess.run(eval_mgpu_logits, {X_train:xmb, M_train:mmb}), 1)\n idx_mask = roll_mask(idx_mask)\n xmb = next_xmb(xmb, pred, idx_mask)\n if all_finished(np.reshape(pred,xmb[:,1:,0].shape), end):\n break\n else:\n for i in range(np.max(end_idxs - delim_idxs)):\n pred = np.argmax(sess.run(eval_logits, {X:xmb, M:mmb}), 1)\n idx_mask = roll_mask(idx_mask)\n xmb = next_xmb(xmb, pred, idx_mask)\n if all_finished(np.reshape(pred,xmb[:,1:,0].shape), end):\n break\n preds.append(pred)\n preds = np.concatenate(preds, 0)\n return preds\n\ndef next_xmb(xmb, pred, idx_mask):\n reshaped_pred = np.reshape(pred,xmb[:,1:,0].shape)\n pad = np.zeros_like(xmb[:,:1,0])\n m_xmb = xmb*np.expand_dims(1-idx_mask,2)\n m_pred = np.expand_dims(np.concatenate([reshaped_pred,pad],1)*idx_mask,2)\n return m_xmb + m_pred\n\ndef all_finished(t, end):\n return np.sum(np.any(np.equal(t, end),1)) == len(t)\n\ndef roll_mask(m):\n e_slice = np.zeros_like(m[:,:1])\n r_slice = np.roll(m,1)[:,1:]\n rm = np.concatenate([e_slice,r_slice],1)\n return rm\n\ndef save(path):\n ps = sess.run(params)\n joblib.dump(ps, make_path(path))\n\ndef log():\n global best_score\n tr_preds, tr_cost = iter_apply(trX[:n_valid], trM[:n_valid])\n va_preds, va_cost = iter_apply(vaX, vaM)\n tr_cost = tr_cost/len(trX[:n_valid])\n va_cost = va_cost/n_valid\n tr_acc = float(target_based_np_rouge(np.reshape(tr_preds, vaX[:, 1:, 0].shape), trX[:n_valid, 1:, 0], delimiter, end)[\"rouge_1/f_score\"])\n va_acc = float(target_based_np_rouge(np.reshape(va_preds, vaX[:, 1:, 0].shape), vaX[:, 1:, 0], delimiter, end)[\"rouge_1/f_score\"])\n logger.log(n_epochs=n_epochs, n_updates=n_updates, tr_cost=tr_cost, va_cost=va_cost, tr_acc=tr_acc, va_acc=va_acc)\n print('%d %d %.3f %.3f %.2f %.2f'%(n_epochs, n_updates, tr_cost, va_cost, tr_acc, va_acc))\n if submit:\n score = va_acc\n if score > best_score:\n best_score = score\n save(os.path.join(save_dir, desc, 'best_params.jl'))\n\ndef predict():\n predictions = iter_predict(teX, teM)\n predictions = np.reshape(predictions, [len(teX), -1])\n if decoder is not None:\n predictions = [\" \".join([decoder.get(token, \">\") for token in np.trim_zeros(prediction,'b')]\n ).replace(\"\",\"\").replace(\"\\n\",\"\").strip() for prediction in predictions]\n targets = [\" \".join([decoder.get(token, \">\") for token in np.trim_zeros(target,'b')]\n ).replace(\"\",\"\").replace(\"\\n\",\"\").strip() for target in teX[:, 1:, 0]]\n path = os.path.join(submission_dir, desc)\n os.makedirs(os.path.dirname(path), exist_ok=True)\n with open(path, 'w') as f:\n for i, (prediction, target) in enumerate(zip(predictions, targets)):\n f.write('INDEX: {}\\nPREDICTION: {}\\nTARGET: {}\\n'.format(i, prediction, target))\n f.write('#'*150+'\\n')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--desc', type=str, default='transformer_qa_gen') # dir args\n parser.add_argument('--log_dir', type=str, default='log/')\n parser.add_argument('--save_dir', type=str, default='save/')\n parser.add_argument('--submission_dir', type=str, default='submission/')\n parser.add_argument('--encoding_dir', type=str, default='data/bpe_encoding/')\n parser.add_argument('--data_dir', type=str, default='data/squad_1.1/')\n parser.add_argument('--pretrained_lm_dir', type=str, 
default='data/pretrained_language_model_params/')\n parser.add_argument('--use_prev_best', action='store_true')\n parser.add_argument('--submit', action='store_true')\n parser.add_argument('--data_limit', type=int)\n parser.add_argument('--seed', type=int, default=42) # seed\n parser.add_argument('--n_gpu', type=int, default=1) # train args\n parser.add_argument('--n_iter', type=int, default=3)\n parser.add_argument('--n_batch', type=int, default=4)\n parser.add_argument('--n_ctx', type=int, default=512) # model params\n parser.add_argument('--n_embd', type=int, default=768)\n parser.add_argument('--n_head', type=int, default=12)\n parser.add_argument('--n_layer', type=int, default=12)\n parser.add_argument('--embd_pdrop', type=float, default=0.1)\n parser.add_argument('--attn_pdrop', type=float, default=0.1)\n parser.add_argument('--resid_pdrop', type=float, default=0.1)\n parser.add_argument('--max_grad_norm', type=int, default=1) # opt args\n parser.add_argument('--lr', type=float, default=6.25e-5)\n parser.add_argument('--lr_warmup', type=float, default=0.002)\n parser.add_argument('--l2', type=float, default=0.01)\n parser.add_argument('--vector_l2', action='store_true')\n parser.add_argument('--lr_schedule', type=str, default='warmup_linear')\n parser.add_argument('--b1', type=float, default=0.9)\n parser.add_argument('--b2', type=float, default=0.999)\n parser.add_argument('--e', type=float, default=1e-8)\n args = parser.parse_args()\n print(args)\n globals().update(args.__dict__)\n # set seed\n random.seed(seed)\n np.random.seed(seed)\n tf.set_random_seed(seed)\n # log args\n logger = ResultLogger(path=os.path.join(log_dir, '{}.jsonl'.format(desc)), **args.__dict__)\n # handle data \n (trX, trM), (vaX, vaM), (teX, teM), config = gen_squad_data(n_ctx, encoding_dir, data_dir, data_limit=data_limit)\n globals().update(config)\n n_train = len(trX)\n n_valid = len(vaX)\n n_batch_train = n_batch*n_gpu\n n_updates_total = (n_train//n_batch_train)*n_iter\n # place holders\n X_train = tf.placeholder(tf.int32, [n_batch_train, n_ctx, 2])\n M_train = tf.placeholder(tf.float32, [n_batch_train, n_ctx])\n X = tf.placeholder(tf.int32, [None, n_ctx, 2])\n M = tf.placeholder(tf.float32, [None, n_ctx])\n # mgpu train and predict\n train, logits, lm_losses = mgpu_train(X_train, M_train)\n lm_loss = tf.reduce_mean(lm_losses)\n eval_mgpu_logits, eval_mgpu_lm_losses = mgpu_predict(X_train, M_train)\n eval_logits, eval_lm_losses = model(X, M, \n n_vocab=n_vocab, n_special=n_special, n_ctx=n_ctx, n_embd=n_embd, \n embd_pdrop=embd_pdrop, n_layer=n_layer, n_head=n_head, attn_pdrop=attn_pdrop, \n resid_pdrop=resid_pdrop, train=False, reuse=True)\n eval_lm_loss = tf.reduce_mean(eval_lm_losses)\n eval_mgpu_lm_loss = tf.reduce_mean(eval_mgpu_lm_losses)\n # params\n params = find_trainable_variables('model')\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n sess.run(tf.global_variables_initializer())\n # get saved params\n if use_prev_best and os.path.isfile(os.path.join(save_dir, desc, 'best_params.jl')):\n sess.run([p.assign(ip) for p, ip in zip(params, joblib.load(os.path.join(save_dir, desc, 'best_params.jl')))])\n else:\n shapes = json.load(open('{}params_shapes.json'.format(pretrained_lm_dir)))\n offsets = np.cumsum([np.prod(shape) for shape in shapes])\n init_params = [np.load('{}params_{}.npy'.format(pretrained_lm_dir, n)) for n in range(10)]\n init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]\n init_params = [param.reshape(shape) for param, shape in 
zip(init_params, shapes)]\n init_params[0] = init_params[0][:n_ctx]\n if n_ctx > 512:\n init_params[0] = np.concatenate([init_params[0], [init_params[0][-1] for i in range(n_ctx-512)]])\n init_params[0] = np.concatenate([init_params[1], (np.random.randn(n_special, n_embd)*0.02).astype(np.float32), init_params[0]], 0)\n del init_params[1]\n sess.run([p.assign(ip) for p, ip in zip(params[:145], init_params[:145])])\n # train, eval, test\n n_updates = 0\n n_epochs = 0\n if submit:\n save(os.path.join(save_dir, desc, 'best_params.jl'))\n best_score = 0\n for i in range(n_iter):\n for xmb, mmb in iter_data(*shuffle(trX, trM, random_state=np.random), n_batch=n_batch_train, truncate=True, verbose=True):\n cost, _ = sess.run([lm_loss, train], {X_train:xmb, M_train:mmb})\n n_updates += 1\n if n_updates in [1000, 2000, 4000, 8000, 16000, 32000] and n_epochs == 0:\n log()\n n_epochs += 1\n log()\n if submit:\n sess.run([p.assign(ip) for p, ip in zip(params, joblib.load(os.path.join(save_dir, desc, 'best_params.jl')))])\n predict()","sub_path":"train_transformer_qa_gen.py","file_name":"train_transformer_qa_gen.py","file_ext":"py","file_size_in_byte":12787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"380127722","text":"\"\"\"E2E tests for ingest_client.\"\"\"\nimport pytest\nimport time\nimport os\nimport uuid\nimport io\n\nfrom azure.kusto.data.request import KustoClient, KustoConnectionStringBuilder\nfrom azure.kusto.ingest.status import KustoIngestStatusQueues\nfrom azure.kusto.ingest import (\n KustoIngestClient,\n KustoStreamingIngestClient,\n IngestionProperties,\n JsonColumnMapping,\n CsvColumnMapping,\n DataFormat,\n ValidationPolicy,\n ValidationOptions,\n ValidationImplications,\n ReportLevel,\n ReportMethod,\n FileDescriptor,\n KustoMissingMappingReferenceError,\n)\n\n# TODO: change this file to use pytest as runner\n\n\nclass Helpers:\n \"\"\"A class to define mappings to deft table.\"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def create_deft_table_csv_mappings():\n \"\"\"A method to define csv mappings to deft table.\"\"\"\n mappings = list()\n mappings.append(CsvColumnMapping(columnName=\"rownumber\", cslDataType=\"int\", ordinal=0))\n mappings.append(CsvColumnMapping(columnName=\"rowguid\", cslDataType=\"string\", ordinal=1))\n mappings.append(CsvColumnMapping(columnName=\"xdouble\", cslDataType=\"real\", ordinal=2))\n mappings.append(CsvColumnMapping(columnName=\"xfloat\", cslDataType=\"real\", ordinal=3))\n mappings.append(CsvColumnMapping(columnName=\"xbool\", cslDataType=\"bool\", ordinal=4))\n mappings.append(CsvColumnMapping(columnName=\"xint16\", cslDataType=\"int\", ordinal=5))\n mappings.append(CsvColumnMapping(columnName=\"xint32\", cslDataType=\"int\", ordinal=6))\n mappings.append(CsvColumnMapping(columnName=\"xint64\", cslDataType=\"long\", ordinal=7))\n mappings.append(CsvColumnMapping(columnName=\"xuint8\", cslDataType=\"long\", ordinal=8))\n mappings.append(CsvColumnMapping(columnName=\"xuint16\", cslDataType=\"long\", ordinal=9))\n mappings.append(CsvColumnMapping(columnName=\"xuint32\", cslDataType=\"long\", ordinal=10))\n mappings.append(CsvColumnMapping(columnName=\"xuint64\", cslDataType=\"long\", ordinal=11))\n mappings.append(CsvColumnMapping(columnName=\"xdate\", cslDataType=\"datetime\", ordinal=12))\n mappings.append(CsvColumnMapping(columnName=\"xsmalltext\", cslDataType=\"string\", ordinal=13))\n mappings.append(CsvColumnMapping(columnName=\"xtext\", cslDataType=\"string\", ordinal=14))\n mappings.append(CsvColumnMapping(columnName=\"xnumberAsText\", cslDataType=\"string\", ordinal=15))\n mappings.append(CsvColumnMapping(columnName=\"xtime\", cslDataType=\"timespan\", ordinal=16))\n mappings.append(CsvColumnMapping(columnName=\"xtextWithNulls\", cslDataType=\"string\", ordinal=17))\n mappings.append(CsvColumnMapping(columnName=\"xdynamicWithNulls\", cslDataType=\"dynamic\", ordinal=18))\n return mappings\n\n @staticmethod\n def create_deft_table_json_mappings():\n \"\"\"A method to define json mappings to deft table.\"\"\"\n mappings = list()\n mappings.append(JsonColumnMapping(columnName=\"rownumber\", jsonPath=\"$.rownumber\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"rowguid\", jsonPath=\"$.rowguid\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xdouble\", jsonPath=\"$.xdouble\", cslDataType=\"real\"))\n mappings.append(JsonColumnMapping(columnName=\"xfloat\", jsonPath=\"$.xfloat\", cslDataType=\"real\"))\n mappings.append(JsonColumnMapping(columnName=\"xbool\", jsonPath=\"$.xbool\", cslDataType=\"bool\"))\n mappings.append(JsonColumnMapping(columnName=\"xint16\", jsonPath=\"$.xint16\", cslDataType=\"int\"))\n 
mappings.append(JsonColumnMapping(columnName=\"xint32\", jsonPath=\"$.xint32\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"xint64\", jsonPath=\"$.xint64\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint8\", jsonPath=\"$.xuint8\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint16\", jsonPath=\"$.xuint16\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint32\", jsonPath=\"$.xuint32\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint64\", jsonPath=\"$.xuint64\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xdate\", jsonPath=\"$.xdate\", cslDataType=\"datetime\"))\n mappings.append(JsonColumnMapping(columnName=\"xsmalltext\", jsonPath=\"$.xsmalltext\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xtext\", jsonPath=\"$.xtext\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xnumberAsText\", jsonPath=\"$.xnumberAsText\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xtime\", jsonPath=\"$.xtime\", cslDataType=\"timespan\"))\n mappings.append(JsonColumnMapping(columnName=\"xtextWithNulls\", jsonPath=\"$.xtextWithNulls\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xdynamicWithNulls\", jsonPath=\"$.xdynamicWithNulls\", cslDataType=\"dynamic\"))\n return mappings\n\n\ncluster = \"Dadubovs1.westus\" # \"toshetah\"\ndb_name = \"TestingDatabase\" # \"PythonTest\"\ntable_name = \"Deft\"\n\n\nengine_kcsb = KustoConnectionStringBuilder.with_aad_device_authentication(\"https://{}.kusto.windows.net\".format(cluster))\ndm_kcsb = KustoConnectionStringBuilder.with_aad_device_authentication(\"https://ingest-{}.kusto.windows.net\".format(cluster))\nclient = KustoClient(engine_kcsb)\ningest_client = KustoIngestClient(dm_kcsb)\ningest_status_q = KustoIngestStatusQueues(ingest_client)\n\nstreaming_ingest_client = KustoStreamingIngestClient(engine_kcsb)\n\nclient.execute(db_name, \".drop table {} ifexists\".format(table_name))\n\n\n@pytest.mark.run(order=1)\ndef test_csv_ingest_non_existing_table():\n csv_ingest_props = IngestionProperties(\n db_name, table_name, dataFormat=DataFormat.CSV, mapping=Helpers.create_deft_table_csv_mappings(), reportLevel=ReportLevel.FailuresAndSuccesses\n )\n csv_file_path = os.path.join(os.getcwd(), \"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.csv\")\n zipped_csv_file_path = os.path.join(os.getcwd(), \"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.csv.gz\")\n\n for f in [csv_file_path, zipped_csv_file_path]:\n ingest_client.ingest_from_file(f, csv_ingest_props)\n\n successes = 0\n timeout = 60\n while successes != 2 and timeout > 0:\n while ingest_status_q.success.is_empty() and timeout > 0:\n time.sleep(1)\n timeout -= 1\n\n success_message = ingest_status_q.success.pop()\n\n assert success_message[0].Database == db_name\n assert success_message[0].Table == table_name\n\n successes += 1\n\n assert successes == 2\n # TODO: status queues only mark ingestion was successful, but takes time for data to become available\n time.sleep(20)\n response = client.execute(db_name, \"{} | count\".format(table_name))\n for row in response.primary_results[0]:\n assert int(row[\"Count\"]) == 20, \"{0} | count = {1}\".format(table_name, str(row[\"Count\"]))\n\n\njson_file_path = os.path.join(os.getcwd(), \"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.json\")\nzipped_json_file_path = 
os.path.join(os.getcwd(), \"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.jsonz.gz\")\n\n\n@pytest.mark.run(order=2)\ndef test_json_ingest_existing_table():\n json_ingestion_props = IngestionProperties(\n db_name, table_name, dataFormat=DataFormat.JSON, mapping=Helpers.create_deft_table_json_mappings(), reportLevel=ReportLevel.FailuresAndSuccesses\n )\n\n for f in [json_file_path, zipped_json_file_path]:\n ingest_client.ingest_from_file(f, json_ingestion_props)\n\n successes = 0\n timeout = 60\n\n while successes != 2 and timeout > 0:\n while ingest_status_q.success.is_empty() and timeout > 0:\n time.sleep(1)\n timeout -= 1\n\n success_message = ingest_status_q.success.pop()\n\n assert success_message[0].Database == db_name\n assert success_message[0].Table == table_name\n\n successes += 1\n\n assert successes == 2\n # TODO: status queues only mark ingestion was successful, but takes time for data to become available\n time.sleep(20)\n response = client.execute(db_name, \"{} | count\".format(table_name))\n for row in response.primary_results[0]:\n assert int(row[\"Count\"]) == 24, \"{0} | count = {1}\".format(table_name, str(row[\"Count\"]))\n\n\n@pytest.mark.run(order=3)\ndef test_ingest_complicated_props():\n # Test ingest with complicated ingestion properties\n validation_policy = ValidationPolicy(\n validationOptions=ValidationOptions.ValidateCsvInputConstantColumns, validationImplications=ValidationImplications.Fail\n )\n json_ingestion_props = IngestionProperties(\n db_name,\n table_name,\n dataFormat=DataFormat.JSON,\n mapping=Helpers.create_deft_table_json_mappings(),\n additionalTags=[\"a\", \"b\"],\n ingestIfNotExists=[\"aaaa\", \"bbbb\"],\n ingestByTags=[\"ingestByTag\"],\n dropByTags=[\"drop\", \"drop-by\"],\n flushImmediately=False,\n reportLevel=ReportLevel.FailuresAndSuccesses,\n reportMethod=ReportMethod.Queue,\n validationPolicy=validation_policy,\n )\n\n file_paths = [json_file_path, zipped_json_file_path]\n fds = [FileDescriptor(fp, 0, uuid.uuid4()) for fp in file_paths]\n source_ids = [\"{}\".format(fd.source_id) for fd in fds]\n\n for fd in fds:\n ingest_client.ingest_from_file(fd, json_ingestion_props)\n\n successes = 0\n timeout = 60\n while successes != 2 and timeout > 0:\n while ingest_status_q.success.is_empty() and timeout > 0:\n time.sleep(1)\n timeout -= 1\n\n success_message = ingest_status_q.success.pop()\n if success_message[0].IngestionSourceId in source_ids:\n assert success_message[0].Database == db_name\n assert success_message[0].Table == table_name\n\n successes += 1\n\n assert successes == 2\n # TODO: status queues only mark ingestion was successful, but takes time for data to become available\n time.sleep(20)\n response = client.execute(db_name, \"{} | count\".format(table_name))\n for row in response.primary_results[0]:\n assert int(row[\"Count\"]) == 28, \"{0} | count = {1}\".format(table_name, str(row[\"Count\"]))\n\n\n@pytest.mark.run(order=4)\ndef test_json_ingestion_ingest_by_tag():\n json_ingestion_props = IngestionProperties(\n db_name,\n table_name,\n dataFormat=DataFormat.JSON,\n mapping=Helpers.create_deft_table_json_mappings(),\n ingestIfNotExists=[\"ingestByTag\"],\n reportLevel=ReportLevel.FailuresAndSuccesses,\n dropByTags=[\"drop\", \"drop-by\"],\n )\n ops = []\n for f in [json_file_path, zipped_json_file_path]:\n ingest_client.ingest_from_file(f, json_ingestion_props)\n\n successes = 0\n timeout = 60\n while successes != 2 and timeout > 0:\n while ingest_status_q.success.is_empty() and timeout > 0:\n time.sleep(1)\n timeout 
-= 1\n\n success_message = ingest_status_q.success.pop()\n\n assert success_message[0].Database == db_name\n assert success_message[0].Table == table_name\n\n successes += 1\n\n assert successes == 2\n # TODO: status queues only mark ingestion was successful, but takes time for data to become available\n time.sleep(20)\n response = client.execute(db_name, \"{} | count\".format(table_name))\n for row in response.primary_results[0]:\n assert int(row[\"Count\"]) == 28, \"{0} | count = {1}\".format(table_name, str(row[\"Count\"]))\n\n\n@pytest.mark.run(order=5)\ndef test_tsv_ingestion_csv_mapping():\n tsv_ingestion_props = IngestionProperties(\n db_name, table_name, dataFormat=DataFormat.TSV, mapping=Helpers.create_deft_table_csv_mappings(), reportLevel=ReportLevel.FailuresAndSuccesses\n )\n tsv_file_path = os.path.join(os.getcwd(), \"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.tsv\")\n\n ingest_client.ingest_from_file(tsv_file_path, tsv_ingestion_props)\n\n successes = 0\n timeout = 60\n while successes != 1 and timeout > 0:\n while ingest_status_q.success.is_empty() and timeout > 0:\n time.sleep(1)\n timeout -= 1\n\n success_message = ingest_status_q.success.pop()\n\n assert success_message[0].Table == table_name\n assert success_message[0].Database == db_name\n\n successes += 1\n\n assert successes == 1\n # TODO: status queues only mark ingestion was successful, but takes time for data to become available\n time.sleep(20)\n response = client.execute(db_name, \"{} | count\".format(table_name))\n for row in response.primary_results[0]:\n assert int(row[\"Count\"]) == 38, \"{0} | count = {1}\".format(table_name, str(row[\"Count\"]))\n\n\n@pytest.mark.run(order=6)\ndef test_streaming_ingest_from_opened_file():\n current_dir = os.getcwd()\n path_parts = [\"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.csv\"]\n missing_path_parts = []\n for path_part in path_parts:\n if path_part not in current_dir:\n missing_path_parts.append(path_part)\n\n file_path = os.path.join(current_dir, *missing_path_parts)\n stream = open(file_path, \"r\")\n ingestion_properties = IngestionProperties(database=db_name, table=table_name, dataFormat=DataFormat.CSV)\n ingest_client.ingest_from_stream(stream, ingestion_properties=ingestion_properties)\n\n\n@pytest.mark.run(order=7)\ndef test_streaming_ingest_form_csv_file():\n current_dir = os.getcwd()\n path_parts = [\"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.csv\"]\n missing_path_parts = []\n for path_part in path_parts:\n if path_part not in current_dir:\n missing_path_parts.append(path_part)\n\n file_path = os.path.join(current_dir, *missing_path_parts)\n\n ingestion_properties = IngestionProperties(database=db_name, table=table_name, dataFormat=DataFormat.CSV)\n ingest_client.ingest_from_file(file_path, ingestion_properties=ingestion_properties)\n\n path_parts = [\"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.csv.gz\"]\n missing_path_parts = []\n for path_part in path_parts:\n if path_part not in current_dir:\n missing_path_parts.append(path_part)\n\n file_path = os.path.join(current_dir, *missing_path_parts)\n\n ingest_client.ingest_from_file(file_path, ingestion_properties=ingestion_properties)\n\n\n@pytest.mark.run(order=8)\ndef test_streaming_ingest_from_json_no_mapping():\n ingestion_properties = IngestionProperties(database=db_name, table=table_name, dataFormat=DataFormat.JSON)\n try:\n current_dir = os.getcwd()\n path_parts = [\"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.json\"]\n missing_path_parts = []\n for 
path_part in path_parts:\n if path_part not in current_dir:\n missing_path_parts.append(path_part)\n\n file_path = os.path.join(current_dir, *missing_path_parts)\n ingest_client.ingest_from_file(file_path, ingestion_properties=ingestion_properties)\n except KustoMissingMappingReferenceError:\n pass\n\n try:\n byte_sequence = b'{\"rownumber\": 0, \"rowguid\": \"00000000-0000-0000-0001-020304050607\", \"xdouble\": 0.0, \"xfloat\": 0.0, \"xbool\": 0, \"xint16\": 0, \"xint32\": 0, \"xint64\": 0, \"xunit8\": 0, \"xuint16\": 0, \"xunit32\": 0, \"xunit64\": 0, \"xdate\": \"2014-01-01T01:01:01Z\", \"xsmalltext\": \"Zero\", \"xtext\": \"Zero\", \"xnumberAsText\": \"0\", \"xtime\": \"00:00:00\", \"xtextWithNulls\": null, \"xdynamicWithNulls\": \"\"}'\n bytes_stream = io.BytesIO(byte_sequence)\n ingest_client.ingest_from_stream(bytes_stream, ingestion_properties=ingestion_properties)\n except KustoMissingMappingReferenceError:\n pass\n\n\n@pytest.mark.run(order=9)\ndef test_streaming_ingest_from_json_file():\n current_dir = os.getcwd()\n path_parts = [\"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.json\"]\n missing_path_parts = []\n for path_part in path_parts:\n if path_part not in current_dir:\n missing_path_parts.append(path_part)\n\n file_path = os.path.join(current_dir, *missing_path_parts)\n ingestion_properties = IngestionProperties(database=db_name, table=table_name, dataFormat=DataFormat.JSON, mappingReference=\"JsonMapping\")\n ingest_client.ingest_from_file(file_path, ingestion_properties=ingestion_properties)\n\n path_parts = [\"azure-kusto-ingest\", \"tests\", \"input\", \"dataset.jsonz.gz\"]\n missing_path_parts = []\n for path_part in path_parts:\n if path_part not in current_dir:\n missing_path_parts.append(path_part)\n\n file_path = os.path.join(current_dir, *missing_path_parts)\n\n ingest_client.ingest_from_file(file_path, ingestion_properties=ingestion_properties)\n\n\n@pytest.mark.run(order=10)\ndef test_streaming_ingest_from_io_streams():\n ingestion_properties = IngestionProperties(database=db_name, table=table_name, dataFormat=DataFormat.CSV)\n byte_sequence = b'0,00000000-0000-0000-0001-020304050607,0,0,0,0,0,0,0,0,0,0,2014-01-01T01:01:01.0000000Z,Zero,\"Zero\",0,00:00:00,,null'\n bytes_stream = io.BytesIO(byte_sequence)\n ingest_client.ingest_from_stream(bytes_stream, ingestion_properties=ingestion_properties)\n\n str_sequence = '0,00000000-0000-0000-0001-020304050607,0,0,0,0,0,0,0,0,0,0,2014-01-01T01:01:01.0000000Z,Zero,\"Zero\",0,00:00:00,,null'\n str_stream = io.StringIO(str_sequence)\n ingest_client.ingest_from_stream(str_stream, ingestion_properties=ingestion_properties)\n\n byte_sequence = b'{\"rownumber\": 0, \"rowguid\": \"00000000-0000-0000-0001-020304050607\", \"xdouble\": 0.0, \"xfloat\": 0.0, \"xbool\": 0, \"xint16\": 0, \"xint32\": 0, \"xint64\": 0, \"xunit8\": 0, \"xuint16\": 0, \"xunit32\": 0, \"xunit64\": 0, \"xdate\": \"2014-01-01T01:01:01Z\", \"xsmalltext\": \"Zero\", \"xtext\": \"Zero\", \"xnumberAsText\": \"0\", \"xtime\": \"00:00:00\", \"xtextWithNulls\": null, \"xdynamicWithNulls\": \"\"}'\n bytes_stream = io.BytesIO(byte_sequence)\n ingestion_properties.format = DataFormat.JSON\n\n ingestion_properties.ingestion_mapping_reference = \"JsonMapping\"\n ingest_client.ingest_from_stream(bytes_stream, ingestion_properties=ingestion_properties)\n\n str_sequence = u'{\"rownumber\": 0, \"rowguid\": \"00000000-0000-0000-0001-020304050607\", \"xdouble\": 0.0, \"xfloat\": 0.0, \"xbool\": 0, \"xint16\": 0, \"xint32\": 0, \"xint64\": 0, \"xunit8\": 0, 
\"xuint16\": 0, \"xunit32\": 0, \"xunit64\": 0, \"xdate\": \"2014-01-01T01:01:01Z\", \"xsmalltext\": \"Zero\", \"xtext\": \"Zero\", \"xnumberAsText\": \"0\", \"xtime\": \"00:00:00\", \"xtextWithNulls\": null, \"xdynamicWithNulls\": \"\"}'\n str_stream = io.StringIO(str_sequence)\n ingest_client.ingest_from_stream(str_stream, ingestion_properties=ingestion_properties)\n\n byte_sequence = b'0,00000000-0000-0000-0001-020304050607,0,0,0,0,0,0,0,0,0,0,2014-01-01T01:01:01.0000000Z,Zero,\"Zero\",0,00:00:00,,null' * 600000\n bytes_stream = io.BytesIO(byte_sequence)\n\n try:\n ingest_client.ingest_from_stream(bytes_stream, ingestion_properties=ingestion_properties)\n except KustoStreamMaxSizeExceededError:\n pass\n\n\n@pytest.mark.run(order=11)\ndef test_streaming_ingest_from_dataframe():\n from pandas import DataFrame\n\n fields = [\n \"rownumber\",\n \"rowguid\",\n \"xdouble\",\n \"xfloat\",\n \"xbool\",\n \"xint16\",\n \"xint32\",\n \"xint64\",\n \"xunit8\",\n \"xuint16\",\n \"xunit32\",\n \"xunit64\",\n \"xdate\",\n \"xsmalltext\",\n \"xtext\",\n \"xnumberAsText\",\n \"xtime\",\n \"xtextWithNulls\",\n \"xdynamicWithNulls\",\n ]\n rows = [[0, \"00000000-0000-0000-0001-020304050607\", 0.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, \"2014-01-01T01:01:01Z\", \"Zero\", \"Zero\", \"0\", \"00:00:00\", None, \"\"]]\n df = DataFrame(data=rows, columns=fields)\n ingestion_properties = IngestionProperties(database=db_name, table=table_name, dataFormat=DataFormat.CSV)\n ingest_client.ingest_from_dataframe(df, ingestion_properties)\n","sub_path":"azure-kusto-ingest/tests/e2e.py","file_name":"e2e.py","file_ext":"py","file_size_in_byte":19788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"310688277","text":"from pysite.base_route import RouteView\nfrom pysite.mixins import DBMixin\n\n\nclass PageView(RouteView, DBMixin):\n path = \"/special/all_pages\"\n name = \"special.all_pages\"\n table_name = \"wiki\"\n\n def get(self):\n pages = self.db.pluck(self.table_name, \"title\", \"slug\")\n pages = sorted(pages, key=lambda d: d.get(\"title\", \"No Title\"))\n\n letters = {}\n\n for page in pages:\n if \"title\" not in page:\n page[\"title\"] = \"No Title\"\n\n letter = page[\"title\"][0].upper()\n\n if letter not in letters:\n letters[letter] = []\n\n letters[letter].append(page)\n\n return self.render(\"wiki/special_all.html\", letters=letters)\n","sub_path":"pysite/views/wiki/special/all_pages.py","file_name":"all_pages.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"407521085","text":"import json\n\nfrom django.db import transaction\nfrom django.core.management.base import BaseCommand\nfrom django.conf import settings\n\nfrom geofr.models import Perimeter\nfrom geofr.constants import OVERSEAS_REGIONS\n\n\nDATA_PATH = '/node_modules/@etalab/decoupage-administratif/data/regions.json'\n\n\nclass Command(BaseCommand):\n \"\"\"Import the list of all regions.\"\"\"\n\n @transaction.atomic()\n def handle(self, *args, **options):\n\n france = Perimeter.objects.get(\n scale=Perimeter.TYPES.country,\n code='FRA')\n europe = Perimeter.objects.get(\n scale=Perimeter.TYPES.continent,\n code='EU')\n\n PerimeterContainedIn = Perimeter.contained_in.through\n perimeter_links = []\n\n data_file = settings.DJANGO_ROOT + DATA_PATH\n data = json.loads(data_file.read_file())\n nb_created = 0\n nb_updated = 0\n\n for entry in data:\n\n # Create or update the region perimeters\n region, created = Perimeter.objects.update_or_create(\n scale=Perimeter.TYPES.region,\n code=entry['code'],\n defaults={\n 'name': entry['nom'],\n 'is_overseas': (entry['code'] in OVERSEAS_REGIONS),\n }\n )\n if created:\n nb_created += 1\n else:\n nb_updated += 1\n\n perimeter_links.append(PerimeterContainedIn(\n from_perimeter_id=region.id,\n to_perimeter_id=europe.id))\n perimeter_links.append(PerimeterContainedIn(\n from_perimeter_id=region.id,\n to_perimeter_id=france.id))\n\n # Create the links between the regions and France / Europe\n PerimeterContainedIn.objects.bulk_create(\n perimeter_links, ignore_conflicts=True)\n\n self.stdout.write(self.style.SUCCESS(\n '%d regions created, %d updated.' % (nb_created, nb_updated)))\n","sub_path":"src/geofr/management/commands/populate_regions.py","file_name":"populate_regions.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"57913658","text":"# @author Piotr Nikiel \n\nimport sys\nimport os\nsys.path.insert(0, 'FrameworkInternals')\n\nfrom transformDesign import transformDesign\n\ndef runGenerator(className,uaoDirectory='UaoForQuasar', namespace='UaoClient'):\n output_header = os.path.join(uaoDirectory,'generated','{0}.h'.format(className))\n output_body = os.path.join(uaoDirectory,'generated','{0}.cpp'.format(className))\n additionalParam=['className={0}'.format(className), 'namespace={0}'.format(namespace)]\n transformDesign(\n xsltTransformation=os.path.join(uaoDirectory, 'xslt', 'designToClassHeader.xslt'), \n outputFile=output_header, \n requiresMerge=False, \n astyleRun=True, \n additionalParam=additionalParam)\n\n transformDesign(\n xsltTransformation=os.path.join(uaoDirectory, 'xslt', 'designToClassBody.xslt'), \n outputFile=output_body, \n requiresMerge=False, \n astyleRun=True, \n additionalParam=additionalParam)\n \ndef main():\n className = sys.argv[1]\n runGenerator(className)\n \nif __name__==\"__main__\":\n main()\n \n \n\n","sub_path":"generateClass.py","file_name":"generateClass.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"296007758","text":"# coding=utf-8\n__author__ = 'lifuxin'\n\nimport sys\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef league_info(url):\n \"\"\"\n 解析league url获取每个赛季的详细信息\n :param url: league url\n :return: 每个赛季的详细信息\n \"\"\"\n request = requests.get(url)\n tables = BeautifulSoup(request.text, \"html.parser\").find_all('table')\n for table in tables:\n if table.has_key('id'):\n head = table.find_all('tr')[1]\n headers = head.find_all('th')\n headers = [th.text for th in headers]\n columns = headers + ['seasonURL', 'lgURL']\n\n # print(columns)\n teams = pd.DataFrame(columns=columns)\n\n for key in table.find_all('tr')[2:]:\n tdValues = key.find_all('td')\n\n aURs = key.find_all('a')\n SeasonURL = ''\n lgURL = ''\n if len(aURs) >= 2:\n SeasonURL = aURs[0]['href']\n lgURL = aURs[1]['href']\n\n thValues = key.find_all('th')\n season = ''\n if len(thValues) >= 1:\n season = thValues[0].text\n\n def get_team(teamvalues):\n array = np.zeros(len(teamvalues), dtype=object)\n for i, value in enumerate(teamvalues):\n array[i] = value.text.replace('\\n', '\\t')\n return array\n\n tdTeam = np.concatenate((np.array([season]), get_team(tdValues), np.array([SeasonURL, lgURL])))\n\n if tdTeam.size != len(columns):\n continue\n teamArray = tdTeam.reshape(1, len(columns))\n team = pd.DataFrame(teamArray, columns=columns)\n teams = teams.append(team)\n\n teams = teams.set_index(columns[0])\n # print(teams)\n teams.to_csv(\"../data/league/league.details\", encoding='utf-8')\n\n\ndef main(argv):\n \"\"\"The main method for this module.\n \"\"\"\n league_info(argv[0])\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])","sub_path":"crawl/league_details.py","file_name":"league_details.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"367708548","text":"\"\"\"Plot coherence across subjects.\"\"\"\nimport numpy as np\nimport pandas as pd\n\nfrom brainpipe.system import Study\nfrom brainpipe.connectivity import concat_connect, remove_site_contact\n\nfrom visbrain.objects import (ConnectObj, SceneObj, SourceObj, BrainObj,\n ImageObj, RoiObj)\nfrom visbrain.gui import Brain\n\n\n\n###############################################################################\ncond = ('win-5120', 'REST1')\nviews = ['top', 'left']\nmin_nb_connect = 1\n###############################################################################\n\nst = Study('DMN-CORR')\ncoh_files = st.search(*cond, folder='coherence', full_path=False)\n\n\nconnect, mask, xyz, dfs = [], [], [], []\nfor f in coh_files:\n print('******************************************************************')\n print('Loading %s' % f)\n print('******************************************************************')\n suj = f.split('_')[0]\n\n # if suj == 'DB':\n # print(' SUBJECT DB IGNORED BECAUSE OF IMPLANTATION.')\n # continue\n\n # Load channels and xyz :\n print(\" Load channels, anatomy and coordinates of subject %s\" % suj)\n _chan = st.load('%s_channels.npy' % suj, folder='channels')\n _xyz = st.load('%s_xyz.npy' % suj, folder='xyz')\n xyz += [_xyz]\n dfs += [st.load('%s_anat.xlsx' % suj, folder='anatomy')]\n\n # Load coherence\n print(\" Load coherence of subject %s\" % suj)\n arch = st.load(f, folder='coherence')\n _connect, freqs = np.squeeze(arch['connect'])[..., 0].T, arch['freqs']\n connect += [np.real(_connect)]\n mask += [remove_site_contact(_connect, _chan)]\n\nprint(\"Concatenate xyz, connect, mask and anatomy\")\nxyz = np.concatenate(xyz, axis=0)\nconnect = concat_connect(connect)\nmask_contact = concat_connect(mask).astype(bool)\nmask_under = connect < .5\nmask = (mask_contact) | (mask_under)\nconnect = np.ma.masked_array(connect, mask=mask)\ndf = pd.concat(dfs)\ndf.index = pd.RangeIndex(len(df.index))\nprint(xyz.shape, connect.shape)\n\nprint(\"Get the number of connections per node :\")\nc_nb = ConnectObj('s', xyz, connect)\ndata = c_nb.get_nb_connections_per_node()[:, 1]\nc_r, labels, dfc = c_nb.analyse_connections('mist_ROI', group_by='name_ROI',\n get_centroids=True)\nxyz_r = np.array(dfc[['X', 'Y', 'Z']])\n\n\nprint(\"Remove sites with too less nb of connections per nodes\")\nrm_lines = np.where(data < min_nb_connect)[0]\nxyz = np.delete(xyz, rm_lines, axis=0)\nconnect = np.delete(connect, rm_lines, axis=0)\nconnect = np.delete(connect, rm_lines, axis=1)\nmask = np.delete(mask, rm_lines, axis=0)\nmask = np.delete(mask, rm_lines, axis=1)\ndf.drop(rm_lines, inplace=True)\ndf.index = pd.RangeIndex(len(df.index))\n\ngroups = df.groupby('name_ROI').groups\nlabels = [k for k, i in groups.items() if len(i) > 2]\nif 'Not_found' in labels:\n labels.pop(labels.index('Not found'))\nroi_obj = RoiObj('mist_ROI')\nindex = roi_obj.where_is(labels, exact=True, case_sensitive=False, union=True)\n\nconnect = np.ma.masked_array(connect, mask=mask)\n\nsc = SceneObj(bgcolor='white')\n\nfor k, v in enumerate(views):\n c_obj = ConnectObj('s', xyz, connect, antialias=True)\n data = c_obj.get_nb_connections_per_node()[:, 1]\n s_obj = SourceObj('s', xyz, alpha=.3, data=data, radius_min=0,\n radius_max=20)\n s_obj.color_sources(data=data)\n\n sc.add_to_subplot(s_obj, col=k)\n sc.add_to_subplot(c_obj, col=k)\n sc.add_to_subplot(BrainObj('B3'), rotate=v, use_this_cam=True, col=k)\n\n cr_obj = ConnectObj('r', xyz_r, c_r, antialias=True, line_width=10.)\n data_r = 
cr_obj.get_nb_connections_per_node()[:, 1]\n sr_obj = SourceObj('s', xyz_r, alpha=.3, data=data_r, radius_min=0,\n radius_max=20)\n sc.add_to_subplot(sr_obj, col=k, row=1)\n sc.add_to_subplot(cr_obj, col=k, row=1)\n sc.add_to_subplot(BrainObj('B3'), rotate=v, use_this_cam=True, col=k,\n row=1)\n\nsc.preview()\n\n","sub_path":"DMN-corr/02_plot/coherence/plot_coherence_across_subjects.py","file_name":"plot_coherence_across_subjects.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"62142900","text":"from discord.ext import commands\nimport random\n\nclass Random(commands.Cog, name=\"Random Cog\"):\n \"\"\"Receives ping commands\"\"\"\n\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n @commands.command()\n async def roll(self, ctx: commands.Context, dice: str):\n \"\"\"Checks for a response from the bot\"\"\"\n try:\n rolls = \"\"\n amount, die = dice.split(\"d\")\n for _ in range(int(amount)):\n roll = random.randint(1, int(die))\n rolls += f\"{roll} \"\n await ctx.send(rolls)\n except ValueError:\n await ctx.send(\"Dice must be in the format _d_ (example: 2d6)\")\n\ndef setup(bot: commands.Bot):\n bot.add_cog(Random(bot))","sub_path":"modules/random/cog.py","file_name":"cog.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"484283192","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 15 20:56:50 2019\n\n@author: Maureen\n\n\"\"\"\n\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import PredefinedSplit\nfrom sklearn.model_selection import GridSearchCV\n\nfrom sklearn.metrics import mean_squared_log_error\nfrom sklearn.metrics import mean_squared_error\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\n\nimport warnings\n\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\ndef index_splitter(N, fold):\n index_split = []\n test_num = int(N/fold)\n train_num = N-test_num\n\n for i in range(0,train_num):\n index_split.append(-1)\n\n for i in range(train_num,N):\n index_split.append(0)\n\n return index_split\n\n\n# Number of trees in random forest\ndef gb(X, Y, kfold=3, feature_set=None):\n \n arr = index_splitter(N = len(X), fold = kfold)\n ps = PredefinedSplit(arr)\n\n for train, test in ps.split():\n train_index = train\n test_index = test\n\n train_X, train_y = X.values[train_index,:], Y.values[train_index]\n test_X, test_y = X.values[test_index,:], Y.values[test_index]\n arr = index_splitter(N = len(train_X), fold = kfold)\n ps2 = PredefinedSplit(arr)\n \n \n gb = GradientBoostingRegressor(random_state = 42)\n print('Base parameter:')\n print(gb.get_params())\n gb.fit(train_X, train_y)\n \n\n #grid search\n lr_log = np.linspace(-8,5,14)\n\n lr = []\n for i in lr_log:\n a = math.pow(10,i)\n lr = lr + [a]\n \n n_estimators = [int(x) for x in range(20,200,20)] #[int(x) for x in np.linspace(start = 10, stop = 200, num = 50)]\n # Maximum number of levels in tree\n max_depth = [3, 5, 10, 20, 50]\n # Minimum number of samples required to split a node\n #min_samples_split = [2, 5, 10]\n # Minimum number of samples required at each leaf node\n #min_samples_leaf = [1, 2, 4]\n\n\n # Create the random grid\n grid_grid = {'learning_rate' : lr,\n 'n_estimators': n_estimators,\n 'max_depth': max_depth,\n #'min_samples_split': min_samples_split,\n #'min_samples_leaf': min_samples_leaf,\n }\n \n \n gb_grid = GridSearchCV(estimator=gb, param_grid=grid_grid, scoring='neg_mean_squared_error', cv = ps2.split(), verbose=2, n_jobs=-1)\n gb_grid.fit(train_X, train_y)\n BestPara_grid = gb_grid.best_params_\n print('Grid parameter:')\n print(gb_grid.best_params_)\n\n\n # Number of trees in random forest\n lr_unit = BestPara_grid['learning_rate']\n lr = [x for x in np.linspace(start = lr_unit, stop = lr_unit*9, num = 9)]\n \n ets_unit = BestPara_grid['n_estimators']\n n_estimators = [int(x) for x in range(ets_unit - 20, ets_unit + 20, 5)]\n \n max_depth = [BestPara_grid[\"max_depth\"]]\n \n '''\n # Minimum number of samples required to split a node\n min_samples_split = []\n for x in range(BestPara_grid[\"min_samples_split\"]-2,BestPara_grid[\"min_samples_split\"]+2,1):\n if x>1:\n min_samples_split.append(int(x))\n \n # Minimum number of samples required at each leaf node\n min_samples_leaf = []\n \n for x in range(BestPara_grid[\"min_samples_leaf\"]-1,BestPara_grid[\"min_samples_leaf\"]+1,1):\n if x>0:\n min_samples_leaf.append(int(x))\n '''\n # Create the random grid\n grid_grid2 = {'learning_rate' : lr,\n 'n_estimators': n_estimators,\n 'max_depth': max_depth,\n #'min_samples_split': min_samples_split,\n #'min_samples_leaf': min_samples_leaf,\n }\n \n gb_grid2 = GridSearchCV(estimator=gb, 
param_grid=grid_grid2, scoring='neg_mean_squared_error', cv = ps2.split(), verbose=2, n_jobs=-1)\n \n # Fit the grid search model\n gb_grid2.fit(train_X, train_y)\n BestPara_grid = gb_grid2.best_params_\n print(gb_grid2.best_params_)\n\n\n #prediction\n predict_y=gb_grid2.predict(test_X)\n predict_y_grid=gb_grid.predict(test_X)\n predict_y_base=gb.predict(test_X)\n \n \n #RMSE\n errors_baseline = np.sqrt(mean_squared_error(predict_y_base,test_y))\n errors_Grid_CV = np.sqrt(mean_squared_error(predict_y_grid,test_y))\n errors_Grid2_CV = np.sqrt(mean_squared_error(predict_y,test_y))\n\n results = [errors_baseline, errors_Grid_CV, errors_Grid2_CV]\n\n print('gradient boost results:',results)\n\n return gb_grid2.best_estimator_, results, gb_grid2.best_params_\n","sub_path":"Refactor3/model/gb_model.py","file_name":"gb_model.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"10427989","text":"import random\n\ndef multiply(numbers,val):\n if not numbers[1:]: return val*numbers[0]\n return multiply(numbers[1:],val*numbers[0])\n\nclass GridTraversal:\n\n def __init__(self, grid_file = None,dimensions=20,seed=50):\n if grid_file:\n with open('grid.txt') as grid:\n self.grid = [e.strip('\\n').split() for e in grid.readlines()]\n else:\n self.grid = self.generate_grid(dimensions,seed)\n self.seed = seed\n self.grid_coords = [(i,j) for i in range(len(self.grid)) for j in range(len(self.grid))]\n self.directions = ['up','up right','right','down right',\n 'down','down left','left','up left']\n\n def generate_grid(self,dimensions,s):\n random.seed(s)\n return [[str(random.randint(0,100)) for _ in range(dimensions)] for _ in range(dimensions)]\n\n def get_numbers(self,x,y,path,direction,of_length):\n ## print(path)\n if of_length >= len(self.grid) or of_length >= len(self.grid[0]):\n return False\n if direction == 'up' and path == [] and x >= of_length-1:\n return self.get_numbers(x-1,y,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'up' and path:\n if len(path) == of_length:\n return path\n else:\n return self.get_numbers(x-1,y,path+[self.grid[x][y]],direction,of_length)\n \n if direction == 'up right' and path == [] and x >= of_length-1 and y <= len(self.grid[-1]) - of_length:\n return self.get_numbers(x-1,y+1,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'up right' and path:\n if len(path) == of_length:\n return path\n else:\n return self.get_numbers(x-1,y+1,path+[self.grid[x][y]],direction,of_length)\n\n if direction == 'right' and path == [] and y <= len(self.grid[-1]) - of_length:\n return self.get_numbers(x,y+1,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'right' and path:\n if len(path) == of_length:\n return path\n else:\n return self.get_numbers(x,y+1,path+[self.grid[x][y]],direction,of_length)\n\n if direction == 'down right' and path == [] and x <= len(self.grid[-1]) - of_length and y <= len(self.grid[-1]) - of_length:\n return self.get_numbers(x+1,y+1,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'down right' and path:\n if len(path) == of_length:\n return path\n else:\n return self.get_numbers(x+1,y+1,path+[self.grid[x][y]],direction,of_length)\n\n if direction == 'down' and path == [] and x <= len(self.grid[-1]) - of_length:\n return self.get_numbers(x+1,y,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'down' and path:\n if len(path) == of_length:\n return path\n else:\n return self.get_numbers(x+1,y,path+[self.grid[x][y]],direction,of_length)\n\n if direction == 'down left' and path == [] and x <= len(self.grid[-1]) - of_length and y >= of_length-1:\n return self.get_numbers(x+1,y-1,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'down left' and path:\n if len(path) == of_length:\n return path\n else:\n return self.get_numbers(x+1,y-1,path+[self.grid[x][y]],direction,of_length)\n\n if direction == 'left' and path == [] and x >= of_length-1:\n return self.get_numbers(x,y-1,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'left' and path:\n if len(path) == of_length:\n return path\n else:\n return self.get_numbers(x,y-1,path+[self.grid[x][y]],direction,of_length)\n\n if direction == 'up left' and path == [] and x >= of_length-1 and y >= of_length-1:\n return self.get_numbers(x-1,y-1,path+[self.grid[x][y]],direction,of_length)\n elif direction == 'up left' and path:\n if len(path) == of_length:\n return path\n else:\n return 
self.get_numbers(x-1,y-1,path+[self.grid[x][y]],direction,of_length)\n\n def get_all_possible_paths(self,length=4):\n paths_dict = {}\n for cell in self.grid_coords:\n x,y = cell\n directions_dict = {}\n for d in self.directions:\n path = self.get_numbers(x,y,[],d,length)\n directions_dict[d] = path\n paths_dict[(self.grid[cell[0]][cell[1]],cell)] = directions_dict\n return paths_dict\n\n def print_current_grid(self):\n with open('grid_'+str(self.seed)+'.txt','w') as grid:\n for line in self.grid:\n print(' '.join(line),file=grid)\n\ntraversal = GridTraversal(dimensions=250)\npossible_paths = traversal.get_all_possible_paths(length=86)\nmax_product = max([(multiply([int(i) for i in e],1),e,k1[1],k2) for k1,v in possible_paths.items() for k2,e in v.items() if e],key=lambda x:x[0])\n\nprint('The max product is: ',max_product[0])\nprint('The path is: ',max_product[1])\nprint('The start position is: ',max_product[2])\nprint('In the direction: ',max_product[3])\n","sub_path":"Question 11/q11.py","file_name":"q11.py","file_ext":"py","file_size_in_byte":5300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"38168891","text":"from CryptoPayment import CryptoPayment, convert\nfrom tinydb import TinyDB, where\nimport time\nimport threading\n\nif __name__ == '__main__':\n cch = CryptoPayment(test=True)\n cch.clearDB() \n cch.wallet_refresh_time = 0\n test_wallet_type = 'litecoin'\n\n test_results = {\n 'creating_wallets': None,\n 'deleting_wallets': None,\n 'creating_transactions': None,\n 'deleting_transactions': None,\n 'checking_transactions': None,\n 'display_tethered_transactions': None\n\n }\n # user may add wallets to the system\n wallet_list = ['0x1231kasd', '0x231231skdasd', '0x1231kasdcxzcas234']\n for wallet in wallet_list:\n try:\n cch.addWallet(wallet, test_wallet_type)\n except:\n pass\n \n\n test_wallet_list = cch.showWallets()\n if len(wallet_list) == len(test_wallet_list):\n print('!!creates wallets as usual')\n test_results['creating_wallets'] = True\n else:\n print(f'!#!expected to get {len(wallet_list)} instead of {len(test_wallet_list)}')\n test_results['creating_wallets'] = False\n\n # new transaction check\n workers_list = []\n for t in range(2):\n for i in range(3):\n worker = threading.Thread(target=cch.newTransaction, args=(test_wallet_type, 1.0242134))\n worker.start()\n workers_list.append(worker)\n for worker in workers_list:\n worker.join()\n\n transactions = cch.active_transactions.all()\n transactions_failures = 0\n for transaction in transactions:\n if len(list(filter( lambda x: x['protocol_units'] == transaction['protocol_units'] and x['wallet_adr'] == transaction['wallet_adr'], transactions))) != 1:\n print('number of transactions are not equal to 1')\n transactions_failures+=1\n \n if transactions_failures == 0:\n print('!!transactions are creating succesfully')\n test_results['creating_transactions'] = True\n else:\n print('!#!some troubles happend on creating transactions')\n test_results['creating_transactions'] = False\n\n # transactions may be aborted\n previous_amount_of_transactions = len(transactions)\n cch.deleteTransaction(transactions[0]['pk'])\n transactions = cch.active_transactions.all()\n if previous_amount_of_transactions > len(transactions):\n print('!!removes transactions as usual')\n test_results['deleting_transactions'] = True\n else:\n print('!!got an error while was removing transactions')\n test_results['deleting_transactions'] = False\n\n #user may remove wallets from the system\n cch.deleteWallet(wallet_list[0])\n test_wallet_list = cch.showWallets()\n if len(wallet_list) == len(test_wallet_list)+1:\n print('!!removes wallets as usual')\n test_results['deleting_wallets'] = True\n\n else:\n print(f'expected to get {len(wallet_list)} instead of {len(test_wallet_list)+1}')\n test_results['deleting_wallets'] = False\n\n #check transaction\n transaction = cch.active_transactions.all()[0]\n result1 = cch.checkActiveTransaction(transaction['pk'])\n time.sleep(1)\n if len( cch.finished_transactions.search(where('wallet_adr') == transaction['wallet_adr']) ) != 0:\n print(f'some of the transactions were marked as finished, none of the transactions supposed to be finished')\n # check if any of the current transaction is finished\n result2 = cch.checkActiveTransaction(transaction['pk'])\n if result1 is False and result2 is True:\n print(f'!!checking transaction test passed')\n else:\n print(f'!#!checking transaction test failed')\n time.sleep(2)\n\n # add a new transaction\n test_protocol_units = convert(test_wallet_type, protocol_units=cch.checker.test_transactions[-1]['balance_change'])\n test_new_transaction = 
cch.newTransaction(test_wallet_type, test_protocol_units)\n # check it, must be false\n transaction_result = cch.checkActiveTransaction(test_new_transaction['pk'])\n \n \n # showTetheredTransactions\n results = cch.showTetheredTransactions(test_new_transaction['wallet_adr'],test_wallet_type)\n if len(list(filter(lambda transaction: transaction['tethered_transaction'] != [], results))) > 0:\n print(f'showTetheredTransactions seems to be working as usual')\n else:\n print(f'showTetheredTransactions seems to have some troubles')\n\n # erase db\n cch.clearDB() \n print('tests finished')\n print(test_results)\n\n# test_transaction = {\"currency\":\"usdt\", 'amount': 100.00}\n# test_transaction2 = {\"currency\":\"usdt\", 'amount': 102.00}\n# test_results = []\n# for i in range(12):\n# test_results.append(registerReplenish(test_transaction) )\n# for i in range(14):\n# test_results.append(registerReplenish(test_transaction2) )\n# for i in test_results:\n# print(i)\n\n# deleteWallet(wallet_adr='', wallet_type=''):\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"321958498","text":"#!/usr/bin/python3\n\"\"\" Contains a function that divides all elements of a matrix. \"\"\"\n\n\ndef matrix_divided(matrix, div):\n \"\"\" Divides all elements of a matrix \"\"\"\n if type(matrix) is not list:\n raise TypeError(\"matrix must be a matrix \\\n(list of lists) of integers/floats\")\n for e in matrix:\n if type(e) is not list:\n raise TypeError(\"matrix must be a matrix \\\n(list of lists) of integers/floats\")\n l = len(matrix[0])\n if len(e) != l:\n raise TypeError(\"Each row of the matrix must have the same size\")\n for i in e:\n if type(i) is not int and type(i) is not float:\n raise TypeError(\"matrix must be a matrix \\\n(list of lists) of integers/floats\")\n if type(div) is not int and type(div) is not float:\n raise TypeError(\"div must be a number\")\n if div == 0:\n raise ZeroDivisionError(\"division by zero\")\n\n return list(map(lambda y: list(map(lambda\n x: round(x / div, 2), y)), matrix))\n","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"307480494","text":"import pygame\nimport Game\nimport time\n\n\nclass SoundSystem:\n def __init__(self):\n self.volume = 100\n\n self.Startup = 'res/Sounds/Startup.wav'\n\n self.Music = 'res/Sounds/Music.wav'\n\n self.Attack = 'res/Sounds/Attack.wav'\n self.Mine = 'res/Sounds/Mine.wav'\n self.Moving = 'res/Sounds/Moving.wav'\n self.Error = 'res/Sounds/Error.wav'\n self.Click = 'res/Sounds/Click.wav'\n self.Victory = 'res/Sounds/Victory.wav'\n self.Card = 'res/Sounds/Card.wav'\n self.Mine_disabled = 'res/Sounds/Mine_disabled.wav'\n self.Heal = 'res/Sounds/Heal.wav'\n self.Emp = 'res/Sounds/Emp.wav'\n self.Fuel = 'res/Sounds/Fuel.wav'\n self.Place_mine = 'res/Sounds/Place_mine.wav'\n self.Ship_destroyed = 'res/Sounds/Ship_destroyed.wav'\n\n self.Music = 'res/Sounds/Music.wav'\n\n def play(self, sound):\n Game.Thread.create(self.play_asynch, [sound])\n\n def play_asynch(self, sound):\n sound = pygame.mixer.Sound(sound)\n sound.set_volume(self.volume / 100)\n sound.play()\n while pygame.mixer.get_busy() and not Game.EXIT:\n time.sleep(1 / 30)\n sound.set_volume(self.volume / 100)\n\n def loop(self, sound, t):\n Game.Thread.create(self.loop_asynch, [sound, t])\n\n def loop_asynch(self, sound, t):\n sound = pygame.mixer.Sound(sound)\n sound.set_volume(self.volume / 100)\n sound.play(-1)\n while pygame.mixer.get_busy() and not Game.EXIT:\n time.sleep(1 / 30)\n sound.set_volume(self.volume / 100)\n if t():\n sound.fadeout(1 * 1000)\n\n def getVolume(self):\n return self.volume\n\n def setVolume(self, volume):\n self.volume = volume\n","sub_path":"Sound/SoundSystem.py","file_name":"SoundSystem.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"206648727","text":"from django.urls import path\n\n\nfrom . import views\n\n\n#specifying app name (it can be used as 'football:homepage')\n\napp_name='football'\n\nurlpatterns = [\n path('' , views.HomePageView.as_view() , name='homepage'),\n path('players/' , views.PlayerList.as_view() , name='players'),\n path('clubs/' ,views.ClubList.as_view() ,name='clubs'),\n \n\n]\n\n\n","sub_path":"football/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"301367114","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('text', models.TextField(null=True)),\n ('pub_date', models.DateTimeField(verbose_name='date published', null=True)),\n ('moderation_requered', models.BooleanField(default=False)),\n ('author', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Guestbook',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('name', models.CharField(max_length=200)),\n ('url', models.SlugField()),\n ('premoderation', models.BooleanField(default=True)),\n ('pub_date', models.DateTimeField(verbose_name='date published', null=True)),\n ('author', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),\n ],\n ),\n migrations.AddField(\n model_name='comment',\n name='guestbook',\n field=models.ForeignKey(to='guestbook.Guestbook'),\n ),\n ]\n","sub_path":"guestbook_holder/guestbook/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"504180260","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 22 12:55:06 2017\n\n@author: PaulJ\n\"\"\"\nimport pandas as pd\nimport os\n\n\ndef findAllZeroDistFiles(aDir=\".\",\n dataSaveFile=\"filesWithDist.csv\",\n verbose=False):\n allExFilesDistSummary = pd.DataFrame()\n noFiles = len(os.listdir(aDir))\n printInc = int(noFiles/10)\n lineNo = 0\n for sourcefilename in os.listdir(aDir):\n lineNo = lineNo + 1\n if lineNo % printInc == 0:\n print(\"Summarizing distance in file\", lineNo, \"of\", noFiles, end=\"\")\n print(\":\", sourcefilename)\n sourceDirFilename = aDir + \"\\\\\\\\\" + sourcefilename\n if not sourcefilename[-4:].lower()==\".csv\":\n continue\n #print(\"Distance summing\", sourcefilename)\n theExHist = pd.read_csv(sourceDirFilename,\n index_col=False)\n totdist = float(theExHist.iloc[len(theExHist)-1]['distance'])\n \n fileAndDist = pd.Series(data=[sourceDirFilename, totdist],\n index=[\"filename\", \"distance\"])\n allExFilesDistSummary = allExFilesDistSummary.append(fileAndDist,\n ignore_index=True)\n \n allExFilesDistSummary = allExFilesDistSummary.sort_values(by='distance',\n ascending=True)\n \n zeroCount = len(allExFilesDistSummary[allExFilesDistSummary.distance == 0])\n \n allExFilesDistSummary.to_csv(path_or_buf=dataSaveFile)\n \n zeroDistFileList = allExFilesDistSummary[allExFilesDistSummary.distance == 0]\n zeroDistFileList = zeroDistFileList.sort_values(by='filename',\n ascending=True)\n \n zeroDataSaveFilename = dataSaveFile[:-4] + \"_Zero.csv\"\n zeroDistFileList.to_csv(path_or_buf=zeroDataSaveFilename)\n \n return len(allExFilesDistSummary), zeroCount\n\n\nif __name__ == '__main__':\n totalFiles, zeroFiles = findAllZeroDistFiles(aDir=\"ExerciseData\",\n dataSaveFile=\"FilesWithDist_try3.csv\",\n verbose=True)\n print(totalFiles, \"files checked for distance.\",\n zeroFiles, \"found with zero distance\")","sub_path":"Archive Code/FindZeroDistExerciseFiles.py","file_name":"FindZeroDistExerciseFiles.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"148909985","text":"import logging\nimport os\nimport pickle\nimport sys\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\n\ntry:\n from .config import get_config\n from .SoSmodel import SoSModel\n from .session_sequence import create_dataset\n from .session_iterator import BatchIterator\nexcept SystemError: # pragma: no cover\n from config import get_config\n from SoSmodel import SoSModel\n from session_sequence import create_dataset\n from session_iterator import BatchIterator\n\n\nlogging.basicConfig(level=logging.INFO)\ntf.logging.set_verbosity(tf.logging.ERROR)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\nif __name__ == '__main__':\n logger = logging.getLogger(__name__)\n try:\n if 'LOG_LEVEL' in os.environ and os.environ['LOG_LEVEL'] != '':\n logger.setLevel(os.environ['LOG_LEVEL'])\n except Exception as e:\n logger.error(\n 'Unable to set logging level because: {0} defaulting to INFO.'.format(str(e)))\n\n # Load info from config\n config = get_config()\n time_const = config['time constant']\n rnn_size = config['rnn size']\n labels = config['labels']\n\n # Path to training data\n data_dir = sys.argv[1]\n # Create the training data\n if len(sys.argv) == 3:\n data = create_dataset(data_dir, time_const)\n write_dir = sys.argv[2]\n logger.info('Saving data to %s', write_dir)\n with open(write_dir, 'wb') as handle:\n pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n data = data_dir\n\n logger.info('Loaded training data')\n # Create an iterator\n iterator = BatchIterator(\n data,\n labels,\n perturb_types=['random data', 'port swap', 'direction_swap']\n )\n logger.info('Created iterator')\n rnnmodel = SoSModel(rnn_size=100, label_size=len(labels))\n logger.info('Created model')\n try:\n rnnmodel.load('/models/SoSmodel')\n logger.info('Loaded model')\n except Exception as e:\n rnnmodel.initialize()\n logger.info('Initialized model')\n\n X_v, L_v, Y_v = iterator.gen_batch(\n split='validation',\n batch_size=64\n )\n\n cost = rnnmodel.get_cost(X_v, L_v, Y_v)\n out = rnnmodel.get_output(X_v, L_v)\n\n logger.info('Initial validation cost: %s', np.mean(cost))\n min_cost = cost\n last_save = 0\n for i in range(100000):\n tick = time.clock()\n X, L, Y = iterator.gen_batch(\n split='train',\n batch_size=64\n )\n tock = time.clock()\n _ = rnnmodel.train_on_batch(X, L, Y)\n if (i+1) % 100 == 0:\n cost = rnnmodel.get_cost(X_v, L_v, Y_v)\n logger.info('Validation cost after %s batches: %s', i, cost)\n if cost < min_cost:\n min_cost = cost\n rnnmodel.save('/new_models/SoSmodel')\n last_save = 0\n logger.info('Saving model at validation cost %s', cost)\n else:\n last_save += 100\n if last_save > 1000:\n logger.info('No improvement after 1000 iterations. Stopping.')\n break\n","sub_path":"utils/train_SoSModel.py","file_name":"train_SoSModel.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"103460620","text":"import pygame\n\nclass MusicApp:\n\n def __init__(self, *args):\n self.playlist = []\n self.songs = []\n for song in args:\n self.songs.append(song)\n pygame.mixer.init(frequency=26050,size=-16,channels=2,buffer=4096)\n self.offset = 0\n self.play = False\n self.playable = False\n self.pos = 0\n self.offset = 0\n self.length = 1\n self.songName = None\n\n def playSong(self,song: str,pos:int):\n pygame.mixer.music.stop()\n pygame.mixer.music.play(song,pos)\n if self.playing:\n pygame.mixer.music.pause()\n self.songName = song\n self.offset = pos*1000\n\n\n def addPlaylist(self,song: str):\n if song in self.songs:\n self.playlist.append(song)\n if self.playable == False:\n self.playSong(str)\n self.playable = True\n return True\n return False\n\n def removePlaylist(self,song: str):\n if len(self.playlist) != 0:\n if song in self.playlist:\n pos = self.playlist.index(song)\n self.playlist.remove(song)\n if pos == self.pos:\n pygame.mixer.music.stop()\n self.offset = 0\n pygame.mixer.music.play(self.playlist[self.pos])\n if not self.playing:\n pygame.mixer.music.pause()\n return True\n\n def togglePlay(self):\n if self.playable:\n if self.play:\n pygame.mixer.music.pause()\n self.play = False\n else:\n pygame.mixer.music.unpause()\n self.play = True\n\n\n def skipAhead(self):\n if self.playable:\n if self.pos + 1 < len(self.playlist):\n self.pos += 1\n pygame.mixer.music.stop()\n pygame.mixer.music.play(self.playlist[self.pos])\n if not self.play:\n pygame.mixer.music.pause()\n\n\n def seek(self,pos: float):\n if self.songName is not None:\n self.playSong(self.songName)\n if not self.play:\n pygame.mixer.music.pause()\n self.offset = pos*1000\n\n\n","sub_path":"MusicApp.py","file_name":"MusicApp.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"305493634","text":"# -*- coding: iso-8859-1 -*-\n# Maintainer: joaander\n\nfrom hoomd import *\nfrom hoomd import md;\ncontext.initialize()\nimport unittest\nimport os\n\n# tests md.bond.harmonic\nclass bond_harmonic_tests (unittest.TestCase):\n def setUp(self):\n print\n self.s = init.read_gsd(os.path.join(os.path.dirname(__file__),'test_data_polymer_system.gsd'));\n context.current.sorter.set_params(grid=8)\n\n # test to see that se can create a md.force.constant\n def test_create(self):\n md.bond.harmonic();\n\n # test setting coefficients\n def test_set_coeff(self):\n harmonic = md.bond.harmonic();\n harmonic.bond_coeff.set('polymer', k=1.0, r0=1.0)\n all = group.all();\n md.integrate.mode_standard(dt=0.005);\n md.integrate.nve(all);\n run(100);\n\n # test coefficient not set checking\n def test_set_coeff_fail(self):\n harmonic = md.bond.harmonic();\n all = group.all();\n md.integrate.mode_standard(dt=0.005);\n md.integrate.nve(all);\n self.assertRaises(RuntimeError, run, 100);\n\n # test remove particle fails\n def test_bond_fail(self):\n harmonic = md.bond.harmonic();\n harmonic.bond_coeff.set('polymer', k=1.0, r0=1.0)\n all = group.all();\n md.integrate.mode_standard(dt=0.005);\n md.integrate.nve(all);\n # remove a particle\n del(self.s.particles[0])\n if comm.get_num_ranks() == 1:\n self.assertRaises(RuntimeError, run, 100);\n else:\n # in MPI simulations, we cannot check for an assertion during a simulation\n # the program will terminate with MPI_Abort\n #self.assertRaises(RuntimeError, run, 100);\n pass\n\n # test adding a dimer\n def test_add_dimer(self):\n harmonic = md.bond.harmonic();\n harmonic.bond_coeff.set('polymer', k=1.0, r0=1.0)\n all = group.all();\n md.integrate.mode_standard(dt=0.005);\n md.integrate.nve(all);\n t0 = self.s.particles.add('A')\n t1 = self.s.particles.add('B')\n self.s.bonds.add('polymer',t0,t1)\n run(100)\n\n # test exclusions in neighbor list\n def test_exclusions(self):\n harmonic = md.bond.harmonic();\n harmonic.bond_coeff.set('polymer', k=1.0, r0=1.0)\n nl = md.nlist.cell()\n lj = md.pair.lj(r_cut=3.0, nlist = nl)\n lj.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0);\n lj.pair_coeff.set('A', 'B', epsilon=1.0, sigma=1.0);\n lj.pair_coeff.set('B', 'B', epsilon=1.0, sigma=1.0);\n all = group.all();\n md.integrate.mode_standard(dt=0.005);\n md.integrate.nve(all);\n run(100)\n\n self.assertEqual(nl.cpp_nlist.getNumExclusions(2), (17*100+2*10))\n self.assertEqual(nl.cpp_nlist.getNumExclusions(1), (2*100+2*10))\n\n # delete bonds connected to a particle\n tags = []\n for b in self.s.bonds:\n if b.a == 2 or b.b == 2:\n tags.append(b.tag)\n\n for t in tags:\n self.s.bonds.remove(t)\n\n # delete particle\n self.s.particles.remove(2)\n\n run(100)\n\n self.assertEqual(nl.cpp_nlist.getNumExclusions(2), (17*100+2*10)-3)\n self.assertEqual(nl.cpp_nlist.getNumExclusions(1), (2*100+2*10)+2)\n del nl\n del lj\n del harmonic\n\n def tearDown(self):\n del self.s\n context.initialize();\n\nif __name__ == '__main__':\n unittest.main(argv = ['test.py', '-v'])\n","sub_path":"hoomd/md/test-py/test_bond_harmonic.py","file_name":"test_bond_harmonic.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"128424713","text":"import re\nimport pydwarf\nfrom raws import *\nfrom settings import exportsettings as settings\nfrom utils import copytree\n\n# Actually run the program\ndef __main__():\n \n pydwarf.log.info('Running PyDwarf %s.' % pydwarf.__version__)\n if settings.dfversion is not None:\n pydwarf.log.info('Managing Dwarf Fortress version %s.' % settings.dfversion)\n else:\n pydwarf.log.error('No Dwarf Fortress version was specified in settings. Scripts will be run regardless of their indicated compatibility.')\n \n if os.path.exists(settings.rawsdir):\n \n if settings.backup and settings.backupdir:\n pydwarf.log.info('Backing up raws to %s...' % settings.backupdir)\n copytree(settings.rawsdir, settings.backupdir)\n else:\n pydwarf.log.warning('Proceeding without backing up raws.')\n \n pydwarf.log.info('Reading raws from %s...' % settings.rawsdir)\n r = raws().read(settings.rawsdir, pydwarf.log)\n \n pydwarf.log.info('Running scripts...')\n for script in settings.runscripts:\n pydwarf.log.debug('Handling script %s...' % script)\n \n urist = None\n scriptname = None\n scriptfunc = None\n scriptargs = None\n if isinstance(script, tuple) or isinstance(script, list):\n scriptargs = script[1]\n script = script[0]\n elif isinstance(script, dict):\n scriptname = script.get('name')\n scriptargs = script.get('args')\n scriptmatch = script.get('match')\n scriptignoreversion = script.get('ignore_df_version')\n checkversion = None if scriptignoreversion else settings.dfversion\n candidates = pydwarf.urist.get(scriptname, version=checkversion, match=scriptmatch)\n if candidates and len(candidates):\n urist = candidates[0]\n scriptname = urist.name\n if len(candidates) > 1: pydwarf.log.warning('More than one fitting script has been specified, using a best guess.') \n elif callable(script):\n scriptname = script.__name__\n scriptfunc = script\n else:\n scriptname = script\n candidates = pydwarf.urist.get(scriptname, version=settings.dfversion)\n if candidates and len(candidates):\n urist = candidates[0]\n scriptname = urist.name\n if len(candidates) > 1: pydwarf.log.warning('More than one fitting script has been specified, using a best guess.')\n if urist and scriptfunc is None:\n scriptfunc = urist.fn\n \n if scriptfunc:\n scriptinfo = 'Running script %s' % scriptname\n if scriptargs: scriptinfo = '%s with args %s' % (scriptinfo, scriptargs)\n pydwarf.log.info('%s...' % scriptinfo)\n \n try:\n response = scriptfunc(r, **scriptargs) if scriptargs else scriptfunc(r)\n if response:\n success = response.get('success')\n status = response['status'] if 'status' in response else ('Script %s ran %ssuccessfully.' % (scriptname, '' if success else 'un'))\n pydwarf.log.info('%s: %s' % ('SUCCESS' if success else 'FAILURE', status))\n else:\n pydwarf.log.error('Received no response from script %s.' % scriptname)\n except Exception:\n pydwarf.log.exception('Unhandled exception while running script %s.' % scriptname)\n else:\n pydwarf.log.info('Finished running script %s.' % scriptname)\n\n else:\n pydwarf.log.error('Failed to retrieve script %s.' % scriptname)\n \n outputdir = settings.outputdir if settings.outputdir else settings.rawsdir\n pydwarf.log.info('Writing changes to raws to %s...' 
% outputdir)\n if not os.path.exists(outputdir): os.makedirs(outputdir)\n r.write(outputdir, pydwarf.log)\n \n pydwarf.log.info('All done!')\n \n else:\n pydwarf.log.info('Specified raws directory does not exist.')\n\nif __name__ == \"__main__\":\n __main__()\n","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"306649830","text":"# -*- coding: utf-8 -*-\r\n\r\nimport json\r\nimport urllib.request as urllib2\r\nfrom bs4 import BeautifulSoup\r\nimport mysql\r\nimport datetime\r\nimport numpy as np\r\n\r\nresult_json_file = 'result_170816.json'\r\nwith open(result_json_file, 'r') as f:\r\n data = json.load(f)\r\n\r\nconnection = mysql.mysql_ctrl()\r\n# 使用 cursor() 方法创建一个游标对象 cursor\r\ncursor = connection.cursor()\r\n\r\n# 列出用户表\r\ncursor.execute('select id,user_login from wp_users')\r\nauthors = list(cursor.fetchall())\r\n\r\n# 列出最大 post_author_id\r\ncursor.execute('select max(id) from wp_users')\r\nmax_user_id = cursor.fetchall()[0][0]\r\n\r\n# 列出最大 post_id\r\ncursor.execute('select max(id) from wp_posts')\r\nmax_post_id = cursor.fetchall()[0][0]\r\nif max_post_id is None:\r\n max_post_id = 0\r\n\r\n# 列出现有文章的微信url\r\ncursor.execute('select guid from wp_posts where guid like \"http://mp.weixin.qq.com/%\"')\r\npost_url = list(cursor.fetchall())\r\n\r\n# 初始化一些变量\r\npost_author_id = 0\r\npost_id = max_post_id\r\n\r\n#for i in range(0, 1):\r\nfor i in range(0, len(data)):\r\n # 读取 Json\r\n url = data[i]['url']\r\n cover_image_url = data[i]['cover_image_url']\r\n digest = data[i]['digest']\r\n source_url = data[i]['source_url']\r\n title = data[i]['title']\r\n category = data[i]['category']\r\n\r\n # 判断是否已存在文章\r\n if np.in1d(url, post_url):\r\n print(str(i) + ':文章已存在,跳过')\r\n continue\r\n\r\n # 爬取内容\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) '\\\r\n 'Gecko/20091201 Firefox/3.5.6'}\r\n try:\r\n req = urllib2.Request(url, headers=headers)\r\n except:\r\n print('----------读取URL失败,跳到下一条----------')\r\n continue\r\n content = urllib2.urlopen(req).read()\r\n soup = BeautifulSoup(content, 'html.parser')\r\n\r\n try:\r\n post_date = soup.find('em', {'id': 'post-date'}).string.strip()\r\n except:\r\n print('----------找不到元素,可能文章被屏蔽----------')\r\n continue\r\n\r\n description = \"暂无详细信息\"\r\n try:\r\n description = soup.find('div', {'id': 'js_profile_qrcode'})\\\r\n .find('div', {'class': 'profile_inner'})\\\r\n .findAll('p', {'class':'profile_meta'})[1]\\\r\n .find('span', {'class':'profile_meta_value'}).string.strip()\r\n except:\r\n print('----------找不到公众号描述信息----------')\r\n\r\n all_date = datetime.datetime.strptime(post_date, \"%Y-%m-%d\")\r\n post_author_name = soup.find('a',{'id': 'post-user'}).string.strip() #公众号名称\r\n try:\r\n wechat_id = soup.find('div',{'id': 'js_profile_qrcode'})\\\r\n .find('div',{'class': 'profile_inner'})\\\r\n .findAll('p',{'class':'profile_meta'})[0]\\\r\n .find('span',{'class':'profile_meta_value'}).string.strip()\r\n user_nicename = wechat_id\r\n except:\r\n wechat_id = post_author_name\r\n user_nicename = ''\r\n\r\n # 判断公众号是否存在于 wp_users\r\n new_author = 'yes'\r\n for a in authors:\r\n # 如果存在就获取 id\r\n if a[1] == wechat_id:\r\n post_author_id = a[0]\r\n new_author = 'no'\r\n\r\n # 如果不存在就寻找最大id然后增加一位,写入 wp_users 表\r\n if new_author == 'yes':\r\n add_author_sql = '''INSERT INTO wp_users \r\n (id,user_login,user_pass,user_nicename,user_registered,display_name)\r\n VALUES (%s,%s,%s,%s,%s,%s)'''\r\n post_author_id = max_user_id + 1\r\n max_user_id = post_author_id\r\n if user_nicename == '':\r\n user_nicename = str(post_author_id)\r\n user_registered = datetime.datetime.now().strftime('%Y-%m-%d')\r\n add_author_value = [post_author_id,wechat_id,'$P$BNVjZktJh7.E2nhYoEjn4RS4.rfdML/',user_nicename,user_registered,post_author_name]\r\n # 执行sql语句\r\n cursor.execute(add_author_sql,add_author_value)\r\n # 提交到数据库执行 \r\n 
connection.commit()\r\n # 把新增用户加入元组\r\n authors.append((post_author_id,wechat_id))\r\n\r\n # 新增账号描述\r\n add_description_sql = 'INSERT INTO wp_usermeta (user_id,meta_key,meta_value) VALUES (%s,%s,%s)'\r\n add_description_value = [post_author_id,'description',description]\r\n cursor.execute(add_description_sql,add_description_value)\r\n connection.commit()\r\n\r\n print(\"----------新增作者:\" + wechat_id + ':' + description + '----------')\r\n\r\n\r\n # 正文处理\r\n post_content = ''\r\n for child in soup.find('div',{'id': 'js_content'}).children:\r\n post_content = post_content + str(child)\r\n soup_post_content = BeautifulSoup(post_content, 'html.parser')\r\n for image in soup_post_content.findAll('img',{'data-src':True}):\r\n data_src = image['data-src']\r\n image['src'] = data_src\r\n post_content = str(soup_post_content)\r\n url_prefix = 'http://read.html5.qq.com/image?src=forum&q=5&r=0&imgflag=7&imageUrl='\r\n post_content.replace('http://mmbiz.qpic.cn/', url_prefix + 'http://mmbiz.qpic.cn/')\r\n \r\n # 写入数据库\r\n post_id = post_id + 1\r\n post_author = post_author_id\r\n post_date = all_date\r\n post_date_gmt = all_date\r\n post_title = soup.find('h2', {'id': 'activity-name'}).string.strip()\r\n post_excerpt = digest\r\n post_status = 'publish'\r\n comment_status = 'open'\r\n ping_status = 'open'\r\n post_password = ''\r\n post_name = ''\r\n to_ping = ''\r\n pinged = ''\r\n post_modified = all_date\r\n post_modified_gmt = all_date\r\n post_content_filtered = ''\r\n post_parent = 0\r\n guid = url\r\n menu_order = 0\r\n post_type = 'post'\r\n post_mime_type = ''\r\n comment_count = 0\r\n\r\n add_wp_posts = '''INSERT INTO wp_posts(\r\n id,\r\n post_author,\r\n post_date,\r\n post_date_gmt,\r\n post_content,\r\n post_title,\r\n post_excerpt,\r\n post_status,\r\n comment_status,\r\n ping_status,\r\n post_password,\r\n post_name,\r\n to_ping,\r\n pinged,\r\n post_modified,\r\n post_modified_gmt,\r\n post_content_filtered,\r\n post_parent,\r\n guid,\r\n menu_order,\r\n post_type,\r\n post_mime_type,\r\n comment_count\r\n )\r\n VALUES \r\n (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'''\r\n\r\n add_value = [\r\n post_id,\r\n post_author,\r\n post_date,\r\n post_date_gmt,\r\n post_content,\r\n post_title,\r\n post_excerpt,\r\n post_status,\r\n comment_status,\r\n ping_status,\r\n post_password,\r\n post_name,\r\n to_ping,\r\n pinged,\r\n post_modified,\r\n post_modified_gmt,\r\n post_content_filtered,\r\n post_parent,\r\n guid,\r\n menu_order,\r\n post_type,\r\n post_mime_type,\r\n comment_count\r\n ]\r\n cursor.execute(add_wp_posts,add_value)\r\n connection.commit()\r\n\r\n # 添加类别\r\n add_wp_term = 'INSERT INTO wp_term_relationships(object_id, term_taxonomy_id, term_order) VALUES (%s,%s,%s)'\r\n wp_term_value = [post_id,category,0]\r\n cursor.execute(add_wp_term,wp_term_value)\r\n update_category_count = 'UPDATE wp_term_taxonomy SET count=count+1 WHERE term_taxonomy_id = %s'\r\n cursor.execute(update_category_count,category)\r\n connection.commit()\r\n\r\n # 插入图片,guid 关联 cover_image_url\r\n post_id = post_id + 1\r\n post_author = post_author_id\r\n post_date = all_date\r\n post_date_gmt = all_date\r\n post_content = ''\r\n post_title = '1'\r\n post_excerpt = ''\r\n post_status = 'inherit'\r\n comment_status = 'open'\r\n ping_status = 'closed'\r\n post_password = ''\r\n post_name = '1'\r\n to_ping = ''\r\n pinged = ''\r\n post_modified = all_date\r\n post_modified_gmt = all_date\r\n post_content_filtered = ''\r\n post_parent = 0\r\n guid = url_prefix + cover_image_url\r\n 
menu_order = 0\r\n post_type = 'attachment'\r\n post_mime_type = 'image/jpeg'\r\n comment_count = 0\r\n\r\n cover_image_value = [\r\n post_id,\r\n post_author,\r\n post_date,\r\n post_date_gmt,\r\n post_content,\r\n post_title,\r\n post_excerpt,\r\n post_status,\r\n comment_status,\r\n ping_status,\r\n post_password,\r\n post_name,\r\n to_ping,\r\n pinged,\r\n post_modified,\r\n post_modified_gmt,\r\n post_content_filtered,\r\n post_parent,\r\n guid,\r\n menu_order,\r\n post_type,\r\n post_mime_type,\r\n comment_count\r\n ]\r\n cursor.execute(add_wp_posts,cover_image_value)\r\n connection.commit()\r\n\r\n # 在 wp_postmeta 关联图片和文章\r\n add_wp_postmeta = 'insert into wp_postmeta (post_id,meta_key,meta_value) values (%s,%s,%s)'\r\n postmeta_value = [post_id,'_wp_attached_file',url_prefix + cover_image_url]\r\n cursor.execute(add_wp_postmeta,postmeta_value)\r\n\r\n add_wp_postmeta = 'insert into wp_postmeta (post_id,meta_key,meta_value) values (%s,%s,%s)'\r\n postmeta_value = [post_id-1,'_thumbnail_id',post_id]\r\n cursor.execute(add_wp_postmeta,postmeta_value)\r\n\r\n connection.commit()\r\n\r\n print('写入第'+ str(i) +'篇文章')\r\n\r\n# 关闭数据库连接\r\nconnection.close()","sub_path":"data/json/wechat.py","file_name":"wechat.py","file_ext":"py","file_size_in_byte":9306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"521815699","text":"\nimport sys\nimport os\nimport logging\nimport importlib\nfrom fnmatch import fnmatch\nimport pulse.vendor.yaml as yaml\n\nfrom . import core\n\n\n__all__ = [\n 'BuildActionLoader',\n]\n\nLOG = logging.getLogger(__name__)\n\n\ndef _isSamePythonFile(fileA, fileB):\n return (os.path.normpath(os.path.splitext(fileA)[0]) ==\n os.path.normpath(os.path.splitext(fileB)[0]))\n\n\nclass BuildActionLoader(object):\n\n def loadActionConfig(self, name, configFile):\n \"\"\"\n Load and return the config data for a BuildAction class.\n\n Args:\n name (str): The name of the BuildAction for which to load a config\n configFile (str): The path to the BuildAction config file\n\n Returns:\n A dict representing the config data for the named BuildAction\n \"\"\"\n if not os.path.isfile(configFile):\n LOG.warning(\"Config file not found: {0}\".format(configFile))\n return False\n\n with open(configFile, 'rb') as fp:\n config = yaml.load(fp.read())\n\n if config and (name in config):\n actionConfig = config[name]\n actionConfig['configFile'] = configFile\n return actionConfig\n\n LOG.warning(\"No BuildAction config data for {0} \"\n \"was found in {1}\".format(name, configFile))\n\n def loadActionsFromModule(self, module):\n \"\"\"\n Return BuildStep type map data for all BuildActions\n contained in the given module\n\n Returns:\n A list of tuples containing (dict, class) representing the\n action's config and BuildAction class.\n\n \"\"\"\n result = []\n for name in dir(module):\n obj = getattr(module, name)\n if (isinstance(obj, type) and issubclass(obj, core.BuildAction) and\n obj is not core.BuildAction):\n # get config for the action class\n actionName = obj.__name__\n configFile = os.path.splitext(module.__file__)[0] + '.yaml'\n actionConfig = self.loadActionConfig(name, configFile)\n if actionConfig:\n LOG.debug('Loaded BuildAction: {0}'.format(obj.__name__))\n result.append((actionConfig, obj))\n else:\n LOG.error('Failed to load BuildAction: {0}'.format(\n obj.getTypeName()))\n return result\n\n def loadActionsFromDirectory(self, startDir, pattern='*_pulseaction.py'):\n \"\"\"\n Return BuildStep type map data for all BuildActions found\n by searching a directory. 
Search is performed recursively for\n any python files matching a pattern.\n\n Args:\n startDir: A str path of the directory to search\n\n Returns:\n A list of tuples containing (dict, class) representing the\n action's config and BuildAction class.\n \"\"\"\n if '~' in startDir:\n startDir = os.path.expanduser(startDir)\n\n result = []\n\n paths = os.listdir(startDir)\n for path in paths:\n fullPath = os.path.join(startDir, path)\n\n if os.path.isfile(fullPath):\n if fnmatch(path, pattern):\n module = self._getModuleFromFile(fullPath)\n result.extend(self.loadActionsFromModule(module))\n\n elif os.path.isdir(fullPath):\n result.extend(self.loadActionsFromDirectory(fullPath, pattern))\n\n return result\n\n def _getModuleFromFile(self, filePath):\n # get module name\n name = os.path.splitext(os.path.basename(filePath))[0]\n # check for existing module in sys.modules\n if name in sys.modules:\n if _isSamePythonFile(sys.modules[name].__file__, filePath):\n # correct module already imported, delete it to force reload\n del sys.modules[name]\n else:\n raise ImportError(\"BuildAction module does not have \"\n \"a unique module name: \" + filePath)\n # add dir to sys path if necessary\n dirName = os.path.dirname(filePath)\n isNotInSysPath = False\n if not dirName in sys.path:\n sys.path.insert(0, dirName)\n isNotInSysPath = True\n module = importlib.import_module(name)\n # remove path from sys\n if isNotInSysPath:\n sys.path.remove(dirName)\n return module\n","sub_path":"src/pulse/scripts/pulse/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":4448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"434204387","text":"# Sprint Week 2!\n\n# Author: Evan Murphy & Stephen Menecola\n\n# Date: 03/12/21\n\nimport Backpack as BP\nfrom datetime import datetime\nCurDate = datetime.now()\n\nClaimNumber = 0\nHST = 0.15\nLowPerDiemRate = 85.00\nHighPerDiemRate = 100\nMileageRate = 0.10\nRentalCarRate = 56\n\n# Reads and initialises from Deflt.dat file\n\nfile = open('Deflt.dat', 'r')\nClaimNumber = int(file.readline())\nHST = float(file.readline())\nLowPerDiemRate = int(file.readline())\nHighPerDiemRate = int(file.readline())\nMileageRate = float(file.readline())\nRentalCarRate = int(file.readline())\nfile.close()\n\n\n# This function will process salesperson travel claims\ndef TravelClaim(ClaimNumber):\n while True:\n EmployeeNumber = BP.ValidEmployeeNumber9()\n EmployeeName = input(\"Enter employee name: \")\n TripLocation = input(\"Enter location of travel: \")\n\n # Format the dates to allow them to be subtracted****\n\n # newdate1 = time.strptime(date1, \"%d/%m/%Y\") and newdate2 = time.strptime(date2, \"%d/%m/%Y\")\n StartDatestr = input(\"Business trip start date (yyyy-mm-dd): \")\n EndDatestr = input(\"Business trip end date (yyyy-mm-dd): \")\n # formats start & end dates without time, and calculates travel days\n StartDate, EndDate, TotalTravelDays = BP.ProcessDate(StartDatestr, EndDatestr)\n\n OwnOrRented = input(\"Was the vehicle owned or rented? (O/R): \")\n TotalKilometers = int(input(\"Enter the total kilometers travelled: \"))\n\n if TotalTravelDays <= 3:\n PerDiem = TotalTravelDays * 85.00\n else:\n PerDiem = TotalTravelDays * 100.00\n\n if OwnOrRented.upper() == \"O\":\n MileageAmount = TotalKilometers * 0.10\n elif OwnOrRented.upper() == \"R\":\n MileageAmount = TotalTravelDays * 56.00\n else:\n MileageAmount = 0\n\n ClaimAmount = PerDiem + MileageAmount\n TaxAmount = PerDiem * HST\n ClaimTotal = ClaimAmount + TaxAmount\n\n # Formatting\n\n #PerDiemStr = \"${:,.2f}\".format(PerDiem)\n #MileageAmountStr = \"${:,.2f}\".format(MileageAmount)\n #ClaimAmountStr = \"${:,.2f}\".format(ClaimAmount)\n TaxAmountStr = \"${:,.2f}\".format(TaxAmount)\n ClaimTotalStr = \"${:,.2f}\".format(ClaimTotal)\n\n\n # Printing results\n\n print()\n print(\" NL Chocolate Company - Travel Claim\")\n print()\n print(\"*\" * 60)\n print()\n print(\"Employee Number: {} Employee Name: {:<12}\".format(EmployeeNumber, EmployeeName))\n print()\n print(\"Travel location: {}\".format(TripLocation))\n print(\"Travel Start Date: {} Travel End Date: {}\".format(StartDate, EndDate))\n print()\n print(\"Total Days Travelled: {}\".format(TotalTravelDays))\n print(\"Car Status (Owned or Rented): {}\".format(OwnOrRented))\n print(\"Total Kilometers Travelled: {}\".format(TotalKilometers))\n print()\n print(\"*\" * 60)\n print()\n print(\"Daily Cost: ${:,.2f}\".format(PerDiem))\n print(\"Mileage Cost: ${:,.2f}\".format(MileageAmount))\n print(\"Claim Amount: ${:,.2f}\".format(ClaimAmount))\n print(\"Tax Amount: {:<10}\".format(TaxAmountStr))\n print(\" ----------\")\n print(\"Claim Total: {:<10}\".format(ClaimTotalStr))\n print()\n print(\"\")\n\n file = open('Claims.dat', 'a')\n\n file.write(\"{}, \".format(ClaimNumber))\n file.write(\"{}, \".format(EmployeeNumber))\n file.write(\"{}, \".format(EmployeeName))\n file.write(\"{}, \".format(TripLocation))\n file.write(\"{}, \".format(StartDate))\n file.write(\"{}, \".format(EndDate))\n file.write(\"{}, \".format(TotalTravelDays))\n file.write(\"{}, \".format(OwnOrRented))\n file.write(\"{}, \".format(TotalKilometers))\n file.write(\"{}, \".format(PerDiem))\n 
file.write(\"{}, \".format(MileageAmount))\n file.write(\"{}, \".format(ClaimAmount))\n file.write(\"{}, \".format(TaxAmount))\n file.write(\"{}\\n\".format(ClaimTotal))\n\n file.close()\n\n # Increase claim number\n ClaimNumber += 1\n\n # Updates Deflt.dat with new claim number\n file = open('Deflt.dat', 'w')\n file.write(\"{}\\n\".format(str(ClaimNumber)))\n file.write(\"{}\\n\".format(str(HST)))\n file.write(\"{}\\n\".format(str(LowPerDiemRate)))\n file.write(\"{}\\n\".format(str(HighPerDiemRate)))\n file.write(\"{}\\n\".format(float(MileageRate)))\n file.write(\"{}\\n\".format(int(RentalCarRate)))\n file.close()\n\n print(\"Claim processed successfully\")\n print()\n\n Continue = input(\"Process another data claim? (Enter Y for yes or any other key to end): \")\n if Continue.upper() != \"Y\":\n break\n\n Anykey = input(\"Press any key to continue.\")\n\n\n# This function will allow the user to edit the system default values\ndef EditDefaultValues():\n\n # Open the defaults file and read the values into variables\n f = open('Deflt.dat', 'r')\n ClaimNumber = int(f.readline())\n HSTRate = float(f.readline())\n LowPerDiemRate = int(f.readline())\n HighPerDiemRate = int(f.readline())\n MileageRate = float(f.readline())\n RentalCarRate = int(f.readline())\n f.close()\n\n print(\"NL Chocolate Company\")\n print(\"Edit Default Values\")\n print()\n print(\"For each value, enter an updated value, \")\n print(\"or press Enter to keep the existing value.\")\n print(\"Current value is shown in ().\")\n print()\n\n NewClaimNumber = input(\"Enter the claim number (\" + str(ClaimNumber) + \"): \")\n if NewClaimNumber == \"\":\n NewClaimNumber = ClaimNumber\n\n NewHSTRate = input(\"Enter the HSTRate (\" + str(HSTRate) + \"): \")\n if NewHSTRate == \"\":\n NewHSTRate = HSTRate\n\n NewLowPerDiemRate = input(\"Enter the low per diem rate (\" + str(LowPerDiemRate) + \"): \")\n if NewLowPerDiemRate == \"\":\n NewLowPerDiemRate = LowPerDiemRate\n\n NewHighPerDiemRate = input(\"Enter the high per diem rate (\" + str(HighPerDiemRate) + \"): \")\n if NewHighPerDiemRate == \"\":\n NewHighPerDiemRate = HighPerDiemRate\n\n NewMileageRate = input(\"Enter the new mileage rate (\" + str(MileageRate) + \"): \")\n if NewMileageRate == \"\":\n NewMileageRate = MileageRate\n\n NewRentalCarRate = input(\"Enter the rental care rate (\" + str(RentalCarRate) + \"): \")\n if NewRentalCarRate == \"\":\n NewRentalCarRate = RentalCarRate\n\n f = open('Deflt.dat', 'w')\n f.write(\"{}\\n\".format(str(NewClaimNumber)))\n f.write(\"{}\\n\".format(str(NewHSTRate)))\n f.write(\"{}\\n\".format(str(NewLowPerDiemRate)))\n f.write(\"{}\\n\".format(str(NewHighPerDiemRate)))\n f.write(\"{}\\n\".format(str(NewMileageRate)))\n f.write(\"{}\\n\".format(str(NewRentalCarRate)))\n f.close()\n\n print()\n print(\"Default values successfully updated\")\n\n Anykey = input(\"Press any key to continue.\")\n\n\n# This function will allow the user to print a travel report\ndef PrintTravelReport():\n while True:\n\n print()\n print(\" 1 2 3 4 5 6 7 8\")\n print(\"1234567890\" * 8)\n print()\n print(\" NL Chocolate Company\")\n print()\n print(\" Travel Claims Listing as of {}\".format(CurDate.strftime(\"%m/%d/%Y\")))\n print()\n print(\"Claim Claim Salesperson Claim Per Diem Mileage Claim\")\n print(\"Number Date Name Location Amount Amount Amount\")\n print(\"=\" * 86)\n\n file = open('Claims.dat', 'r')\n\n ClaimCounter = 0\n PerDiemAccumulator = 0\n MileageAccumulator = 0\n ClaimAmountAccumulator = 0\n\n for claims in file:\n ClaimList = 
claims.split(\",\")\n ClaimNumber = ClaimList[0]\n ClaimDate = ClaimList[4].strip()\n Salesperson = ClaimList[2].strip()\n ClaimLocation = ClaimList[3].strip()\n PerDiemAmount = float(ClaimList[9].strip())\n MileageAmount = float(ClaimList[10].strip())\n ClaimAmount = float(ClaimList[11].strip())\n\n print(\"{:<3} {:<10} {:<12} {:<12} ${:,.2f} ${:,.2f} ${:,.2f}\".format(ClaimNumber, ClaimDate, Salesperson, ClaimLocation, PerDiemAmount, MileageAmount, ClaimAmount))\n\n ClaimCounter += 1\n PerDiemAccumulator += PerDiemAmount\n MileageAccumulator += MileageAmount\n ClaimAmountAccumulator += ClaimAmount\n\n\n print(\"=\"*86)\n print(\"{} claims listed ${:,.2f} ${:,.2f} ${:,.2f}\".format(ClaimCounter, PerDiemAccumulator, MileageAccumulator, ClaimAmountAccumulator))\n print()\n print(\" End of Report\")\n file.close()\n break\n\n Anykey = input(\"Press any key to continue.\")\n\n\n# This function will allow the user to graph monthly claim totals\ndef GraphClaimTotals():\n\n import numpy as np\n import matplotlib.pyplot as plt\n\n Jan = 0\n Feb = 0\n Mar = 0\n Apr = 0\n May = 0\n Jun = 0\n Jul = 0\n Aug = 0\n Sep = 0\n Oct = 0\n Nov = 0\n Dec = 0\n\n file = open(\"Claims.dat\", \"r\")\n\n for claims in file:\n ClaimList = claims.split(\",\")\n StartDate = ClaimList[4].strip()\n StartDate2 = StartDate.split('-')\n Month = StartDate2[1]\n ClaimAmount = float(ClaimList[11].strip())\n if Month == \"01\":\n Jan = Jan + ClaimAmount\n elif Month == \"02\":\n Feb = Feb + ClaimAmount\n elif Month == \"03\":\n Mar = Mar + ClaimAmount\n elif Month == \"04\":\n Apr = Apr + ClaimAmount\n elif Month == \"05\":\n May = May + ClaimAmount\n elif Month == \"06\":\n Jun = Jun + ClaimAmount\n elif Month == \"07\":\n Jul = Jul + ClaimAmount\n elif Month == \"08\":\n Aug = Aug + ClaimAmount\n elif Month == \"09\":\n Sep = Sep + ClaimAmount\n elif Month == \"10\":\n Oct = Oct + ClaimAmount\n elif Month == \"11\":\n Nov = Nov + ClaimAmount\n elif Month == \"12\":\n Dec = Dec + ClaimAmount\n\n XAxis = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n YAxis = [Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, Nov, Dec]\n\n plt.plot(XAxis, YAxis)\n\n plt.xlabel('Month')\n plt.ylabel('Claim Amount')\n\n plt.title('Monthly Claim Totals')\n plt.grid(True)\n\n plt.show()\n\n Anykey = input(\"Press any key to continue.\")\n\n\ndef main():\n while True:\n print()\n print(\"NL Chocolate Company - Travel Claims Processing System\")\n print()\n print(\"1. Enter an Employee Travel Claim.\")\n print(\"2. Edit System Default Values.\")\n print(\"3. Print the Travel Claim Report.\")\n print(\"4. Graph Monthly Claim Totals.\")\n print(\"5. Quit Program.\")\n print()\n while True:\n Choice = int(input(\"Enter choice (1-5): \"))\n IsValid = BP.ValidIntegerNumber(Choice, 1, 5)\n if IsValid:\n Choice = int(Choice)\n break\n if Choice == 1:\n TravelClaim(ClaimNumber)\n elif Choice == 2:\n EditDefaultValues()\n elif Choice == 3:\n PrintTravelReport()\n elif Choice == 4:\n GraphClaimTotals()\n else:\n print(\"Thank you for using NL Chocolate Company's Travel Claim Software!\")\n exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"150163284","text":"from amath import gcd, trunc, digits, intQ, isComplex, digitsafterdecimal\nfrom Symbols import *\nfrom functools import total_ordering\nfrom decimal import Decimal, InvalidOperation\nimport inspect\n\n\ndef dectofr(x):\n # type: (float) -> Fraction\n \"\"\"\n Converts decimals to fractions\n :param x: decimal to convert\n :return: Fraction\n\n >>> dectofr(2.5)\n 5/2\n >>> dectofr(0.25)\n 1/4\n >>> dectofr(2.1)\n 21/10\n\n Does work for int\n\n >>> dectofr(5)\n 5/1\n \"\"\"\n # n = int(floor(x))\n # x -= n\n # if x < error:\n # # return (n, 1)\n # return Fraction(n, 1)\n # elif 1 - error < x:\n # # return (n+1, 1)\n # return Fraction(n + 1, 1)\n #\n # # The lower fraction is 0/1\n # lower_n = 0\n # lower_d = 1\n # # The upper fraction is 1/1\n # upper_n = 1\n # upper_d = 1\n # while True:\n # # The middle fraction is (lower_n + upper_n) / (lower_d + upper_d)\n # middle_n = lower_n + upper_n\n # middle_d = lower_d + upper_d\n # # If x + error < middle\n # if middle_d * (x + error) < middle_n:\n # # middle is our new upper\n # upper_n = middle_n\n # upper_d = middle_d\n # # Else If middle < x - error\n # elif middle_n < (x - error) * middle_d:\n # # middle is our new lower\n # lower_n = middle_n\n # lower_d = middle_d\n # # Else middle is our best fraction\n # else:\n # # return (n * middle_d + middle_n, middle_d)\n # # return \"{0}/{1}\".format(n*middle_d+middle_n,middle_d)\n # return Fraction(n * middle_d + middle_n, middle_d)\n calframe = inspect.getouterframes(inspect.currentframe(), 2)\n print(\"dectofr caller name: \", calframe[1])\n x = float(x)\n n = x\n d = 1\n dig = digitsafterdecimal(x)\n multiplier = 10 ** dig\n # print(n, d, dig, multiplier, n * multiplier, d * multiplier)\n return Fraction(int(n * multiplier), int(d * multiplier))\n\n\ndef frtodec(x):\n \"\"\"\n Converts Fraction to decimal\n :param x: Fraction to be converted\n :return: Decimal\n\n >>> frtodec(Fraction(1,2))\n 0.5\n >>> frtodec(Fraction(1,3))\n 0.3333333333333333\n \"\"\"\n if not isinstance(x, Fraction):\n raise TypeError(\"Argument must be a fraction\")\n return float(x.numerator) / float(x.denominator)\n\n\n\n@total_ordering\nclass Fraction:\n \"\"\"\n Fraction data type\n \"\"\"\n\n def __init__(self, n=0, d=1):\n \"\"\"\n Fraction initialization\n :param n: numerator\n :param d: denomenator\n :return:\n :raises ZeroDivisionError:\n\n Create a Fraction\n\n >>> Fraction(5,2)\n 5/2\n >>> Fraction(-5,2)\n -5/2\n >>> Fraction(5,-2)\n -5/2\n >>> Fraction(4,10)\n 2/5\n \"\"\"\n curframe = inspect.currentframe()\n calframe = inspect.getouterframes(curframe, 2)\n print(\"__init__ caller name: \", calframe[1])\n self.onum = n\n self.oden = d\n self.numerator = n / gcd(abs(n), abs(d))\n self.denominator = d / gcd(abs(n), abs(d))\n self.whole = 0\n if type(self.denominator) is not complex:\n self.denominator = int(self.denominator)\n if type(self.numerator) is not complex:\n self.numerator = int(self.numerator)\n if (type(self.numerator) is not complex) and (type(self.denominator) is not complex):\n if self.denominator < 0:\n self.denominator = abs(self.denominator)\n self.numerator *= -1\n if self.denominator == 0:\n raise ZeroDivisionError\n self.value = n / d\n self.whole = trunc(self.value)\n self.attributes = {Attributes[0], Attributes[1]}\n\n def __add__(self, other):\n \"\"\"\n Adds to values\n :param other:\n :return:\n\n >>> Fraction(1,4) + Fraction(2,4)\n 3/4\n >>> Fraction(1,2) + Fraction(3,4)\n 5/4\n >>> Fraction(1,2) + 2\n 5/2\n >>> Fraction(1,2) + 2.5\n 3/1\n \"\"\"\n ax = 
other\n if not isinstance(other, Fraction):\n ax = dectofr(other)\n return Fraction(self.numerator * ax.denominator + self.denominator * ax.numerator,\n self.denominator * ax.denominator)\n\n __radd__ = __add__\n\n def __sub__(self, other):\n # type: (object) -> Fraction\n \"\"\"\n Subtract a value from Fraction\n\n :param other:\n :return:\n\n >>> Fraction(3, 4) - Fraction(1, 4)\n 1/2\n >>> Fraction(7, 4) - Fraction(3 ,4)\n 1/1\n >>> Fraction(6, 4) - 2\n -1/2\n >>> Fraction(11, 2) - 3.5\n 2/1\n\n \"\"\"\n dx = other\n if not isinstance(other, Fraction):\n dx = dectofr(other)\n return Fraction(self.numerator * dx.denominator - self.denominator * dx.numerator,\n self.denominator * dx.denominator)\n\n def __rsub__(self, other):\n dx = other\n if not isinstance(other, Fraction):\n dx = dectofr(other)\n return Fraction(dx.numerator * self.denominator - self.numerator * dx.denominator,\n self.denominator * other.denominator)\n\n def __mul__(self, other):\n # type: (object) -> Fraction\n \"\"\"\n Multiplication\n :param other:\n :return:\n\n >>> Fraction(1,2) * Fraction(5,4)\n 5/8\n >>> Fraction(1,2) * 4\n 2/1\n >>> Fraction(1,3) * 2.5\n 5/6\n \"\"\"\n try:\n other = float(other)\n except ValueError:\n return NotImplemented\n except TypeError:\n return NotImplemented\n mx = dectofr(other)\n return Fraction(self.numerator * mx.numerator, self.denominator * mx.denominator)\n\n __rmul__ = __mul__\n\n def __truediv__(self, other):\n dx = other\n if not isinstance(other, Fraction):\n dx = dectofr(other)\n return Fraction(self.numerator * dx.denominator, self.denominator * dx.numerator)\n\n def __rtruediv__(self, other):\n dx = other\n if not isinstance(other, Fraction):\n dx = dectofr(other)\n return Fraction(dx.numerator * self.denominator, dx.denominator * self.numerator)\n\n def __div__(self, other):\n \"\"\"\n Division\n :param other:\n :return:\n\n Uses truediv\n\n >>> Fraction(1,2) / Fraction(3,4)\n 2/3\n >>> Fraction(1,2) / 2\n 1/4\n >>> Fraction(1,4) / 0.5\n 1/2\n \"\"\"\n return self.__truediv__(other)\n\n def __pow__(self, power, modulo=None):\n y = pow(self.numerator, power)\n z = pow(self.denominator, power)\n if modulo is not None:\n return Fraction(y, z) % modulo\n return Fraction(y, z)\n\n def __rpow__(self, other, modulo=None):\n from amath.Computation.power import root\n return pow(root(other, self.denominator), self.numerator)\n\n def __str__(self):\n return \"%s/%s\" % (self.numerator, self.denominator)\n\n # def __cmp__(self, other):\n # \"\"\"\n # compare two values\n # :param other:\n # :return:\n #\n # >>> Fraction(1,2) < Fraction(2,3)\n # True\n # >>> Fraction(2,3) == Fraction(4,6)\n # True\n # >>> Fraction(1,3) < 1\n # True\n # >>> Fraction(5,2) > 2.5\n # False\n # \"\"\"\n # if type(other) is float:\n # other = dectofr(other)\n # a = Fraction(self.numerator * other.denominator, self.denominator * other.denominator)\n # b = Fraction(other.numerator * self.denominator, self.denominator * other.denominator)\n # if a.onum > b.onum:\n # return 1\n # elif a.onum is b.onum:\n # return 0\n # else:\n # return -1\n\n def __eq__(self, other):\n if not isinstance(other, Fraction):\n other = dectofr(other)\n a = Fraction(self.numerator * other.denominator, self.denominator * other.denominator)\n b = Fraction(other.numerator * self.denominator, self.denominator * other.denominator)\n if a.onum == b.onum:\n return True\n else:\n return False\n\n def __lt__(self, other):\n if not isinstance(other, Fraction):\n other = dectofr(other)\n a = Fraction(self.numerator * other.denominator, 
self.denominator * other.denominator)\n b = Fraction(other.numerator * self.denominator, self.denominator * other.denominator)\n if a.onum < b.onum:\n return True\n else:\n return False\n\n def __nonzero__(self):\n \"\"\"\n Non Zero\n :return:\n\n \"\"\"\n if self != 0:\n return True\n else:\n return False\n\n def __repr__(self):\n try:\n return self.__str__()\n except AttributeError:\n return str(None)\n\n def digits(self):\n x = frtodec(self)\n return digits(x)\n\n def is_int(self):\n if self.denominator == 1:\n return True\n else:\n return False\n\n def __trunc__(self):\n return self.whole\n\n def __float__(self):\n \"\"\"\n Convert to float\n :return:\n\n >>> float(Fraction(1,2))\n 0.5\n >>> float(Fraction(1,25))\n 0.04\n >>> float(Fraction(5,2))\n 2.5\n \"\"\"\n return frtodec(self)\n\n def __mod__(self, other):\n \"\"\"\n Modulus\n :param other:\n :return:\n\n >>> Fraction(1,2) % 2\n 1/2\n >>> Fraction(1,2) % Fraction(1,3)\n 1/6\n \"\"\"\n z = trunc(self / other)\n a = self - (other * z)\n return a\n\n def __abs__(self):\n if self.numerator < 0:\n return Fraction(-self.numerator, self.denominator)\n else:\n return self\n\n def __neg__(self):\n return Fraction(-self.numerator, self.denominator)\n\n def __pos__(self):\n return Fraction(self.numerator, self.denominator)\n\n\n@total_ordering\nclass Complex:\n\n def __init__(self, value: complex = 0j):\n self.imag = Decimal(value.imag)\n self.real = Decimal(value.real)\n self.value = Decimal(value)\n self.attributes = {Attributes[0], Attributes[1]}\n\n def conjugate(self):\n return self.real - (self.imag * 1j)\n\n","sub_path":"datatypes.py","file_name":"datatypes.py","file_ext":"py","file_size_in_byte":10354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"449076353","text":"from django.db.models.signals import post_save, pre_delete, post_delete\nfrom django.dispatch import receiver\nfrom library_preparation.models import LibraryPreparation\nfrom pooling.models import Pooling\nfrom .models import Pool\n\n\n@receiver(post_save, sender=Pool)\ndef update_pool_name_size(sender, instance, created, **kwargs):\n # Update the name only for a just created pool\n if created:\n instance.name = str(instance.id) + instance.name\n instance.save()\n\n # Update Pool Size\n update_fields = kwargs.pop('update_fields')\n if update_fields and update_fields == {'size'}:\n libraries = instance.libraries.all()\n samples = instance.samples.all()\n instance.size += sum([l.sequencing_depth for l in libraries])\n instance.size += sum([s.sequencing_depth for s in samples])\n instance.save()\n\n\n@receiver(pre_delete, sender=Pool)\ndef delete_dependent_objects(sender, instance, **kwargs):\n libraries = instance.libraries.all()\n samples = instance.samples.all()\n\n for library in libraries:\n library.is_pooled = False\n library.save(update_fields=['is_pooled'])\n\n for sample in samples:\n sample.is_pooled = False\n sample.save(update_fields=['is_pooled'])\n\n # Delete all dependent Library Preparation and Pooling objects\n LibraryPreparation.objects.filter(sample__in=samples).delete()\n Pooling.objects.filter(library__in=libraries).delete()\n Pooling.objects.filter(sample__in=samples).delete()\n\n\n@receiver(post_delete, sender=Pool)\ndef delete_file(sender, instance, **kwargs):\n # Delete uploaded file after deleting a pool\n if instance.file:\n instance.file.delete(False)\n","sub_path":"index_generator/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"3896191","text":"# !/usr/bin/env python3\n# -*- coding:utf-8 -*- \n# @author: Shengjia Yan\n# @date: 2018-10-30 Tuesday\n# @email: i@yanshengjia.com\n# Copyright @ Shengjia Yan. All Rights Reserved.\n\"\"\"\nThis module contains the status codes for exceptions.\n\"\"\"\n\n\nstatus_code = {\n 'OK': 1,\n 'BAD_SRC': 101, # all candidate scores < -0.4\n}\n","sub_path":"utils/status_code.py","file_name":"status_code.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"411855219","text":"import Domain.ListController\nclass CircularList:\n\n def __init__(self, data, identifier):\n self.data = data\n self.nextNode = None\n self.prevNode = None\n self.identifier = identifier\n\n def insertAtPoint(self, data, head, tail, nodes):\n auxNode = head\n newNode = CircularList(data, head.identifier)\n\n if head.data is None:\n head = newNode\n tail = newNode\n head.nextNode = tail\n head.prevNode = tail\n tail.nextNode = head\n tail.prevNode = head\n nodes += 1\n return Domain.ListController.updateElements(head, tail, nodes)\n\n elif head.data[1] > newNode.data[1]:\n newNode.nextNode = head\n newNode.prevNode = tail\n head.prevNode = newNode\n tail.nextNode = newNode\n head = newNode\n nodes += 1\n return Domain.ListController.updateElements(head, tail, nodes)\n\n elif tail.data[1] < newNode.data[1]:\n tail.nextNode = newNode\n newNode.nextNode = head\n newNode.prevNode = tail\n head.prevNode = newNode\n tail = newNode\n nodes += 1\n return Domain.ListController.updateElements(head, tail, nodes)\n\n else:\n for x in range(0,nodes):\n if auxNode.data[1] < newNode.data[1]:\n auxNode = auxNode.nextNode\n else:\n auxNode = auxNode.prevNode\n break\n\n newNode.nextNode = auxNode.nextNode\n auxNode.nextNode = newNode\n newNode.prevNode = auxNode\n newNode.nextNode.prevNode = newNode\n tail = head.prevNode\n nodes += 1\n return Domain.ListController.updateElements(head, tail, nodes)\n\n'''\nExtra code\nNot tested with the project actual functionality\n\n def deleteNode(self, data, head, tail, nodes):\n\n auxNode = head\n if nodes != 0:\n if head.data == data:\n auxNode.prevNode.nextNode = auxNode.nextNode\n auxNode.nextNode.prevNode = auxNode.prevNode\n head = head.nextNode\n print(data, \" deleted\")\n nodes -= 1\n return Domain.ListController.updateElements(head, tail, nodes)\n elif tail.data == data:\n tail.prevNode.nextNode = tail.nextNode\n tail.nextNode.prevNode = tail.prevNode\n tail = tail.prevNode\n print(data, \" deleted\")\n nodes -= 1\n return Domain.ListController.updateElements(head, tail, nodes)\n else:\n while auxNode.nextNode != head:\n if auxNode.data == data:\n auxNode.prevNode.nextNode = auxNode.nextNode\n auxNode.nextNode.prevNode = auxNode.prevNode\n print(data, \" deleted\")\n nodes -= 1\n return Domain.ListController.updateElements(head, tail, nodes)\n auxNode = auxNode.nextNode\n print(data, \"Not found\")\n return\n else:\n print(\"Cannot delete, the list is empty\")\n return\n\n def updateElement(self, data, newData, head, tail, nodes):\n\n auxNode = data\n if nodes == 0:\n print(\"Cannot update, the list is empty\")\n return\n for x in range(0,nodes):\n if data == auxNode.data:\n auxNode.data = newData\n print(\"Value updated succesfully\")\n return Domain.ListController.updateElements(head, tail, nodes)\n else:\n auxNode = auxNode.nextNode\n print(\"The inserted value doesn't coincide with any Node value\")\n return\n\n\n def insertAtTail(self, data, head, tail, nodes):\n\n auxNode = head\n newNode = CircularList(data)\n\n if head.data is None:\n head = newNode\n tail = newNode\n head.nextNode = tail\n head.prevNode = tail\n tail.nextNode = head\n tail.prevNode = head\n nodes += 1\n return\n else:\n auxNode.prevNode.nextNode = newNode\n newNode.nextNode = auxNode\n newNode.prevNode = auxNode.prevNode\n auxNode.prevNode = newNode\n tail = head.prevNode\n nodes += 1\n return\n \n def insertAtHead(self, data, head, tail, nodes):\n\n newNode = CircularList(data)\n\n if head.data is None:\n head = newNode\n tail = newNode\n 
head.nextNode = tail\n head.prevNode = tail\n tail.nextNode = head\n tail.prevNode = head\n nodes += 1\n return\n\n else:\n newNode.nextNode = head\n newNode.prevNode = tail\n head.prevNode = newNode\n tail.nextNode = newNode\n head = newNode\n nodes += 1\n return\n \n def printList(self, head, tail, nodes):\n\n auxNode = head\n if nodes == 0:\n print(\"The list is empty\")\n return\n elif nodes == 1:\n print(\"(\", tail.data, \")<->\", head.data, \"<->(\", head.data, \")\")\n return\n else:\n print(\"(\", tail.data, \")\", end=\"\")\n for x in range(0, nodes):\n print(\"<->\", auxNode.data, end=\" \")\n auxNode = auxNode.nextNode\n print(\"<->(\", head.data, \")\")\n return\n'''","sub_path":"Domain/List.py","file_name":"List.py","file_ext":"py","file_size_in_byte":5630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"351781384","text":"\"\"\"\ntest_sfissues.py - tests for the sfissues module\nauthor: nu11us \n\"\"\"\n\nimport unittest\nfrom mock import MagicMock\nfrom modules import sfissues\nimport web\n\n\nclass TestSFIssues(unittest.TestCase):\n def setUp(self):\n self.phenny = MagicMock()\n self.input = MagicMock()\n\n def test_bugs(self):\n self.phenny.config.sf_issues_url = \"https://sourceforge.net/p/apertium/news/feed.rss\"\n self.input.nick = \"bbc\"\n sfissues.bugs(self.phenny, self.input)\n out = self.phenny.say.call_args[0][0]\n self.assertTrue(\"Basque-English 0.3.0 Released\" in out)","sub_path":"modules/test/test_sfissues.py","file_name":"test_sfissues.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"495678168","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n*這種模式是什麼?\n博格模式(Borg pattern)(也被稱為 Monostate pattern)是一種實現單例行為的方法,\n但是不是只有一個類的實例,而是有多個實例共享相同的狀態。換句話說,\n重點是共享狀態而不是共享實例標識。\n\n*這個例子做了什麼?\n要理解 Python 中此模式的實現,重要的是要知道,在 Python 中,\n實例屬性存儲在名為 __dict__ 的屬性字典中。 通常,每個實例都有自己的字典,\n但博格模式會修改它,以便所有實例都具有相同的字典。\n在此範例中,__shared_state 屬性將是在所有實例之間共享的字典,\n並且通過在初始化新實例時(即在 __init__ 方法中)\n將 __shared_state 賦值給 __dict__ 變數來確保這一點。其他屬性通常會添加到實例的屬性字典中,\n但是,由於屬性字典本身是共享的(即 __shared_state),因此所有其他屬性也將被共享。\n因此,當使用實例 rm2 修改屬性 self.state 時,實例 rm1 中 self.state 的值也會更改。\n如果使用 rm3 修改 self.state,則會發生同樣的情況,rm3 是子類別中的實例。\n請注意,即使它們共享屬性,實例也不同,如其 ID 所示。\n\n*該模式實際使用在哪裡?\n共享狀態在管理資料庫連接等應用程式中很有用:\nhttps://github.com/onetwopunch/pythonDbTemplate/blob/master/database.py\n\n*參考:\nhttps://fkromer.github.io/python-pattern-references/design/#singleton\n\n*TL;DR80\n在實例之間提供類似單一行為的行為共享狀態。\n\"\"\"\n\n\nclass Borg(object):\n __shared_state = {}\n\n def __init__(self):\n self.__dict__ = self.__shared_state\n self.state = 'Init'\n\n def __str__(self):\n return self.state\n\n\nclass YourBorg(Borg):\n pass\n\n\nif __name__ == '__main__':\n rm1 = Borg()\n rm2 = Borg()\n\n rm1.state = 'Idle'\n rm2.state = 'Running'\n\n print('rm1: {0}'.format(rm1))\n print('rm2: {0}'.format(rm2))\n\n rm2.state = 'Zombie'\n\n print('rm1: {0}'.format(rm1))\n print('rm2: {0}'.format(rm2))\n\n print('rm1 id: {0}'.format(id(rm1)))\n print('rm2 id: {0}'.format(id(rm2)))\n\n rm3 = YourBorg()\n\n print('rm1: {0}'.format(rm1))\n print('rm2: {0}'.format(rm2))\n print('rm3: {0}'.format(rm3))\n\n### OUTPUT ###\n# rm1: Running\n# rm2: Running\n# rm1: Zombie\n# rm2: Zombie\n# rm1 id: 140732837899224\n# rm2 id: 140732837899296\n# rm1: Init\n# rm2: Init\n# rm3: Init\n","sub_path":"patterns/creational/borg.py","file_name":"borg.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"589089308","text":"\n\nimport pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport statsmodels as sm\nfrom statsmodels.tsa.stattools import adfuller\n\npath = os.getcwd()\n\nprices = pd.read_csv(path + '/close.csv')\ndata = pd.DataFrame()\nprices['Date'] = pd.to_datetime(prices['Date'])\nprices.set_index('Date',inplace = True)\ntickers = list(data.columns.values)\ndata = prices/prices.iloc[0]\ntrading_data = data.loc[data.index > '2017-09-08']\ndata = data.loc[data.index < '2017-09-09']\nprices = prices.loc[prices.index > '2017-09-09']\n\ndef trading_signals(first, second, trading_data = trading_data, formation_data = data):\n signal = 2*np.std(formation_data[first] - formation_data[second])\n result_dict = {}\n trading = False\n differences = trading_data[first] - trading_data[second]\n for i in range(len(differences)):\n if trading == False:\n if abs(differences.iloc[i]) > signal and abs(differences.iloc[i] < 2*signal):\n trading = True\n start_date = differences.index.values[i]\n else:\n if (differences.iloc[i-1] * differences.iloc[i] < 0) or (i == len(differences)-1) or abs(differences.iloc[i] > 2*signal):\n trading = False\n end_date = differences.index.values[i]\n if differences[i-1] > 0:\n s_ret = (trading_data[first][start_date] - trading_data[first][end_date])/trading_data[first][start_date]\n l_ret = (trading_data[second][end_date] - trading_data[second][start_date])/trading_data[second][start_date]\n result_dict[start_date] = [first, second, start_date, end_date, s_ret,l_ret]\n else:\n s_ret = (trading_data[second][start_date] - trading_data[second][end_date])/trading_data[second][start_date]\n l_ret = (trading_data[first][end_date] - trading_data[first][start_date])/trading_data[first][start_date]\n result_dict[start_date] = [second, first, start_date, end_date, s_ret,l_ret]\n df = pd.DataFrame.from_dict(result_dict, orient = 'index', columns = ['Short','Long','Start','End', 'SReturn','LReturn'])\n df.index = list(range(len(df)))\n df['Total'] = df['SReturn'] + df['LReturn']\n df['Length'] = (df['End'] - df['Start']).dt.days\n return (df, len(df))\n\n\ndef build_portfolio(trade_list, trading_data = trading_data):\n index_list = trading_data.index.tolist()\n portfolio = pd.DataFrame(index = trading_data.index.values, columns = ['Short','Long','ShortR','LongR','Trading'])\n l = trade_list[1]\n trade_list = trade_list[0]\n for i in range(len(trade_list)):\n start = trade_list['Start'][i]\n end = trade_list['End'][i]\n short = trade_list['Short'][i]\n lon = trade_list['Long'][i]\n di = index_list.index(start)\n di2 = index_list.index(end)\n for j in range(di2 - di + 1):\n date_index = di + j\n dt = index_list[date_index]\n portfolio['Short'][dt] = trading_data[short][dt]/trading_data[short][index_list[di]]\n portfolio['Long'][dt] = trading_data[lon][dt]/trading_data[lon][index_list[di]]\n portfolio['Short'][dt] = trading_data[short][dt]/trading_data[short][index_list[di]]\n portfolio['Long'][dt] = trading_data[lon][dt]/trading_data[lon][index_list[di]]\n portfolio['Trading'][dt] = 1\n\n portfolio.fillna(value = 0, axis = 0)\n for j in range(1, len(portfolio)):\n if portfolio.iloc[j-1]['Short'] > 0:\n portfolio.iloc[j]['ShortR'] = -(portfolio.iloc[j]['Short'] - portfolio.iloc[j-1]['Short'])/portfolio.iloc[j-1]['Long']\n portfolio.iloc[j]['LongR'] = (portfolio.iloc[j]['Long'] - portfolio.iloc[j-1]['Long'])/portfolio.iloc[j-1]['Long']\n else:\n portfolio.iloc[j]['ShortR'] = 0\n portfolio.iloc[j]['LongR']= 0\n portfolio['Total'] = 
portfolio['ShortR'] + portfolio['LongR']\n portfolio.fillna(0, inplace = True)\n return (portfolio, l)\n\ndef analyze_portfolio(pairs):\n i = 0\n df = (build_portfolio(trading_signals(pairs[i][0], pairs[i][1]))[0])\n trade_count = build_portfolio(trading_signals(pairs[i][0], pairs[i][1]))[1]\n for i in range(1, len(pairs)):\n df = df + (build_portfolio(trading_signals(pairs[i][0], pairs[i][1])))[0]\n trade_count += build_portfolio(trading_signals(pairs[i][0], pairs[i][1]))[1]\n df_short = df['ShortR']/df['Trading']\n df_long = df['LongR']/df['Trading']\n df_final = pd.concat([df_short, df_long], axis=1)\n df_final.columns = ['Short Return','Long Return']\n df_final.index.name = 'Date'\n df_final['Total'] = df_final['Short Return'] + df_final['Long Return']\n df_final.fillna(0, inplace = True)\n arithemtic_daily_mean = np.mean(df_final['Total'])\n annualized_return = (1+arithemtic_daily_mean)**250 - 1\n annualized_std = np.std(df_final['Total'])*np.sqrt(250)\n sharpe_ratio = annualized_return/annualized_std\n return [annualized_return, annualized_std, sharpe_ratio, trade_count]\n\nprint(analyze_portfolio([['HON','NEE'], ['TXN','SYK'],['BDX','SYK'], ['HON','DHR'],['JPM','PNC']]))\n\n\n","sub_path":"statarb/trading.py","file_name":"trading.py","file_ext":"py","file_size_in_byte":5154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"627147995","text":"from django.shortcuts import render\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn import pipeline,preprocessing,metrics,model_selection,ensemble\nfrom sklearn_pandas import DataFrameMapper\nfrom sklearn import impute\nfrom sklearn.impute import SimpleImputer\nimport joblib as jb\n\n\n# Create your views here.\n\n# from . import service\n# from django.http import HttpResponse\n\n# def gett(request):\n# #article_data = service.get_data.all()\n# print(service.article_data)\n# return render(request, 'index.html', article_data)\n\n \n# import os\n# import requests\n# from requests.auth import HTTPBasicAuth\n\n# def api(request):\n# Company = request.POST.get('Company')\n# Position = request.POST.get('position')\n\n# url = os.environ.get(\"URL\", 'http://myhost:port/projectname/api/addposition?compName=Google&category=Developer')\n# url = \"%s\" % (url)\n# body = {\"Company\" : \"%s\" % Company, \"Position\" : \"%s\" % Position}response = requests.post(url, auth=HTTPBasicAuth('USER', 'PASSWORD'), headers={'Content-Type': 'application/json'}, json=body)\n# if response.status_code == 200:\n# print(\"Code 200\")\n# else:\n# print(\"Code not 200\")\n\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom django.shortcuts import render\nimport requests\nimport csv\nfrom . serializers import ApiSerializer\nfrom . models import Apimodel\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\n\n\n@api_view(['POST'])\ndef saveapi(request):\n\tif request.method =='POST':\n\t\ta=request.data\n\t\tprint(a)\n\t\tdata=pd.read_csv('datagas.csv')\n\t\tsaveserialize= ApiSerializer(data=request.data)\n\t\tmapper = DataFrameMapper([(['Quiz','sampletest','playing','sleeping','learning'], preprocessing.StandardScaler())])\n\t\tpipeline_obj = pipeline.Pipeline([('mapper',mapper),(\"model\", ensemble.RandomForestRegressor())])\n\t\t# data.columns\n\t\tX=['Quiz', 'sampletest', 'playing', 'sleeping','learning']\n\t\tY=['Total']\n\t\tpipeline_obj.fit(data[X],data[Y].values.ravel())\n\t\t#print(pipeline_obj.predict(data[X]))\n\t\tjb.dump(pipeline_obj,'RFModelforMPG.pkl')\n\t\tmodelReload=jb.load('RFModelforMPG.pkl')\n\t\ttestDtaa=pd.DataFrame({'x':request.data}).transpose()\n\t\tprint(testDtaa)\n\t\tif saveserialize.is_valid():\n\t\t\tsaveserialize.save()\n\t\t\tprint(request.data)\n\t\t\treturn Response(modelReload.predict(testDtaa)[0],status=status.HTTP_201_CREATED)\n\t\t\treturn Response(modelReload.predict(testDtaa)[0],status=status.HTTP_400_BAD_REQUEST)\n\t\t\t\n\n\n\n\n# def home(request):\n# \tresponse = requests.get('http://jsonplaceholder.typicode.com/users/')\n# \tgeodata = response.json()\n# \tprint([d['id'] for d in geodata if 'id' in d])\n# \tprint([d['name'] for d in geodata if 'name' in d])\n# \tprint([d['username'] for d in geodata if 'username' in d])\n# \tprint([d['email'] for d in geodata if 'email' in d])\n# \tout=zip([d['id'] for d in geodata if 'id' in d],[d['name'] for d in geodata if 'name' in d],[d['username'] for d in geodata if 'username' in d],[d['email'] for d in geodata if 'email' in d])\n# \twith open('outdatacsv.csv', 'w',newline='') as file:\n# \t\twriter = csv.writer(file)\n# \t\twriter.writerow(['Quiz','sampletest','playing','sleeping','learning'])\n# 
\t\twriter.writerows([(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),\n# \t\t\t(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),\n# \t\t\t(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),\n# \t\t\t(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),\n# \t\t\t(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),\n# \t\t\t(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),\n# \t\t\t(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),\n# \t\t\t(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),(2,20,3,2,1),])\n# \treturn render(request, 'home.html', {'geodata':geodata})\n\n \n\ndef index(request):\n return render(request, 'index.html')\n\n\n\n\n\ndef result(request):\n\tif request.method == \"POST\":\n\t\ttemp={}\n\t\ttemp['Quiz']=request.POST.get('Quiz')\n\t\tprint(temp['Quiz'])\n\t\ttemp['sampletest']=request.POST.get('sampletest')\n\t\ttemp['playing']=request.POST.get('playing')\n\t\ttemp['sleeping']=request.POST.get('sleeping')\n\t\ttemp['learning']=request.POST.get('learning')\n\t\tprint(temp)\n\t\t#headers= {'Content-Type':'application/json'}\n\t\t#read= requests.post('http://127.0.0.1:8000/checkapi/',json=temp,headers=headers)\n\t\t#home(request)\n\t\tdata=pd.read_csv('datagas.csv')\n\t\t#print(data)\n\t\t#quiz= request.POST.get('quiz')\n\t\t#print(quiz)\n\t\t#context= {'quiz': quiz}\n\t\t\n\t \n\t\tdata.head()\n\t\tdata.isnull().sum()\n\t\t#print(data)\n\t\tmapper = DataFrameMapper([(['Quiz','sampletest','playing','sleeping','learning'], preprocessing.StandardScaler())])\n\t\tpipeline_obj = pipeline.Pipeline([('mapper',mapper),(\"model\", ensemble.RandomForestRegressor())])\n\t\t# data.columns\n\t\tX=['Quiz', 'sampletest', 'playing', 'sleeping','learning']\n\t\tY=['Total']\n\t\tpipeline_obj.fit(data[X],data[Y].values.ravel())\n\t\t#print(pipeline_obj.predict(data[X]))\n\t\tjb.dump(pipeline_obj,'RFModelforMPG.pkl')\n\t\tmodelReload=jb.load('RFModelforMPG.pkl')\n\t\t#print(modelReload.predict(data[X]))\n\t\t#sampledata={1:{'quiz':2,'sampletest':10,'sleep':2,'learn':5},2:{'quiz':2,'sampletest':10,'sleep':2,'learn':5},3:{'quiz':2,'sampletest':10,'sleep':2,'learn':5}}\n\n\t\t#print((temp['Quiz']*2)+ (temp['sampletest']*2)+ (temp['sleeping']*2))\n\t\ttestDtaa=pd.DataFrame({'x':temp}).transpose()\n\t\t#print(testDtaa)\n\t\t#print(modelReload.predict(testDtaa)[0])\n\t\treturn render(request, 'result.html',{'quiz':modelReload.predict(testDtaa)[0]})\n\n\n\nclass TestView(APIView):\n\tdef 
get(self,request,*args,**kwargs):\n\t\tdata=pd.read_csv('datagas.csv')\n\t\tmapper = DataFrameMapper([\n (['Quiz','sampletest','playing','sleeping','learning'], preprocessing.StandardScaler())])\n\t\tpipeline_obj = pipeline.Pipeline([('mapper',mapper),(\"model\", ensemble.RandomForestRegressor())])\n\t\tX=['Quiz', 'sampletest', 'playing', 'sleeping','learning']\n\t\tY=['Total']\n\t\tpipeline_obj.fit(data[X],data[Y].values.ravel())\n\t\tpipeline_obj.predict(data[X])\n\t\tprint(data['id'])\n\t\tprint(dict(zip(data['id'],pipeline_obj.predict(data[X]))))\n\t\t#result(request)\n\t\t#outdata=\"hello python\"\n\t\t#data = {\n\t\t#'name':'vaish',\n\t\t# 'age':25\n\t\t# }\n\t\t\n\t\treturn Response(dict(zip(data['id'],pipeline_obj.predict(data[X]))))\n\n\n\n# class TestView1(APIView):\n# \tdef get(self,request,*args,**kwargs):\n# \t\t#result(request)\n# \t\toutdata=\"hello python\"\n# \t\tdata = {\n# \t\t'name':'vaish',\n# \t\t'age':25\n# \t\t}\n# \t\treturn Response(outdata)\n","sub_path":"application/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"550630835","text":"import datetime\n\nfrom odoo import api, models\nfrom odoo.exceptions import UserError\nfrom odoo.tools.translate import _\n\n\nclass CalendarEventFinish(models.TransientModel):\n _inherit = 'calendar.event.finish'\n\n @api.multi\n def action_finish_calendar_event(self):\n \"\"\"Finaliza evento de calendário e cria entrada na planilha\n de horas (account.analytic.line).\n \n Raises:\n UserError -- quando o evento não possui projeto e/ou já esta finalizado.\n \n Returns:\n dict -- dict contendo 'ir.actions.act_window_close'.\n \"\"\"\n\n self.ensure_one()\n res = super(CalendarEventFinish, self).action_finish_calendar_event()\n\n ce = self.calendar_event_id\n\n if ce.project_id and ce.event_state == 'done':\n dt = datetime.datetime.strptime(ce.start_datetime,\n '%Y-%m-%d %H:%M:%S')\n\n partners = [item.id for item in ce.partner_ids\n if item.parent_id == ce.company_partner_id]\n\n users = self.env['res.users'].search([('partner_id', 'in', partners)])\n\n # Create Timesheet to any user in users list\n for user in users:\n values = self._get_account_analytic_line_values(\n user=user, calendar_event=ce, start_datetime=dt)\n\n # Utilizamos sudo a fim de permitir que um usuario\n # crie entradas para outros usuarios\n self.env['account.analytic.line'].sudo().create(values)\n else:\n raise UserError(_(\"To finish this event, it must be in 'Open' \"\n \"state and select a project\"))\n\n return res\n\n @api.multi\n def _get_account_analytic_line_values(self, user, calendar_event, start_datetime):\n \"\"\"Retorna valores para serem usados na criação de uma entrada na planilha\n de dados (account.analytic.line).\n\n Arguments:\n user {res.users} -- Usuario cujo entrada sera atribuida.\n calendar_event {calendar.event} -- Calendar Event para ser usado na entrada.\n start_datetime {datetime} -- objeto datetime com data e hora do evento.\n\n Returns:\n dict -- Dict com valores da account.analytic.entry.\n \"\"\"\n\n self.ensure_one()\n values = {\n 'name': calendar_event.name,\n 'date': start_datetime.date(),\n 'user_id': user.id,\n 'customer_partner_id': calendar_event.customer_partner_id.id,\n 'company_id': calendar_event.user_id.company_id.id,\n 'project_id': calendar_event.project_id.id,\n 'unit_amount': calendar_event.event_duration,\n 'calendar_event_id': calendar_event.id,\n }\n\n if calendar_event.task_id:\n values['task_id'] = calendar_event.task_id.id\n values['project_task_type_id'] = calendar_event.task_id.stage_id.id\n\n return values\n","sub_path":"calendar_event_timesheet/wizards/calendar_event_finish.py","file_name":"calendar_event_finish.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"167045530","text":"import re\r\n\r\ndef correct(query):\r\n \r\n add=\"C:\\\\Users\\\\DELL\\\\Desktop\\\\Project\\\\Bing\\\\paragraph\\\\\"+query+\".txt\"\r\n add1=\"C:\\\\Users\\\\DELL\\\\Desktop\\\\Project\\\\Bing\\\\paragraph1\\\\\"+query+\".txt\"\r\n file = open(add,'r',encoding=\"utf-8\")\r\n file1= open(add1,'a',encoding=\"utf-8\")\r\n i=0\r\n s=\"\"\r\n while(True):\r\n r=file.readline()\r\n s+=r\r\n i+=1\r\n if(i==100 or r==\"\"):\r\n i=0\r\n s=re.sub(\"[\\{(<\\[].*?[\\>)}\\]]\", \"\", s)\r\n file1.write(s)\r\n s=\"\"\r\n #\\{([^()]|())*\\} \r\n if(r==\"\"):\r\n break\r\n \r\ncorrect(\"swine flu vaccine\")\r\n","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"343484730","text":"from math import log2, acos, sqrt, pi\nimport turtle\n\n\nclass ListNew(list):\n @property\n def last_index(self):\n return len(self) - 1\n\n def __gt__(self, other):\n return len(self) > other\n\n\nclass PriorityQueue:\n def __init__(self, items=[]):\n self.contents = ListNew()\n for item in items:\n self.contents.append(item)\n self.heapify_up()\n\n def __bool__(self):\n return bool(self.contents)\n\n @property\n def last(self):\n return self.contents.last_index\n\n def last_element(self):\n return self.contents[self.last]\n\n @staticmethod\n def left_child(i):\n return 2 * i + 1\n\n @staticmethod\n def right_child(i):\n return 2 * i + 2\n\n @staticmethod\n def parent(i):\n return (i - 1)//2\n\n def heapify_up(self):\n new = self.last\n while new >= 1:\n parent = self.parent(new)\n if self.contents[new] < self.contents[parent]:\n self.contents[new], self.contents[parent] = self.contents[parent], self.contents[new]\n new = parent\n else:\n return\n\n def search(self, node):\n return self.contents.index(node)\n\n def insert(self, item):\n self.contents.append(item)\n self.heapify_up()\n\n def delete(self, node=None):\n if node is None:\n root = self.contents[0]\n self.contents[0], self.contents[self.last] = self.contents[self.last], self.contents[0]\n self.contents.pop()\n self.heapify_down()\n return root\n else:\n index = self.search(node)\n self.contents[index], self.contents[self.last] = self.contents[self.last], self.contents[index]\n self.contents.pop()\n self.heapify_down(index)\n\n def heapify_down(self, index=0):\n mini = index\n while index < len(self.contents):\n left = self.left_child(index)\n right = self.right_child(index)\n if left < len(self.contents) and right < len(self.contents):\n if self.contents[right] < self.contents[left]:\n min_child_index = right\n else:\n min_child_index = left\n if self.contents[min_child_index] <= self.contents[index]:\n mini = min_child_index\n elif left < len(self.contents):\n if self.contents[left] <= self.contents[index]:\n mini = left\n if index == mini:\n return\n self.contents[index], self.contents[mini] = self.contents[mini], self.contents[index]\n index = mini\n\n def peek(self):\n return self.contents[0]\n\n\nclass HuffmanTree:\n class Node:\n def __init__(self, value=None, probability=None, l_child=None, r_child=None):\n self.value = value\n self.probability = probability\n self.l_child = l_child\n self.r_child = r_child\n\n def __eq__(self, other):\n return self.probability == other\n\n def __lt__(self, other):\n return self.probability < other\n\n def __le__(self, other):\n return self.probability <= other\n\n def __gt__(self, other):\n return self.probability > other\n\n def __ge__(self, other):\n return self.probability >= other\n\n def __init__(self):\n self.root = None\n\n class HuffmanEncoder:\n @staticmethod\n def get_frequency(text):\n count = {}\n all_sum = 0\n for character in text:\n count[character] = count.get(character, 0) + 1\n all_sum += 1\n # all_sum = sum(count[ch] for ch in count)\n return [(ch, count[ch] / all_sum) for ch in count]\n\n def huffman_coding(self, symbols):\n queue = PriorityQueue()\n for symbol, frequency in symbols:\n queue.insert(self.Node(value=symbol, probability=frequency))\n while queue.contents > 1:\n # for node in queue.contents:\n # print(node.value, node.probability, end=\" \")\n # print()\n node1 = queue.delete()\n node2 = queue.delete()\n parent_node = self.Node(value=None, probability=(node1.probability + node2.probability),\n l_child=node2, 
r_child=node1)\n queue.insert(parent_node)\n self.root = queue.delete()\n return self.root\n\n def _get_code(self, cur_node, code=\"\", codes_array=[]):\n\n if cur_node is not None:\n if cur_node.value is not None:\n codes_array.append((cur_node.value, code))\n if cur_node.l_child is not None:\n code += \"0\"\n self._get_code(cur_node.l_child, code, codes_array)\n code = code[:-1]\n if cur_node.r_child is not None:\n code += \"1\"\n self._get_code(cur_node.r_child, code, codes_array)\n return codes_array\n\n def get_codes(self):\n if self.root is not None:\n return sorted(self._get_code(self.root), key=lambda pair: pair[0])\n\n @staticmethod\n def entropy(symbols):\n return -sum(probability * log2(probability) for symbol, probability in symbols)\n\n @staticmethod\n def average_len(codes, symbols):\n return sum(probability[1] * len(symbol[1]) for symbol, probability in zip(codes, symbols))\n\n def _draw(self, cur_node, pen, origin, floors):\n if cur_node is not None:\n pen.penup()\n pen.setposition(origin)\n pen.pendown()\n if cur_node.value is not None:\n pen.write(cur_node.value)\n else:\n pen.write(\"{:0.2f}\".format(cur_node.probability))\n node_dist = 32\n distance = node_dist * sqrt((1 + ((2 ** abs(floors) * 13) ** 2)/(5*node_dist**2)))\n # distance = sqrt(42*log2(abs(floors**2))/(0.34*5))\n angle = 180/pi * acos(node_dist/distance)\n if cur_node.l_child is not None:\n pen.setposition(origin)\n # turtle.pendown()\n pen.right(angle)\n pen.forward(distance)\n left = pen.position()\n pen.left(angle)\n if cur_node.l_child is not None:\n self._draw(cur_node.l_child, pen, left, floors - 0.85)\n if cur_node.r_child is not None:\n pen.penup()\n pen.setposition(origin)\n pen.pendown()\n pen.left(angle)\n pen.forward(distance)\n right = pen.position()\n pen.right(angle)\n if cur_node.r_child is not None:\n self._draw(cur_node.r_child, pen, right, floors - 0.85)\n\n def draw(self, alphabet):\n if self.root is not None:\n pen = turtle.Turtle()\n pen.hideturtle()\n pen.speed(1)\n pen.setheading(-90)\n pen.penup()\n pen.goto(0, 260)\n pen.pendown()\n turtle.Screen().screensize(900, 900)\n self._draw(self.root, pen, pen.position(), sqrt(len(alphabet)))\n\n def encode(self, text):\n freq = self.HuffmanEncoder.get_frequency(text)\n\n # print(freq)\n self.huffman_coding(freq)\n codes = self.get_codes()\n # print(max(len(code[1]) for code in codes))\n # print(codes)\n # print(HuffmanTree.average_len(codes, freq))\n encoded_string = self._encode(text, codes)\n return encoded_string\n\n def _encode(self, text, codes):\n encoded_string = \"\"\n dic = {code[0]: code[1] for code in codes}\n for ch in text:\n if ch in dic:\n encoded_string += dic[ch]\n return encoded_string\n\n def decode(self, encoded_string):\n codes = self.get_codes()\n decoded_string = \"\"\n dic = {code[1]: code[0] for code in codes}\n buffer = \"\"\n for ch in encoded_string:\n buffer += ch\n if buffer in dic:\n decoded_string += dic[buffer]\n buffer = \"\"\n return decoded_string\n\n\ndef main():\n with open(\"alice29.txt\", 'r') as input_f:\n data = input_f.read()\n data.rstrip()\n huf_tree = HuffmanTree()\n text = data\n encoded = huf_tree.encode(text)\n print(len(text) * 4)\n print(len(encoded))\n decoded = huf_tree.decode(encoded)\n print(decoded == data)\n # source = [(\"a_1\", 0.25), (\"a_2\", 0.25), (\"a_3\", 0.125), (\"a_4\", 0.125),\n # (\"a_5\", 0.125), (\"a_6\", 0.0625), (\"a_7\", 0.0625)]\n\n # source = [(\"а\", 0.064), (\"б\", 0.015), (\"в\", 0.039), (\"г\", 0.014),\n # (\"д\", 0.026), (\"е ё \", 0.074), (\"ж\", 0.008),\n 
# (\"з\", 0.015), (\"и\", 0.064), (\"й\", 0.010), (\"к\", 0.029),\n # (\"л\", 0.036), (\"м\", 0.026), (\"н\", 0.056),\n # (\"о\", 0.096), (\"п\", 0.024), (\"р\", 0.041), (\"с\", 0.047),\n # (\"т\", 0.056), (\"у \", 0.021), (\"ф\", 0.020), (\"х\", 0.009),\n # (\"ц\", 0.004), (\"ч\", 0.013), (\"ш\", 0.006), (\"щ \", 0.003),\n # (\"ъ ь\", 0.015), (\"ы\", 0.016), (\"э \", 0.003), (\"ю\", 0.007), (\"я\", 0.019), (\"-\", 0.124)]\n\n # source = [(\"a_1\", 0.4), (\"a_2\", 0.15), (\"a_3\", 0.15), (\"a_4\", 0.15), (\"a_5\", 0.15)]\n\n # huf.huffman_coding([(\"a_1\", 0.4), (\"a_2\", 0.15), (\"a_3\", 0.15), (\"a_4\", 0.15), (\"a_5\", 0.15)])\n # print(huf.entropy([(\"a_1\", 0.4), (\"a_2\", 0.15), (\"a_3\", 0.15), (\"a_4\", 0.15), (\"a_5\", 0.15)]))\n # huf.huffman_coding(source)\n # print(\"entropy is \" + str(HuffmanTree.entropy(source)) + \" bits\")\n # array_of_codes = huf.get_codes()\n # print(HuffmanTree.average_len(array_of_codes, source))\n # print(\"array of codes:\", array_of_codes)\n # huf.draw(source)\n # turtle.done()\n\n\nmain()\n","sub_path":"Huffman.py","file_name":"Huffman.py","file_ext":"py","file_size_in_byte":9902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"317023200","text":"from django.shortcuts import render, HttpResponse\nfrom firstapp.models import Article\nimport MySQLdb\n# from django.template import Context, Template\n# Create your views here.\n\n\ndef index(request):\n coon = MySQLdb.connect(\n host='localhost',\n port=3306,\n user='root',\n password='123456',\n db='django4',\n charset='utf8'\n )\n\n cursor = coon.cursor()\n cursor.execute('select * from firstapp_article')\n # count = cursor.execute('select * from firstapp_article')\n # print(count)\n results = cursor.fetchall()\n # print(results)\n article_list = []\n for result in results:\n article_list.append(\n {\n 'title': result[1],\n 'content': result[2]\n }\n )\n # print(article_list)\n context = {}\n context['article_list'] = article_list\n return render(request, 'first_web_2.html', context)\n","sub_path":"Level4Code/lesson4firstDjango/LessonCode/root/firstsite/firstapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"636876599","text":"import os\nPROPAGATE_EXCEPTIONS = True\nHOST = os.environ.get(\"HOST\",\"localhost\")\nPORT = int(os.environ.get('PORT', 5000))\nSQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL', 'mysql://root:admin@localhost/metablog')\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nDATABASE_URL = 'postgres://hkbpscejzgkloi:Ie1vb9I9w26EEVWzhLAmSpFTt6@ec2-107-21-101-67.compute-1.amazonaws.com:5432/dbeopj0ffollee'\n\nWTF_CSRF_ENABLED = True\nSECRET_KEY = '7shenron@!'\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"501795323","text":"from pyspark import SparkContext\n\nsc = SparkContext(appName=\"demo03\")\n\nbooks = sc.textFile(\"/home/spark/hadoop-2.7.3/LICENSE.txt\")\n\n\ndef parse_book(line):\n try:\n words = line.split(\",\")\n return (words[3], float(words[4]))\n except:\n return ()\n\n\nresults1 = books.map(parse_book)\\\n .filter(lambda sp: len(sp) > 0)\\\n .reduceByKey(lambda acc,price: acc + price)\\\n .collect()\n\nprint(results1)\n\n\nresults2 = books.map(parse_book)\\\n .filter(lambda sp: len(sp) > 0)\\\n .aggregateByKey(0, lambda x,y: x+y, lambda x,y: x+y)\\\n .collect()\n\nprint(results2)\n\nsc.stop()\n","sub_path":"spark/pyspark/rdd/demo03.py","file_name":"demo03.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"262010280","text":"#coding=utf-8\nimport urllib2\nimport urllib\nimport sys\nimport json\nfrom bs4 import BeautifulSoup\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nurl = 'https://www.douyu.com/directory'\n\ndef GetDirectory():\n response = urllib2.urlopen(url)\n content = response.read()\n list_href = []\n list_name = []\n soup = BeautifulSoup(content, \"html.parser\")\n for h in soup.find_all('a','thumb'):\n list_href.append(h.get('href'))\n list_name.append(h.find('p').string)\n\n dic = dict(zip(list_name,list_href))\n return dic\n\n #print soup.prettify()\n\ndef GetFirstPage(directory):\n douyu = \"https://www.douyu.com\"\n for item in directory.itervalues():\n u = douyu + item\n responses = urllib2.urlopen(u)\n contents = responses.read()\n soups = BeautifulSoup(contents,\"html.parser\")\n listName = []\n listNum = []\n for p in soups.find_all(\"div\",\"mes\"):\n listName.append(p.find('h3').string)\n listNum.append(p.find(\"span\",\"dy-num fr\").string)\n #dic = dict(zip(listName,listNum))\n with open(\"E:\\douyu.txt\",'a') as f:\n f.write(str(listName))\n f.close()\n #print json.dumps(dic,ensure_ascii=False, encoding='UTF-8')\n\n\nif __name__ == '__main__':\n directory = GetDirectory()\n GetFirstPage(directory)\n","sub_path":"douyu.py","file_name":"douyu.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"261381320","text":"from fints2ledger.transaction_retriever import TRetriever\nfrom fints.client import FinTS3PinTanClient\nfrom fints2ledger.csv_converter import CsvConverter\nfrom mt940.models import Date\n\n\nclass Fints2Csv:\n def __init__(self, config):\n self.config = config\n\n def retrieveAndSave(self):\n client = FinTS3PinTanClient(\n self.config[\"fints\"][\"blz\"], # Your bank's BLZ\n self.config[\"fints\"][\"account\"], # your account number\n self.config[\"fints\"][\"password\"],\n # e.g. 'https://fints.ing-diba.de/fints/'\n self.config[\"fints\"][\"endpoint\"]\n )\n\n retriever = TRetriever(client, self.config[\"fints\"][\"account\"])\n converter = CsvConverter(self.config[\"fints\"][\"csv_separator\"])\n\n csv_output = \"\\n\".join(map(lambda transaction: converter.convert(\n transaction), retriever.get_hbci_transactions(self.config[\"fints\"][\"start\"], Date.today())))\n\n with open(self.config[\"files\"][\"csv_file\"], 'w') as f:\n f.write(converter.get_headline())\n f.write(\"\\n\")\n f.write(csv_output)\n","sub_path":"fints2ledger/fints2csv.py","file_name":"fints2csv.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"554380985","text":"import random\nimport collections\n\nPALOS = ['Espadas', 'Corazones', 'Diamantes', 'Treboles']\nVALORES = ['As', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n\ndef crear_baraja():\n barajas = []\n for palo in PALOS:\n\n for valor in VALORES:\n barajas.append((palo, valor))\n \n return barajas\n\n\ndef obtener_mano(barajas, tamano_mano):\n return random.sample(barajas, tamano_mano)\n \n\ndef coincidencias(mano, tamano_coincidencia):\n num_coincidencias = 0\n valores = []\n for carta in mano:\n valores.append(carta[1])\n counter = dict(collections.Counter(valores))\n for i in counter.values():\n if i == tamano_coincidencia:\n num_coincidencias += 1\n return num_coincidencias\n\n\ndef escalera(mano):\n valores = []\n for carta in mano:\n valores.append(carta[1])\n for i, val in enumerate(valores):\n if val == 'As':\n valores[i] = 1\n elif val == 'J':\n valores[i] = 11\n elif val == 'Q':\n valores[i] = 12\n elif val == 'K':\n valores[i] = 13\n else:\n valores[i] = int(val)\n \n valores = sorted(valores)\n escalera = True\n for i in range(len(valores)-1):\n if i == 0:\n if valores[0]==1 and valores[len(valores)-1]==13:\n pass\n elif valores[i]+1 != valores[i+1]:\n escalera = False\n break\n\n return escalera\n\n\ndef color(mano):\n palos = []\n for carta in mano:\n palos.append(carta[0])\n coincidencia = True\n for i in range(len(palos)-1):\n if palos[i] != palos[i+1]:\n coincidencia = False\n\n return coincidencia\n \n\ndef probabilidad_pares(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if coincidencias(mano, 2):\n contar += 1\n return contar/intentos\n \n\ndef probabilidad_un_par(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if coincidencias(mano, 2)==1:\n contar += 1\n return contar/intentos\n\n\ndef probabilidad_dos_pares(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if coincidencias(mano, 2)==2:\n contar += 1\n return contar/intentos\n\n\ndef probabilidad_trio(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if coincidencias(mano, 3):\n contar += 1\n return contar/intentos\n\ndef probabilidad_escalera(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if escalera(mano):\n contar += 1\n return contar/intentos\n\n\ndef probabilidad_color(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if color(mano):\n contar += 1\n return contar/intentos\n\n\ndef probabilidad_full(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if coincidencias(mano, 2) and coincidencias(mano, 3):\n contar += 1\n return contar/intentos\n\n\ndef probabilidad_poker(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if coincidencias(mano, 4):\n contar += 1\n return contar/intentos\n\n\ndef probabilidad_escalera_color(intentos, barajas, tamano_mano):\n contar = 0\n for _ in range(intentos):\n mano = obtener_mano(barajas, tamano_mano)\n if escalera(mano) and color(mano):\n contar += 1\n return contar/intentos\n\n\ndef probabilidad_escalera_real(intentos, barajas, tamano_mano):\n contar = 0\n for _ in 
range(intentos):\n        mano = obtener_mano(barajas, tamano_mano)\n        real = False\n        for i, j in mano:\n            # the deck stores the ace as 'As', so the original check against 'A' never matched\n            if j == 'As':\n                real = True\n        if escalera(mano) and color(mano) and real:\n            contar += 1\n    return contar/intentos\n\n\nif __name__ == \"__main__\":\n    barajas = crear_baraja()\n    tamano_mano = int(input('How many cards do you want: '))\n    intentos = int(input('How many hands do you want to simulate: '))\n    print(f'Probability of a pair: {probabilidad_pares(intentos, barajas, tamano_mano)}')\n    print(f'Probability of exactly one pair: {probabilidad_un_par(intentos, barajas, tamano_mano)}')\n    print(f'Probability of two pairs: {probabilidad_dos_pares(intentos, barajas, tamano_mano)}')\n    print(f'Probability of three of a kind: {probabilidad_trio(intentos, barajas, tamano_mano)}')\n    print(f'Probability of a straight: {probabilidad_escalera(intentos, barajas, tamano_mano)}')\n    print(f'Probability of a flush: {probabilidad_color(intentos, barajas, tamano_mano)}')\n    print(f'Probability of a full house: {probabilidad_full(intentos, barajas, tamano_mano)}')\n    print(f'Probability of four of a kind: {probabilidad_poker(intentos, barajas, tamano_mano)}')\n    print(f'Probability of a straight flush: {probabilidad_escalera_color(intentos, barajas, tamano_mano)}')\n    print(f'Probability of a royal flush: {probabilidad_escalera_real(intentos, barajas, tamano_mano)}')","sub_path":"Estadistica_computacional/decks.py","file_name":"decks.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"339109475","text":"from django.shortcuts import render,HttpResponse\nfrom django.http import Http404\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom data.models import UnemploymentByStateMonthly, UsState, NatalityByStateYearly, MortalityByStateYearly\nfrom forms import UsStateSelectForm, kmeansNumSamplesForm, YearlyMapAggregationForm\nfrom django.template import RequestContext\nfrom django.db.models import Avg, Max, Min, Sum\nimport numpy\nimport json as simplejson\n# Import Michael's implementation for kmeans\nimport kmeans\ndef index(request):\n\treturn render_to_response('visualization/index.html', {\n\t\t}, context_instance=RequestContext(request))\n\n\ndef timeseries_unemployment(request):\n\tstate=None\n\tdata = None\n\tform = UsStateSelectForm()\n\tif request.method == 'POST':\n\t\tform = UsStateSelectForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tstates = form.cleaned_data['name']\n\t\t\tstates_id = [ int(state.id) for state in states ]\n\t\t\tdata = UnemploymentByStateMonthly.objects.filter(state__id__in = states_id ).order_by('state','year','month')\n\t\n\treturn render_to_response('visualization/linechart.html', {\n\t\t'data': data,\n\t\t'form':form,\n\t\t'state': state,\n\t\t'title': 'Unemployment (monthly)',\n\t\t'subtitle': '',\n\t\t'y_axis': '%s of state population' % '%',\n\t\t}, context_instance=RequestContext(request))\n\ndef timeseries_natality(request,variable='num_births'):\n\tstate=None\n\tdata = None\n\tform = UsStateSelectForm()\n\ttitle=yaxis=None\n\tif request.method == 'POST':\n\t\tform = UsStateSelectForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tstates = form.cleaned_data['name']\n\t\t\tstates_id = [ int(state.id) for state in states ]\n\n\t\t\tif (len(states_id)>0):\n\t\t\t\t# Aggregate by state and year\n\t\t\t\tif variable in ('num_births','birth_rate','fertility_rate'):\n\t\t\t\t\tdata = NatalityByStateYearly.objects.filter(state__id__in= states_id).values('state','year').select_related('state__name').annotate(value=Sum(variable))\n\t\t\t\t\tif variable=='num_births':\n\t\t\t\t\t\tdata = data.filter(num_births__isnull=False ).order_by('state','year').values('state','state__name','year','value')\n\t\t\t\t\t\ttitle = 'Number of Births (yearly)'\n\t\t\t\t\t\tyaxis='births'\n\t\t\t\t\telif variable=='birth_rate':\n\t\t\t\t\t\tdata = data.filter(birth_rate__isnull=False ).order_by('state','year').values('state','state__name','year','value')\n\t\t\t\t\t\ttitle = 'Births rate (yearly)'\n\t\t\t\t\t\tyaxis='birth rate (per 1000)'\n\t\t\t\t\telif variable=='fertility_rate':\n\t\t\t\t\t\ttitle = 'Fertility rate (yearly)' \n\t\t\t\t\t\tyaxis='fertility rate (per 1000)'\n\t\t\t\t\t\tdata = data.filter(fertility_rate__isnull=False ).order_by('state','year').values('state','state__name','year','value')\n\t\t\t\telse:\n\t\t\t\t\traise Http404\n\t\t\telse:\n\t\t\t\tdata = None\n\t\t\t\n\t\n\treturn render_to_response('visualization/linechart.html', {\n\t\t'data': data,\n\t\t'form':form,\n\t\t'state': state,\n\t\t'title': title,\n\t\t'subtitle': '',\n\t\t'yaxis':yaxis\n\t\t}, context_instance=RequestContext(request))\n\n\ndef timeseries_mortality(request,variable='num_deaths'):\n\tstate=None\n\tdata = None\n\tform = UsStateSelectForm()\n\ttitle=yaxis=None\n\tif request.method == 'POST':\n\t\tform = UsStateSelectForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tstates = form.cleaned_data['name']\n\t\t\tstates_id = [ int(state.id) for state in states ]\n\t\t\tif (len(states_id)>0):\n\t\t\t\t# Aggregate by state and 
year\n\t\t\t\tif variable in ('num_deaths','crude_rate'):\n\t\t\t\t\tdata = MortalityByStateYearly.objects.filter(state__id__in= states_id).values('state','year').select_related('state__name').annotate(value=Sum(variable))\n\t\t\t\t\tif variable=='num_deaths':\n\t\t\t\t\t\tdata = data.filter(num_deaths__isnull=False ).order_by('state','year').values('state','state__name','year','value')\n\t\t\t\t\t\ttitle = 'Number of Deaths (yearly)'\n\t\t\t\t\t\tyaxis='Deaths'\n\t\t\t\t\telif variable=='crude_rate':\n\t\t\t\t\t\tdata = data.filter(crude_rate__isnull=False ).order_by('state','year').values('state','state__name','year','value')\n\t\t\t\t\t\ttitle = 'Crude rate (yearly)'\n\t\t\t\t\t\tyaxis='Crude rate (per 1000)'\n\t\t\t\telse:\n\t\t\t\t\traise Http404\n\t\t\telse:\n\t\t\t\tdata = None\n\n\treturn render_to_response('visualization/linechart.html', {\n\t\t'data': data,\n\t\t'form':form,\n\t\t'state': state,\n\t\t'title': title,\n\t\t'subtitle': '',\n\t\t'yaxis':yaxis\n\t\t}, context_instance=RequestContext(request))\n\ndef association_mortality(request):\n\tstate=None\n\tdata = None\n\tform = UsStateSelectForm()\n\ttitle=yaxis=None\n\tif request.method=='POST':\n\t\tform = UsStateSelectForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tstates = form.cleaned_data['name']\n\t\t\tstates_id = [ int(state.id) for state in states ]\n\t\t\tif (len(states_id)>0):\n\t\t\t\t# The original code referenced an undefined name 'variable' here;\n\t\t\t\t# crude_rate is the only field this view filters on, so it is\n\t\t\t\t# assumed to be the intended variable.\n\t\t\t\tvariable = 'crude_rate'\n\t\t\t\tdata = MortalityByStateYearly.objects.filter(state__id__in= states_id).values('state','year').select_related('state__name').annotate(value=Sum(variable))\n\t\t\t\tdata = data.filter(crude_rate__isnull=False ).order_by('state','year').values('state','state__name','year','value')\n\n\treturn render_to_response('visualization/linechart.html', {\n\t\t'data': data,\n\t\t'form':form,\n\t\t'state': state,\n\t\t'title': title,\n\t\t'subtitle': '',\n\t\t'yaxis':yaxis\n\t\t}, context_instance=RequestContext(request))\n\ndef kmeans_test(request):\n\tdata = None\n\tform = kmeansNumSamplesForm()\n\tk=None\n\tsample_size=None\n\tgrouped_data = None\n\tclusters = None\n\terror_list = None\n\tif request.method=='POST':\n\t\tform = kmeansNumSamplesForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tsample_size = int(form.cleaned_data['num_samples'])\n\t\t\tk = int(form.cleaned_data['k'])\n\t\t\t# Generate random data\n\t\t\tdata = numpy.random.random((sample_size, 2))\n\t\t\t# Calculate kmeans\n\t\t\tif form.cleaned_data['method']=='Basic':\n\t\t\t\tgrouped_data, clusters, error_list = kmeans.kmeans(data,num_clusters=k, min_error=0.01, max_iter=100)\n\t\t\telse:\n\t\t\t\tgrouped_data, clusters, error_list = kmeans.bisecting_kmeans(data,k=k, min_error=0.01, max_iter=50)\n\treturn render_to_response('visualization/kmeans.html', {\n\t\t'data': grouped_data,\n\t\t'clusters': clusters,\n\t\t'error_list': error_list,\n\t\t'form':form,\n\t\t'k': k,\n\t\t'sample_size': sample_size,\n\t\t}, context_instance=RequestContext(request))\n\n# Return the year choices using the minimum and maximum year from a model that has year and state fields.\ndef get_year_choices(queryset,variable):\n\tnull_filter = {variable+'__isnull':False}\n\tmax_year = queryset.filter(**null_filter).aggregate(Max('year'))\n\tmin_year = queryset.filter(**null_filter).aggregate(Min('year'))\n\tyear_choices= [(i,i) for i in range(int(min_year['year__min']),int(max_year['year__max'])+1)]\n\tyear_choices = tuple(year_choices)\n\treturn year_choices\n\n# Responsible for returning the map for Unemployment, Natality, and Mortality.\ndef map_variable(request, 
variable,model):\n\tyear_choices = get_year_choices(model.objects,variable)\n\t# The form value is read back as 'aggregation_method' below, so the initial\n\t# value is keyed the same way (the original keyed it as 'method').\n\tform = YearlyMapAggregationForm(initial={'aggregation_method':\"mean\"},year_choices=year_choices)\n\tmin_year = form.getMinYear()\n\tmax_year = form.getMaxYear()\n\tform.fields['starting_year'].initial = min_year\n\tform.fields['ending_year'].initial = max_year\n\tmethod='mean'\n\tlegend=''\n\tif request.method=='POST':\n\t\tform = YearlyMapAggregationForm(request.POST,year_choices=year_choices)\n\t\tif form.is_valid():\n\t\t\tif form.cleaned_data[\"starting_year\"]!='':\n\t\t\t\tmin_year = form.cleaned_data[\"starting_year\"]\n\t\t\tif form.cleaned_data[\"ending_year\"]!='':\n\t\t\t\tmax_year = form.cleaned_data[\"ending_year\"]\n\t\t\tmethod = form.cleaned_data[\"aggregation_method\"]\n\t\telse:\n\t\t\treturn render_to_response('visualization/map.html', {\n\t\t\t\t'data': None,\n\t\t\t\t'form':form,\n\t\t\t\t'title':\"Please check form errors\",\n\t\t\t\t}, context_instance=RequestContext(request))\n\n\tdata = model.objects.filter(year__gte=min_year,year__lte=max_year)\n\t# Remove null values from the query (nulls occur in the natality and mortality rate fields)\n\t# and set the title and legend according to the variable being plotted.\n\tif variable == 'value':\n\t\tlegend=\"%\"\n\t\ttitle = \"Unemployed population (Yearly) [\"+method+\"]\"\n\t\tdataset='unemployment'\n\telif variable == 'num_births':\n\t\tdata = data.filter(num_births__isnull=False)\n\t\ttitle = \"Number of births (Yearly) [\"+method+\"]\"\n\t\tlegend='births'\n\t\tdataset='natality'\n\telif variable == 'birth_rate':\n\t\tdata = data.filter(birth_rate__isnull=False)\n\t\ttitle = \"Birth rate (Yearly) [\"+method+\"]\"\n\t\tlegend='per 1000'\n\t\tdataset='natality'\n\telif variable == 'fertility_rate':\n\t\tdata = data.filter(fertility_rate__isnull=False)\n\t\ttitle = \"Fertility rate (Yearly) [\"+method+\"]\"\n\t\tlegend = \"per 1000\"\n\t\tdataset='natality'\n\telif variable == 'num_deaths':\n\t\tdata = data.filter(num_deaths__isnull=False)\n\t\ttitle = \"Deaths in the US (Yearly) [\"+method+\"]\"\n\t\tlegend = \"deaths\"\n\t\tdataset='mortality'\n\telif variable == 'crude_rate':\n\t\tdata = data.filter(crude_rate__isnull=False)\n\t\ttitle = \"Crude rate (Yearly) [\"+method+\"]\"\n\t\tlegend = \"per 1000\"\n\t\tdataset='mortality'\n\telse:\n\t\traise Http404\n\tdata = data.select_related('state__code').values('state','state__code')\n\n\tif method=='mean':\n\t\tdata=data.annotate(value=Avg(variable))\n\telif method==\"min\":\n\t\tdata=data.annotate(value=Min(variable))\n\telif method==\"max\":\n\t\tdata=data.annotate(value=Max(variable))\n\telif method==\"sum\":\n\t\tdata=data.annotate(value=Sum(variable))\n\telse:\n\t\traise Http404\n\n\t# Get min and max value for display in highcharts\n\tmin_val = data.aggregate(Min('value'))['value__min']\n\tmax_val = data.aggregate(Max('value'))['value__max']\n\treturn render_to_response('visualization/map.html', {\n\t\t'data': data,\n\t\t'form':form,\n\t\t'title':title,\n\t\t'legend':legend,\n\t\t'min_val':min_val,\n\t\t'max_val':max_val,\n\t\t'dataset':dataset,\n\t\t'variable':variable\n\t\t}, context_instance=RequestContext(request))\n\n","sub_path":"visualization/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"58"}
+{"seq_id":"626243193","text":"import requests\r\nimport os\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nimport time\r\n\r\n\r\nURL='http://h5.eqxiu.com/s/Cpmop6jW'\r\nUSERAGENT='Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/12.0 Mobile/15A372 Safari/604.1'\r\n\r\nSESSION=requests.Session()\r\nSESSION.headers={'User-Agent':USERAGENT}\r\n\r\nprofile = webdriver.FirefoxProfile()\r\nprofile.set_preference(\"general.useragent.override\", USERAGENT)\r\nDRIVER = webdriver.Firefox(firefox_profile=profile,executable_path='./geckodriver')\r\nDRIVER.set_window_size(480, 800)\r\n\r\n\r\n\r\npaths=URL.split('/')\r\n\r\nfor path in paths:\r\n if path !='':\r\n\r\n BASEPATH=path+'/'\r\n\r\ndef mk_dir(dir=''):\r\n try:\r\n os.mkdir(BASEPATH+dir)\r\n except Exception as e:\r\n # print(e)\r\n # print(str(e))\r\n pass\r\n\r\n\r\n\r\nclass Static:\r\n def __init__(self,tag):\r\n self.tag=tag\r\n self.old_tag_str=str(tag)\r\n self.new_tag_str=''\r\n self.url=''\r\n self.filetype=''\r\n self.type=''\r\n self.filename=''\r\n \r\n def save(self,name):\r\n print('-----------------SAVING-----------------')\r\n \r\n tag=self.tag\r\n try:\r\n self.url=tag['href']\r\n except KeyError:\r\n self.url=tag['src']\r\n except Exception as e:\r\n print(str(e))\r\n raise\r\n\r\n if self.url[:4] != 'http':\r\n self.url=URL + self.url\r\n\r\n \r\n try:\r\n r=SESSION.get(self.url)\r\n self.type=r.headers['Content-Type'].split('/')[0]\r\n if self.type == 'application':\r\n self.type= 'js'\r\n self.filetype='js'\r\n elif self.type == 'image':\r\n self.type= 'img'\r\n self.filetype=r.headers['Content-Type'].split('/')[1]\r\n elif self.type == 'text':\r\n self.filetype=r.headers['Content-Type'].split('/')[1]\r\n else:\r\n print(self.type)\r\n raise\r\n \r\n self.filename=name+'.'+self.filetype\r\n \r\n print('type:',self.type,'\\nfiletype',self.filetype,'\\nfilename',self.filename)\r\n \r\n mk_dir(self.type)\r\n \r\n with open(BASEPATH+self.type+'/'+self.filename,'wb',) as f:\r\n f.write(r.content)\r\n self.new_tag_str=self.old_tag_str.replace(self.url,self.type+'/'+self.filename)\r\n return True\r\n\r\n except Exception as e:\r\n print('ERROR IN FILE SAVEING')\r\n print(str(e))\r\n return False\r\n\r\n\r\nclass LocalStatic:\r\n def __init__(self,tag):\r\n self.tag=tag\r\n self.old_tag_str=str(tag)\r\n self.new_tag_str=''\r\n self.code=''\r\n self.type=''\r\n self.filename=''\r\n \r\n def fix(self,filename):\r\n tag=self.tag\r\n self.code=tag.text\r\n if tag.name=='script':\r\n self.type='js'\r\n elif tag.name=='style':\r\n self.type='css'\r\n else :\r\n print('UNKNOW INSTATIC TYPE')\r\n \r\n self.filename=filename+'.'+self.type\r\n\r\n # 创建类型路径\r\n mk_dir(self.type)\r\n \r\n\r\n def save(self):\r\n print('-----------------SAVING-----------------')\r\n try:\r\n print('type:',self.type,'\\nfilename',self.filename)\r\n # 保存文件\r\n with open(BASEPATH+self.type+'/'+self.filename,'w',) as f:\r\n f.write(self.code)\r\n # 将url指向文件\r\n if self.type == 'js':\r\n self.new_tag_str=''\r\n elif self.type == 'css':\r\n self.new_tag_str=''\r\n else:\r\n print('UNKNOW INSTATIC TYPE')\r\n\r\n return True\r\n\r\n except Exception as e:\r\n print(str(e))\r\n return False\r\n\r\n\r\nif __name__ == '__main__':\r\n # print(BASEPATH)\r\n \r\n mk_dir()\r\n\r\n # r=SESSION.get(URL)\r\n # encode=r.apparent_encoding\r\n # # print(r.apparent_encoding)\r\n # r.encoding=encode\r\n # html=r.text\r\n\r\n DRIVER.get(URL)\r\n \r\n time.sleep(30)\r\n html=DRIVER.page_source\r\n 
DRIVER.quit()\r\n\r\n    soup=BeautifulSoup(html,'html.parser')\r\n\r\n    with open(BASEPATH + 'page_source_old.html','w',encoding='utf8') as f:\r\n        f.write(str(soup))\r\n\r\n    new_page_source=str(soup)\r\n\r\n    # collect the asset tags\r\n    tele_tags=[]\r\n    local_tages=[]\r\n    local_tages.extend(soup.find_all('style'))\r\n    tele_tags.extend(soup.find_all('link'))\r\n    tele_tags.extend(soup.find_all('img'))\r\n\r\n    for tag in soup.find_all('script'):\r\n        try:\r\n            print(tag['src'])\r\n            tele_tags.append(tag)\r\n        except KeyError:\r\n            # no src attribute: an inline script, handled as a local asset below\r\n            local_tages.append(tag)\r\n        except Exception as e:\r\n            print('UNKNOWN SCRIPT TYPE')\r\n            print(str(e))\r\n            raise\r\n\r\n    # download the remote assets and point their tags at the local copies\r\n    n=0\r\n    for tag in tele_tags:\r\n        static=Static(tag)\r\n        if static.save(str(n)):\r\n            new_page_source=new_page_source.replace(static.old_tag_str,static.new_tag_str)\r\n        n+=1\r\n\r\n    # dump the inline scripts/styles to files and rewrite their tags\r\n    for tag in local_tages:\r\n        local_static=LocalStatic(tag)\r\n        local_static.fix(str(n))\r\n        if local_static.save():\r\n            new_page_source=new_page_source.replace(local_static.old_tag_str,local_static.new_tag_str)\r\n        n+=1\r\n\r\n    # inject the control scripts; the tag markup in basejs and addonjs was\r\n    # stripped from this record during extraction, so the strings below are\r\n    # reconstructions (hypothetical file names, assumed '</head>' anchor),\r\n    # not the original markup\r\n    basejs='<script src=\"js/base.js\"></script>\\n<script src=\"js/addon.js\"></script>\\n'\r\n    new_page_source = new_page_source.replace('</head>',basejs+'</head>')\r\n    addonjs='''\r\n    <!-- reconstructed form; only the three placeholder strings survived\r\n         (default WeChat ID / default WeChat support name / default gender) -->\r\n    <div id=\"addon-form\">\r\n        <input type=\"text\" placeholder=\"默认微信号\">\r\n        <input type=\"text\" placeholder=\"默认微信号客服名称\">\r\n        <input type=\"text\" placeholder=\"默认性别\">\r\n    </div>\r\n    '''\r\n    new_page_source = new_page_source.replace('